Refactor: unify image handling in CanvasIO via helpers

Removed duplicate code from CanvasIO.ts and replaced it with unified helpers from ImageUtils.ts. All tensor-to-image conversions and image creation now use centralized utility functions for consistency and maintainability.
This commit is contained in:
Dariusz L
2025-08-09 03:07:18 +02:00
parent 64c5e49707
commit da37900b33
4 changed files with 291 additions and 383 deletions

View File

@@ -2,6 +2,7 @@ import { createCanvas } from "./utils/CommonUtils.js";
import { createModuleLogger } from "./utils/LoggerUtils.js";
import { showErrorNotification } from "./utils/NotificationUtils.js";
import { webSocketManager } from "./utils/WebSocketManager.js";
import { scaleImageToFit, createImageFromSource, tensorToImageData, createImageFromImageData } from "./utils/ImageUtils.js";
const log = createModuleLogger('CanvasIO');
export class CanvasIO {
constructor(canvas) {
@@ -247,17 +248,12 @@ export class CanvasIO {
async addInputToCanvas(inputImage, inputMask) {
try {
log.debug("Adding input to canvas:", { inputImage });
const { canvas: tempCanvas, ctx: tempCtx } = createCanvas(inputImage.width, inputImage.height);
if (!tempCtx)
throw new Error("Could not create temp context");
const imgData = new ImageData(new Uint8ClampedArray(inputImage.data), inputImage.width, inputImage.height);
tempCtx.putImageData(imgData, 0, 0);
const image = new Image();
await new Promise((resolve, reject) => {
image.onload = resolve;
image.onerror = reject;
image.src = tempCanvas.toDataURL();
});
// Use unified tensorToImageData for RGB image
const imageData = tensorToImageData(inputImage, 'rgb');
if (!imageData)
throw new Error("Failed to convert input image tensor");
// Create HTMLImageElement from ImageData
const image = await createImageFromImageData(imageData);
const bounds = this.canvas.outputAreaBounds;
const scale = Math.min(bounds.width / inputImage.width * 0.8, bounds.height / inputImage.height * 0.8);
const layer = await this.canvas.canvasLayers.addLayerWithImage(image, {
@@ -283,17 +279,10 @@ export class CanvasIO {
if (!tensor || !tensor.data || !tensor.width || !tensor.height) {
throw new Error("Invalid tensor data");
}
const { canvas, ctx } = createCanvas(tensor.width, tensor.height, '2d', { willReadFrequently: true });
if (!ctx)
throw new Error("Could not create canvas context");
const imageData = new ImageData(new Uint8ClampedArray(tensor.data), tensor.width, tensor.height);
ctx.putImageData(imageData, 0, 0);
return new Promise((resolve, reject) => {
const img = new Image();
img.onload = () => resolve(img);
img.onerror = (e) => reject(new Error("Failed to load image: " + e));
img.src = canvas.toDataURL();
});
const imageData = tensorToImageData(tensor, 'rgb');
if (!imageData)
throw new Error("Failed to convert tensor to image data");
return await createImageFromImageData(imageData);
}
catch (error) {
log.error("Error converting tensor to image:", error);
@@ -526,51 +515,17 @@ export class CanvasIO {
const len = maskOutput.data.length;
channels = Math.max(1, Math.floor(len / (width * height)));
}
// Create GRAYSCALE image from tensor; RGB = luminance, A = 255 (MaskTool.setMask reads luminance)
// Use unified tensorToImageData for masks
const maskImageData = tensorToImageData(maskOutput, 'grayscale');
if (!maskImageData)
throw new Error("Failed to convert mask tensor to image data");
// Create canvas and put image data
const { canvas: maskCanvas, ctx } = createCanvas(width, height, '2d', { willReadFrequently: true });
if (!ctx)
throw new Error("Could not create mask context");
const imgData = ctx.createImageData(width, height);
const arr = maskOutput.data;
const min = (maskOutput.min_val !== undefined) ? maskOutput.min_val : 0;
const max = (maskOutput.max_val !== undefined) ? maskOutput.max_val : 1;
const denom = (max - min) || 1;
const pixelCount = width * height;
for (let i = 0; i < pixelCount; i++) {
const baseIndex = i * channels;
let v;
if (channels === 1) {
v = arr[i];
}
else if (channels >= 3) {
// If image-like, compute luminance from RGB channels
const r = arr[baseIndex + 0] ?? 0;
const g = arr[baseIndex + 1] ?? 0;
const b = arr[baseIndex + 2] ?? 0;
v = 0.299 * r + 0.587 * g + 0.114 * b;
}
else {
v = arr[baseIndex] ?? 0;
}
let norm = (v - min) / denom;
if (!isFinite(norm))
norm = 0;
norm = Math.max(0, Math.min(1, norm));
const lum = Math.round(norm * 255);
const o = i * 4;
imgData.data[o] = lum; // R
imgData.data[o + 1] = lum; // G
imgData.data[o + 2] = lum; // B
imgData.data[o + 3] = 255; // A fixed (MaskTool computes alpha from luminance)
}
ctx.putImageData(imgData, 0, 0);
ctx.putImageData(maskImageData, 0, 0);
// Convert to HTMLImageElement
const maskImg = await new Promise((resolve, reject) => {
const img = new Image();
img.onload = () => resolve(img);
img.onerror = reject;
img.src = maskCanvas.toDataURL();
});
const maskImg = await createImageFromSource(maskCanvas.toDataURL());
// Respect fit_on_add (scale to output area)
const widgets = this.canvas.node.widgets;
const fitOnAddWidget = widgets ? widgets.find((w) => w.name === "fit_on_add") : null;
@@ -578,19 +533,7 @@ export class CanvasIO {
let finalMaskImg = maskImg;
if (shouldFit) {
const bounds = this.canvas.outputAreaBounds;
const scale = Math.min(bounds.width / maskImg.width, bounds.height / maskImg.height);
const scaledWidth = Math.max(1, Math.round(maskImg.width * scale));
const scaledHeight = Math.max(1, Math.round(maskImg.height * scale));
const { canvas: scaledCanvas, ctx: scaledCtx } = createCanvas(scaledWidth, scaledHeight, '2d', { willReadFrequently: true });
if (!scaledCtx)
throw new Error("Could not create scaled mask context");
scaledCtx.drawImage(maskImg, 0, 0, scaledWidth, scaledHeight);
finalMaskImg = await new Promise((resolve, reject) => {
const img = new Image();
img.onload = () => resolve(img);
img.onerror = reject;
img.src = scaledCanvas.toDataURL();
});
finalMaskImg = await scaleImageToFit(maskImg, bounds.width, bounds.height);
}
// Apply to MaskTool (centers internally)
if (this.canvas.maskTool) {
@@ -719,12 +662,7 @@ export class CanvasIO {
log.info(`Processing batch of ${batch.length} images from backend`);
for (let i = 0; i < batch.length; i++) {
const imgData = batch[i];
const img = new Image();
await new Promise((resolve, reject) => {
img.onload = resolve;
img.onerror = reject;
img.src = imgData.data;
});
const img = await createImageFromSource(imgData.data);
// Add image to canvas with unique name
await this.canvas.canvasLayers.addLayerWithImage(img, { name: `Batch Image ${i + 1}` }, addMode, this.canvas.outputAreaBounds);
log.debug(`Added batch image ${i + 1}/${batch.length} from backend`);
@@ -735,12 +673,7 @@ export class CanvasIO {
}
else if (inputData.input_image) {
// Handle single image (backward compatibility)
const img = new Image();
await new Promise((resolve, reject) => {
img.onload = resolve;
img.onerror = reject;
img.src = inputData.input_image;
});
const img = await createImageFromSource(inputData.input_image);
// Add image to canvas at output area position
await this.canvas.canvasLayers.addLayerWithImage(img, {}, addMode, this.canvas.outputAreaBounds);
log.info("Single input image added as new layer to canvas");
@@ -758,40 +691,18 @@ export class CanvasIO {
if (allowMask && !maskLoaded && hasMaskInput && inputData.input_mask) {
log.info("Processing input mask");
// Load mask image
const maskImg = new Image();
await new Promise((resolve, reject) => {
maskImg.onload = resolve;
maskImg.onerror = reject;
maskImg.src = inputData.input_mask;
});
const maskImg = await createImageFromSource(inputData.input_mask);
// Determine if we should fit the mask or use it at original size
const fitOnAddWidget2 = this.canvas.node.widgets.find((w) => w.name === "fit_on_add");
const shouldFit = fitOnAddWidget2 && fitOnAddWidget2.value;
let finalMaskImg = maskImg;
if (shouldFit && this.canvas.maskTool) {
// Scale mask to fit output area if fit_on_add is enabled
const bounds = this.canvas.outputAreaBounds;
const scale = Math.min(bounds.width / maskImg.width, bounds.height / maskImg.height);
// Create scaled mask canvas
const scaledWidth = Math.round(maskImg.width * scale);
const scaledHeight = Math.round(maskImg.height * scale);
const { canvas: scaledCanvas, ctx: scaledCtx } = createCanvas(scaledWidth, scaledHeight, '2d', { willReadFrequently: true });
if (!scaledCtx)
throw new Error("Could not create scaled mask context");
// Draw scaled mask
scaledCtx.drawImage(maskImg, 0, 0, scaledWidth, scaledHeight);
// Convert scaled canvas to image
const scaledMaskImg = new Image();
await new Promise((resolve, reject) => {
scaledMaskImg.onload = resolve;
scaledMaskImg.onerror = reject;
scaledMaskImg.src = scaledCanvas.toDataURL();
});
// Apply scaled mask to mask tool
this.canvas.maskTool.setMask(scaledMaskImg, true);
finalMaskImg = await scaleImageToFit(maskImg, bounds.width, bounds.height);
}
else if (this.canvas.maskTool) {
// Apply mask at original size
this.canvas.maskTool.setMask(maskImg, true);
// Apply to MaskTool (centers internally)
if (this.canvas.maskTool) {
this.canvas.maskTool.setMask(finalMaskImg, true);
}
this.canvas.maskAppliedFromInput = true;
// Save the mask state
@@ -903,51 +814,10 @@ export class CanvasIO {
}
}
convertTensorToImageData(tensor) {
try {
const shape = tensor.shape;
const height = shape[1];
const width = shape[2];
const channels = shape[3];
log.debug("Converting tensor:", {
shape: shape,
dataRange: {
min: tensor.min_val,
max: tensor.max_val
}
});
const imageData = new ImageData(width, height);
const data = new Uint8ClampedArray(width * height * 4);
const flatData = tensor.data;
const pixelCount = width * height;
for (let i = 0; i < pixelCount; i++) {
const pixelIndex = i * 4;
const tensorIndex = i * channels;
for (let c = 0; c < channels; c++) {
const value = flatData[tensorIndex + c];
const normalizedValue = (value - tensor.min_val) / (tensor.max_val - tensor.min_val);
data[pixelIndex + c] = Math.round(normalizedValue * 255);
}
data[pixelIndex + 3] = 255;
}
imageData.data.set(data);
return imageData;
}
catch (error) {
log.error("Error converting tensor:", error);
return null;
}
return tensorToImageData(tensor, 'rgb');
}
async createImageFromData(imageData) {
return new Promise((resolve, reject) => {
const { canvas, ctx } = createCanvas(imageData.width, imageData.height, '2d', { willReadFrequently: true });
if (!ctx)
throw new Error("Could not create canvas context");
ctx.putImageData(imageData, 0, 0);
const img = new Image();
img.onload = () => resolve(img);
img.onerror = reject;
img.src = canvas.toDataURL();
});
return createImageFromImageData(imageData);
}
async processMaskData(maskData) {
try {
@@ -1007,12 +877,7 @@ export class CanvasIO {
log.info(`Received ${result.images.length} new images, adding to canvas.`);
const newLayers = [];
for (const imageData of result.images) {
const img = new Image();
await new Promise((resolve, reject) => {
img.onload = resolve;
img.onerror = reject;
img.src = imageData;
});
const img = await createImageFromSource(imageData);
let processedImage = img;
// If there's a custom shape, clip the image to that shape
if (this.canvas.outputAreaShape && this.canvas.outputAreaShape.isClosed) {
@@ -1039,33 +904,27 @@ export class CanvasIO {
}
}
async clipImageToShape(image, shape) {
return new Promise((resolve, reject) => {
const { canvas, ctx } = createCanvas(image.width, image.height);
if (!ctx) {
reject(new Error("Could not create canvas context for clipping"));
return;
}
// Draw the image first
ctx.drawImage(image, 0, 0);
// Calculate custom shape position accounting for extensions
// Custom shape should maintain its relative position within the original canvas area
const ext = this.canvas.outputAreaExtensionEnabled ? this.canvas.outputAreaExtensions : { top: 0, bottom: 0, left: 0, right: 0 };
const shapeOffsetX = ext.left; // Add left extension to maintain relative position
const shapeOffsetY = ext.top; // Add top extension to maintain relative position
// Create a clipping mask using the shape with extension offset
ctx.globalCompositeOperation = 'destination-in';
ctx.beginPath();
ctx.moveTo(shape.points[0].x + shapeOffsetX, shape.points[0].y + shapeOffsetY);
for (let i = 1; i < shape.points.length; i++) {
ctx.lineTo(shape.points[i].x + shapeOffsetX, shape.points[i].y + shapeOffsetY);
}
ctx.closePath();
ctx.fill();
// Create a new image from the clipped canvas
const clippedImage = new Image();
clippedImage.onload = () => resolve(clippedImage);
clippedImage.onerror = () => reject(new Error("Failed to create clipped image"));
clippedImage.src = canvas.toDataURL();
});
const { canvas, ctx } = createCanvas(image.width, image.height);
if (!ctx) {
throw new Error("Could not create canvas context for clipping");
}
// Draw the image first
ctx.drawImage(image, 0, 0);
// Calculate custom shape position accounting for extensions
// Custom shape should maintain its relative position within the original canvas area
const ext = this.canvas.outputAreaExtensionEnabled ? this.canvas.outputAreaExtensions : { top: 0, bottom: 0, left: 0, right: 0 };
const shapeOffsetX = ext.left; // Add left extension to maintain relative position
const shapeOffsetY = ext.top; // Add top extension to maintain relative position
// Create a clipping mask using the shape with extension offset
ctx.globalCompositeOperation = 'destination-in';
ctx.beginPath();
ctx.moveTo(shape.points[0].x + shapeOffsetX, shape.points[0].y + shapeOffsetY);
for (let i = 1; i < shape.points.length; i++) {
ctx.lineTo(shape.points[i].x + shapeOffsetX, shape.points[i].y + shapeOffsetY);
}
ctx.closePath();
ctx.fill();
// Create a new image from the clipped canvas
return await createImageFromSource(canvas.toDataURL());
}
}

View File

@@ -314,3 +314,102 @@ export function canvasToMaskImage(canvas) {
img.src = canvas.toDataURL();
});
}
/**
 * Scales an image to fit within target bounds while preserving aspect ratio.
 * The result is never smaller than 1x1, even for degenerate inputs.
 * @param image - Source image (must be fully loaded so width/height are valid)
 * @param targetWidth - Maximum width of the scaled result
 * @param targetHeight - Maximum height of the scaled result
 * @returns Promise resolving to the scaled HTMLImageElement
 */
export async function scaleImageToFit(image, targetWidth, targetHeight) {
    const scale = Math.min(targetWidth / image.width, targetHeight / image.height);
    const scaledWidth = Math.max(1, Math.round(image.width * scale));
    const scaledHeight = Math.max(1, Math.round(image.height * scale));
    const { canvas, ctx } = createCanvas(scaledWidth, scaledHeight, '2d', { willReadFrequently: true });
    if (!ctx)
        throw new Error("Could not create scaled image context");
    ctx.drawImage(image, 0, 0, scaledWidth, scaledHeight);
    // Reuse the shared loader instead of hand-rolling an onload/onerror Promise,
    // keeping image creation consistent with the other helpers in this module.
    return createImageFromSource(canvas.toDataURL());
}
/**
 * Unified tensor -> ImageData conversion for RGB images and grayscale masks.
 * Values are normalized from [min_val, max_val] (defaulting to [0, 1]) into 0-255.
 * @param tensor - Tensor-like object with { shape, data, min_val?, max_val? }
 * @param mode - 'rgb' copies up to 3 channels per pixel; 'grayscale' writes luminance into RGB
 * @returns ImageData on success, or null if conversion fails
 */
export function tensorToImageData(tensor, mode = 'rgb') {
    try {
        const shape = tensor.shape;
        const height = shape[1];
        const width = shape[2];
        const channels = shape[3] || 1; // masks often omit the channel dimension
        log.debug("Converting tensor:", { shape, channels, mode });
        const imageData = new ImageData(width, height);
        const data = imageData.data; // write pixels directly; no scratch buffer/copy needed
        const flatData = tensor.data;
        const pixelCount = width * height;
        const min = tensor.min_val ?? 0;
        const max = tensor.max_val ?? 1;
        const denom = (max - min) || 1; // avoid divide-by-zero for constant tensors
        const grayscale = mode === 'grayscale';
        for (let i = 0; i < pixelCount; i++) {
            const pixelIndex = i * 4;
            const tensorIndex = i * channels;
            if (grayscale) {
                // Masks: one luminance value replicated into RGB, A fixed at 255
                // (MaskTool derives alpha from luminance).
                let lum;
                if (channels === 1) {
                    lum = flatData[tensorIndex];
                }
                else {
                    // Image-like mask: collapse RGB channels to Rec.601 luminance
                    const r = flatData[tensorIndex + 0] ?? 0;
                    const g = flatData[tensorIndex + 1] ?? 0;
                    const b = flatData[tensorIndex + 2] ?? 0;
                    lum = 0.299 * r + 0.587 * g + 0.114 * b;
                }
                let norm = (lum - min) / denom;
                if (!isFinite(norm))
                    norm = 0;
                norm = Math.max(0, Math.min(1, norm));
                const value = Math.round(norm * 255);
                data[pixelIndex] = value;
                data[pixelIndex + 1] = value;
                data[pixelIndex + 2] = value;
                data[pixelIndex + 3] = 255;
            }
            else {
                // Images: normalize each channel independently; Uint8ClampedArray
                // clamps any out-of-range or NaN result, matching prior behavior.
                for (let c = 0; c < Math.min(3, channels); c++) {
                    const channelNorm = (flatData[tensorIndex + c] - min) / denom;
                    data[pixelIndex + c] = Math.round(channelNorm * 255);
                }
                data[pixelIndex + 3] = 255;
            }
        }
        return imageData;
    }
    catch (error) {
        log.error("Error converting tensor:", error);
        return null;
    }
}
/**
 * Renders an ImageData object onto an offscreen canvas and decodes the result
 * into an HTMLImageElement via a data URL.
 * @param imageData - Pixel data to convert
 * @returns Promise resolving to the decoded HTMLImageElement
 */
export async function createImageFromImageData(imageData) {
    const { width, height } = imageData;
    const { canvas, ctx } = createCanvas(width, height, '2d', { willReadFrequently: true });
    if (!ctx)
        throw new Error("Could not create canvas context");
    ctx.putImageData(imageData, 0, 0);
    return createImageFromSource(canvas.toDataURL());
}

View File

@@ -2,7 +2,7 @@ import { createCanvas } from "./utils/CommonUtils.js";
import { createModuleLogger } from "./utils/LoggerUtils.js";
import { showErrorNotification } from "./utils/NotificationUtils.js";
import { webSocketManager } from "./utils/WebSocketManager.js";
import { scaleImageToFit } from "./utils/ImageUtils.js";
import { scaleImageToFit, createImageFromSource, tensorToImageData, createImageFromImageData } from "./utils/ImageUtils.js";
import type { Canvas } from './Canvas';
import type { Layer, Shape } from './types';
@@ -283,22 +283,12 @@ export class CanvasIO {
try {
log.debug("Adding input to canvas:", { inputImage });
const { canvas: tempCanvas, ctx: tempCtx } = createCanvas(inputImage.width, inputImage.height);
if (!tempCtx) throw new Error("Could not create temp context");
// Use unified tensorToImageData for RGB image
const imageData = tensorToImageData(inputImage, 'rgb');
if (!imageData) throw new Error("Failed to convert input image tensor");
const imgData = new ImageData(
new Uint8ClampedArray(inputImage.data),
inputImage.width,
inputImage.height
);
tempCtx.putImageData(imgData, 0, 0);
const image = new Image();
await new Promise((resolve, reject) => {
image.onload = resolve;
image.onerror = reject;
image.src = tempCanvas.toDataURL();
});
// Create HTMLImageElement from ImageData
const image = await createImageFromImageData(imageData);
const bounds = this.canvas.outputAreaBounds;
const scale = Math.min(
@@ -334,23 +324,10 @@ export class CanvasIO {
throw new Error("Invalid tensor data");
}
const { canvas, ctx } = createCanvas(tensor.width, tensor.height, '2d', { willReadFrequently: true });
if (!ctx) throw new Error("Could not create canvas context");
const imageData = tensorToImageData(tensor, 'rgb');
if (!imageData) throw new Error("Failed to convert tensor to image data");
const imageData = new ImageData(
new Uint8ClampedArray(tensor.data),
tensor.width,
tensor.height
);
ctx.putImageData(imageData, 0, 0);
return new Promise((resolve, reject) => {
const img = new Image();
img.onload = () => resolve(img);
img.onerror = (e) => reject(new Error("Failed to load image: " + e));
img.src = canvas.toDataURL();
});
return await createImageFromImageData(imageData);
} catch (error) {
log.error("Error converting tensor to image:", error);
throw error;
@@ -610,53 +587,17 @@ export class CanvasIO {
channels = Math.max(1, Math.floor(len / (width * height)));
}
// Create GRAYSCALE image from tensor; RGB = luminance, A = 255 (MaskTool.setMask reads luminance)
// Use unified tensorToImageData for masks
const maskImageData = tensorToImageData(maskOutput, 'grayscale');
if (!maskImageData) throw new Error("Failed to convert mask tensor to image data");
// Create canvas and put image data
const { canvas: maskCanvas, ctx } = createCanvas(width, height, '2d', { willReadFrequently: true });
if (!ctx) throw new Error("Could not create mask context");
const imgData = ctx.createImageData(width, height);
const arr: any = maskOutput.data;
const min = (maskOutput.min_val !== undefined) ? maskOutput.min_val : 0;
const max = (maskOutput.max_val !== undefined) ? maskOutput.max_val : 1;
const denom = (max - min) || 1;
const pixelCount = width * height;
for (let i = 0; i < pixelCount; i++) {
const baseIndex = i * channels;
let v: number;
if (channels === 1) {
v = arr[i];
} else if (channels >= 3) {
// If image-like, compute luminance from RGB channels
const r = arr[baseIndex + 0] ?? 0;
const g = arr[baseIndex + 1] ?? 0;
const b = arr[baseIndex + 2] ?? 0;
v = 0.299 * r + 0.587 * g + 0.114 * b;
} else {
v = arr[baseIndex] ?? 0;
}
let norm = (v - min) / denom;
if (!isFinite(norm)) norm = 0;
norm = Math.max(0, Math.min(1, norm));
const lum = Math.round(norm * 255);
const o = i * 4;
imgData.data[o] = lum; // R
imgData.data[o + 1] = lum; // G
imgData.data[o + 2] = lum; // B
imgData.data[o + 3] = 255; // A fixed (MaskTool computes alpha from luminance)
}
ctx.putImageData(imgData, 0, 0);
ctx.putImageData(maskImageData, 0, 0);
// Convert to HTMLImageElement
const maskImg = await new Promise<HTMLImageElement>((resolve, reject) => {
const img = new Image();
img.onload = () => resolve(img);
img.onerror = reject;
img.src = maskCanvas.toDataURL();
});
const maskImg = await createImageFromSource(maskCanvas.toDataURL());
// Respect fit_on_add (scale to output area)
const widgets = this.canvas.node.widgets;
@@ -802,25 +743,20 @@ export class CanvasIO {
const batch = inputData.input_images_batch;
log.info(`Processing batch of ${batch.length} images from backend`);
for (let i = 0; i < batch.length; i++) {
const imgData = batch[i];
const img = new Image();
await new Promise((resolve, reject) => {
img.onload = resolve;
img.onerror = reject;
img.src = imgData.data;
});
// Add image to canvas with unique name
await this.canvas.canvasLayers.addLayerWithImage(
img,
{ name: `Batch Image ${i + 1}` },
addMode,
this.canvas.outputAreaBounds
);
log.debug(`Added batch image ${i + 1}/${batch.length} from backend`);
}
for (let i = 0; i < batch.length; i++) {
const imgData = batch[i];
const img = await createImageFromSource(imgData.data);
// Add image to canvas with unique name
await this.canvas.canvasLayers.addLayerWithImage(
img,
{ name: `Batch Image ${i + 1}` },
addMode,
this.canvas.outputAreaBounds
);
log.debug(`Added batch image ${i + 1}/${batch.length} from backend`);
}
log.info(`All ${batch.length} batch images added from backend`);
this.canvas.render();
@@ -828,12 +764,7 @@ export class CanvasIO {
} else if (inputData.input_image) {
// Handle single image (backward compatibility)
const img = new Image();
await new Promise((resolve, reject) => {
img.onload = resolve;
img.onerror = reject;
img.src = inputData.input_image;
});
const img = await createImageFromSource(inputData.input_image);
// Add image to canvas at output area position
await this.canvas.canvasLayers.addLayerWithImage(
@@ -858,12 +789,7 @@ export class CanvasIO {
log.info("Processing input mask");
// Load mask image
const maskImg = new Image();
await new Promise((resolve, reject) => {
maskImg.onload = resolve;
maskImg.onerror = reject;
maskImg.src = inputData.input_mask;
});
const maskImg = await createImageFromSource(inputData.input_mask);
// Determine if we should fit the mask or use it at original size
const fitOnAddWidget2 = this.canvas.node.widgets.find((w) => w.name === "fit_on_add");
@@ -1005,59 +931,11 @@ export class CanvasIO {
}
convertTensorToImageData(tensor: any): ImageData | null {
try {
const shape = tensor.shape;
const height = shape[1];
const width = shape[2];
const channels = shape[3];
log.debug("Converting tensor:", {
shape: shape,
dataRange: {
min: tensor.min_val,
max: tensor.max_val
}
});
const imageData = new ImageData(width, height);
const data = new Uint8ClampedArray(width * height * 4);
const flatData = tensor.data;
const pixelCount = width * height;
for (let i = 0; i < pixelCount; i++) {
const pixelIndex = i * 4;
const tensorIndex = i * channels;
for (let c = 0; c < channels; c++) {
const value = flatData[tensorIndex + c];
const normalizedValue = (value - tensor.min_val) / (tensor.max_val - tensor.min_val);
data[pixelIndex + c] = Math.round(normalizedValue * 255);
}
data[pixelIndex + 3] = 255;
}
imageData.data.set(data);
return imageData;
} catch (error) {
log.error("Error converting tensor:", error);
return null;
}
return tensorToImageData(tensor, 'rgb');
}
async createImageFromData(imageData: ImageData): Promise<HTMLImageElement> {
return new Promise((resolve, reject) => {
const { canvas, ctx } = createCanvas(imageData.width, imageData.height, '2d', { willReadFrequently: true });
if (!ctx) throw new Error("Could not create canvas context");
ctx.putImageData(imageData, 0, 0);
const img = new Image();
img.onload = () => resolve(img);
img.onerror = reject;
img.src = canvas.toDataURL();
});
return createImageFromImageData(imageData);
}
async processMaskData(maskData: any): Promise<void> {
@@ -1124,12 +1002,7 @@ export class CanvasIO {
const newLayers: (Layer | null)[] = [];
for (const imageData of result.images) {
const img = new Image();
await new Promise((resolve, reject) => {
img.onload = resolve;
img.onerror = reject;
img.src = imageData;
});
const img = await createImageFromSource(imageData);
let processedImage = img;
@@ -1158,37 +1031,31 @@ export class CanvasIO {
}
async clipImageToShape(image: HTMLImageElement, shape: Shape): Promise<HTMLImageElement> {
return new Promise((resolve, reject) => {
const { canvas, ctx } = createCanvas(image.width, image.height);
if (!ctx) {
reject(new Error("Could not create canvas context for clipping"));
return;
}
const { canvas, ctx } = createCanvas(image.width, image.height);
if (!ctx) {
throw new Error("Could not create canvas context for clipping");
}
// Draw the image first
ctx.drawImage(image, 0, 0);
// Draw the image first
ctx.drawImage(image, 0, 0);
// Calculate custom shape position accounting for extensions
// Custom shape should maintain its relative position within the original canvas area
const ext = this.canvas.outputAreaExtensionEnabled ? this.canvas.outputAreaExtensions : { top: 0, bottom: 0, left: 0, right: 0 };
const shapeOffsetX = ext.left; // Add left extension to maintain relative position
const shapeOffsetY = ext.top; // Add top extension to maintain relative position
// Calculate custom shape position accounting for extensions
// Custom shape should maintain its relative position within the original canvas area
const ext = this.canvas.outputAreaExtensionEnabled ? this.canvas.outputAreaExtensions : { top: 0, bottom: 0, left: 0, right: 0 };
const shapeOffsetX = ext.left; // Add left extension to maintain relative position
const shapeOffsetY = ext.top; // Add top extension to maintain relative position
// Create a clipping mask using the shape with extension offset
ctx.globalCompositeOperation = 'destination-in';
ctx.beginPath();
ctx.moveTo(shape.points[0].x + shapeOffsetX, shape.points[0].y + shapeOffsetY);
for (let i = 1; i < shape.points.length; i++) {
ctx.lineTo(shape.points[i].x + shapeOffsetX, shape.points[i].y + shapeOffsetY);
}
ctx.closePath();
ctx.fill();
// Create a clipping mask using the shape with extension offset
ctx.globalCompositeOperation = 'destination-in';
ctx.beginPath();
ctx.moveTo(shape.points[0].x + shapeOffsetX, shape.points[0].y + shapeOffsetY);
for (let i = 1; i < shape.points.length; i++) {
ctx.lineTo(shape.points[i].x + shapeOffsetX, shape.points[i].y + shapeOffsetY);
}
ctx.closePath();
ctx.fill();
// Create a new image from the clipped canvas
const clippedImage = new Image();
clippedImage.onload = () => resolve(clippedImage);
clippedImage.onerror = () => reject(new Error("Failed to create clipped image"));
clippedImage.src = canvas.toDataURL();
});
// Create a new image from the clipped canvas
return await createImageFromSource(canvas.toDataURL());
}
}

View File

@@ -411,3 +411,86 @@ export async function scaleImageToFit(image: HTMLImageElement, targetWidth: numb
scaledImg.src = canvas.toDataURL();
});
}
/**
 * Unified tensor -> ImageData conversion for RGB images and grayscale masks.
 * Values are normalized from [min_val, max_val] (defaulting to [0, 1]) into 0-255.
 * @param tensor - Tensor-like object with { shape, data, min_val?, max_val? }
 * @param mode - 'rgb' copies up to 3 channels per pixel; 'grayscale' writes luminance into RGB
 * @returns ImageData on success, or null if conversion fails
 */
export function tensorToImageData(tensor: any, mode: 'rgb' | 'grayscale' = 'rgb'): ImageData | null {
    try {
        const shape = tensor.shape;
        const height = shape[1];
        const width = shape[2];
        const channels = shape[3] || 1; // masks often omit the channel dimension
        log.debug("Converting tensor:", { shape, channels, mode });
        const imageData = new ImageData(width, height);
        const data = imageData.data; // write pixels directly; no scratch buffer/copy needed
        const flatData = tensor.data;
        const pixelCount = width * height;
        const min = tensor.min_val ?? 0;
        const max = tensor.max_val ?? 1;
        const denom = (max - min) || 1; // avoid divide-by-zero for constant tensors
        const grayscale = mode === 'grayscale';
        for (let i = 0; i < pixelCount; i++) {
            const pixelIndex = i * 4;
            const tensorIndex = i * channels;
            if (grayscale) {
                // Masks: one luminance value replicated into RGB, A fixed at 255
                // (MaskTool derives alpha from luminance).
                let lum: number;
                if (channels === 1) {
                    lum = flatData[tensorIndex];
                } else {
                    // Image-like mask: collapse RGB channels to Rec.601 luminance
                    const r = flatData[tensorIndex + 0] ?? 0;
                    const g = flatData[tensorIndex + 1] ?? 0;
                    const b = flatData[tensorIndex + 2] ?? 0;
                    lum = 0.299 * r + 0.587 * g + 0.114 * b;
                }
                let norm = (lum - min) / denom;
                if (!isFinite(norm)) norm = 0;
                norm = Math.max(0, Math.min(1, norm));
                const value = Math.round(norm * 255);
                data[pixelIndex] = value;
                data[pixelIndex + 1] = value;
                data[pixelIndex + 2] = value;
                data[pixelIndex + 3] = 255;
            } else {
                // Images: normalize each channel independently; Uint8ClampedArray
                // clamps any out-of-range or NaN result, matching prior behavior.
                for (let c = 0; c < Math.min(3, channels); c++) {
                    const channelNorm = (flatData[tensorIndex + c] - min) / denom;
                    data[pixelIndex + c] = Math.round(channelNorm * 255);
                }
                data[pixelIndex + 3] = 255;
            }
        }
        return imageData;
    } catch (error) {
        log.error("Error converting tensor:", error);
        return null;
    }
}
/**
 * Renders an ImageData object onto an offscreen canvas and decodes the result
 * into an HTMLImageElement via a data URL.
 * @param imageData - Pixel data to convert
 * @returns Promise resolving to the decoded HTMLImageElement
 */
export async function createImageFromImageData(imageData: ImageData): Promise<HTMLImageElement> {
    const { width, height } = imageData;
    const { canvas, ctx } = createCanvas(width, height, '2d', { willReadFrequently: true });
    if (!ctx) throw new Error("Could not create canvas context");
    ctx.putImageData(imageData, 0, 0);
    return createImageFromSource(canvas.toDataURL());
}