Added mask and image input

This commit is contained in:
Dariusz L
2025-08-08 22:23:15 +02:00
parent bf55d13f67
commit afdac52144
14 changed files with 1344 additions and 113 deletions

View File

@@ -179,6 +179,10 @@ class LayerForgeNode:
"trigger": ("INT", {"default": 0, "min": 0, "max": 99999999, "step": 1}),
"node_id": ("STRING", {"default": "0"}),
},
"optional": {
"input_image": ("IMAGE",),
"input_mask": ("MASK",),
},
"hidden": {
"prompt": ("PROMPT",),
"unique_id": ("UNIQUE_ID",),
@@ -239,7 +243,7 @@ class LayerForgeNode:
_processing_lock = threading.Lock()
def process_canvas_image(self, fit_on_add, show_preview, auto_refresh_after_generation, trigger, node_id, prompt=None, unique_id=None):
def process_canvas_image(self, fit_on_add, show_preview, auto_refresh_after_generation, trigger, node_id, input_image=None, input_mask=None, prompt=None, unique_id=None):
try:
@@ -250,6 +254,59 @@ class LayerForgeNode:
log_info(f"Lock acquired. Starting process_canvas_image for node_id: {node_id} (fallback unique_id: {unique_id})")
# Handle input image and mask if provided
if input_image is not None or input_mask is not None:
log_info(f"Input data detected for node {node_id} - Image: {input_image is not None}, Mask: {input_mask is not None}")
# Store input data for frontend to retrieve
with self.__class__._storage_lock:
input_data = {}
if input_image is not None:
# Convert image tensor to base64
if isinstance(input_image, torch.Tensor):
# Ensure correct shape [B, H, W, C]
if input_image.dim() == 3:
input_image = input_image.unsqueeze(0)
# Convert to numpy and then to PIL
img_np = (input_image.squeeze(0).cpu().numpy() * 255).astype(np.uint8)
pil_img = Image.fromarray(img_np, 'RGB')
# Convert to base64
buffered = io.BytesIO()
pil_img.save(buffered, format="PNG")
img_str = base64.b64encode(buffered.getvalue()).decode()
input_data['input_image'] = f"data:image/png;base64,{img_str}"
input_data['input_image_width'] = pil_img.width
input_data['input_image_height'] = pil_img.height
log_debug(f"Stored input image: {pil_img.width}x{pil_img.height}")
if input_mask is not None:
# Convert mask tensor to base64
if isinstance(input_mask, torch.Tensor):
# Ensure correct shape
if input_mask.dim() == 2:
input_mask = input_mask.unsqueeze(0)
if input_mask.dim() == 3 and input_mask.shape[0] == 1:
input_mask = input_mask.squeeze(0)
# Convert to numpy and then to PIL
mask_np = (input_mask.cpu().numpy() * 255).astype(np.uint8)
pil_mask = Image.fromarray(mask_np, 'L')
# Convert to base64
mask_buffered = io.BytesIO()
pil_mask.save(mask_buffered, format="PNG")
mask_str = base64.b64encode(mask_buffered.getvalue()).decode()
input_data['input_mask'] = f"data:image/png;base64,{mask_str}"
log_debug(f"Stored input mask: {pil_mask.width}x{pil_mask.height}")
input_data['fit_on_add'] = fit_on_add
# Store in a special key for input data
self.__class__._canvas_data_storage[f"{node_id}_input"] = input_data
storage_key = node_id
processed_image = None
@@ -433,6 +490,37 @@ class LayerForgeNode:
log_info("WebSocket connection closed")
return ws
@PromptServer.instance.routes.get("/layerforge/get_input_data/{node_id}")
async def get_input_data(request):
    """Serve (and consume) staged input-image/mask data for one node.

    Data is staged under the storage key ``"<node_id>_input"`` elsewhere in
    this class; it is ``pop``-ed here so each staged payload is delivered to
    the frontend at most once.

    Returns a JSON body with ``success`` plus either ``has_input: True`` and
    the payload under ``data``, or ``has_input: False``.
    """
    try:
        node_id = request.match_info["node_id"]
        log_debug(f"Checking for input data for node: {node_id}")
        # pop() under the lock makes delivery one-shot: a second poll for the
        # same node gets has_input = False.
        with cls._storage_lock:
            staged = cls._canvas_data_storage.pop(f"{node_id}_input", None)
        if staged is not None:
            log_info(f"Input data found for node {node_id}, sending to frontend")
            return web.json_response({
                'success': True,
                'has_input': True,
                'data': staged
            })
        log_debug(f"No input data found for node {node_id}")
        return web.json_response({
            'success': True,
            'has_input': False
        })
    except Exception as e:
        log_error(f"Error in get_input_data: {str(e)}")
        return web.json_response({
            'success': False,
            'error': str(e)
        }, status=500)
@PromptServer.instance.routes.get("/ycnode/get_canvas_data/{node_id}")
async def get_canvas_data(request):
try:
@@ -911,4 +999,3 @@ def convert_tensor_to_base64(tensor, alpha_mask=None, original_alpha=None):
log_error(f"Error in convert_tensor_to_base64: {str(e)}")
log_debug(f"Tensor shape: {tensor.shape}, dtype: {tensor.dtype}")
raise

View File

@@ -73,6 +73,8 @@ export class Canvas {
this.canvasContainer = null;
this.dataInitialized = false;
this.pendingDataCheck = null;
this.pendingInputDataCheck = null;
this.inputDataLoaded = false;
this.imageCache = new Map();
this.requestSaveState = () => { };
this.outputAreaShape = null;
@@ -372,6 +374,10 @@ export class Canvas {
return widget ? widget.value : false;
};
const handleExecutionStart = () => {
// Check for input data when execution starts, but don't reset the flag
log.debug('Execution started, checking for input data...');
// Don't reset inputDataLoaded here - we want to remember if we already loaded this input
this.canvasIO.checkForInputData();
if (getAutoRefreshValue()) {
lastExecutionStartTime = Date.now();
// Store a snapshot of the context for the upcoming batch
@@ -394,6 +400,9 @@ export class Canvas {
}
};
const handleExecutionSuccess = async () => {
// Always check for input data after execution completes
log.debug('Execution success, checking for input data...');
await this.canvasIO.checkForInputData();
if (getAutoRefreshValue()) {
log.info('Auto-refresh triggered, importing latest images.');
if (!this.pendingBatchContext) {

View File

@@ -314,12 +314,26 @@ export class CanvasIO {
async initNodeData() {
try {
log.info("Starting node data initialization...");
// First check for input data from the backend (new feature)
await this.checkForInputData();
// If we've already loaded input data, don't continue with old initialization
if (this.canvas.inputDataLoaded) {
log.debug("Input data already loaded, skipping old initialization");
this.canvas.dataInitialized = true;
return;
}
if (!this.canvas.node || !this.canvas.node.inputs) {
log.debug("Node or inputs not ready");
return this.scheduleDataCheck();
}
if (this.canvas.node.inputs[0] && this.canvas.node.inputs[0].link) {
const imageLinkId = this.canvas.node.inputs[0].link;
// Check if we already loaded this link
if (this.canvas.lastLoadedLinkId === imageLinkId) {
log.debug(`Link ${imageLinkId} already loaded via new system, marking as initialized`);
this.canvas.dataInitialized = true;
return;
}
const imageData = window.app.nodeOutputs[imageLinkId];
if (imageData) {
log.debug("Found image data:", imageData);
@@ -331,6 +345,10 @@ export class CanvasIO {
return this.scheduleDataCheck();
}
}
else {
// No input connected, mark as initialized to stop repeated checks
this.canvas.dataInitialized = true;
}
if (this.canvas.node.inputs[1] && this.canvas.node.inputs[1].link) {
const maskLinkId = this.canvas.node.inputs[1].link;
const maskData = window.app.nodeOutputs[maskLinkId];
@@ -345,6 +363,321 @@ export class CanvasIO {
return this.scheduleDataCheck();
}
}
/**
 * Pull pending image/mask input into the canvas.
 *
 * Resolution order, independently for the image (inputs[0]) and the mask
 * (inputs[1]):
 *   1. read directly from the connected source node's client-side output
 *      (sourceNode.imgs / window.app.nodeOutputs keyed by link.origin_id);
 *   2. otherwise poll the backend endpoint
 *      /layerforge/get_input_data/<nodeId>, which stages data during
 *      workflow execution (and is one-shot on the server side).
 * Re-importing the same connection is suppressed via
 * this.canvas.lastLoadedLinkId / lastLoadedMaskLinkId.
 */
async checkForInputData() {
try {
const nodeId = this.canvas.node.id;
log.info(`Checking for input data for node ${nodeId}...`);
// Track loaded links separately for image and mask
let imageLoaded = false;
let maskLoaded = false;
// First, try to get data from connected node's output if available
if (this.canvas.node.inputs && this.canvas.node.inputs[0] && this.canvas.node.inputs[0].link) {
const linkId = this.canvas.node.inputs[0].link;
const graph = this.canvas.node.graph;
// Check if we already loaded this link
if (this.canvas.lastLoadedLinkId === linkId) {
log.debug(`Image link ${linkId} already loaded`);
imageLoaded = true;
}
else {
if (graph) {
const link = graph.links[linkId];
if (link) {
const sourceNode = graph.getNodeById(link.origin_id);
if (sourceNode && sourceNode.imgs && sourceNode.imgs.length > 0) {
// The connected node has images in its output
log.info("Found image in connected node's output, loading directly");
const img = sourceNode.imgs[0];
// Mark this link as loaded
this.canvas.lastLoadedLinkId = linkId;
// DON'T clear existing layers - just add a new one
// Determine add mode
const fitOnAddWidget = this.canvas.node.widgets.find((w) => w.name === "fit_on_add");
const addMode = (fitOnAddWidget && fitOnAddWidget.value) ? 'fit' : 'center';
// Add the image to canvas as a new layer
await this.canvas.canvasLayers.addLayerWithImage(img, {}, addMode, this.canvas.outputAreaBounds);
this.canvas.inputDataLoaded = true;
imageLoaded = true;
log.info("Input image added as new layer from connected node");
this.canvas.render();
this.canvas.saveState();
}
}
}
}
}
// Check for mask input separately
if (this.canvas.node.inputs && this.canvas.node.inputs[1] && this.canvas.node.inputs[1].link) {
const maskLinkId = this.canvas.node.inputs[1].link;
// Check if we already loaded this mask link
if (this.canvas.lastLoadedMaskLinkId === maskLinkId) {
log.debug(`Mask link ${maskLinkId} already loaded`);
maskLoaded = true;
}
else {
// Try to get mask tensor from nodeOutputs using origin_id (not link id)
const graph = this.canvas.node.graph;
let maskOutput = null;
if (graph) {
const link = graph.links[maskLinkId];
if (link && link.origin_id) {
// Use origin_id to get the actual node output
const nodeOutput = window.app?.nodeOutputs?.[link.origin_id];
log.debug(`Looking for mask output from origin node ${link.origin_id}, found:`, !!nodeOutput);
if (nodeOutput) {
log.debug(`Node ${link.origin_id} output structure:`, {
hasData: !!nodeOutput.data,
hasShape: !!nodeOutput.shape,
dataType: typeof nodeOutput.data,
shapeType: typeof nodeOutput.shape,
keys: Object.keys(nodeOutput)
});
// Only use if it has actual tensor data
if (nodeOutput.data && nodeOutput.shape) {
maskOutput = nodeOutput;
}
}
}
}
if (maskOutput && maskOutput.data && maskOutput.shape) {
try {
// Derive dimensions from shape or explicit width/height
let width = maskOutput.width || 0;
let height = maskOutput.height || 0;
const shape = maskOutput.shape; // e.g. [1,H,W] or [1,H,W,1]
if ((!width || !height) && Array.isArray(shape)) {
if (shape.length >= 3) {
height = shape[1];
width = shape[2];
}
else if (shape.length === 2) {
height = shape[0];
width = shape[1];
}
}
if (!width || !height) {
throw new Error("Cannot determine mask dimensions from nodeOutputs");
}
// Determine channels count
let channels = 1;
if (Array.isArray(shape) && shape.length >= 4) {
channels = shape[3];
}
else if (maskOutput.channels) {
channels = maskOutput.channels;
}
else {
// Fallback: infer channel count from flat buffer length.
const len = maskOutput.data.length;
channels = Math.max(1, Math.floor(len / (width * height)));
}
// Create GRAYSCALE image from tensor; RGB = luminance, A = 255 (MaskTool.setMask reads luminance)
const { canvas: maskCanvas, ctx } = createCanvas(width, height, '2d', { willReadFrequently: true });
if (!ctx)
throw new Error("Could not create mask context");
const imgData = ctx.createImageData(width, height);
const arr = maskOutput.data;
// NOTE(review): min_val/max_val are assumed to be provided by the backend
// when the tensor range is not [0, 1] — confirm against the producer.
const min = (maskOutput.min_val !== undefined) ? maskOutput.min_val : 0;
const max = (maskOutput.max_val !== undefined) ? maskOutput.max_val : 1;
const denom = (max - min) || 1;
const pixelCount = width * height;
for (let i = 0; i < pixelCount; i++) {
const baseIndex = i * channels;
let v;
if (channels === 1) {
v = arr[i];
}
else if (channels >= 3) {
// If image-like, compute luminance from RGB channels
const r = arr[baseIndex + 0] ?? 0;
const g = arr[baseIndex + 1] ?? 0;
const b = arr[baseIndex + 2] ?? 0;
v = 0.299 * r + 0.587 * g + 0.114 * b;
}
else {
v = arr[baseIndex] ?? 0;
}
let norm = (v - min) / denom;
if (!isFinite(norm))
norm = 0;
norm = Math.max(0, Math.min(1, norm));
const lum = Math.round(norm * 255);
const o = i * 4;
imgData.data[o] = lum; // R
imgData.data[o + 1] = lum; // G
imgData.data[o + 2] = lum; // B
imgData.data[o + 3] = 255; // A fixed (MaskTool computes alpha from luminance)
}
ctx.putImageData(imgData, 0, 0);
// Convert to HTMLImageElement
const maskImg = await new Promise((resolve, reject) => {
const img = new Image();
img.onload = () => resolve(img);
img.onerror = reject;
img.src = maskCanvas.toDataURL();
});
// Respect fit_on_add (scale to output area)
const widgets = this.canvas.node.widgets;
const fitOnAddWidget = widgets ? widgets.find((w) => w.name === "fit_on_add") : null;
const shouldFit = fitOnAddWidget && fitOnAddWidget.value;
let finalMaskImg = maskImg;
if (shouldFit) {
const bounds = this.canvas.outputAreaBounds;
const scale = Math.min(bounds.width / maskImg.width, bounds.height / maskImg.height);
const scaledWidth = Math.max(1, Math.round(maskImg.width * scale));
const scaledHeight = Math.max(1, Math.round(maskImg.height * scale));
const { canvas: scaledCanvas, ctx: scaledCtx } = createCanvas(scaledWidth, scaledHeight, '2d', { willReadFrequently: true });
if (!scaledCtx)
throw new Error("Could not create scaled mask context");
scaledCtx.drawImage(maskImg, 0, 0, scaledWidth, scaledHeight);
finalMaskImg = await new Promise((resolve, reject) => {
const img = new Image();
img.onload = () => resolve(img);
img.onerror = reject;
img.src = scaledCanvas.toDataURL();
});
}
// Apply to MaskTool (centers internally)
if (this.canvas.maskTool) {
this.canvas.maskTool.setMask(finalMaskImg, true);
this.canvas.canvasState.saveMaskState();
this.canvas.render();
// Mark this mask link as loaded to avoid re-applying
this.canvas.lastLoadedMaskLinkId = maskLinkId;
maskLoaded = true;
log.info("Applied input mask from nodeOutputs immediately on connection" + (shouldFit ? " (fitted to output area)" : ""));
}
}
catch (err) {
log.warn("Failed to apply mask from nodeOutputs immediately; will wait for backend input_mask after execution", err);
}
}
else {
// nodeOutputs exist but don't have tensor data yet (need workflow execution)
log.info(`Mask node ${graph.links[maskLinkId]?.origin_id} found but has no tensor data yet. Mask will be applied automatically after workflow execution.`);
// Don't retry - data won't be available until workflow runs
}
}
}
// If both are already loaded from connected nodes, we're done
const nodeInputs = this.canvas.node.inputs;
if (imageLoaded && (!nodeInputs || !nodeInputs[1] || maskLoaded)) {
return;
}
// If no data from connected node, check backend
const response = await fetch(`/layerforge/get_input_data/${nodeId}`);
const result = await response.json();
if (result.success && result.has_input) {
// Check if we already loaded image data (by checking the current link)
if (!imageLoaded && this.canvas.node.inputs && this.canvas.node.inputs[0] && this.canvas.node.inputs[0].link) {
const currentLinkId = this.canvas.node.inputs[0].link;
if (this.canvas.lastLoadedLinkId !== currentLinkId) {
// Mark this link as loaded
this.canvas.lastLoadedLinkId = currentLinkId;
// NOTE(review): imageLoaded is already false on this path, so this
// assignment is a no-op kept for symmetry with the mask branch.
imageLoaded = false; // Will load from backend
}
}
// Check if we already loaded mask data
if (!maskLoaded && this.canvas.node.inputs && this.canvas.node.inputs[1] && this.canvas.node.inputs[1].link) {
const currentMaskLinkId = this.canvas.node.inputs[1].link;
if (this.canvas.lastLoadedMaskLinkId !== currentMaskLinkId) {
// Mark this mask link as loaded
this.canvas.lastLoadedMaskLinkId = currentMaskLinkId;
maskLoaded = false; // Will load from backend
}
}
log.info("Input data found from backend, adding to canvas");
const inputData = result.data;
// DON'T clear existing layers - just add new ones
// Mark that we've loaded input data to avoid reloading
this.canvas.inputDataLoaded = true;
// Determine add mode based on fit_on_add setting
const widgets = this.canvas.node.widgets;
const fitOnAddWidget = widgets ? widgets.find((w) => w.name === "fit_on_add") : null;
const addMode = (fitOnAddWidget && fitOnAddWidget.value) ? 'fit' : 'center';
// Load input image if provided and not already loaded
if (!imageLoaded && inputData.input_image) {
const img = new Image();
await new Promise((resolve, reject) => {
img.onload = resolve;
img.onerror = reject;
img.src = inputData.input_image;
});
// Add image to canvas at output area position
const layer = await this.canvas.canvasLayers.addLayerWithImage(img, {}, addMode, this.canvas.outputAreaBounds // Place at output area
);
// Don't apply mask to the layer anymore - we'll handle it separately
log.info("Input image added as new layer to canvas");
this.canvas.render();
this.canvas.saveState();
}
else {
log.debug("No input image in data");
}
// Handle mask separately (not tied to layer) if not already loaded
if (!maskLoaded && inputData.input_mask) {
log.info("Processing input mask");
// Load mask image
const maskImg = new Image();
await new Promise((resolve, reject) => {
maskImg.onload = resolve;
maskImg.onerror = reject;
maskImg.src = inputData.input_mask;
});
// Determine if we should fit the mask or use it at original size
const fitOnAddWidget = this.canvas.node.widgets.find((w) => w.name === "fit_on_add");
const shouldFit = fitOnAddWidget && fitOnAddWidget.value;
if (shouldFit && this.canvas.maskTool) {
// Scale mask to fit output area if fit_on_add is enabled
const bounds = this.canvas.outputAreaBounds;
const scale = Math.min(bounds.width / maskImg.width, bounds.height / maskImg.height);
// Create scaled mask canvas
const scaledWidth = Math.round(maskImg.width * scale);
const scaledHeight = Math.round(maskImg.height * scale);
const { canvas: scaledCanvas, ctx: scaledCtx } = createCanvas(scaledWidth, scaledHeight, '2d', { willReadFrequently: true });
if (!scaledCtx)
throw new Error("Could not create scaled mask context");
// Draw scaled mask
scaledCtx.drawImage(maskImg, 0, 0, scaledWidth, scaledHeight);
// Convert scaled canvas to image
const scaledMaskImg = new Image();
await new Promise((resolve, reject) => {
scaledMaskImg.onload = resolve;
scaledMaskImg.onerror = reject;
scaledMaskImg.src = scaledCanvas.toDataURL();
});
// Apply scaled mask to mask tool
this.canvas.maskTool.setMask(scaledMaskImg, true);
}
else if (this.canvas.maskTool) {
// Apply mask at original size
this.canvas.maskTool.setMask(maskImg, true);
}
// Save the mask state
this.canvas.canvasState.saveMaskState();
log.info("Applied input mask to mask tool" + (shouldFit ? " (fitted to output area)" : " (original size)"));
}
}
else {
log.debug("No input data from backend");
// Don't schedule another check - we'll only check when explicitly triggered
}
}
catch (error) {
log.error("Error checking for input data:", error);
// Don't schedule another check on error
}
}
scheduleInputDataCheck() {
// Schedule a retry for mask data check when nodeOutputs are not ready yet
if (this.canvas.pendingInputDataCheck) {
clearTimeout(this.canvas.pendingInputDataCheck);
}
this.canvas.pendingInputDataCheck = window.setTimeout(() => {
this.canvas.pendingInputDataCheck = null;
log.debug("Retrying input data check for mask...");
this.checkForInputData();
}, 500); // Shorter delay for mask data retry
}
scheduleDataCheck() {
if (this.canvas.pendingDataCheck) {
clearTimeout(this.canvas.pendingDataCheck);

View File

@@ -911,7 +911,9 @@ async function createCanvasWidget(node, widget, app) {
height: "100%"
}
}, [controlPanel, canvasContainer, layersPanelContainer]);
node.addDOMWidget("mainContainer", "widget", mainContainer);
if (node.addDOMWidget) {
node.addDOMWidget("mainContainer", "widget", mainContainer);
}
const openEditorBtn = controlPanel.querySelector(`#open-editor-btn-${node.id}`);
let backdrop = null;
let originalParent = null;
@@ -1000,7 +1002,11 @@ async function createCanvasWidget(node, widget, app) {
if (!window.canvasExecutionStates) {
window.canvasExecutionStates = new Map();
}
node.canvasWidget = canvas;
// Store the entire widget object, not just the canvas
node.canvasWidget = {
canvas: canvas,
panel: controlPanel
};
setTimeout(() => {
canvas.loadInitialState();
if (canvas.canvasLayersPanel) {
@@ -1017,7 +1023,7 @@ async function createCanvasWidget(node, widget, app) {
if (canvas && canvas.setPreviewVisibility) {
canvas.setPreviewVisibility(value);
}
if (node.graph && node.graph.canvas) {
if (node.graph && node.graph.canvas && node.setDirtyCanvas) {
node.setDirtyCanvas(true, true);
}
};
@@ -1096,9 +1102,117 @@ app.registerExtension({
const canvasWidget = await createCanvasWidget(this, null, app);
canvasNodeInstances.set(this.id, canvasWidget);
log.info(`Registered CanvasNode instance for ID: ${this.id}`);
// Store the canvas widget on the node
this.canvasWidget = canvasWidget;
// Check if there are already connected inputs
setTimeout(() => {
this.setDirtyCanvas(true, true);
}, 100);
if (this.inputs && this.inputs.length > 0) {
// Check if input_image (index 0) is connected
if (this.inputs[0] && this.inputs[0].link) {
log.info("Input image already connected on node creation, checking for data...");
if (canvasWidget.canvas && canvasWidget.canvas.canvasIO) {
canvasWidget.canvas.inputDataLoaded = false;
canvasWidget.canvas.canvasIO.checkForInputData();
}
}
}
if (this.setDirtyCanvas) {
this.setDirtyCanvas(true, true);
}
}, 500);
};
// Add onConnectionsChange handler to detect when inputs are connected
/**
 * React to input links being attached/detached on the node.
 * type === 1 means an INPUT slot; index 0 is input_image, index 1 is
 * input_mask. When the canvas widget is not initialized yet, the check is
 * retried with escalating delays (500/1000/2000 ms).
 * NOTE(review): this replaces rather than chains any pre-existing
 * onConnectionsChange on the prototype — confirm nothing upstream relied
 * on a prior handler.
 */
nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) {
log.info(`onConnectionsChange called: type=${type}, index=${index}, connected=${connected}`, link_info);
// Check if this is an input connection (type 1 = INPUT)
if (type === 1) {
// Get the canvas widget - it might be in different places
const canvasWidget = this.canvasWidget;
const canvas = canvasWidget?.canvas || canvasWidget;
if (!canvas || !canvas.canvasIO) {
log.warn("Canvas not ready in onConnectionsChange, scheduling retry...");
// Retry multiple times with increasing delays
const retryDelays = [500, 1000, 2000];
let retryCount = 0;
const tryAgain = () => {
// Re-read the widget each attempt; it may have been created meanwhile.
const retryCanvas = this.canvasWidget?.canvas || this.canvasWidget;
if (retryCanvas && retryCanvas.canvasIO) {
log.info("Canvas now ready, checking for input data...");
if (connected) {
retryCanvas.inputDataLoaded = false;
retryCanvas.canvasIO.checkForInputData();
}
}
else if (retryCount < retryDelays.length) {
log.warn(`Canvas still not ready, retry ${retryCount + 1}/${retryDelays.length}...`);
setTimeout(tryAgain, retryDelays[retryCount++]);
}
else {
log.error("Canvas failed to initialize after multiple retries");
}
};
setTimeout(tryAgain, retryDelays[retryCount++]);
return;
}
// Handle input_image connection (index 0)
if (index === 0) {
if (connected && link_info) {
log.info("Input image connected, marking for data check...");
// Reset the input data loaded flag to allow loading the new connection
canvas.inputDataLoaded = false;
// Also reset the last loaded image source and link ID to allow the new image
canvas.lastLoadedImageSrc = undefined;
canvas.lastLoadedLinkId = undefined;
// Mark that we have a pending input connection
canvas.hasPendingInputConnection = true;
// Check for data immediately when connected
setTimeout(() => {
log.info("Checking for input data after connection...");
canvas.canvasIO.checkForInputData();
}, 500);
}
else {
log.info("Input image disconnected");
canvas.hasPendingInputConnection = false;
// Reset when disconnected so a new connection can load
canvas.inputDataLoaded = false;
canvas.lastLoadedImageSrc = undefined;
canvas.lastLoadedLinkId = undefined;
}
}
// Handle input_mask connection (index 1)
if (index === 1) {
if (connected && link_info) {
log.info("Input mask connected");
// Mark that we have a pending mask connection
canvas.hasPendingMaskConnection = true;
// Check for data immediately when connected
setTimeout(() => {
log.info("Checking for input data after mask connection...");
canvas.canvasIO.checkForInputData();
}, 500);
}
else {
log.info("Input mask disconnected");
canvas.hasPendingMaskConnection = false;
// NOTE(review): lastLoadedMaskLinkId is not reset here, unlike the
// image branch above — confirm whether reconnecting the same mask
// link should re-import it.
}
}
}
};
// After each workflow execution, poll for freshly staged input data.
const priorOnExecuted = nodeType.prototype.onExecuted;
nodeType.prototype.onExecuted = function (message) {
log.info("Node executed, checking for input data...");
// Widget may be either a { canvas } wrapper or the Canvas object itself.
const canvasRef = this.canvasWidget?.canvas || this.canvasWidget;
// inputDataLoaded is intentionally left untouched; checkForInputData()
// itself decides whether anything new needs importing.
canvasRef?.canvasIO?.checkForInputData();
// Chain to whatever handler was installed before us, if any.
priorOnExecuted?.apply(this, arguments);
};
const onRemoved = nodeType.prototype.onRemoved;
nodeType.prototype.onRemoved = function () {

View File

@@ -424,7 +424,6 @@ export class MaskEditorIntegration {
boundsPos: { x: bounds.x, y: bounds.y },
maskSize: { width: bounds.width, height: bounds.height }
});
// Use the chunk system instead of direct canvas manipulation
this.maskTool.setMask(maskAsImage);
// Update node preview using PreviewUtils
await updateNodePreview(this.canvas, this.node, true);

View File

@@ -1445,13 +1445,44 @@ export class MaskTool {
this.isOverlayVisible = !this.isOverlayVisible;
log.info(`Mask overlay visibility toggled to: ${this.isOverlayVisible}`);
}
setMask(image) {
// Clear existing mask chunks in the output area first
setMask(image, isFromInputMask = false) {
const bounds = this.canvasInstance.outputAreaBounds;
this.clearMaskInArea(bounds.x, bounds.y, image.width, image.height);
// Add the new mask using the chunk system
this.addMask(image);
log.info(`MaskTool set new mask using chunk system at bounds (${bounds.x}, ${bounds.y})`);
if (isFromInputMask) {
// For INPUT MASK - process black background to transparent using luminance
// Center like input images
const centerX = bounds.x + (bounds.width - image.width) / 2;
const centerY = bounds.y + (bounds.height - image.height) / 2;
// Prepare mask where alpha = luminance (white = applied, black = transparent)
const { canvas: maskCanvas, ctx } = createCanvas(image.width, image.height, '2d', { willReadFrequently: true });
if (!ctx)
throw new Error("Could not create mask processing context");
ctx.drawImage(image, 0, 0);
const imgData = ctx.getImageData(0, 0, image.width, image.height);
const data = imgData.data;
for (let i = 0; i < data.length; i += 4) {
const r = data[i], g = data[i + 1], b = data[i + 2];
const lum = Math.round(0.299 * r + 0.587 * g + 0.114 * b);
data[i] = 255; // force white color (color channels ignored downstream)
data[i + 1] = 255;
data[i + 2] = 255;
data[i + 3] = lum; // alpha encodes mask strength: white -> strong, black -> 0
}
ctx.putImageData(imgData, 0, 0);
// Clear target area and apply to chunked system at centered position
this.clearMaskInArea(centerX, centerY, image.width, image.height);
this.applyMaskCanvasToChunks(maskCanvas, centerX, centerY);
// Refresh state and UI
this.updateActiveMaskCanvas(true);
this.canvasInstance.canvasState.saveMaskState();
this.canvasInstance.render();
log.info(`MaskTool set INPUT MASK at centered position (${centerX}, ${centerY}) using luminance as alpha`);
}
else {
// For SAM Detector and other sources - just clear and add without processing
this.clearMaskInArea(bounds.x, bounds.y, bounds.width, bounds.height);
this.addMask(image);
log.info(`MaskTool set mask using chunk system at bounds (${bounds.x}, ${bounds.y})`);
}
}
/**
* Clears mask data in a specific area by clearing affected chunks

View File

@@ -242,35 +242,61 @@ async function handleSAMDetectorResult(node, resultImage) {
// Try to reload the image with a fresh request
log.debug("Attempting to reload SAM result image");
const originalSrc = resultImage.src;
// Add cache-busting parameter to force fresh load
const url = new URL(originalSrc);
url.searchParams.set('_t', Date.now().toString());
await new Promise((resolve, reject) => {
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
// Copy the loaded image data to the original image
resultImage.src = img.src;
resultImage.width = img.width;
resultImage.height = img.height;
log.debug("SAM result image reloaded successfully", {
width: img.width,
height: img.height,
originalSrc: originalSrc,
newSrc: img.src
// Check if it's a data URL (base64) - don't add parameters to data URLs
if (originalSrc.startsWith('data:')) {
log.debug("Image is a data URL, skipping reload with parameters");
// For data URLs, just ensure the image is loaded
if (!resultImage.complete || resultImage.naturalWidth === 0) {
await new Promise((resolve, reject) => {
const img = new Image();
img.onload = () => {
resultImage.width = img.width;
resultImage.height = img.height;
log.debug("Data URL image loaded successfully", {
width: img.width,
height: img.height
});
resolve(img);
};
img.onerror = (error) => {
log.error("Failed to load data URL image", error);
reject(error);
};
img.src = originalSrc; // Use original src without modifications
});
resolve(img);
};
img.onerror = (error) => {
log.error("Failed to reload SAM result image", {
originalSrc: originalSrc,
newSrc: url.toString(),
error: error
});
reject(error);
};
img.src = url.toString();
});
}
}
else {
// For regular URLs, add cache-busting parameter
const url = new URL(originalSrc);
url.searchParams.set('_t', Date.now().toString());
await new Promise((resolve, reject) => {
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
// Copy the loaded image data to the original image
resultImage.src = img.src;
resultImage.width = img.width;
resultImage.height = img.height;
log.debug("SAM result image reloaded successfully", {
width: img.width,
height: img.height,
originalSrc: originalSrc,
newSrc: img.src
});
resolve(img);
};
img.onerror = (error) => {
log.error("Failed to reload SAM result image", {
originalSrc: originalSrc,
newSrc: url.toString(),
error: error
});
reject(error);
};
img.src = url.toString();
});
}
}
}
catch (error) {
@@ -290,27 +316,37 @@ async function handleSAMDetectorResult(node, resultImage) {
// Apply mask to LayerForge canvas using MaskTool.setMask method
log.debug("Checking canvas and maskTool availability", {
hasCanvas: !!canvas,
hasCanvasProperty: !!canvas.canvas,
canvasCanvasKeys: canvas.canvas ? Object.keys(canvas.canvas) : [],
hasMaskTool: !!canvas.maskTool,
hasCanvasMaskTool: !!(canvas.canvas && canvas.canvas.maskTool),
maskToolType: typeof canvas.maskTool,
canvasMaskToolType: canvas.canvas ? typeof canvas.canvas.maskTool : 'undefined',
canvasKeys: Object.keys(canvas)
});
if (!canvas.maskTool) {
// Get the actual Canvas object and its maskTool
const actualCanvas = canvas.canvas || canvas;
const maskTool = actualCanvas.maskTool;
if (!maskTool) {
log.error("MaskTool is not available. Canvas state:", {
hasCanvas: !!canvas,
hasActualCanvas: !!actualCanvas,
canvasConstructor: canvas.constructor.name,
actualCanvasConstructor: actualCanvas ? actualCanvas.constructor.name : 'undefined',
canvasKeys: Object.keys(canvas),
maskToolValue: canvas.maskTool
actualCanvasKeys: actualCanvas ? Object.keys(actualCanvas) : [],
maskToolValue: maskTool
});
throw new Error("Mask tool not available or not initialized");
}
log.debug("Applying SAM mask to canvas using addMask method");
// Use the addMask method which overlays on existing mask without clearing it
canvas.maskTool.addMask(maskAsImage);
log.debug("Applying SAM mask to canvas using setMask method");
// Use the setMask method which clears existing mask and sets new one
maskTool.setMask(maskAsImage);
// Update canvas and save state (same as MaskEditorIntegration)
canvas.render();
canvas.saveState();
actualCanvas.render();
actualCanvas.saveState();
// Update node preview using PreviewUtils
await updateNodePreview(canvas, node, true);
await updateNodePreview(actualCanvas, node, true);
log.info("SAM Detector mask applied successfully to LayerForge canvas");
// Show success notification
showSuccessNotification("SAM Detector mask applied to LayerForge!");
@@ -340,13 +376,20 @@ export function setupSAMDetectorHook(node, options) {
try {
log.info("Intercepted 'Open in SAM Detector' - automatically sending to clipspace and starting monitoring");
// Automatically send canvas to clipspace and start monitoring
if (node.canvasWidget && node.canvasWidget.canvas) {
const canvas = node.canvasWidget; // canvasWidget IS the Canvas object
// Use ImageUploadUtils to upload canvas
if (node.canvasWidget) {
const canvasWidget = node.canvasWidget;
const canvas = canvasWidget.canvas || canvasWidget; // Get actual Canvas object
// Use ImageUploadUtils to upload canvas and get server URL (Impact Pack compatibility)
const uploadResult = await uploadCanvasAsImage(canvas, {
filenamePrefix: 'layerforge-sam',
nodeId: node.id
});
log.debug("Uploaded canvas for SAM Detector", {
filename: uploadResult.filename,
imageUrl: uploadResult.imageUrl,
width: uploadResult.imageElement.width,
height: uploadResult.imageElement.height
});
// Set the image to the node for clipspace
node.imgs = [uploadResult.imageElement];
node.clipspaceImg = uploadResult.imageElement;

View File

@@ -91,6 +91,11 @@ export class Canvas {
onStateChange: (() => void) | undefined;
pendingBatchContext: any;
pendingDataCheck: number | null;
pendingInputDataCheck: number | null;
inputDataLoaded: boolean;
lastLoadedImageSrc?: string;
lastLoadedLinkId?: number;
lastLoadedMaskLinkId?: number;
previewVisible: boolean;
requestSaveState: () => void;
viewport: Viewport;
@@ -138,6 +143,8 @@ export class Canvas {
this.dataInitialized = false;
this.pendingDataCheck = null;
this.pendingInputDataCheck = null;
this.inputDataLoaded = false;
this.imageCache = new Map();
this.requestSaveState = () => {};
@@ -483,6 +490,11 @@ export class Canvas {
};
const handleExecutionStart = () => {
// Check for input data when execution starts, but don't reset the flag
log.debug('Execution started, checking for input data...');
// Don't reset inputDataLoaded here - we want to remember if we already loaded this input
this.canvasIO.checkForInputData();
if (getAutoRefreshValue()) {
lastExecutionStartTime = Date.now();
// Store a snapshot of the context for the upcoming batch
@@ -506,6 +518,10 @@ export class Canvas {
};
const handleExecutionSuccess = async () => {
// Always check for input data after execution completes
log.debug('Execution success, checking for input data...');
await this.canvasIO.checkForInputData();
if (getAutoRefreshValue()) {
log.info('Auto-refresh triggered, importing latest images.');

View File

@@ -372,6 +372,16 @@ export class CanvasIO {
try {
log.info("Starting node data initialization...");
// First check for input data from the backend (new feature)
await this.checkForInputData();
// If we've already loaded input data, don't continue with old initialization
if (this.canvas.inputDataLoaded) {
log.debug("Input data already loaded, skipping old initialization");
this.canvas.dataInitialized = true;
return;
}
if (!this.canvas.node || !(this.canvas.node as any).inputs) {
log.debug("Node or inputs not ready");
return this.scheduleDataCheck();
@@ -379,6 +389,14 @@ export class CanvasIO {
if ((this.canvas.node as any).inputs[0] && (this.canvas.node as any).inputs[0].link) {
const imageLinkId = (this.canvas.node as any).inputs[0].link;
// Check if we already loaded this link
if (this.canvas.lastLoadedLinkId === imageLinkId) {
log.debug(`Link ${imageLinkId} already loaded via new system, marking as initialized`);
this.canvas.dataInitialized = true;
return;
}
const imageData = (window as any).app.nodeOutputs[imageLinkId];
if (imageData) {
@@ -389,6 +407,9 @@ export class CanvasIO {
log.debug("Image data not available yet");
return this.scheduleDataCheck();
}
} else {
// No input connected, mark as initialized to stop repeated checks
this.canvas.dataInitialized = true;
}
if ((this.canvas.node as any).inputs[1] && (this.canvas.node as any).inputs[1].link) {
@@ -407,6 +428,373 @@ export class CanvasIO {
}
}
/**
 * Pull any pending input image / mask into the canvas.
 *
 * Resolution order:
 *  1. Image: if input slot 0 is linked and the source node already has
 *     rendered `imgs`, add the first one as a new layer immediately.
 *  2. Mask: if input slot 1 is linked and `app.nodeOutputs` holds a raw
 *     tensor for the origin node, convert it to a grayscale image and
 *     apply it via MaskTool.
 *  3. Otherwise fall back to the backend route
 *     `/layerforge/get_input_data/<nodeId>` (populated during execution)
 *     and load base64 image/mask data from there.
 *
 * Already-consumed links are tracked via `lastLoadedLinkId` /
 * `lastLoadedMaskLinkId` so the same connection is not re-imported.
 * Existing layers are never cleared — inputs are always added on top.
 */
async checkForInputData(): Promise<void> {
    try {
        const nodeId = this.canvas.node.id;
        log.info(`Checking for input data for node ${nodeId}...`);
        // Track loaded links separately for image and mask
        let imageLoaded = false;
        let maskLoaded = false;
        // First, try to get data from connected node's output if available
        if (this.canvas.node.inputs && this.canvas.node.inputs[0] && this.canvas.node.inputs[0].link) {
            const linkId = this.canvas.node.inputs[0].link;
            const graph = (this.canvas.node as any).graph;
            // Check if we already loaded this link
            if (this.canvas.lastLoadedLinkId === linkId) {
                log.debug(`Image link ${linkId} already loaded`);
                imageLoaded = true;
            } else {
                if (graph) {
                    const link = graph.links[linkId];
                    if (link) {
                        const sourceNode = graph.getNodeById(link.origin_id);
                        if (sourceNode && sourceNode.imgs && sourceNode.imgs.length > 0) {
                            // The connected node has images in its output
                            log.info("Found image in connected node's output, loading directly");
                            const img = sourceNode.imgs[0];
                            // Mark this link as loaded
                            this.canvas.lastLoadedLinkId = linkId;
                            // DON'T clear existing layers - just add a new one
                            // Determine add mode
                            const fitOnAddWidget = this.canvas.node.widgets.find((w) => w.name === "fit_on_add");
                            const addMode = (fitOnAddWidget && fitOnAddWidget.value) ? 'fit' : 'center';
                            // Add the image to canvas as a new layer
                            await this.canvas.canvasLayers.addLayerWithImage(
                                img,
                                {},
                                addMode,
                                this.canvas.outputAreaBounds
                            );
                            this.canvas.inputDataLoaded = true;
                            imageLoaded = true;
                            log.info("Input image added as new layer from connected node");
                            this.canvas.render();
                            this.canvas.saveState();
                        }
                    }
                }
            }
        }
        // Check for mask input separately
        if (this.canvas.node.inputs && this.canvas.node.inputs[1] && this.canvas.node.inputs[1].link) {
            const maskLinkId = this.canvas.node.inputs[1].link;
            // Check if we already loaded this mask link
            if (this.canvas.lastLoadedMaskLinkId === maskLinkId) {
                log.debug(`Mask link ${maskLinkId} already loaded`);
                maskLoaded = true;
            } else {
                // Try to get mask tensor from nodeOutputs using origin_id (not link id)
                const graph = (this.canvas.node as any).graph;
                let maskOutput = null;
                if (graph) {
                    const link = graph.links[maskLinkId];
                    if (link && link.origin_id) {
                        // Use origin_id to get the actual node output
                        const nodeOutput = (window as any).app?.nodeOutputs?.[link.origin_id];
                        log.debug(`Looking for mask output from origin node ${link.origin_id}, found:`, !!nodeOutput);
                        if (nodeOutput) {
                            log.debug(`Node ${link.origin_id} output structure:`, {
                                hasData: !!nodeOutput.data,
                                hasShape: !!nodeOutput.shape,
                                dataType: typeof nodeOutput.data,
                                shapeType: typeof nodeOutput.shape,
                                keys: Object.keys(nodeOutput)
                            });
                            // Only use if it has actual tensor data
                            if (nodeOutput.data && nodeOutput.shape) {
                                maskOutput = nodeOutput;
                            }
                        }
                    }
                }
                if (maskOutput && maskOutput.data && maskOutput.shape) {
                    try {
                        // Derive dimensions from shape or explicit width/height
                        let width = (maskOutput.width as number) || 0;
                        let height = (maskOutput.height as number) || 0;
                        const shape = maskOutput.shape as number[]; // e.g. [1,H,W] or [1,H,W,1]
                        if ((!width || !height) && Array.isArray(shape)) {
                            if (shape.length >= 3) {
                                height = shape[1];
                                width = shape[2];
                            } else if (shape.length === 2) {
                                height = shape[0];
                                width = shape[1];
                            }
                        }
                        if (!width || !height) {
                            throw new Error("Cannot determine mask dimensions from nodeOutputs");
                        }
                        // Determine channels count
                        let channels = 1;
                        if (Array.isArray(shape) && shape.length >= 4) {
                            channels = shape[3];
                        } else if ((maskOutput as any).channels) {
                            channels = (maskOutput as any).channels;
                        } else {
                            // Fall back to inferring channels from flat buffer length.
                            const len = (maskOutput.data as any).length;
                            channels = Math.max(1, Math.floor(len / (width * height)));
                        }
                        // Create GRAYSCALE image from tensor; RGB = luminance, A = 255 (MaskTool.setMask reads luminance)
                        const { canvas: maskCanvas, ctx } = createCanvas(width, height, '2d', { willReadFrequently: true });
                        if (!ctx) throw new Error("Could not create mask context");
                        const imgData = ctx.createImageData(width, height);
                        const arr: any = maskOutput.data;
                        // Normalize tensor values into [0,1] using min_val/max_val when present.
                        const min = (maskOutput.min_val !== undefined) ? maskOutput.min_val : 0;
                        const max = (maskOutput.max_val !== undefined) ? maskOutput.max_val : 1;
                        const denom = (max - min) || 1;
                        const pixelCount = width * height;
                        for (let i = 0; i < pixelCount; i++) {
                            const baseIndex = i * channels;
                            let v: number;
                            if (channels === 1) {
                                v = arr[i];
                            } else if (channels >= 3) {
                                // If image-like, compute luminance from RGB channels
                                const r = arr[baseIndex + 0] ?? 0;
                                const g = arr[baseIndex + 1] ?? 0;
                                const b = arr[baseIndex + 2] ?? 0;
                                v = 0.299 * r + 0.587 * g + 0.114 * b;
                            } else {
                                v = arr[baseIndex] ?? 0;
                            }
                            let norm = (v - min) / denom;
                            if (!isFinite(norm)) norm = 0;
                            norm = Math.max(0, Math.min(1, norm));
                            const lum = Math.round(norm * 255);
                            const o = i * 4;
                            imgData.data[o] = lum; // R
                            imgData.data[o + 1] = lum; // G
                            imgData.data[o + 2] = lum; // B
                            imgData.data[o + 3] = 255; // A fixed (MaskTool computes alpha from luminance)
                        }
                        ctx.putImageData(imgData, 0, 0);
                        // Convert to HTMLImageElement
                        const maskImg = await new Promise<HTMLImageElement>((resolve, reject) => {
                            const img = new Image();
                            img.onload = () => resolve(img);
                            img.onerror = reject;
                            img.src = maskCanvas.toDataURL();
                        });
                        // Respect fit_on_add (scale to output area)
                        const widgets = this.canvas.node.widgets;
                        const fitOnAddWidget = widgets ? widgets.find((w: any) => w.name === "fit_on_add") : null;
                        const shouldFit = fitOnAddWidget && fitOnAddWidget.value;
                        let finalMaskImg: HTMLImageElement = maskImg;
                        if (shouldFit) {
                            const bounds = this.canvas.outputAreaBounds;
                            // Uniform scale that fits the mask inside the output area.
                            const scale = Math.min(bounds.width / maskImg.width, bounds.height / maskImg.height);
                            const scaledWidth = Math.max(1, Math.round(maskImg.width * scale));
                            const scaledHeight = Math.max(1, Math.round(maskImg.height * scale));
                            const { canvas: scaledCanvas, ctx: scaledCtx } = createCanvas(scaledWidth, scaledHeight, '2d', { willReadFrequently: true });
                            if (!scaledCtx) throw new Error("Could not create scaled mask context");
                            scaledCtx.drawImage(maskImg, 0, 0, scaledWidth, scaledHeight);
                            finalMaskImg = await new Promise<HTMLImageElement>((resolve, reject) => {
                                const img = new Image();
                                img.onload = () => resolve(img);
                                img.onerror = reject;
                                img.src = scaledCanvas.toDataURL();
                            });
                        }
                        // Apply to MaskTool (centers internally)
                        if (this.canvas.maskTool) {
                            this.canvas.maskTool.setMask(finalMaskImg, true);
                            this.canvas.canvasState.saveMaskState();
                            this.canvas.render();
                            // Mark this mask link as loaded to avoid re-applying
                            this.canvas.lastLoadedMaskLinkId = maskLinkId;
                            maskLoaded = true;
                            log.info("Applied input mask from nodeOutputs immediately on connection" + (shouldFit ? " (fitted to output area)" : ""));
                        }
                    } catch (err) {
                        log.warn("Failed to apply mask from nodeOutputs immediately; will wait for backend input_mask after execution", err);
                    }
                } else {
                    // nodeOutputs exist but don't have tensor data yet (need workflow execution)
                    // NOTE(review): `graph` may be null on this path; `graph.links[...]` would then
                    // throw before logging — confirm and guard if reachable.
                    log.info(`Mask node ${graph.links[maskLinkId]?.origin_id} found but has no tensor data yet. Mask will be applied automatically after workflow execution.`);
                    // Don't retry - data won't be available until workflow runs
                }
            }
        }
        // If both are already loaded from connected nodes, we're done
        const nodeInputs = this.canvas.node.inputs;
        if (imageLoaded && (!nodeInputs || !nodeInputs[1] || maskLoaded)) {
            return;
        }
        // If no data from connected node, check backend
        const response = await fetch(`/layerforge/get_input_data/${nodeId}`);
        const result = await response.json();
        if (result.success && result.has_input) {
            // Check if we already loaded image data (by checking the current link)
            // NOTE(review): when lastLoadedLinkId already equals currentLinkId,
            // imageLoaded stays false and the backend image is loaded again —
            // confirm whether the intent was to set imageLoaded = true here.
            if (!imageLoaded && this.canvas.node.inputs && this.canvas.node.inputs[0] && this.canvas.node.inputs[0].link) {
                const currentLinkId = this.canvas.node.inputs[0].link;
                if (this.canvas.lastLoadedLinkId !== currentLinkId) {
                    // Mark this link as loaded
                    this.canvas.lastLoadedLinkId = currentLinkId;
                    imageLoaded = false; // Will load from backend
                }
            }
            // Check if we already loaded mask data
            if (!maskLoaded && this.canvas.node.inputs && this.canvas.node.inputs[1] && this.canvas.node.inputs[1].link) {
                const currentMaskLinkId = this.canvas.node.inputs[1].link;
                if (this.canvas.lastLoadedMaskLinkId !== currentMaskLinkId) {
                    // Mark this mask link as loaded
                    this.canvas.lastLoadedMaskLinkId = currentMaskLinkId;
                    maskLoaded = false; // Will load from backend
                }
            }
            log.info("Input data found from backend, adding to canvas");
            const inputData = result.data;
            // DON'T clear existing layers - just add new ones
            // Mark that we've loaded input data to avoid reloading
            this.canvas.inputDataLoaded = true;
            // Determine add mode based on fit_on_add setting
            const widgets = this.canvas.node.widgets;
            const fitOnAddWidget = widgets ? widgets.find((w: any) => w.name === "fit_on_add") : null;
            const addMode = (fitOnAddWidget && fitOnAddWidget.value) ? 'fit' : 'center';
            // Load input image if provided and not already loaded
            if (!imageLoaded && inputData.input_image) {
                const img = new Image();
                await new Promise((resolve, reject) => {
                    img.onload = resolve;
                    img.onerror = reject;
                    img.src = inputData.input_image;
                });
                // Add image to canvas at output area position
                const layer = await this.canvas.canvasLayers.addLayerWithImage(
                    img,
                    {},
                    addMode,
                    this.canvas.outputAreaBounds // Place at output area
                );
                // Don't apply mask to the layer anymore - we'll handle it separately
                log.info("Input image added as new layer to canvas");
                this.canvas.render();
                this.canvas.saveState();
            } else {
                log.debug("No input image in data");
            }
            // Handle mask separately (not tied to layer) if not already loaded
            if (!maskLoaded && inputData.input_mask) {
                log.info("Processing input mask");
                // Load mask image
                const maskImg = new Image();
                await new Promise((resolve, reject) => {
                    maskImg.onload = resolve;
                    maskImg.onerror = reject;
                    maskImg.src = inputData.input_mask;
                });
                // Determine if we should fit the mask or use it at original size
                // (shadows the outer fitOnAddWidget intentionally).
                const fitOnAddWidget = this.canvas.node.widgets.find((w) => w.name === "fit_on_add");
                const shouldFit = fitOnAddWidget && fitOnAddWidget.value;
                if (shouldFit && this.canvas.maskTool) {
                    // Scale mask to fit output area if fit_on_add is enabled
                    const bounds = this.canvas.outputAreaBounds;
                    const scale = Math.min(
                        bounds.width / maskImg.width,
                        bounds.height / maskImg.height
                    );
                    // Create scaled mask canvas
                    const scaledWidth = Math.round(maskImg.width * scale);
                    const scaledHeight = Math.round(maskImg.height * scale);
                    const { canvas: scaledCanvas, ctx: scaledCtx } = createCanvas(
                        scaledWidth,
                        scaledHeight,
                        '2d',
                        { willReadFrequently: true }
                    );
                    if (!scaledCtx) throw new Error("Could not create scaled mask context");
                    // Draw scaled mask
                    scaledCtx.drawImage(maskImg, 0, 0, scaledWidth, scaledHeight);
                    // Convert scaled canvas to image
                    const scaledMaskImg = new Image();
                    await new Promise((resolve, reject) => {
                        scaledMaskImg.onload = resolve;
                        scaledMaskImg.onerror = reject;
                        scaledMaskImg.src = scaledCanvas.toDataURL();
                    });
                    // Apply scaled mask to mask tool
                    this.canvas.maskTool.setMask(scaledMaskImg, true);
                } else if (this.canvas.maskTool) {
                    // Apply mask at original size
                    this.canvas.maskTool.setMask(maskImg, true);
                }
                // Save the mask state
                this.canvas.canvasState.saveMaskState();
                log.info("Applied input mask to mask tool" + (shouldFit ? " (fitted to output area)" : " (original size)"));
            }
        } else {
            log.debug("No input data from backend");
            // Don't schedule another check - we'll only check when explicitly triggered
        }
    } catch (error) {
        log.error("Error checking for input data:", error);
        // Don't schedule another check on error
    }
}
scheduleInputDataCheck(): void {
// Schedule a retry for mask data check when nodeOutputs are not ready yet
if (this.canvas.pendingInputDataCheck) {
clearTimeout(this.canvas.pendingInputDataCheck);
}
this.canvas.pendingInputDataCheck = window.setTimeout(() => {
this.canvas.pendingInputDataCheck = null;
log.debug("Retrying input data check for mask...");
this.checkForInputData();
}, 500); // Shorter delay for mask data retry
}
scheduleDataCheck(): void {
if (this.canvas.pendingDataCheck) {
clearTimeout(this.canvas.pendingDataCheck);

View File

@@ -1029,7 +1029,9 @@ $el("label.clipboard-switch.mask-switch", {
}
}, [controlPanel, canvasContainer, layersPanelContainer]) as HTMLDivElement;
node.addDOMWidget("mainContainer", "widget", mainContainer);
if (node.addDOMWidget) {
node.addDOMWidget("mainContainer", "widget", mainContainer);
}
const openEditorBtn = controlPanel.querySelector(`#open-editor-btn-${node.id}`) as HTMLButtonElement;
let backdrop: HTMLDivElement | null = null;
@@ -1141,7 +1143,12 @@ $el("label.clipboard-switch.mask-switch", {
if (!(window as any).canvasExecutionStates) {
(window as any).canvasExecutionStates = new Map<string, any>();
}
(node as any).canvasWidget = canvas;
// Store the entire widget object, not just the canvas
(node as any).canvasWidget = {
canvas: canvas,
panel: controlPanel
};
setTimeout(() => {
canvas.loadInitialState();
@@ -1163,7 +1170,7 @@ $el("label.clipboard-switch.mask-switch", {
canvas.setPreviewVisibility(value);
}
if ((node as any).graph && (node as any).graph.canvas) {
if ((node as any).graph && (node as any).graph.canvas && node.setDirtyCanvas) {
node.setDirtyCanvas(true, true);
}
};
@@ -1255,10 +1262,125 @@ app.registerExtension({
const canvasWidget = await createCanvasWidget(this, null, app);
canvasNodeInstances.set(this.id, canvasWidget);
log.info(`Registered CanvasNode instance for ID: ${this.id}`);
// Store the canvas widget on the node
(this as any).canvasWidget = canvasWidget;
// Check if there are already connected inputs
setTimeout(() => {
this.setDirtyCanvas(true, true);
}, 100);
if (this.inputs && this.inputs.length > 0) {
// Check if input_image (index 0) is connected
if (this.inputs[0] && this.inputs[0].link) {
log.info("Input image already connected on node creation, checking for data...");
if (canvasWidget.canvas && canvasWidget.canvas.canvasIO) {
canvasWidget.canvas.inputDataLoaded = false;
canvasWidget.canvas.canvasIO.checkForInputData();
}
}
}
if (this.setDirtyCanvas) {
this.setDirtyCanvas(true, true);
}
}, 500);
};
// Add onConnectionsChange handler to detect when inputs are connected
/**
 * LiteGraph connection hook: reacts to input slot 0 (input_image) and
 * slot 1 (input_mask) being connected/disconnected. On connect it resets
 * the dedup bookkeeping and schedules checkForInputData(); if the canvas
 * widget is not initialized yet, it retries with increasing delays.
 */
nodeType.prototype.onConnectionsChange = function (this: ComfyNode, type: number, index: number, connected: boolean, link_info: any) {
    log.info(`onConnectionsChange called: type=${type}, index=${index}, connected=${connected}`, link_info);
    // Check if this is an input connection (type 1 = INPUT)
    if (type === 1) {
        // Get the canvas widget - it might be in different places
        // (either the wrapper object { canvas, panel } or the Canvas itself).
        const canvasWidget = (this as any).canvasWidget;
        const canvas = canvasWidget?.canvas || canvasWidget;
        if (!canvas || !canvas.canvasIO) {
            log.warn("Canvas not ready in onConnectionsChange, scheduling retry...");
            // Retry multiple times with increasing delays.
            // The first attempt below consumes delays[0] (500ms); subsequent
            // failures consume 1000ms and 2000ms before giving up.
            const retryDelays = [500, 1000, 2000];
            let retryCount = 0;
            const tryAgain = () => {
                const retryCanvas = (this as any).canvasWidget?.canvas || (this as any).canvasWidget;
                if (retryCanvas && retryCanvas.canvasIO) {
                    log.info("Canvas now ready, checking for input data...");
                    if (connected) {
                        retryCanvas.inputDataLoaded = false;
                        retryCanvas.canvasIO.checkForInputData();
                    }
                } else if (retryCount < retryDelays.length) {
                    log.warn(`Canvas still not ready, retry ${retryCount + 1}/${retryDelays.length}...`);
                    setTimeout(tryAgain, retryDelays[retryCount++]);
                } else {
                    log.error("Canvas failed to initialize after multiple retries");
                }
            };
            setTimeout(tryAgain, retryDelays[retryCount++]);
            return;
        }
        // Handle input_image connection (index 0)
        if (index === 0) {
            if (connected && link_info) {
                log.info("Input image connected, marking for data check...");
                // Reset the input data loaded flag to allow loading the new connection
                canvas.inputDataLoaded = false;
                // Also reset the last loaded image source and link ID to allow the new image
                canvas.lastLoadedImageSrc = undefined;
                canvas.lastLoadedLinkId = undefined;
                // Mark that we have a pending input connection
                canvas.hasPendingInputConnection = true;
                // Check for data immediately when connected
                // (small delay lets the graph finish wiring the link first).
                setTimeout(() => {
                    log.info("Checking for input data after connection...");
                    canvas.canvasIO.checkForInputData();
                }, 500);
            } else {
                log.info("Input image disconnected");
                canvas.hasPendingInputConnection = false;
                // Reset when disconnected so a new connection can load
                canvas.inputDataLoaded = false;
                canvas.lastLoadedImageSrc = undefined;
                canvas.lastLoadedLinkId = undefined;
            }
        }
        // Handle input_mask connection (index 1)
        if (index === 1) {
            if (connected && link_info) {
                log.info("Input mask connected");
                // Mark that we have a pending mask connection
                canvas.hasPendingMaskConnection = true;
                // Check for data immediately when connected
                setTimeout(() => {
                    log.info("Checking for input data after mask connection...");
                    canvas.canvasIO.checkForInputData();
                }, 500);
            } else {
                log.info("Input mask disconnected");
                canvas.hasPendingMaskConnection = false;
            }
        }
    }
};
// Add onExecuted handler to check for input data after workflow execution
// Wrap onExecuted so freshly produced backend input data is pulled into
// the canvas after each workflow run, while preserving any pre-existing
// onExecuted behavior on the node type.
const originalOnExecuted = nodeType.prototype.onExecuted;
nodeType.prototype.onExecuted = function (this: ComfyNode, message: any) {
    log.info("Node executed, checking for input data...");
    // canvasWidget may be the wrapper { canvas, panel } or the Canvas itself.
    const widget = (this as any).canvasWidget;
    const canvas = widget?.canvas || widget;
    if (canvas && canvas.canvasIO) {
        // inputDataLoaded is deliberately NOT reset here: checkForInputData
        // itself detects whether the current links were already consumed.
        canvas.canvasIO.checkForInputData();
    }
    // Chain to the original handler if one was installed.
    originalOnExecuted?.apply(this, arguments as any);
};
const onRemoved = nodeType.prototype.onRemoved;

View File

@@ -507,7 +507,6 @@ export class MaskEditorIntegration {
maskSize: {width: bounds.width, height: bounds.height}
});
// Use the chunk system instead of direct canvas manipulation
this.maskTool.setMask(maskAsImage);
// Update node preview using PreviewUtils

View File

@@ -1793,15 +1793,47 @@ export class MaskTool {
log.info(`Mask overlay visibility toggled to: ${this.isOverlayVisible}`);
}
setMask(image: HTMLImageElement): void {
// Clear existing mask chunks in the output area first
setMask(image: HTMLImageElement, isFromInputMask: boolean = false): void {
const bounds = this.canvasInstance.outputAreaBounds;
this.clearMaskInArea(bounds.x, bounds.y, image.width, image.height);
// Add the new mask using the chunk system
this.addMask(image);
log.info(`MaskTool set new mask using chunk system at bounds (${bounds.x}, ${bounds.y})`);
if (isFromInputMask) {
// For INPUT MASK - process black background to transparent using luminance
// Center like input images
const centerX = bounds.x + (bounds.width - image.width) / 2;
const centerY = bounds.y + (bounds.height - image.height) / 2;
// Prepare mask where alpha = luminance (white = applied, black = transparent)
const { canvas: maskCanvas, ctx } = createCanvas(image.width, image.height, '2d', { willReadFrequently: true });
if (!ctx) throw new Error("Could not create mask processing context");
ctx.drawImage(image, 0, 0);
const imgData = ctx.getImageData(0, 0, image.width, image.height);
const data = imgData.data;
for (let i = 0; i < data.length; i += 4) {
const r = data[i], g = data[i + 1], b = data[i + 2];
const lum = Math.round(0.299 * r + 0.587 * g + 0.114 * b);
data[i] = 255; // force white color (color channels ignored downstream)
data[i + 1] = 255;
data[i + 2] = 255;
data[i + 3] = lum; // alpha encodes mask strength: white -> strong, black -> 0
}
ctx.putImageData(imgData, 0, 0);
// Clear target area and apply to chunked system at centered position
this.clearMaskInArea(centerX, centerY, image.width, image.height);
this.applyMaskCanvasToChunks(maskCanvas, centerX, centerY);
// Refresh state and UI
this.updateActiveMaskCanvas(true);
this.canvasInstance.canvasState.saveMaskState();
this.canvasInstance.render();
log.info(`MaskTool set INPUT MASK at centered position (${centerX}, ${centerY}) using luminance as alpha`);
} else {
// For SAM Detector and other sources - just clear and add without processing
this.clearMaskInArea(bounds.x, bounds.y, bounds.width, bounds.height);
this.addMask(image);
log.info(`MaskTool set mask using chunk system at bounds (${bounds.x}, ${bounds.y})`);
}
}
/**

View File

@@ -282,36 +282,61 @@ async function handleSAMDetectorResult(node: ComfyNode, resultImage: HTMLImageEl
log.debug("Attempting to reload SAM result image");
const originalSrc = resultImage.src;
// Add cache-busting parameter to force fresh load
const url = new URL(originalSrc);
url.searchParams.set('_t', Date.now().toString());
await new Promise((resolve, reject) => {
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
// Copy the loaded image data to the original image
resultImage.src = img.src;
resultImage.width = img.width;
resultImage.height = img.height;
log.debug("SAM result image reloaded successfully", {
width: img.width,
height: img.height,
originalSrc: originalSrc,
newSrc: img.src
// Check if it's a data URL (base64) - don't add parameters to data URLs
if (originalSrc.startsWith('data:')) {
log.debug("Image is a data URL, skipping reload with parameters");
// For data URLs, just ensure the image is loaded
if (!resultImage.complete || resultImage.naturalWidth === 0) {
await new Promise((resolve, reject) => {
const img = new Image();
img.onload = () => {
resultImage.width = img.width;
resultImage.height = img.height;
log.debug("Data URL image loaded successfully", {
width: img.width,
height: img.height
});
resolve(img);
};
img.onerror = (error) => {
log.error("Failed to load data URL image", error);
reject(error);
};
img.src = originalSrc; // Use original src without modifications
});
resolve(img);
};
img.onerror = (error) => {
log.error("Failed to reload SAM result image", {
originalSrc: originalSrc,
newSrc: url.toString(),
error: error
});
reject(error);
};
img.src = url.toString();
});
}
} else {
// For regular URLs, add cache-busting parameter
const url = new URL(originalSrc);
url.searchParams.set('_t', Date.now().toString());
await new Promise((resolve, reject) => {
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
// Copy the loaded image data to the original image
resultImage.src = img.src;
resultImage.width = img.width;
resultImage.height = img.height;
log.debug("SAM result image reloaded successfully", {
width: img.width,
height: img.height,
originalSrc: originalSrc,
newSrc: img.src
});
resolve(img);
};
img.onerror = (error) => {
log.error("Failed to reload SAM result image", {
originalSrc: originalSrc,
newSrc: url.toString(),
error: error
});
reject(error);
};
img.src = url.toString();
});
}
}
} catch (error) {
log.error("Failed to load image from SAM Detector.", error);
@@ -333,32 +358,43 @@ async function handleSAMDetectorResult(node: ComfyNode, resultImage: HTMLImageEl
// Apply mask to LayerForge canvas using MaskTool.setMask method
log.debug("Checking canvas and maskTool availability", {
hasCanvas: !!canvas,
hasCanvasProperty: !!canvas.canvas,
canvasCanvasKeys: canvas.canvas ? Object.keys(canvas.canvas) : [],
hasMaskTool: !!canvas.maskTool,
hasCanvasMaskTool: !!(canvas.canvas && canvas.canvas.maskTool),
maskToolType: typeof canvas.maskTool,
canvasMaskToolType: canvas.canvas ? typeof canvas.canvas.maskTool : 'undefined',
canvasKeys: Object.keys(canvas)
});
if (!canvas.maskTool) {
// Get the actual Canvas object and its maskTool
const actualCanvas = canvas.canvas || canvas;
const maskTool = actualCanvas.maskTool;
if (!maskTool) {
log.error("MaskTool is not available. Canvas state:", {
hasCanvas: !!canvas,
hasActualCanvas: !!actualCanvas,
canvasConstructor: canvas.constructor.name,
actualCanvasConstructor: actualCanvas ? actualCanvas.constructor.name : 'undefined',
canvasKeys: Object.keys(canvas),
maskToolValue: canvas.maskTool
actualCanvasKeys: actualCanvas ? Object.keys(actualCanvas) : [],
maskToolValue: maskTool
});
throw new Error("Mask tool not available or not initialized");
}
log.debug("Applying SAM mask to canvas using addMask method");
log.debug("Applying SAM mask to canvas using setMask method");
// Use the addMask method which overlays on existing mask without clearing it
canvas.maskTool.addMask(maskAsImage);
// Use the setMask method which clears existing mask and sets new one
maskTool.setMask(maskAsImage);
// Update canvas and save state (same as MaskEditorIntegration)
canvas.render();
canvas.saveState();
actualCanvas.render();
actualCanvas.saveState();
// Update node preview using PreviewUtils
await updateNodePreview(canvas, node, true);
await updateNodePreview(actualCanvas, node, true);
log.info("SAM Detector mask applied successfully to LayerForge canvas");
@@ -399,15 +435,23 @@ export function setupSAMDetectorHook(node: ComfyNode, options: any[]) {
log.info("Intercepted 'Open in SAM Detector' - automatically sending to clipspace and starting monitoring");
// Automatically send canvas to clipspace and start monitoring
if ((node as any).canvasWidget && (node as any).canvasWidget.canvas) {
const canvas = (node as any).canvasWidget; // canvasWidget IS the Canvas object
if ((node as any).canvasWidget) {
const canvasWidget = (node as any).canvasWidget;
const canvas = canvasWidget.canvas || canvasWidget; // Get actual Canvas object
// Use ImageUploadUtils to upload canvas
// Use ImageUploadUtils to upload canvas and get server URL (Impact Pack compatibility)
const uploadResult = await uploadCanvasAsImage(canvas, {
filenamePrefix: 'layerforge-sam',
nodeId: node.id
});
log.debug("Uploaded canvas for SAM Detector", {
filename: uploadResult.filename,
imageUrl: uploadResult.imageUrl,
width: uploadResult.imageElement.width,
height: uploadResult.imageElement.height
});
// Set the image to the node for clipspace
node.imgs = [uploadResult.imageElement];
(node as any).clipspaceImg = uploadResult.imageElement;

View File

@@ -1,6 +1,14 @@
import type { Canvas as CanvasClass } from './Canvas';
import type { CanvasLayers } from './CanvasLayers';
/**
 * Minimal shape of a ComfyUI node widget as consumed by LayerForge
 * (e.g. the "fit_on_add" toggle looked up via `widgets.find(...)`).
 */
export interface ComfyWidget {
    /** Widget identifier used for lookups, e.g. "fit_on_add". */
    name: string;
    /** Widget kind as registered with ComfyUI (e.g. "toggle", "number"). */
    type: string;
    /** Current widget value; shape depends on the widget type. */
    value: any;
    /** Invoked by the UI when the value changes. */
    callback?: (value: any) => void;
    /** Widget-type-specific configuration passed at creation time. */
    options?: any;
}
export interface Layer {
id: string;
image: HTMLImageElement;
@@ -32,15 +40,16 @@ export interface Layer {
export interface ComfyNode {
id: number;
type: string;
widgets: ComfyWidget[];
imgs?: HTMLImageElement[];
widgets: any[];
size: [number, number];
graph: any;
canvasWidget?: any;
size?: [number, number];
onResize?: () => void;
addDOMWidget: (name: string, type: string, element: HTMLElement, options?: any) => any;
addWidget: (type: string, name: string, value: any, callback?: (value: any) => void, options?: any) => any;
setDirtyCanvas: (force: boolean, dirty: boolean) => void;
setDirtyCanvas?: (dirty: boolean, propagate: boolean) => void;
graph?: any;
onRemoved?: () => void;
addDOMWidget?: (name: string, type: string, element: HTMLElement) => void;
inputs?: Array<{ link: any }>;
}
declare global {
@@ -79,8 +88,13 @@ export interface Canvas {
imageCache: any;
dataInitialized: boolean;
pendingDataCheck: number | null;
pendingInputDataCheck: number | null;
pendingBatchContext: any;
canvasLayers: any;
inputDataLoaded: boolean;
lastLoadedLinkId: any;
lastLoadedMaskLinkId: any;
outputAreaBounds: OutputAreaBounds;
saveState: () => void;
render: () => void;
updateSelection: (layers: Layer[]) => void;