- Migrate 11 character JSONs from old wardrobe keys to _BODY_GROUP_KEYS format
- Add is_favourite/is_nsfw columns to Preset model
- Add HTTP response validation and timeouts to ComfyUI client
- Add path traversal protection on replace cover route
- Deduplicate services/mcp.py (4 functions → 2 generic + 2 wrappers)
- Extract apply_library_filters() and clean_html_text() shared helpers
- Add named constants for 17 ComfyUI workflow node IDs
- Fix bare except clauses in services/llm.py
- Fix tags schema in ensure_default_outfit() (list → dict)
- Convert f-string logging to lazy % formatting
- Add 5-minute polling timeout to frontend waitForJob()
- Improve migration error handling (non-duplicate errors log at WARNING)
- Update CLAUDE.md to reflect all changes

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
632 lines
25 KiB
Python
632 lines
25 KiB
Python
import json
|
|
import logging
|
|
import random
|
|
|
|
from flask import session
|
|
from models import Settings, Checkpoint
|
|
from utils import _resolve_lora_weight
|
|
from services.prompts import _cross_dedup_prompts
|
|
|
|
logger = logging.getLogger('gaze')
|
|
|
|
# ---------------------------------------------------------------------------
# ComfyUI workflow node IDs (must match comfy_workflow.json)
# ---------------------------------------------------------------------------
NODE_KSAMPLER = "3"       # main KSampler: seed/steps/cfg/sampler_name/scheduler/denoise
NODE_CHECKPOINT = "4"     # checkpoint loader; outputs: 0=model, 1=clip, 2=vae
NODE_LATENT = "5"         # empty latent: width/height
NODE_POSITIVE = "6"       # positive prompt CLIPTextEncode ('text' input)
NODE_NEGATIVE = "7"       # negative prompt CLIPTextEncode, shared by detailers
NODE_VAE_DECODE = "8"     # VAEDecode; output 0 feeds the detailer chain
NODE_SAVE = "9"           # save/output node (not referenced directly in this module)
NODE_FACE_DETAILER = "11" # single-char FaceDetailer (removed in multi-char mode)
NODE_HAND_DETAILER = "13" # hand detailer; 'image' input is rewired in multi-char mode
NODE_FACE_PROMPT = "14"   # face prompt ({{FACE_PROMPT}} placeholder)
NODE_HAND_PROMPT = "15"   # hand prompt ({{HAND_PROMPT}} placeholder)
NODE_LORA_CHAR = "16"     # LoRA chain link 1: character (or look) LoRA
NODE_LORA_OUTFIT = "17"   # LoRA chain link 2: outfit LoRA
NODE_LORA_ACTION = "18"   # LoRA chain link 3: action LoRA
NODE_LORA_STYLE = "19"    # LoRA chain link 4: style/detailer/scene LoRA
NODE_LORA_CHAR_B = "20"   # LoRA chain link 5: second character LoRA (multi-char)
NODE_VAE_LOADER = "21"    # VAELoader injected when the checkpoint's VAE is not integrated

# Node IDs used by DetailerForEach in multi-char mode
_SEGS_DETAILER_NODES = ['46', '47', '53', '54']
# Node IDs for per-character CLIP prompts in multi-char mode
_SEGS_PROMPT_NODES = ['44', '45', '51', '52']
|
|
|
|
|
|
def _log_workflow_prompts(label, workflow):
    """Log the final assembled ComfyUI prompts in a consistent, readable block.

    Purely diagnostic: emits one multi-line INFO record summarising the
    checkpoint, sampler settings, LoRA chain, detailer configuration and
    prompts. Uses only safe lookups so a partially-built or malformed
    workflow can never crash generation just because we tried to log it
    (the original indexed workflow[node]['inputs'] directly and could
    raise KeyError).
    """
    def _inp(node_id, key, default=''):
        # Safe equivalent of workflow[node_id]['inputs'][key].
        return workflow.get(node_id, {}).get('inputs', {}).get(key, default)

    sep = "=" * 72
    active_loras = []
    lora_details = []

    # Collect detailed LoRA information from every LoRA node that is wired in
    for node_id, label_str in [(NODE_LORA_CHAR, "char/look"), (NODE_LORA_OUTFIT, "outfit"), (NODE_LORA_ACTION, "action"), (NODE_LORA_STYLE, "style/detail/scene"), (NODE_LORA_CHAR_B, "char_b")]:
        if node_id not in workflow:
            continue
        name = _inp(node_id, "lora_name", "")
        if not name:
            continue
        strength_model = _inp(node_id, "strength_model", "?")
        strength_clip = _inp(node_id, "strength_clip", "?")

        # Short version for summary
        if isinstance(strength_model, float):
            active_loras.append(f"{label_str}:{name.split('/')[-1]}@{strength_model:.3f}")
        else:
            active_loras.append(f"{label_str}:{name.split('/')[-1]}@{strength_model}")

        # Detailed version
        lora_details.append(f" Node {node_id} ({label_str}): {name}")
        lora_details.append(f" strength_model={strength_model}, strength_clip={strength_clip}")

    # Extract VAE information — an injected VAELoader overrides integrated VAE
    vae_info = "(integrated)"
    if NODE_VAE_LOADER in workflow:
        vae_info = _inp(NODE_VAE_LOADER, 'vae_name', '(custom)')

    # Extract adetailer information: single-char FaceDetailer nodes (11, 13)
    # plus multi-char SEGS DetailerForEach nodes (46/47/53/54)
    adetailer_info = []
    for node_id, node_name in [(NODE_FACE_DETAILER, "Face"), (NODE_HAND_DETAILER, "Hand"),
                               ("46", "Person A"), ("47", "Person B"),
                               ("53", "Face A"), ("54", "Face B")]:
        if node_id in workflow:
            adetailer_info.append(f" {node_name} (Node {node_id}): steps={_inp(node_id, 'steps', '?')}, "
                                  f"cfg={_inp(node_id, 'cfg', '?')}, "
                                  f"denoise={_inp(node_id, 'denoise', '?')}")

    face_text = _inp(NODE_FACE_PROMPT, 'text', '')
    hand_text = _inp(NODE_HAND_PROMPT, 'text', '')

    lines = [
        sep,
        f" WORKFLOW PROMPTS [{label}]",
        sep,
        " MODEL CONFIGURATION:",
        f" Checkpoint : {_inp(NODE_CHECKPOINT, 'ckpt_name', '(not set)')}",
        f" VAE : {vae_info}",
        "",
        " GENERATION SETTINGS:",
        f" Seed : {_inp(NODE_KSAMPLER, 'seed', '(not set)')}",
        f" Resolution : {_inp(NODE_LATENT, 'width', '?')} x {_inp(NODE_LATENT, 'height', '?')}",
        f" Sampler : {_inp(NODE_KSAMPLER, 'sampler_name', '?')} / {_inp(NODE_KSAMPLER, 'scheduler', '?')}",
        f" Steps : {_inp(NODE_KSAMPLER, 'steps', '?')}",
        f" CFG Scale : {_inp(NODE_KSAMPLER, 'cfg', '?')}",
        f" Denoise : {_inp(NODE_KSAMPLER, 'denoise', '1.0')}",
    ]

    # Add LoRA details
    lines.append("")
    if active_loras:
        lines.append(" LORA CONFIGURATION:")
        lines.extend(lora_details)
    else:
        lines.append(" LORA CONFIGURATION: (none)")

    # Add adetailer details
    if adetailer_info:
        lines.append("")
        lines.append(" ADETAILER CONFIGURATION:")
        lines.extend(adetailer_info)

    # Add prompts
    lines.extend([
        "",
        " PROMPTS:",
        f" [+] Positive : {_inp(NODE_POSITIVE, 'text', '')}",
        f" [-] Negative : {_inp(NODE_NEGATIVE, 'text', '')}",
    ])

    if face_text:
        lines.append(f" [F] Face : {face_text}")
    if hand_text:
        lines.append(f" [H] Hand : {hand_text}")

    # Multi-char per-character prompts
    for node_id, lbl in [("44", "Person A"), ("45", "Person B"), ("51", "Face A"), ("52", "Face B")]:
        txt = _inp(node_id, 'text', '')
        if txt:
            lines.append(f" [{lbl}] : {txt}")

    lines.append(sep)
    logger.info("\n%s", "\n".join(lines))
|
|
|
|
|
|
def _apply_checkpoint_settings(workflow, ckpt_data):
    """Apply checkpoint-specific sampler/prompt/VAE settings to the workflow.

    Reads per-checkpoint overrides from ``ckpt_data`` (steps, cfg, sampler,
    scheduler, base prompts, VAE) and pushes them into the relevant nodes.
    Falsy/absent values leave the workflow's defaults alone. Mutates
    ``workflow`` in place and returns it.
    """
    # Collect sampler overrides once, then fan out to every sampling node.
    overrides = {}
    if ckpt_data.get('steps'):
        overrides['steps'] = int(ckpt_data['steps'])
    if ckpt_data.get('cfg'):
        overrides['cfg'] = float(ckpt_data['cfg'])
    if ckpt_data.get('sampler_name'):
        overrides['sampler_name'] = ckpt_data['sampler_name']
    if ckpt_data.get('scheduler'):
        overrides['scheduler'] = ckpt_data['scheduler']

    # Main KSampler (node 3), face/hand detailers (nodes 11, 13) and the
    # multi-char SEGS detailers all share the same sampler settings.
    for node_id in [NODE_KSAMPLER, NODE_FACE_DETAILER, NODE_HAND_DETAILER] + _SEGS_DETAILER_NODES:
        if node_id in workflow:
            workflow[node_id]['inputs'].update(overrides)

    # Prepend base_positive to all positive prompt nodes.
    base_positive = ckpt_data.get('base_positive', '')
    if base_positive:
        for node_id in [NODE_POSITIVE, NODE_FACE_PROMPT, NODE_HAND_PROMPT] + _SEGS_PROMPT_NODES:
            if node_id in workflow:
                node_inputs = workflow[node_id]['inputs']
                node_inputs['text'] = f"{base_positive}, {node_inputs['text']}"

    # Append base_negative to the negative prompt (shared by main + detailers via node 7).
    base_negative = ckpt_data.get('base_negative', '')
    if base_negative and NODE_NEGATIVE in workflow:
        neg_inputs = workflow[NODE_NEGATIVE]['inputs']
        neg_inputs['text'] = f"{neg_inputs['text']}, {base_negative}"

    # VAE: anything other than 'integrated' gets a dedicated VAELoader node,
    # and every VAE consumer present is rewired to it.
    vae = ckpt_data.get('vae', 'integrated')
    if vae and vae != 'integrated':
        workflow[NODE_VAE_LOADER] = {
            'inputs': {'vae_name': vae},
            'class_type': 'VAELoader'
        }
        for node_id in [NODE_VAE_DECODE, NODE_FACE_DETAILER, NODE_HAND_DETAILER] + _SEGS_DETAILER_NODES:
            if node_id in workflow:
                workflow[node_id]['inputs']['vae'] = [NODE_VAE_LOADER, 0]

    return workflow
|
|
|
|
|
|
def _get_default_checkpoint():
    """Return (checkpoint_path, checkpoint_data) for the default checkpoint.

    Resolution order: Flask session -> Settings row in the database -> the
    ckpt_name baked into comfy_workflow.json. Returns (None, None) when no
    default can be determined, and (path, {}) when the path is known but
    has no matching Checkpoint row in the database.
    """
    ckpt_path = session.get('default_checkpoint')

    # If no session checkpoint, try to read from database Settings
    if not ckpt_path:
        settings = Settings.query.first()
        if settings and settings.default_checkpoint:
            ckpt_path = settings.default_checkpoint
            logger.debug("Loaded default checkpoint from database: %s", ckpt_path)

    # If still no checkpoint, try to read from the workflow file
    if not ckpt_path:
        try:
            with open('comfy_workflow.json', 'r') as f:
                workflow = json.load(f)
        except (OSError, ValueError) as e:
            # json.JSONDecodeError subclasses ValueError. This is best-effort:
            # a missing or corrupt workflow file just means no fallback default,
            # but it should not be silently invisible (was `except: pass`).
            logger.warning("Could not read default checkpoint from workflow file: %s", e)
        else:
            ckpt_path = workflow.get(NODE_CHECKPOINT, {}).get('inputs', {}).get('ckpt_name')
            logger.debug("Loaded default checkpoint from workflow file: %s", ckpt_path)

    if not ckpt_path:
        return None, None

    ckpt = Checkpoint.query.filter_by(checkpoint_path=ckpt_path).first()
    if not ckpt:
        # Checkpoint path exists but not in DB - return path with empty data
        return ckpt_path, {}
    return ckpt.checkpoint_path, ckpt.data or {}
|
|
|
|
|
|
def _inject_multi_char_detailers(workflow, prompts, model_source, clip_source):
    """Replace single FaceDetailer (node 11) with per-character SEGS-based detailers.

    Injects person detection + face detection pipelines that order detections
    left-to-right and apply character A's prompt to the left person/face and
    character B's prompt to the right person/face.

    Nodes added:
      40    - Person detector (UltralyticsDetectorProvider)
      41    - Person SEGS detection (BboxDetectorSEGS)
      42/43 - Filters: left person (char A) / right person (char B)
      44/45 - CLIPTextEncode: char A / char B body prompts
      46/47 - DetailerForEach: person A / person B
      48    - Face SEGS detection (BboxDetectorSEGS, reuses face detector node 10)
      49/50 - Filters: left face (char A) / right face (char B)
      51/52 - CLIPTextEncode: char A / char B face prompts
      53/54 - DetailerForEach: face A / face B

    Image flow: VAEDecode(8) -> PersonA(46) -> PersonB(47) -> FaceA(53) -> FaceB(54) -> Hand(13)
    """
    vae_source = [NODE_CHECKPOINT, 2]

    def _segs_detect(detector, image):
        # BboxDetectorSEGS with the shared detection thresholds.
        return {
            'inputs': {
                'bbox_detector': detector,
                'image': image,
                'threshold': 0.5,
                'dilation': 10,
                'crop_factor': 3.0,
                'drop_size': 10,
                'labels': 'all',
            },
            'class_type': 'BboxDetectorSEGS'
        }

    def _pick(segs_node, index):
        # Order segments by x1 ascending (left to right), keep the one at `index`.
        return {
            'inputs': {
                'segs': [segs_node, 0],
                'target': 'x1',
                'order': False,
                'take_start': index,
                'take_count': 1,
            },
            'class_type': 'ImpactSEGSOrderedFilter'
        }

    def _encode(prompt_key):
        # CLIPTextEncode for one per-character prompt.
        return {
            'inputs': {'text': prompts.get(prompt_key, ''), 'clip': clip_source},
            'class_type': 'CLIPTextEncode'
        }

    def _detail(base, image, segs_node, positive_node):
        # DetailerForEach over one character's segments.
        return {
            'inputs': {
                **base,
                'image': image,
                'segs': [segs_node, 0],
                'model': model_source,
                'clip': clip_source,
                'vae': vae_source,
                'positive': [positive_node, 0],
                'negative': [NODE_NEGATIVE, 0],
            },
            'class_type': 'DetailerForEach'
        }

    # Remove old single face detailer and its prompt — we replace them
    workflow.pop(NODE_FACE_DETAILER, None)
    workflow.pop(NODE_FACE_PROMPT, None)

    # --- Person detection ---
    workflow['40'] = {
        'inputs': {'model_name': 'segm/person_yolov8m-seg.pt'},
        'class_type': 'UltralyticsDetectorProvider'
    }
    workflow['41'] = _segs_detect(['40', 0], [NODE_VAE_DECODE, 0])
    workflow['42'] = _pick('41', 0)  # leftmost person -> char A
    workflow['43'] = _pick('41', 1)  # rightmost person -> char B

    # --- Per-character body prompts ---
    workflow['44'] = _encode('char_a_main')
    workflow['45'] = _encode('char_b_main')

    # --- Person detailing (DetailerForEach) ---
    person_base = {
        'guide_size': 512,
        'guide_size_for': True,
        'max_size': 1024,
        'seed': 0,  # overwritten by seed step
        'steps': 20,  # overwritten by checkpoint settings
        'cfg': 3.5,  # overwritten by checkpoint settings
        'sampler_name': 'euler_ancestral',
        'scheduler': 'normal',
        'denoise': 0.4,
        'feather': 5,
        'noise_mask': True,
        'force_inpaint': True,
        'wildcard': '',
        'cycle': 1,
        'inpaint_model': False,
        'noise_mask_feather': 20,
    }
    workflow['46'] = _detail(person_base, [NODE_VAE_DECODE, 0], '42', '44')
    workflow['47'] = _detail(person_base, ['46', 0], '43', '45')  # chains from person A output

    # --- Face detection (on person-detailed image) ---
    workflow['48'] = _segs_detect(['10', 0], ['47', 0])  # reuses existing face YOLO detector
    workflow['49'] = _pick('48', 0)
    workflow['50'] = _pick('48', 1)

    # --- Per-character face prompts ---
    workflow['51'] = _encode('char_a_face')
    workflow['52'] = _encode('char_b_face')

    # --- Face detailing (DetailerForEach) ---
    # Identical to the person settings except for guide size and denoise.
    face_base = dict(person_base, guide_size=384, denoise=0.5)
    workflow['53'] = _detail(face_base, ['47', 0], '49', '51')
    workflow['54'] = _detail(face_base, ['53', 0], '50', '52')  # chains from face A output

    # Rewire hand detailer: image input from last face detailer instead of old node 11
    if NODE_HAND_DETAILER in workflow:
        workflow[NODE_HAND_DETAILER]['inputs']['image'] = ['54', 0]

    logger.debug("Injected multi-char SEGS detailers (nodes 40-54)")
|
|
|
|
|
|
def _chain_lora(workflow, node_id, lora_data, model_source, clip_source, label, default_weight=None):
    """Wire one LoraLoader node into the model/clip chain.

    No-op (returns the sources unchanged) when ``lora_data`` has no
    'lora_name' or the node is absent from the workflow. Otherwise sets the
    node's name/strengths, connects it to the current sources, logs the
    application and returns the node's outputs as the new sources.

    ``default_weight``, when given, is used as the 'lora_weight' fallback
    if the stored data doesn't specify one.
    """
    lora_name = lora_data.get('lora_name')
    if not lora_name or node_id not in workflow:
        return model_source, clip_source
    if default_weight is not None:
        lora_data = {**{'lora_weight': default_weight}, **lora_data}
    weight = _resolve_lora_weight(lora_data)
    node_inputs = workflow[node_id]["inputs"]
    node_inputs["lora_name"] = lora_name
    node_inputs["strength_model"] = weight
    node_inputs["strength_clip"] = weight
    node_inputs["model"] = model_source
    node_inputs["clip"] = clip_source
    logger.debug("%s LoRA: %s @ %s", label, lora_name, weight)
    return [node_id, 0], [node_id, 1]


def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_negative=None, outfit=None, action=None, style=None, detailer=None, scene=None, width=None, height=None, checkpoint_data=None, look=None, fixed_seed=None, character_b=None):
    """Assemble the final ComfyUI workflow for one generation request.

    Fills in prompts, checkpoint, the LoRA chain, seeds, image dimensions
    and checkpoint-specific overrides, then logs the finished prompt block.
    Mutates ``workflow`` in place and returns it.

    Raises:
        ValueError: if no checkpoint is given and no default is configured.
    """
    # 1. Update prompts using replacement to preserve embeddings baked into
    #    the template text.
    workflow[NODE_POSITIVE]["inputs"]["text"] = workflow[NODE_POSITIVE]["inputs"]["text"].replace("{{POSITIVE_PROMPT}}", prompts["main"])

    if custom_negative:
        workflow[NODE_NEGATIVE]["inputs"]["text"] = f"{custom_negative}, {workflow[NODE_NEGATIVE]['inputs']['text']}"

    if NODE_FACE_PROMPT in workflow:
        workflow[NODE_FACE_PROMPT]["inputs"]["text"] = workflow[NODE_FACE_PROMPT]["inputs"]["text"].replace("{{FACE_PROMPT}}", prompts["face"])
    if NODE_HAND_PROMPT in workflow:
        workflow[NODE_HAND_PROMPT]["inputs"]["text"] = workflow[NODE_HAND_PROMPT]["inputs"]["text"].replace("{{HAND_PROMPT}}", prompts["hand"])

    # 2. Update Checkpoint - always set one, fall back to default if not provided
    if not checkpoint:
        default_ckpt, default_ckpt_data = _get_default_checkpoint()
        checkpoint = default_ckpt
        if not checkpoint_data:
            checkpoint_data = default_ckpt_data
    if checkpoint:
        workflow[NODE_CHECKPOINT]["inputs"]["ckpt_name"] = checkpoint
    else:
        raise ValueError("No checkpoint specified and no default checkpoint configured")

    # 3. LoRA chain: char (16) -> outfit (17) -> action (18) -> style (19) -> char B (20).
    #    Start with direct checkpoint connections; each applied LoRA advances the sources.
    model_source = [NODE_CHECKPOINT, 0]
    clip_source = [NODE_CHECKPOINT, 1]

    # Look negative prompt (applied before character LoRA)
    if look:
        look_negative = look.data.get('negative', '')
        if look_negative:
            workflow[NODE_NEGATIVE]["inputs"]["text"] = f"{look_negative}, {workflow[NODE_NEGATIVE]['inputs']['text']}"

    # Character LoRA (Node 16) — look LoRA overrides character LoRA when present
    if look:
        char_lora_data = look.data.get('lora', {})
    else:
        char_lora_data = character.data.get('lora', {}) if character else {}
    model_source, clip_source = _chain_lora(workflow, NODE_LORA_CHAR, char_lora_data, model_source, clip_source, "Character")

    # Outfit LoRA (Node 17) — defaults to 0.8 weight when the data has none
    outfit_lora_data = outfit.data.get('lora', {}) if outfit else {}
    model_source, clip_source = _chain_lora(workflow, NODE_LORA_OUTFIT, outfit_lora_data, model_source, clip_source, "Outfit", default_weight=0.8)

    # Action LoRA (Node 18)
    action_lora_data = action.data.get('lora', {}) if action else {}
    model_source, clip_source = _chain_lora(workflow, NODE_LORA_ACTION, action_lora_data, model_source, clip_source, "Action")

    # Style/Detailer/Scene LoRA (Node 19) — priority: Style > Detailer > Scene
    target_obj = style or detailer or scene
    style_lora_data = target_obj.data.get('lora', {}) if target_obj else {}
    model_source, clip_source = _chain_lora(workflow, NODE_LORA_STYLE, style_lora_data, model_source, clip_source, "Style/Detailer")

    # Second character LoRA (Node 20) - for multi-character generation
    if character_b:
        char_b_lora_data = character_b.data.get('lora', {})
        model_source, clip_source = _chain_lora(workflow, NODE_LORA_CHAR_B, char_b_lora_data, model_source, clip_source, "Character B")

    # 3b. Multi-char: inject per-character SEGS detailers (replaces node 11/14)
    if character_b:
        _inject_multi_char_detailers(workflow, prompts, model_source, clip_source)

    # Apply connections to all model/clip consumers (conditional on node existence)
    for nid in [NODE_KSAMPLER, NODE_FACE_DETAILER, NODE_HAND_DETAILER] + _SEGS_DETAILER_NODES:
        if nid in workflow:
            workflow[nid]["inputs"]["model"] = model_source

    for nid in [NODE_POSITIVE, NODE_NEGATIVE, NODE_FACE_DETAILER, NODE_HAND_DETAILER, NODE_FACE_PROMPT, NODE_HAND_PROMPT] + _SEGS_PROMPT_NODES:
        if nid in workflow:
            workflow[nid]["inputs"]["clip"] = clip_source

    # 4. Randomize seeds (or use a fixed seed for reproducible batches like Strengths Gallery)
    gen_seed = fixed_seed if fixed_seed is not None else random.randint(1, 10**15)
    for nid in [NODE_KSAMPLER, NODE_FACE_DETAILER, NODE_HAND_DETAILER] + _SEGS_DETAILER_NODES:
        if nid in workflow:
            workflow[nid]["inputs"]["seed"] = gen_seed

    # 5. Set image dimensions
    if NODE_LATENT in workflow:
        if width:
            workflow[NODE_LATENT]["inputs"]["width"] = int(width)
        if height:
            workflow[NODE_LATENT]["inputs"]["height"] = int(height)

    # 6. Apply checkpoint-specific settings (steps, cfg, sampler, base prompts, VAE)
    if checkpoint_data:
        workflow = _apply_checkpoint_settings(workflow, checkpoint_data)

    # 7. Sync sampler/scheduler from main KSampler to adetailer nodes
    sampler_name = workflow[NODE_KSAMPLER]["inputs"].get("sampler_name")
    scheduler = workflow[NODE_KSAMPLER]["inputs"].get("scheduler")
    for node_id in [NODE_FACE_DETAILER, NODE_HAND_DETAILER] + _SEGS_DETAILER_NODES:
        if node_id in workflow:
            if sampler_name:
                workflow[node_id]["inputs"]["sampler_name"] = sampler_name
            if scheduler:
                workflow[node_id]["inputs"]["scheduler"] = scheduler

    # 8. Cross-deduplicate: remove tags shared between positive and negative
    pos_text, neg_text = _cross_dedup_prompts(
        workflow[NODE_POSITIVE]["inputs"]["text"],
        workflow[NODE_NEGATIVE]["inputs"]["text"]
    )
    workflow[NODE_POSITIVE]["inputs"]["text"] = pos_text
    workflow[NODE_NEGATIVE]["inputs"]["text"] = neg_text

    # 9. Final prompt debug — logged after all transformations are complete
    _log_workflow_prompts("_prepare_workflow", workflow)

    return workflow
|