Updated generation pages.

This commit is contained in:
Aodhan Collins
2026-03-15 17:45:17 +00:00
parent 79bbf669e2
commit d756ea1d0e
30 changed files with 2033 additions and 189 deletions

View File

@@ -6,6 +6,22 @@ from flask import current_app
logger = logging.getLogger('gaze')
def get_loaded_checkpoint():
    """Return the checkpoint path currently loaded in ComfyUI, or None.

    Best-effort probe: queries the ComfyUI ``/history`` HTTP endpoint and
    extracts ``ckpt_name`` from node '4' (presumably the checkpoint-loader
    node in this project's workflows — verify against the workflow template)
    of one history entry.  Any network, timeout, or parse failure returns
    None instead of raising.
    """
    try:
        url = current_app.config.get('COMFYUI_URL', 'http://127.0.0.1:8188')
        resp = requests.get(f'{url}/history', timeout=3)
        if resp.ok:
            history = resp.json()
            if history:
                # NOTE(review): max() keyed on status_str (a status label such
                # as 'success', not a timestamp) does not reliably pick the
                # *latest* job — confirm against the actual /history payload.
                latest = max(history.values(), key=lambda j: j.get('status', {}).get('status_str', ''))
                # 'prompt' is a list whose index 2 holds the node-id -> node map.
                nodes = latest.get('prompt', [None, None, {}])[2]
                return nodes.get('4', {}).get('inputs', {}).get('ckpt_name')
    except Exception:
        # Deliberate best-effort: ComfyUI unreachable or malformed JSON
        # simply means "unknown checkpoint".
        pass
    return None
def _ensure_checkpoint_loaded(checkpoint_path):
"""Check if the desired checkpoint is loaded in ComfyUI, and force reload if not."""
if not checkpoint_path:

View File

@@ -1,6 +1,6 @@
import re
from models import db, Character
from utils import _IDENTITY_KEYS, _WARDROBE_KEYS, parse_orientation
from utils import _IDENTITY_KEYS, _WARDROBE_KEYS, _BODY_GROUP_KEYS, parse_orientation
def _dedup_tags(prompt_str):
@@ -114,15 +114,21 @@ def build_prompt(data, selected_fields=None, default_fields=None, active_outfit=
style_data = data.get('style', {})
participants = data.get('participants', {})
# Pre-calculate Hand/Glove priority
# Priority: wardrobe gloves > wardrobe hands (outfit) > identity hands (character)
hand_val = ""
if wardrobe.get('gloves') and is_selected('wardrobe', 'gloves'):
hand_val = wardrobe.get('gloves')
elif wardrobe.get('hands') and is_selected('wardrobe', 'hands'):
hand_val = wardrobe.get('hands')
elif identity.get('hands') and is_selected('identity', 'hands'):
hand_val = identity.get('hands')
# Helper: collect selected values from identity + wardrobe for a given group key
def _group_val(key):
parts = []
id_val = identity.get(key, '')
wd_val = wardrobe.get(key, '')
if id_val and is_selected('identity', key):
val = id_val
# Filter out conflicting tags from base if participants data is present
if participants and key == 'base':
val = re.sub(r'\b(1girl|1boy|solo)\b', '', val).replace(', ,', ',').strip(', ')
if val:
parts.append(val)
if wd_val and is_selected('wardrobe', key):
parts.append(wd_val)
return ', '.join(parts)
# 1. Main Prompt
parts = []
@@ -131,12 +137,10 @@ def build_prompt(data, selected_fields=None, default_fields=None, active_outfit=
if participants:
if participants.get('solo_focus') == 'true':
parts.append('(solo focus:1.2)')
orientation = participants.get('orientation', '')
if orientation:
parts.extend(parse_orientation(orientation))
else:
# Default behavior
parts.append("(solo:1.2)")
# Use character_id (underscores to spaces) for tags compatibility
@@ -144,13 +148,10 @@ def build_prompt(data, selected_fields=None, default_fields=None, active_outfit=
if char_tag and is_selected('special', 'name'):
parts.append(char_tag)
for key in ['base_specs', 'hair', 'eyes', 'extra']:
val = identity.get(key)
if val and is_selected('identity', key):
# Filter out conflicting tags if participants data is present
if participants and key == 'base_specs':
# Remove 1girl, 1boy, solo, etc.
val = re.sub(r'\b(1girl|1boy|solo)\b', '', val).replace(', ,', ',').strip(', ')
# Add all body groups to main prompt
for key in _BODY_GROUP_KEYS:
val = _group_val(key)
if val:
parts.append(val)
# Add defaults (expression, pose, scene)
@@ -159,21 +160,12 @@ def build_prompt(data, selected_fields=None, default_fields=None, active_outfit=
if val and is_selected('defaults', key):
parts.append(val)
# Add hand priority value to main prompt
if hand_val:
parts.append(hand_val)
for key in ['full_body', 'top', 'bottom', 'headwear', 'legwear', 'footwear', 'accessories']:
val = wardrobe.get(key)
if val and is_selected('wardrobe', key):
parts.append(val)
# Standard character styles
char_aesthetic = data.get('styles', {}).get('aesthetic')
if char_aesthetic and is_selected('styles', 'aesthetic'):
parts.append(f"{char_aesthetic} style")
# New Styles Gallery logic
# Styles Gallery logic
if style_data.get('artist_name') and is_selected('style', 'artist_name'):
parts.append(f"by {style_data['artist_name']}")
if style_data.get('artistic_style') and is_selected('style', 'artistic_style'):
@@ -187,26 +179,98 @@ def build_prompt(data, selected_fields=None, default_fields=None, active_outfit=
if lora.get('lora_triggers') and is_selected('lora', 'lora_triggers'):
parts.append(lora.get('lora_triggers'))
# 2. Face Prompt: Tag, Eyes, Expression, Headwear, Action details
# 2. Face Prompt: head group + expression + action head
face_parts = []
if char_tag and is_selected('special', 'name'): face_parts.append(char_tag)
if identity.get('eyes') and is_selected('identity', 'eyes'): face_parts.append(identity.get('eyes'))
if defaults.get('expression') and is_selected('defaults', 'expression'): face_parts.append(defaults.get('expression'))
if wardrobe.get('headwear') and is_selected('wardrobe', 'headwear'): face_parts.append(wardrobe.get('headwear'))
if char_tag and is_selected('special', 'name'):
face_parts.append(char_tag)
head_val = _group_val('head')
if head_val:
face_parts.append(head_val)
if defaults.get('expression') and is_selected('defaults', 'expression'):
face_parts.append(defaults.get('expression'))
if action_data.get('head') and is_selected('action', 'head'):
face_parts.append(action_data.get('head'))
# Add specific Action expression details if available
if action_data.get('head') and is_selected('action', 'head'): face_parts.append(action_data.get('head'))
if action_data.get('eyes') and is_selected('action', 'eyes'): face_parts.append(action_data.get('eyes'))
# 3. Hand Prompt: hands group + action hands
hand_parts = []
hands_val = _group_val('hands')
if hands_val:
hand_parts.append(hands_val)
if action_data.get('hands') and is_selected('action', 'hands'):
hand_parts.append(action_data.get('hands'))
# 3. Hand Prompt: Hand value (Gloves or Hands), Action details
hand_parts = [hand_val] if hand_val else []
if action_data.get('arms') and is_selected('action', 'arms'): hand_parts.append(action_data.get('arms'))
if action_data.get('hands') and is_selected('action', 'hands'): hand_parts.append(action_data.get('hands'))
# 4. Feet Prompt: feet group + action feet
feet_parts = []
feet_val = _group_val('feet')
if feet_val:
feet_parts.append(feet_val)
if action_data.get('feet') and is_selected('action', 'feet'):
feet_parts.append(action_data.get('feet'))
return {
"main": _dedup_tags(", ".join(parts)),
"face": _dedup_tags(", ".join(face_parts)),
"hand": _dedup_tags(", ".join(hand_parts))
"hand": _dedup_tags(", ".join(hand_parts)),
"feet": _dedup_tags(", ".join(feet_parts)),
}
def build_multi_prompt(char_a, char_b, extras_prompt=''):
    """Build prompts for a two-character generation using BREAK separation.

    Args:
        char_a: first character object; ``char_a.data`` is the dict consumed
            by :func:`build_prompt`.
        char_b: second character object, same shape as ``char_a``.
        extras_prompt: optional pre-built tag string prepended to the
            combined main prompt.

    Returns dict with combined prompts (main, face, hand) and per-character
    prompts (char_a_main, char_a_face, char_b_main, char_b_face) for the
    per-person/face ADetailer passes.
    """
    # Build individual prompts with all fields selected
    prompts_a = build_prompt(char_a.data)
    prompts_b = build_prompt(char_b.data)
    # Strip solo/orientation tags from individual prompts — we'll add combined ones
    _solo_orientation_tags = {
        'solo', '(solo:1.2)', '(solo focus:1.2)',
        '1girl', '1boy', '2girls', '2boys', '3girls', '3boys',
        'hetero', 'yuri', 'yaoi',
        'multiple girls', 'multiple boys',
    }
    def _strip_tags(prompt_str, tags_to_remove):
        # Case-insensitive removal of whole comma-separated tags.
        parts = [t.strip() for t in prompt_str.split(',') if t.strip()]
        return ', '.join(p for p in parts if p.lower() not in tags_to_remove)
    main_a = _strip_tags(prompts_a['main'], _solo_orientation_tags)
    main_b = _strip_tags(prompts_b['main'], _solo_orientation_tags)
    # Compute combined orientation
    orient_a = char_a.data.get('participants', {}).get('orientation', '1F')
    orient_b = char_b.data.get('participants', {}).get('orientation', '1F')
    # Count total M and F across both characters (e.g. '1F' + '1M1F' -> 1M, 2F)
    combined_m = orient_a.upper().count('M') + orient_b.upper().count('M')
    combined_f = orient_a.upper().count('F') + orient_b.upper().count('F')
    combined_orientation = f"{combined_m}M{combined_f}F" if combined_m else f"{combined_f}F"
    if combined_f == 0:
        # NOTE(review): when combined_m == 0 AND combined_f == 0 this yields
        # '0M'; also this override makes the conditional above partially
        # redundant — confirm intent.
        combined_orientation = f"{combined_m}M"
    orientation_tags = parse_orientation(combined_orientation)
    # Build combined main prompt with BREAK separation
    orientation_str = ', '.join(orientation_tags)
    combined_main = f"{orientation_str}, {main_a} BREAK {main_b}"
    if extras_prompt:
        combined_main = f"{extras_prompt}, {combined_main}"
    # Merge face/hand prompts for the hand detailer (shared, not per-character)
    hand_parts = [p for p in [prompts_a['hand'], prompts_b['hand']] if p]
    return {
        "main": _dedup_tags(combined_main),
        "face": "",  # not used — per-character face prompts go to SEGS detailers
        "hand": _dedup_tags(', '.join(hand_parts)),
        # Per-character prompts for SEGS-based ADetailer passes
        "char_a_main": _dedup_tags(main_a),
        "char_a_face": _dedup_tags(prompts_a['face']),
        "char_b_main": _dedup_tags(main_b),
        "char_b_face": _dedup_tags(prompts_b['face']),
    }
@@ -220,7 +284,7 @@ def build_extras_prompt(actions, outfits, scenes, styles, detailers):
if lora.get('lora_triggers'):
parts.append(lora['lora_triggers'])
parts.extend(data.get('tags', []))
for key in ['full_body', 'additional']:
for key in _BODY_GROUP_KEYS:
val = data.get('action', {}).get(key)
if val:
parts.append(val)
@@ -228,7 +292,7 @@ def build_extras_prompt(actions, outfits, scenes, styles, detailers):
for outfit in outfits:
data = outfit.data
wardrobe = data.get('wardrobe', {})
for key in ['full_body', 'headwear', 'top', 'bottom', 'legwear', 'footwear', 'hands', 'accessories']:
for key in _BODY_GROUP_KEYS:
val = wardrobe.get(key)
if val:
parts.append(val)

View File

@@ -153,14 +153,13 @@ def ensure_default_outfit():
"outfit_id": "default",
"outfit_name": "Default",
"wardrobe": {
"full_body": "",
"headwear": "",
"top": "",
"bottom": "",
"legwear": "",
"footwear": "",
"base": "",
"head": "",
"upper_body": "",
"lower_body": "",
"hands": "",
"accessories": ""
"feet": "",
"additional": ""
},
"lora": {
"lora_name": "",
@@ -360,7 +359,8 @@ def _resolve_preset_fields(preset_data):
char_cfg = preset_data.get('character', {})
fields = char_cfg.get('fields', {})
for key in ['base_specs', 'hair', 'eyes', 'hands', 'arms', 'torso', 'pelvis', 'legs', 'feet', 'extra']:
from utils import _BODY_GROUP_KEYS
for key in _BODY_GROUP_KEYS:
val = fields.get('identity', {}).get(key, True)
if val == 'random':
val = random.choice([True, False])
@@ -375,7 +375,7 @@ def _resolve_preset_fields(preset_data):
selected.append(f'defaults::{key}')
wardrobe_cfg = fields.get('wardrobe', {})
for key in ['full_body', 'headwear', 'top', 'bottom', 'legwear', 'footwear', 'hands', 'gloves', 'accessories']:
for key in _BODY_GROUP_KEYS:
val = wardrobe_cfg.get('fields', {}).get(key, True)
if val == 'random':
val = random.choice([True, False])

View File

@@ -9,6 +9,11 @@ from services.prompts import _cross_dedup_prompts
logger = logging.getLogger('gaze')
# Node IDs used by DetailerForEach in multi-char mode
_SEGS_DETAILER_NODES = ['46', '47', '53', '54']
# Node IDs for per-character CLIP prompts in multi-char mode
_SEGS_PROMPT_NODES = ['44', '45', '51', '52']
def _log_workflow_prompts(label, workflow):
"""Log the final assembled ComfyUI prompts in a consistent, readable block."""
@@ -17,7 +22,7 @@ def _log_workflow_prompts(label, workflow):
lora_details = []
# Collect detailed LoRA information
for node_id, label_str in [("16", "char/look"), ("17", "outfit"), ("18", "action"), ("19", "style/detail/scene")]:
for node_id, label_str in [("16", "char/look"), ("17", "outfit"), ("18", "action"), ("19", "style/detail/scene"), ("20", "char_b")]:
if node_id in workflow:
name = workflow[node_id]["inputs"].get("lora_name", "")
if name:
@@ -41,11 +46,18 @@ def _log_workflow_prompts(label, workflow):
# Extract adetailer information
adetailer_info = []
# Single-char mode: FaceDetailer nodes 11 + 13
for node_id, node_name in [("11", "Face"), ("13", "Hand")]:
if node_id in workflow:
adetailer_info.append(f" {node_name} (Node {node_id}): steps={workflow[node_id]['inputs'].get('steps', '?')}, "
f"cfg={workflow[node_id]['inputs'].get('cfg', '?')}, "
f"denoise={workflow[node_id]['inputs'].get('denoise', '?')}")
# Multi-char mode: SEGS DetailerForEach nodes
for node_id, node_name in [("46", "Person A"), ("47", "Person B"), ("53", "Face A"), ("54", "Face B")]:
if node_id in workflow:
adetailer_info.append(f" {node_name} (Node {node_id}): steps={workflow[node_id]['inputs'].get('steps', '?')}, "
f"cfg={workflow[node_id]['inputs'].get('cfg', '?')}, "
f"denoise={workflow[node_id]['inputs'].get('denoise', '?')}")
face_text = workflow.get('14', {}).get('inputs', {}).get('text', '')
hand_text = workflow.get('15', {}).get('inputs', {}).get('text', '')
@@ -95,6 +107,12 @@ def _log_workflow_prompts(label, workflow):
if hand_text:
lines.append(f" [H] Hand : {hand_text}")
# Multi-char per-character prompts
for node_id, lbl in [("44", "Person A"), ("45", "Person B"), ("51", "Face A"), ("52", "Face B")]:
txt = workflow.get(node_id, {}).get('inputs', {}).get('text', '')
if txt:
lines.append(f" [{lbl}] : {txt}")
lines.append(sep)
logger.info("\n%s", "\n".join(lines))
@@ -119,8 +137,8 @@ def _apply_checkpoint_settings(workflow, ckpt_data):
if scheduler and '3' in workflow:
workflow['3']['inputs']['scheduler'] = scheduler
# Face/hand detailers (nodes 11, 13)
for node_id in ['11', '13']:
# Face/hand detailers (nodes 11, 13) + multi-char SEGS detailers
for node_id in ['11', '13'] + _SEGS_DETAILER_NODES:
if node_id in workflow:
if steps:
workflow[node_id]['inputs']['steps'] = int(steps)
@@ -131,9 +149,9 @@ def _apply_checkpoint_settings(workflow, ckpt_data):
if scheduler:
workflow[node_id]['inputs']['scheduler'] = scheduler
# Prepend base_positive to positive prompts (main + face/hand detailers)
# Prepend base_positive to all positive prompt nodes
if base_positive:
for node_id in ['6', '14', '15']:
for node_id in ['6', '14', '15'] + _SEGS_PROMPT_NODES:
if node_id in workflow:
workflow[node_id]['inputs']['text'] = f"{base_positive}, {workflow[node_id]['inputs']['text']}"
@@ -149,7 +167,7 @@ def _apply_checkpoint_settings(workflow, ckpt_data):
}
if '8' in workflow:
workflow['8']['inputs']['vae'] = ['21', 0]
for node_id in ['11', '13']:
for node_id in ['11', '13'] + _SEGS_DETAILER_NODES:
if node_id in workflow:
workflow[node_id]['inputs']['vae'] = ['21', 0]
@@ -187,12 +205,246 @@ def _get_default_checkpoint():
return ckpt.checkpoint_path, ckpt.data or {}
def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_negative=None, outfit=None, action=None, style=None, detailer=None, scene=None, width=None, height=None, checkpoint_data=None, look=None, fixed_seed=None):
def _inject_multi_char_detailers(workflow, prompts, model_source, clip_source):
    """Replace single FaceDetailer (node 11) with per-character SEGS-based detailers.

    Injects person detection + face detection pipelines that order detections
    left-to-right and apply character A's prompt to the left person/face and
    character B's prompt to the right person/face.

    Args:
        workflow: mutable ComfyUI API-format workflow dict (node-id -> node);
            mutated in place.
        prompts: dict with 'char_a_main', 'char_b_main', 'char_a_face',
            'char_b_face' keys (as produced by build_multi_prompt).
        model_source: [node_id, output_index] link to the model output.
        clip_source: [node_id, output_index] link to the CLIP output.

    Nodes added:
        40 - Person detector (UltralyticsDetectorProvider)
        41 - Person SEGS detection (BboxDetectorSEGS)
        42 - Filter: left person (char A)
        43 - Filter: right person (char B)
        44 - CLIPTextEncode: char A body prompt
        45 - CLIPTextEncode: char B body prompt
        46 - DetailerForEach: person A
        47 - DetailerForEach: person B
        48 - Face SEGS detection (BboxDetectorSEGS, reuses face detector node 10)
        49 - Filter: left face (char A)
        50 - Filter: right face (char B)
        51 - CLIPTextEncode: char A face prompt
        52 - CLIPTextEncode: char B face prompt
        53 - DetailerForEach: face A
        54 - DetailerForEach: face B
    Image flow: VAEDecode(8) → PersonA(46) → PersonB(47) → FaceA(53) → FaceB(54) → Hand(13)
    """
    # Checkpoint loader's VAE output; may be rerouted to a dedicated VAE
    # loader (node 21) later by checkpoint settings — TODO confirm ordering.
    vae_source = ["4", 2]
    # Remove old single face detailer and its prompt — we replace them
    workflow.pop('11', None)
    workflow.pop('14', None)
    # --- Person detection ---
    workflow['40'] = {
        'inputs': {'model_name': 'segm/person_yolov8m-seg.pt'},
        'class_type': 'UltralyticsDetectorProvider'
    }
    workflow['41'] = {
        'inputs': {
            'bbox_detector': ['40', 0],
            'image': ['8', 0],
            'threshold': 0.5,
            'dilation': 10,
            'crop_factor': 3.0,
            'drop_size': 10,
            'labels': 'all',
        },
        'class_type': 'BboxDetectorSEGS'
    }
    # Order by x1 ascending (left to right), pick index 0 = leftmost person.
    # NOTE(review): assumes order=False means ascending in
    # ImpactSEGSOrderedFilter — verify against Impact Pack docs.
    workflow['42'] = {
        'inputs': {
            'segs': ['41', 0],
            'target': 'x1',
            'order': False,
            'take_start': 0,
            'take_count': 1,
        },
        'class_type': 'ImpactSEGSOrderedFilter'
    }
    # Pick index 1 = rightmost person
    workflow['43'] = {
        'inputs': {
            'segs': ['41', 0],
            'target': 'x1',
            'order': False,
            'take_start': 1,
            'take_count': 1,
        },
        'class_type': 'ImpactSEGSOrderedFilter'
    }
    # --- Per-character body prompts ---
    workflow['44'] = {
        'inputs': {'text': prompts.get('char_a_main', ''), 'clip': clip_source},
        'class_type': 'CLIPTextEncode'
    }
    workflow['45'] = {
        'inputs': {'text': prompts.get('char_b_main', ''), 'clip': clip_source},
        'class_type': 'CLIPTextEncode'
    }
    # --- Person detailing (DetailerForEach) ---
    # Shared sampler settings for both person detailers; several values are
    # placeholders overwritten downstream (see inline notes).
    _person_base = {
        'guide_size': 512,
        'guide_size_for': True,
        'max_size': 1024,
        'seed': 0,   # overwritten by seed step
        'steps': 20, # overwritten by checkpoint settings
        'cfg': 3.5,  # overwritten by checkpoint settings
        'sampler_name': 'euler_ancestral',
        'scheduler': 'normal',
        'denoise': 0.4,
        'feather': 5,
        'noise_mask': True,
        'force_inpaint': True,
        'wildcard': '',
        'cycle': 1,
        'inpaint_model': False,
        'noise_mask_feather': 20,
    }
    workflow['46'] = {
        'inputs': {
            **_person_base,
            'image': ['8', 0],
            'segs': ['42', 0],
            'model': model_source,
            'clip': clip_source,
            'vae': vae_source,
            'positive': ['44', 0],
            'negative': ['7', 0],
        },
        'class_type': 'DetailerForEach'
    }
    workflow['47'] = {
        'inputs': {
            **_person_base,
            'image': ['46', 0],  # chains from person A output
            'segs': ['43', 0],
            'model': model_source,
            'clip': clip_source,
            'vae': vae_source,
            'positive': ['45', 0],
            'negative': ['7', 0],
        },
        'class_type': 'DetailerForEach'
    }
    # --- Face detection (on person-detailed image) ---
    workflow['48'] = {
        'inputs': {
            'bbox_detector': ['10', 0],  # reuse existing face YOLO detector
            'image': ['47', 0],
            'threshold': 0.5,
            'dilation': 10,
            'crop_factor': 3.0,
            'drop_size': 10,
            'labels': 'all',
        },
        'class_type': 'BboxDetectorSEGS'
    }
    # Leftmost face -> char A
    workflow['49'] = {
        'inputs': {
            'segs': ['48', 0],
            'target': 'x1',
            'order': False,
            'take_start': 0,
            'take_count': 1,
        },
        'class_type': 'ImpactSEGSOrderedFilter'
    }
    # Rightmost face -> char B
    workflow['50'] = {
        'inputs': {
            'segs': ['48', 0],
            'target': 'x1',
            'order': False,
            'take_start': 1,
            'take_count': 1,
        },
        'class_type': 'ImpactSEGSOrderedFilter'
    }
    # --- Per-character face prompts ---
    workflow['51'] = {
        'inputs': {'text': prompts.get('char_a_face', ''), 'clip': clip_source},
        'class_type': 'CLIPTextEncode'
    }
    workflow['52'] = {
        'inputs': {'text': prompts.get('char_b_face', ''), 'clip': clip_source},
        'class_type': 'CLIPTextEncode'
    }
    # --- Face detailing (DetailerForEach) ---
    # Same placeholder convention as _person_base (seed/steps/cfg overwritten).
    _face_base = {
        'guide_size': 384,
        'guide_size_for': True,
        'max_size': 1024,
        'seed': 0,
        'steps': 20,
        'cfg': 3.5,
        'sampler_name': 'euler_ancestral',
        'scheduler': 'normal',
        'denoise': 0.5,
        'feather': 5,
        'noise_mask': True,
        'force_inpaint': True,
        'wildcard': '',
        'cycle': 1,
        'inpaint_model': False,
        'noise_mask_feather': 20,
    }
    workflow['53'] = {
        'inputs': {
            **_face_base,
            'image': ['47', 0],
            'segs': ['49', 0],
            'model': model_source,
            'clip': clip_source,
            'vae': vae_source,
            'positive': ['51', 0],
            'negative': ['7', 0],
        },
        'class_type': 'DetailerForEach'
    }
    workflow['54'] = {
        'inputs': {
            **_face_base,
            'image': ['53', 0],  # chains from face A output
            'segs': ['50', 0],
            'model': model_source,
            'clip': clip_source,
            'vae': vae_source,
            'positive': ['52', 0],
            'negative': ['7', 0],
        },
        'class_type': 'DetailerForEach'
    }
    # Rewire hand detailer: image input from last face detailer instead of old node 11
    if '13' in workflow:
        workflow['13']['inputs']['image'] = ['54', 0]
    logger.debug("Injected multi-char SEGS detailers (nodes 40-54)")
def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_negative=None, outfit=None, action=None, style=None, detailer=None, scene=None, width=None, height=None, checkpoint_data=None, look=None, fixed_seed=None, character_b=None):
# 1. Update prompts using replacement to preserve embeddings
workflow["6"]["inputs"]["text"] = workflow["6"]["inputs"]["text"].replace("{{POSITIVE_PROMPT}}", prompts["main"])
if custom_negative:
workflow["7"]["inputs"]["text"] = f"{workflow['7']['inputs']['text']}, {custom_negative}"
workflow["7"]["inputs"]["text"] = f"{custom_negative}, {workflow['7']['inputs']['text']}"
if "14" in workflow:
workflow["14"]["inputs"]["text"] = workflow["14"]["inputs"]["text"].replace("{{FACE_PROMPT}}", prompts["face"])
@@ -289,23 +541,39 @@ def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_nega
clip_source = ["19", 1]
logger.debug("Style/Detailer LoRA: %s @ %s", style_lora_name, _w19)
# Apply connections to all model/clip consumers
workflow["3"]["inputs"]["model"] = model_source
workflow["11"]["inputs"]["model"] = model_source
workflow["13"]["inputs"]["model"] = model_source
# Second character LoRA (Node 20) - for multi-character generation
if character_b:
char_b_lora_data = character_b.data.get('lora', {})
char_b_lora_name = char_b_lora_data.get('lora_name')
if char_b_lora_name and "20" in workflow:
_w20 = _resolve_lora_weight(char_b_lora_data)
workflow["20"]["inputs"]["lora_name"] = char_b_lora_name
workflow["20"]["inputs"]["strength_model"] = _w20
workflow["20"]["inputs"]["strength_clip"] = _w20
workflow["20"]["inputs"]["model"] = model_source
workflow["20"]["inputs"]["clip"] = clip_source
model_source = ["20", 0]
clip_source = ["20", 1]
logger.debug("Character B LoRA: %s @ %s", char_b_lora_name, _w20)
workflow["6"]["inputs"]["clip"] = clip_source
workflow["7"]["inputs"]["clip"] = clip_source
workflow["11"]["inputs"]["clip"] = clip_source
workflow["13"]["inputs"]["clip"] = clip_source
workflow["14"]["inputs"]["clip"] = clip_source
workflow["15"]["inputs"]["clip"] = clip_source
# 3b. Multi-char: inject per-character SEGS detailers (replaces node 11/14)
if character_b:
_inject_multi_char_detailers(workflow, prompts, model_source, clip_source)
# Apply connections to all model/clip consumers (conditional on node existence)
for nid in ["3", "11", "13"] + _SEGS_DETAILER_NODES:
if nid in workflow:
workflow[nid]["inputs"]["model"] = model_source
for nid in ["6", "7", "11", "13", "14", "15"] + _SEGS_PROMPT_NODES:
if nid in workflow:
workflow[nid]["inputs"]["clip"] = clip_source
# 4. Randomize seeds (or use a fixed seed for reproducible batches like Strengths Gallery)
gen_seed = fixed_seed if fixed_seed is not None else random.randint(1, 10**15)
workflow["3"]["inputs"]["seed"] = gen_seed
if "11" in workflow: workflow["11"]["inputs"]["seed"] = gen_seed
if "13" in workflow: workflow["13"]["inputs"]["seed"] = gen_seed
for nid in ["3", "11", "13"] + _SEGS_DETAILER_NODES:
if nid in workflow:
workflow[nid]["inputs"]["seed"] = gen_seed
# 5. Set image dimensions
if "5" in workflow:
@@ -321,7 +589,7 @@ def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_nega
# 7. Sync sampler/scheduler from main KSampler to adetailer nodes
sampler_name = workflow["3"]["inputs"].get("sampler_name")
scheduler = workflow["3"]["inputs"].get("scheduler")
for node_id in ["11", "13"]:
for node_id in ["11", "13"] + _SEGS_DETAILER_NODES:
if node_id in workflow:
if sampler_name:
workflow[node_id]["inputs"]["sampler_name"] = sampler_name