Add extra prompts, endless generation, random character default, and small fixes

- Add extra positive/negative prompt textareas to all 9 detail pages with session persistence
- Add Endless generation button to all detail pages (continuous preview generation until stopped)
- Default character selector to "Random Character" on all secondary detail pages
- Fix queue clear endpoint (remove spurious auth check)
- Refactor app.py into routes/ and services/ modules
- Update CLAUDE.md with new architecture documentation
- Various data file updates and cleanup

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Aodhan Collins
2026-03-13 02:07:16 +00:00
parent 1b8a798c31
commit 5e4348ebc1
170 changed files with 17367 additions and 9781 deletions

0
services/__init__.py Normal file
View File

117
services/comfyui.py Normal file
View File

@@ -0,0 +1,117 @@
import json
import logging
import requests
from flask import current_app
logger = logging.getLogger('gaze')
def _ensure_checkpoint_loaded(checkpoint_path):
    """Check if the desired checkpoint is loaded in ComfyUI, and force reload if not.

    checkpoint_path — ``ckpt_name`` value from the workflow's node '4';
        a falsy value makes this a no-op.

    Best-effort: every failure (network, JSON, unexpected shape) is caught
    and logged as a warning so a generation attempt is never blocked by
    this pre-check.
    """
    if not checkpoint_path:
        return
    try:
        # Get currently loaded checkpoint from ComfyUI history
        url = current_app.config.get('COMFYUI_URL', 'http://127.0.0.1:8188')
        resp = requests.get(f'{url}/history', timeout=3)
        if resp.ok:
            history = resp.json()
            if history:
                # NOTE(review): "latest" is picked by max() over the
                # lexicographic status_str — this does not order entries by
                # time. Presumably intended to find the most recent job;
                # confirm against ComfyUI's /history payload format.
                latest = max(history.values(), key=lambda j: j.get('status', {}).get('status_str', ''))
                # 'prompt' is a list whose index 2 holds the node-graph dict.
                nodes = latest.get('prompt', [None, None, {}])[2]
                # Node '4' is assumed to be the checkpoint-loader node
                # (same convention used by queue_prompt).
                loaded_ckpt = nodes.get('4', {}).get('inputs', {}).get('ckpt_name')
                # If the loaded checkpoint matches what we want, no action needed
                if loaded_ckpt == checkpoint_path:
                    logger.info(f"Checkpoint {checkpoint_path} already loaded in ComfyUI")
                    return
        # Checkpoint doesn't match or couldn't determine - force unload all models
        logger.info(f"Forcing ComfyUI to unload models to ensure {checkpoint_path} loads")
        requests.post(f'{url}/free', json={'unload_models': True}, timeout=5)
    except Exception as e:
        logger.warning(f"Failed to check/force checkpoint reload: {e}")
def queue_prompt(prompt_workflow, client_id=None):
    """POST a workflow to ComfyUI's /prompt endpoint.

    prompt_workflow — ComfyUI node-graph dict (node '4' is assumed to hold
        the checkpoint loader's ``ckpt_name``).
    client_id — optional client identifier forwarded to ComfyUI.

    Returns ComfyUI's parsed JSON response; a ``prompt_id`` key indicates
    the prompt was accepted.
    """
    # Ensure the checkpoint in the workflow is loaded in ComfyUI
    checkpoint_path = prompt_workflow.get('4', {}).get('inputs', {}).get('ckpt_name')
    _ensure_checkpoint_loaded(checkpoint_path)
    p = {"prompt": prompt_workflow}
    if client_id:
        p["client_id"] = client_id
    url = current_app.config['COMFYUI_URL']
    # Log the full request being sent to ComfyUI
    logger.debug("=" * 80)
    logger.debug("COMFYUI REQUEST - Sending prompt to %s/prompt", url)
    logger.debug("Checkpoint: %s", checkpoint_path)
    logger.debug("Client ID: %s", client_id if client_id else "(none)")
    logger.debug("Full workflow JSON:")
    logger.debug(json.dumps(prompt_workflow, indent=2))
    logger.debug("=" * 80)
    # FIX: use json= (sets Content-Type: application/json, which the raw
    # data= bytes did not) and bound the request with a timeout so the
    # queue worker cannot hang forever if ComfyUI is unreachable.
    # Queueing a prompt is fast; generation itself happens asynchronously.
    response = requests.post(f"{url}/prompt", json=p, timeout=30)
    response_json = response.json()
    # Log the response from ComfyUI
    logger.debug("COMFYUI RESPONSE - Status: %s", response.status_code)
    logger.debug("Response JSON: %s", json.dumps(response_json, indent=2))
    if 'prompt_id' in response_json:
        logger.info("ComfyUI accepted prompt with ID: %s", response_json['prompt_id'])
    else:
        logger.error("ComfyUI rejected prompt: %s", response_json)
    logger.debug("=" * 80)
    return response_json
def get_history(prompt_id):
    """Poll ComfyUI /history for results of a given prompt_id.

    Returns the parsed JSON dict; ``prompt_id`` appears as a top-level key
    only once generation has finished (the queue worker polls on this).
    """
    url = current_app.config['COMFYUI_URL']
    # FIX: add a timeout so the worker's polling loop cannot block
    # indefinitely if ComfyUI stops responding mid-generation.
    response = requests.get(f"{url}/history/{prompt_id}", timeout=10)
    history_json = response.json()
    # Log detailed history response for debugging
    if prompt_id in history_json:
        logger.debug("=" * 80)
        logger.debug("COMFYUI HISTORY - Prompt ID: %s", prompt_id)
        logger.debug("Status: %s", response.status_code)
        # Extract key information from the history
        prompt_data = history_json[prompt_id]
        if 'status' in prompt_data:
            logger.debug("Generation status: %s", prompt_data['status'])
        if 'outputs' in prompt_data:
            logger.debug("Outputs available: %s", list(prompt_data['outputs'].keys()))
            for node_id, output in prompt_data['outputs'].items():
                if 'images' in output:
                    logger.debug("  Node %s produced %d image(s)", node_id, len(output['images']))
                    for img in output['images']:
                        logger.debug("    - %s (subfolder: %s, type: %s)",
                                     img.get('filename'), img.get('subfolder'), img.get('type'))
        logger.debug("Full history response:")
        logger.debug(json.dumps(history_json, indent=2))
        logger.debug("=" * 80)
    else:
        logger.debug("History not yet available for prompt ID: %s", prompt_id)
    return history_json
def get_image(filename, subfolder, folder_type):
    """Retrieve a generated image from ComfyUI's /view endpoint.

    filename/subfolder/folder_type — the fields ComfyUI reports for each
        image in its /history output.

    Returns the raw image bytes.
    """
    url = current_app.config['COMFYUI_URL']
    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
    logger.debug("Fetching image from ComfyUI: filename=%s, subfolder=%s, type=%s",
                 filename, subfolder, folder_type)
    # FIX: bound the download with a timeout. Images can be large, so allow
    # a generous limit, but never hang the finalize step forever.
    response = requests.get(f"{url}/view", params=data, timeout=60)
    logger.debug("Image retrieved: %d bytes (status: %s)", len(response.content), response.status_code)
    return response.content

66
services/file_io.py Normal file
View File

@@ -0,0 +1,66 @@
import os
from models import Settings, Character, Look
from utils import _LORA_DEFAULTS
def get_available_loras(category):
    """Return sorted list of LoRA paths for the given category.

    category: one of 'characters','outfits','actions','styles','scenes','detailers'

    The directory comes from Settings (``lora_dir_<category>``) when set,
    falling back to _LORA_DEFAULTS; a missing/invalid directory yields [].
    """
    settings = Settings.query.first()
    configured = getattr(settings, f'lora_dir_{category}', None) if settings else None
    lora_dir = configured or _LORA_DEFAULTS.get(category, '')
    if not (lora_dir and os.path.isdir(lora_dir)):
        return []
    subfolder = os.path.basename(lora_dir.rstrip('/'))
    names = [entry for entry in os.listdir(lora_dir) if entry.endswith('.safetensors')]
    return sorted(f"Illustrious/{subfolder}/{entry}" for entry in names)
def get_available_checkpoints():
    """Return sorted 'dirname/filename' entries from every configured checkpoint directory.

    Directories come from Settings.checkpoint_dirs (comma-separated) when
    set, otherwise a built-in default pair; non-existent entries are skipped.
    """
    settings = Settings.query.first()
    configured = settings.checkpoint_dirs if settings else None
    dirs_str = configured or \
        '/ImageModels/Stable-diffusion/Illustrious,/ImageModels/Stable-diffusion/Noob'
    found = []
    for raw in dirs_str.split(','):
        directory = raw.strip()
        if not directory or not os.path.isdir(directory):
            continue
        prefix = os.path.basename(directory.rstrip('/'))
        found.extend(
            f"{prefix}/{entry}"
            for entry in os.listdir(directory)
            if entry.endswith(('.safetensors', '.ckpt'))
        )
    return sorted(found)
def _count_look_assignments():
    """Return a dict mapping look_id to the count of characters it's assigned to.

    A Look can be tied to at most one character (via character_id), so each
    count is either 0 (unassigned) or 1 (assigned).
    """
    return {
        look.look_id: (1 if look.character_id else 0)
        for look in Look.query.all()
    }
def _count_outfit_lora_assignments():
    """Return a dict mapping outfit LoRA filename to the count of characters using it.

    Two sources are counted per character:
      1. the character's top-level lora_name, but only when it looks like a
         clothing LoRA ('Clothing' appears in its path);
      2. each per-outfit lora_name in the nested wardrobe format. The nested
         format is detected by a dict-valued 'default' entry; legacy flat
         wardrobes carry no per-outfit LoRAs and are skipped.
    """
    assignment_counts = {}
    characters = Character.query.all()
    for character in characters:
        char_lora = character.data.get('lora', {}).get('lora_name', '')
        if char_lora and 'Clothing' in char_lora:
            assignment_counts[char_lora] = assignment_counts.get(char_lora, 0) + 1
        wardrobe = character.data.get('wardrobe', {})
        # Nested-format marker: 'default' key whose value is a dict.
        if 'default' in wardrobe and isinstance(wardrobe.get('default'), dict):
            for outfit_name, outfit_data in wardrobe.items():
                if isinstance(outfit_data, dict):
                    outfit_lora = outfit_data.get('lora', {})
                    if isinstance(outfit_lora, dict):
                        lora_name = outfit_lora.get('lora_name', '')
                        if lora_name:
                            assignment_counts[lora_name] = assignment_counts.get(lora_name, 0) + 1
    return assignment_counts

265
services/job_queue.py Normal file
View File

@@ -0,0 +1,265 @@
import os
import time
import uuid
import logging
import threading
from collections import deque
from flask import current_app
from models import db
from services.comfyui import queue_prompt, get_history, get_image
logger = logging.getLogger('gaze')
# ---------------------------------------------------------------------------
# Generation Job Queue
# ---------------------------------------------------------------------------
# Each job is a dict:
# id — unique UUID string
# label — human-readable description (e.g. "Tifa Lockhart preview")
# status — 'pending' | 'processing' | 'done' | 'failed' | 'paused' | 'removed'
# workflow — the fully-prepared ComfyUI workflow dict
# finalize_fn — callable(comfy_prompt_id, job) that saves the image; called after ComfyUI finishes
# error — error message string (when status == 'failed')
# result — dict with image_url etc. (set by finalize_fn on success)
# created_at — unix timestamp
# comfy_prompt_id — the prompt_id returned by ComfyUI (set when processing starts)
# Shared queue state. Both the background worker thread and Flask request
# handlers touch these; all mutation happens under _job_queue_lock.
_job_queue_lock = threading.Lock()
_job_queue = deque()  # ordered list of job dicts (pending + paused + processing)
_job_history = {}  # job_id -> job dict (all jobs ever added, for status lookup)
_queue_worker_event = threading.Event()  # signals worker that a new job is available
# Stored reference to the Flask app, set by init_queue_worker()
_app = None
def _enqueue_job(label, workflow, finalize_fn):
    """Add a generation job to the queue. Returns the job dict.

    label — human-readable description for logs/UI.
    workflow — fully-prepared ComfyUI workflow dict.
    finalize_fn — callable(comfy_prompt_id, job) run after generation.

    Wakes the background worker via _queue_worker_event.
    """
    job = dict(
        id=str(uuid.uuid4()),
        label=label,
        status='pending',
        workflow=workflow,
        finalize_fn=finalize_fn,
        error=None,
        result=None,
        created_at=time.time(),
        comfy_prompt_id=None,
    )
    with _job_queue_lock:
        _job_queue.append(job)
        _job_history[job['id']] = job
    logger.info("Job queued: [%s] %s", job['id'][:8], label)
    _queue_worker_event.set()
    return job
def _queue_worker():
    """Background thread: processes jobs from _job_queue sequentially.

    Runs forever: blocks on _queue_worker_event, then drains every pending
    job one at a time. Each job is sent to ComfyUI, polled to completion
    (~10 min max), finalized via its finalize_fn, then removed from the
    active queue (it stays in _job_history for status lookups).
    """
    while True:
        _queue_worker_event.wait()
        _queue_worker_event.clear()
        while True:
            job = None
            with _job_queue_lock:
                # Find the first pending job
                for j in _job_queue:
                    if j['status'] == 'pending':
                        job = j
                        break
            if job is None:
                break  # No pending jobs — go back to waiting
            # Mark as processing
            with _job_queue_lock:
                job['status'] = 'processing'
            logger.info("=" * 80)
            logger.info("JOB STARTED: [%s] %s", job['id'][:8], job['label'])
            logger.info("Job created at: %s", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(job['created_at'])))
            # Log workflow summary before sending to ComfyUI.
            # Node ids are assumed fixed by the workflow template:
            # '3' = sampler, '4' = checkpoint loader, '5' = latent size,
            # '6'/'7' = positive/negative prompt, '16'-'19' = LoRA loaders
            # — TODO confirm against the workflow template.
            workflow = job['workflow']
            logger.info("Workflow summary:")
            logger.info("  Checkpoint: %s", workflow.get('4', {}).get('inputs', {}).get('ckpt_name', '(not set)'))
            logger.info("  Seed: %s", workflow.get('3', {}).get('inputs', {}).get('seed', '(not set)'))
            logger.info("  Resolution: %sx%s",
                        workflow.get('5', {}).get('inputs', {}).get('width', '?'),
                        workflow.get('5', {}).get('inputs', {}).get('height', '?'))
            logger.info("  Sampler: %s / %s (steps=%s, cfg=%s)",
                        workflow.get('3', {}).get('inputs', {}).get('sampler_name', '?'),
                        workflow.get('3', {}).get('inputs', {}).get('scheduler', '?'),
                        workflow.get('3', {}).get('inputs', {}).get('steps', '?'),
                        workflow.get('3', {}).get('inputs', {}).get('cfg', '?'))
            # Log active LoRAs
            active_loras = []
            for node_id, label_str in [("16", "char/look"), ("17", "outfit"), ("18", "action"), ("19", "style/detail/scene")]:
                if node_id in workflow:
                    lora_name = workflow[node_id]["inputs"].get("lora_name", "")
                    if lora_name:
                        strength = workflow[node_id]["inputs"].get("strength_model", "?")
                        active_loras.append(f"{label_str}:{lora_name.split('/')[-1]}@{strength}")
            logger.info("  Active LoRAs: %s", ' | '.join(active_loras) if active_loras else '(none)')
            # Log prompts (truncated to 200 chars)
            logger.info("  Positive prompt: %s", workflow.get('6', {}).get('inputs', {}).get('text', '(not set)')[:200])
            logger.info("  Negative prompt: %s", workflow.get('7', {}).get('inputs', {}).get('text', '(not set)')[:200])
            logger.info("=" * 80)
            try:
                # App context is required because queue_prompt/get_history
                # read current_app.config.
                with _app.app_context():
                    # Send workflow to ComfyUI
                    logger.info("Sending workflow to ComfyUI...")
                    prompt_response = queue_prompt(job['workflow'])
                    if 'prompt_id' not in prompt_response:
                        raise Exception(f"ComfyUI rejected job: {prompt_response.get('error', 'unknown error')}")
                    comfy_id = prompt_response['prompt_id']
                    with _job_queue_lock:
                        job['comfy_prompt_id'] = comfy_id
                    logger.info("Job [%s] queued in ComfyUI as %s", job['id'][:8], comfy_id)
                    # Poll until done (max ~10 minutes: 300 retries x 2s)
                    max_retries = 300
                    finished = False
                    poll_count = 0
                    logger.info("Polling ComfyUI for completion (max %d retries, 2s interval)...", max_retries)
                    while max_retries > 0:
                        history = get_history(comfy_id)
                        # ComfyUI only lists the prompt_id in /history once
                        # the generation has finished.
                        if comfy_id in history:
                            finished = True
                            logger.info("Generation completed after %d polls (%d seconds)",
                                        poll_count, poll_count * 2)
                            break
                        poll_count += 1
                        if poll_count % 10 == 0:  # Log every 20 seconds
                            logger.info("Still waiting for generation... (%d polls, %d seconds elapsed)",
                                        poll_count, poll_count * 2)
                        time.sleep(2)
                        max_retries -= 1
                    if not finished:
                        raise Exception("ComfyUI generation timed out")
                    logger.info("Job [%s] generation complete, finalizing...", job['id'][:8])
                    # Run the finalize callback (saves image to disk / DB)
                    job['finalize_fn'](comfy_id, job)
                    with _job_queue_lock:
                        job['status'] = 'done'
                logger.info("=" * 80)
                logger.info("JOB COMPLETED: [%s] %s", job['id'][:8], job['label'])
                logger.info("=" * 80)
            except Exception as e:
                logger.error("=" * 80)
                # NOTE(review): "%s%s" concatenates label and error with no
                # separator — looks like a missing ": " in the format string.
                logger.exception("JOB FAILED: [%s] %s%s", job['id'][:8], job['label'], e)
                logger.error("=" * 80)
                with _job_queue_lock:
                    job['status'] = 'failed'
                    job['error'] = str(e)
            # Remove completed/failed jobs from the active queue (keep in history)
            with _job_queue_lock:
                try:
                    _job_queue.remove(job)
                except ValueError:
                    pass  # Already removed (e.g. by user)
            # Periodically purge old finished jobs from history to avoid unbounded growth
            _prune_job_history()
def _make_finalize(category, slug, db_model_class=None, action=None):
    """Return a finalize callback for a standard queue job.

    category — upload sub-directory name (e.g. 'characters', 'outfits')
    slug — entity slug used for the upload folder name
    db_model_class — SQLAlchemy model class for cover-image DB update; None = skip
    action — 'replace' → update DB; None → always update; anything else → skip

    The returned callable(comfy_prompt_id, job) downloads the first image
    produced by the ComfyUI run, saves it under UPLOAD_FOLDER, records the
    result (URL, path, seed) on the job, and optionally updates the model's
    image_path.
    """
    def _finalize(comfy_prompt_id, job):
        logger.debug("=" * 80)
        logger.debug("FINALIZE - Starting finalization for prompt ID: %s", comfy_prompt_id)
        logger.debug("Category: %s, Slug: %s, Action: %s", category, slug, action)
        history = get_history(comfy_prompt_id)
        outputs = history[comfy_prompt_id]['outputs']
        logger.debug("Processing outputs from %d node(s)", len(outputs))
        for node_id, node_output in outputs.items():
            logger.debug("  Node %s: %s", node_id, list(node_output.keys()))
            if 'images' in node_output:
                logger.debug("  Found %d image(s) in node %s", len(node_output['images']), node_id)
                # Only the first image of the first image-producing node is kept.
                image_info = node_output['images'][0]
                logger.debug("  Image info: filename=%s, subfolder=%s, type=%s",
                             image_info['filename'], image_info['subfolder'], image_info['type'])
                image_data = get_image(image_info['filename'], image_info['subfolder'], image_info['type'])
                upload_folder = current_app.config['UPLOAD_FOLDER']
                folder = os.path.join(upload_folder, f"{category}/{slug}")
                os.makedirs(folder, exist_ok=True)
                filename = f"gen_{int(time.time())}.png"
                full_path = os.path.join(folder, filename)
                logger.debug("  Saving image to: %s", full_path)
                with open(full_path, 'wb') as f:
                    f.write(image_data)
                logger.info("Image saved: %s (%d bytes)", full_path, len(image_data))
                # BUG FIX: the relative path previously omitted the generated
                # file name, so image_url pointed at a path that does not
                # exist on disk. It must reference the file just written.
                relative_path = f"{category}/{slug}/{filename}"
                # Include the seed used for this generation (node '3' = sampler)
                used_seed = job['workflow'].get('3', {}).get('inputs', {}).get('seed')
                job['result'] = {
                    'image_url': f'/static/uploads/{relative_path}',
                    'relative_path': relative_path,
                    'seed': used_seed,
                }
                if db_model_class and (action is None or action == 'replace'):
                    logger.debug("  Updating database: %s.image_path = %s", db_model_class.__name__, relative_path)
                    obj = db_model_class.query.filter_by(slug=slug).first()
                    if obj:
                        obj.image_path = relative_path
                        db.session.commit()
                        logger.debug("  Database updated successfully")
                    else:
                        logger.warning("  Object not found in database: %s(slug=%s)", db_model_class.__name__, slug)
                else:
                    logger.debug("  Skipping database update (db_model_class=%s, action=%s)",
                                 db_model_class.__name__ if db_model_class else None, action)
                logger.debug("FINALIZE - Completed successfully")
                logger.debug("=" * 80)
                return
        logger.warning("FINALIZE - No images found in outputs!")
        logger.debug("=" * 80)
    return _finalize
def _prune_job_history(max_age_seconds=3600):
    """Remove completed/failed jobs older than max_age_seconds from _job_history.

    Only terminal statuses ('done', 'failed', 'removed') are eligible;
    active jobs are never pruned. Runs under the queue lock.
    """
    cutoff = time.time() - max_age_seconds
    terminal = ('done', 'failed', 'removed')
    with _job_queue_lock:
        stale_ids = [job_id for job_id, entry in _job_history.items()
                     if entry['status'] in terminal and entry['created_at'] < cutoff]
        for job_id in stale_ids:
            _job_history.pop(job_id)
def init_queue_worker(flask_app):
    """Store the Flask app reference and start the background worker thread.

    Called once from app.py during startup. The worker is a daemon thread
    so it never blocks interpreter shutdown.
    """
    global _app
    _app = flask_app
    threading.Thread(
        target=_queue_worker,
        daemon=True,
        name='queue-worker',
    ).start()

203
services/llm.py Normal file
View File

@@ -0,0 +1,203 @@
import os
import json
import asyncio
import requests
from flask import request as flask_request
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from models import Settings
# OpenAI-style "tools" schema describing the danbooru-mcp server's tools.
# Sent verbatim as the "tools" array in chat-completion requests (see
# call_llm); each "name" must match a tool exposed by the MCP server,
# since call_mcp_tool dispatches by that name.
DANBOORU_TOOLS = [
    {
        "type": "function",
        "function": {
            "name": "search_tags",
            "description": "Prefix/full-text search for Danbooru tags. Returns rich tag objects ordered by relevance.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Search string. Trailing * added automatically."},
                    "limit": {"type": "integer", "description": "Max results (1-200)", "default": 20},
                    "category": {"type": "string", "enum": ["general", "artist", "copyright", "character", "meta"], "description": "Optional category filter."}
                },
                "required": ["query"]
            }
        }
    },
    {
        "type": "function",
        "function": {
            "name": "validate_tags",
            "description": "Exact-match validation for a list of tags. Splits into valid, deprecated, and invalid.",
            "parameters": {
                "type": "object",
                "properties": {
                    "tags": {"type": "array", "items": {"type": "string"}, "description": "Tags to validate."}
                },
                "required": ["tags"]
            }
        }
    },
    {
        "type": "function",
        "function": {
            "name": "suggest_tags",
            "description": "Autocomplete-style suggestions for a partial or approximate tag. Sorted by post count.",
            "parameters": {
                "type": "object",
                "properties": {
                    "partial": {"type": "string", "description": "Partial tag or rough approximation."},
                    "limit": {"type": "integer", "description": "Max suggestions (1-50)", "default": 10},
                    "category": {"type": "string", "enum": ["general", "artist", "copyright", "character", "meta"], "description": "Optional category filter."}
                },
                "required": ["partial"]
            }
        }
    }
]
async def _run_mcp_tool(name, arguments):
    """Invoke a danbooru-mcp tool over stdio and return its text payload.

    name — tool name (must match an entry in DANBOORU_TOOLS).
    arguments — dict of tool arguments.

    Spawns the MCP server as a one-shot docker container per call; the
    stdio_client/ClientSession context managers own the process lifetime.
    Returns ``result.content[0].text`` — assumes the tool always returns at
    least one text content item (TODO confirm against danbooru-mcp).
    """
    server_params = StdioServerParameters(
        command="docker",
        args=["run", "--rm", "-i", "danbooru-mcp:latest"],
    )
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool(name, arguments)
            return result.content[0].text
def call_mcp_tool(name, arguments):
    """Synchronous wrapper around _run_mcp_tool.

    On any failure, prints the error and returns a JSON string of the form
    '{"error": ...}' so the caller always receives a string payload.
    """
    try:
        payload = asyncio.run(_run_mcp_tool(name, arguments))
    except Exception as exc:
        print(f"MCP Tool Error: {exc}")
        payload = json.dumps({"error": str(exc)})
    return payload
def load_prompt(filename):
    """Read a prompt template from data/prompts/<filename>.

    Returns the file contents as a string, or None when the file is absent.
    """
    path = os.path.join('data/prompts', filename)
    if not os.path.exists(path):
        return None
    with open(path, 'r') as handle:
        return handle.read()
def call_llm(prompt, system_prompt="You are a creative assistant."):
    """Send a chat-completion request to the configured LLM provider.

    prompt — user message content.
    system_prompt — system message content.

    Supports OpenRouter (remote) or a local OpenAI-compatible endpoint
    (Ollama / LMStudio), with optional Danbooru MCP tool calling: tool
    calls are executed via call_mcp_tool and their results fed back into
    the conversation until the model returns plain content.

    Returns the model's final message content (string).
    Raises ValueError for missing configuration; RuntimeError for API
    failures, persistent response-format errors, or exceeding the turn
    limit.
    """
    settings = Settings.query.first()
    if not settings:
        raise ValueError("Settings not configured.")
    is_local = settings.llm_provider != 'openrouter'
    if not is_local:
        if not settings.openrouter_api_key:
            raise ValueError("OpenRouter API Key not configured. Please configure it in Settings.")
        url = "https://openrouter.ai/api/v1/chat/completions"
        headers = {
            "Authorization": f"Bearer {settings.openrouter_api_key}",
            "Content-Type": "application/json",
            "HTTP-Referer": flask_request.url_root,
            "X-Title": "Character Browser"
        }
        model = settings.openrouter_model or 'google/gemini-2.0-flash-001'
    else:
        # Local provider (Ollama or LMStudio)
        if not settings.local_base_url:
            raise ValueError(f"{settings.llm_provider.title()} Base URL not configured.")
        url = f"{settings.local_base_url.rstrip('/')}/chat/completions"
        headers = {"Content-Type": "application/json"}
        model = settings.local_model
        if not model:
            raise ValueError(f"No local model selected for {settings.llm_provider.title()}. Please select one in Settings.")
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt}
    ]
    max_turns = 10
    use_tools = True
    format_retries = 3  # retries allowed for unexpected response format
    while max_turns > 0:
        max_turns -= 1
        data = {
            "model": model,
            "messages": messages,
        }
        # Only add tools if supported/requested
        if use_tools:
            data["tools"] = DANBOORU_TOOLS
            data["tool_choice"] = "auto"
        try:
            # NOTE(review): no request timeout — a stalled provider blocks
            # the caller indefinitely; consider adding one.
            response = requests.post(url, headers=headers, json=data)
            # If 400 Bad Request and we were using tools, try once without tools
            if response.status_code == 400 and use_tools:
                print(f"LLM Provider {settings.llm_provider} rejected tools. Retrying without tool calling...")
                use_tools = False
                max_turns += 1  # Reset turn for the retry
                continue
            response.raise_for_status()
            result = response.json()
            # Validate expected OpenAI-compatible response shape
            if 'choices' not in result or not result['choices']:
                raise KeyError('choices')
            message = result['choices'][0].get('message')
            if message is None:
                raise KeyError('message')
            if message.get('tool_calls'):
                # Model requested tools: execute each one and loop for the
                # follow-up turn with the tool results appended.
                messages.append(message)
                for tool_call in message['tool_calls']:
                    name = tool_call['function']['name']
                    args = json.loads(tool_call['function']['arguments'])
                    print(f"Executing MCP tool: {name}({args})")
                    tool_result = call_mcp_tool(name, args)
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call['id'],
                        "name": name,
                        "content": tool_result
                    })
                continue
            return message['content']
        except requests.exceptions.RequestException as e:
            error_body = ""
            try: error_body = f" - Body: {response.text}"
            except: pass
            raise RuntimeError(f"LLM API request failed: {str(e)}{error_body}") from e
        except (KeyError, IndexError) as e:
            # Log the raw response to help diagnose the issue
            raw = ""
            try: raw = response.text[:500]
            except: pass
            print(f"Unexpected LLM response format (key={e}). Raw response: {raw}")
            if format_retries > 0:
                format_retries -= 1
                max_turns += 1  # don't burn a turn on a format error
                # Ask the model to try again with the correct format
                messages.append({
                    "role": "user",
                    "content": (
                        "Your previous response was not in the expected format. "
                        "Please respond with valid JSON only, exactly as specified in the system prompt. "
                        "Do not include any explanation or markdown — only the raw JSON object."
                    )
                })
                print(f"Retrying after format error ({format_retries} retries left)…")
                continue
            raise RuntimeError(f"Unexpected LLM response format after retries: {str(e)}") from e
    raise RuntimeError("LLM tool calling loop exceeded maximum turns")

155
services/mcp.py Normal file
View File

@@ -0,0 +1,155 @@
import os
import subprocess
# Path to the MCP docker-compose projects, relative to the main app file.
MCP_TOOLS_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tools')
# danbooru-mcp checkout location and its upstream git remote.
MCP_COMPOSE_DIR = os.path.join(MCP_TOOLS_DIR, 'danbooru-mcp')
MCP_REPO_URL = 'https://git.liveaodh.com/aodhan/danbooru-mcp'
# character-mcp checkout location and its upstream git remote.
CHAR_MCP_COMPOSE_DIR = os.path.join(MCP_TOOLS_DIR, 'character-mcp')
CHAR_MCP_REPO_URL = 'https://git.liveaodh.com/aodhan/character-mcp.git'
def _ensure_mcp_repo():
    """Clone or update the danbooru-mcp source repository inside tools/.

    - If ``tools/danbooru-mcp/`` does not exist, clone from MCP_REPO_URL.
    - If it already exists, run ``git pull`` to fetch the latest changes.

    Errors are non-fatal.
    """
    os.makedirs(MCP_TOOLS_DIR, exist_ok=True)
    try:
        if os.path.isdir(MCP_COMPOSE_DIR):
            print('Updating danbooru-mcp via git pull …')
            subprocess.run(
                ['git', 'pull'],
                cwd=MCP_COMPOSE_DIR, timeout=60, check=True,
            )
            print('danbooru-mcp updated.')
        else:
            print(f'Cloning danbooru-mcp from {MCP_REPO_URL}')
            subprocess.run(
                ['git', 'clone', MCP_REPO_URL, MCP_COMPOSE_DIR],
                timeout=120, check=True,
            )
            print('danbooru-mcp cloned successfully.')
    except FileNotFoundError:
        print('WARNING: git not found on PATH — danbooru-mcp repo will not be cloned/updated.')
    except subprocess.CalledProcessError as e:
        print(f'WARNING: git operation failed for danbooru-mcp: {e}')
    except subprocess.TimeoutExpired:
        print('WARNING: git timed out while cloning/updating danbooru-mcp.')
    except Exception as e:
        print(f'WARNING: Could not clone/update danbooru-mcp repo: {e}')
def ensure_mcp_server_running():
    """Ensure the danbooru-mcp repo is present/up-to-date, then start the
    Docker container if it is not already running.

    Uses ``docker compose up -d`` so the image is built automatically on first
    run. Errors are non-fatal — the app will still start even if Docker is
    unavailable.

    Skipped when ``SKIP_MCP_AUTOSTART=true`` (set by docker-compose, where the
    danbooru-mcp service is managed by compose instead).
    """
    if os.environ.get('SKIP_MCP_AUTOSTART', '').lower() == 'true':
        print('SKIP_MCP_AUTOSTART set — skipping danbooru-mcp auto-start.')
        return
    _ensure_mcp_repo()
    try:
        ps = subprocess.run(
            ['docker', 'ps', '--filter', 'name=danbooru-mcp', '--format', '{{.Names}}'],
            capture_output=True, text=True, timeout=10,
        )
        already_running = 'danbooru-mcp' in ps.stdout
        if already_running:
            print('danbooru-mcp container already running.')
            return
        # Container not running — start it via docker compose
        print('Starting danbooru-mcp container via docker compose …')
        subprocess.run(['docker', 'compose', 'up', '-d'], cwd=MCP_COMPOSE_DIR, timeout=120)
        print('danbooru-mcp container started.')
    except FileNotFoundError:
        print('WARNING: docker not found on PATH — danbooru-mcp will not be started automatically.')
    except subprocess.TimeoutExpired:
        print('WARNING: docker timed out while starting danbooru-mcp.')
    except Exception as e:
        print(f'WARNING: Could not ensure danbooru-mcp is running: {e}')
def _ensure_character_mcp_repo():
    """Clone or update the character-mcp source repository inside tools/.

    - If ``tools/character-mcp/`` does not exist, clone from CHAR_MCP_REPO_URL.
    - If it already exists, run ``git pull`` to fetch the latest changes.

    Errors are non-fatal.
    """
    os.makedirs(MCP_TOOLS_DIR, exist_ok=True)
    try:
        if os.path.isdir(CHAR_MCP_COMPOSE_DIR):
            print('Updating character-mcp via git pull …')
            subprocess.run(
                ['git', 'pull'],
                cwd=CHAR_MCP_COMPOSE_DIR, timeout=60, check=True,
            )
            print('character-mcp updated.')
        else:
            print(f'Cloning character-mcp from {CHAR_MCP_REPO_URL}')
            subprocess.run(
                ['git', 'clone', CHAR_MCP_REPO_URL, CHAR_MCP_COMPOSE_DIR],
                timeout=120, check=True,
            )
            print('character-mcp cloned successfully.')
    except FileNotFoundError:
        print('WARNING: git not found on PATH — character-mcp repo will not be cloned/updated.')
    except subprocess.CalledProcessError as e:
        print(f'WARNING: git operation failed for character-mcp: {e}')
    except subprocess.TimeoutExpired:
        print('WARNING: git timed out while cloning/updating character-mcp.')
    except Exception as e:
        print(f'WARNING: Could not clone/update character-mcp repo: {e}')
def ensure_character_mcp_server_running():
    """Ensure the character-mcp repo is present/up-to-date, then start the
    Docker container if it is not already running.

    Uses ``docker compose up -d`` so the image is built automatically on first
    run. Errors are non-fatal — the app will still start even if Docker is
    unavailable.

    Skipped when ``SKIP_MCP_AUTOSTART=true`` (set by docker-compose, where the
    character-mcp service is managed by compose instead).
    """
    if os.environ.get('SKIP_MCP_AUTOSTART', '').lower() == 'true':
        print('SKIP_MCP_AUTOSTART set — skipping character-mcp auto-start.')
        return
    _ensure_character_mcp_repo()
    try:
        ps = subprocess.run(
            ['docker', 'ps', '--filter', 'name=character-mcp', '--format', '{{.Names}}'],
            capture_output=True, text=True, timeout=10,
        )
        already_running = 'character-mcp' in ps.stdout
        if already_running:
            print('character-mcp container already running.')
            return
        # Container not running — start it via docker compose
        print('Starting character-mcp container via docker compose …')
        subprocess.run(['docker', 'compose', 'up', '-d'], cwd=CHAR_MCP_COMPOSE_DIR, timeout=120)
        print('character-mcp container started.')
    except FileNotFoundError:
        print('WARNING: docker not found on PATH — character-mcp will not be started automatically.')
    except subprocess.TimeoutExpired:
        print('WARNING: docker timed out while starting character-mcp.')
    except Exception as e:
        print(f'WARNING: Could not ensure character-mcp is running: {e}')

274
services/prompts.py Normal file
View File

@@ -0,0 +1,274 @@
import re
from collections import Counter

from models import db, Character
from utils import _IDENTITY_KEYS, _WARDROBE_KEYS, parse_orientation
def _dedup_tags(prompt_str):
"""Remove duplicate tags from a comma-separated prompt string, preserving first-occurrence order."""
seen = set()
result = []
for tag in prompt_str.split(','):
t = tag.strip()
if t and t.lower() not in seen:
seen.add(t.lower())
result.append(t)
return ', '.join(result)
def _cross_dedup_prompts(positive, negative):
"""Remove tags shared between positive and negative prompts.
Repeatedly strips the first occurrence from each side until the tag exists
on only one side. Equal counts cancel out completely; any excess on one side
retains the remainder, allowing deliberate overrides (e.g. adding a tag twice
in the positive while it appears once in the negative leaves one copy positive).
"""
def parse_tags(s):
return [t.strip() for t in s.split(',') if t.strip()]
pos_tags = parse_tags(positive)
neg_tags = parse_tags(negative)
shared = {t.lower() for t in pos_tags} & {t.lower() for t in neg_tags}
for tag_lower in shared:
while (
any(t.lower() == tag_lower for t in pos_tags) and
any(t.lower() == tag_lower for t in neg_tags)
):
pos_tags.pop(next(i for i, t in enumerate(pos_tags) if t.lower() == tag_lower))
neg_tags.pop(next(i for i, t in enumerate(neg_tags) if t.lower() == tag_lower))
return ', '.join(pos_tags), ', '.join(neg_tags)
def _resolve_character(character_slug):
    """Resolve a character_slug string (possibly '__random__') to a Character instance.

    '__random__' picks a random character; an empty/None slug returns None;
    otherwise the slug is looked up directly (None if not found).
    """
    if not character_slug:
        return None
    if character_slug == '__random__':
        return Character.query.order_by(db.func.random()).first()
    return Character.query.filter_by(slug=character_slug).first()
def _ensure_character_fields(character, selected_fields, include_wardrobe=True, include_defaults=False):
    """Mutate selected_fields in place to include essential character identity/wardrobe/name keys.

    character — Character instance whose .data dict is inspected.
    selected_fields — list of 'section::key' strings, extended in place;
        existing entries are never duplicated or removed.
    include_wardrobe — also inject active wardrobe keys (default True)
    include_defaults — also inject defaults::expression and defaults::pose (for outfit/look previews)
    """
    identity = character.data.get('identity', {})
    # Only inject identity keys that actually carry a value on this character.
    for key in _IDENTITY_KEYS:
        if identity.get(key):
            field_key = f'identity::{key}'
            if field_key not in selected_fields:
                selected_fields.append(field_key)
    if include_defaults:
        for key in ['expression', 'pose']:
            if character.data.get('defaults', {}).get(key):
                field_key = f'defaults::{key}'
                if field_key not in selected_fields:
                    selected_fields.append(field_key)
    # The character's name tag is always ensured.
    if 'special::name' not in selected_fields:
        selected_fields.append('special::name')
    if include_wardrobe:
        # get_active_wardrobe() resolves the character's currently active outfit.
        wardrobe = character.get_active_wardrobe()
        for key in _WARDROBE_KEYS:
            if wardrobe.get(key):
                field_key = f'wardrobe::{key}'
                if field_key not in selected_fields:
                    selected_fields.append(field_key)
def _append_background(prompts, character=None):
"""Append a (color-prefixed) simple background tag to prompts['main']."""
primary_color = character.data.get('styles', {}).get('primary_color', '') if character else ''
bg = f"{primary_color} simple background" if primary_color else "simple background"
prompts['main'] = f"{prompts['main']}, {bg}"
def build_prompt(data, selected_fields=None, default_fields=None, active_outfit='default'):
    """Assemble the main/face/hand positive prompts from a character data dict.

    data            — character JSON blob (identity/wardrobe/defaults/style/etc.)
    selected_fields — optional list of 'section::key' strings chosen in the UI form
    default_fields  — per-character saved field defaults, used when no manual selection
    active_outfit   — wardrobe outfit key, used only for the nested wardrobe format

    Returns a dict with 'main', 'face' and 'hand' prompt strings, each
    de-duplicated via _dedup_tags.
    """
    def is_selected(section, key):
        # Priority:
        # 1. Manual selection from form (if list is not empty)
        # 2. Default fields (saved per character)
        # 3. Select all (fallback)
        if selected_fields is not None and len(selected_fields) > 0:
            return f"{section}::{key}" in selected_fields
        if default_fields:
            return f"{section}::{key}" in default_fields
        return True
    identity = data.get('identity', {})
    # Get wardrobe - handle both new nested format and legacy flat format
    wardrobe_data = data.get('wardrobe', {})
    if 'default' in wardrobe_data and isinstance(wardrobe_data.get('default'), dict):
        # New nested format - get active outfit
        wardrobe = wardrobe_data.get(active_outfit or 'default', wardrobe_data.get('default', {}))
    else:
        # Legacy flat format
        wardrobe = wardrobe_data
    defaults = data.get('defaults', {})
    action_data = data.get('action', {})
    style_data = data.get('style', {})
    participants = data.get('participants', {})
    # Pre-calculate Hand/Glove priority
    # Priority: wardrobe gloves > wardrobe hands (outfit) > identity hands (character)
    hand_val = ""
    if wardrobe.get('gloves') and is_selected('wardrobe', 'gloves'):
        hand_val = wardrobe.get('gloves')
    elif wardrobe.get('hands') and is_selected('wardrobe', 'hands'):
        hand_val = wardrobe.get('hands')
    elif identity.get('hands') and is_selected('identity', 'hands'):
        hand_val = identity.get('hands')
    # 1. Main Prompt
    parts = []
    # Handle participants logic
    if participants:
        if participants.get('solo_focus') == 'true':
            parts.append('(solo focus:1.2)')
        orientation = participants.get('orientation', '')
        if orientation:
            parts.extend(parse_orientation(orientation))
    else:
        # Default behavior
        parts.append("(solo:1.2)")
    # Use character_id (underscores to spaces) for tags compatibility
    char_tag = data.get('character_id', '').replace('_', ' ')
    if char_tag and is_selected('special', 'name'):
        parts.append(char_tag)
    for key in ['base_specs', 'hair', 'eyes', 'extra']:
        val = identity.get(key)
        if val and is_selected('identity', key):
            # Filter out conflicting tags if participants data is present
            if participants and key == 'base_specs':
                # Remove 1girl, 1boy, solo, etc.
                val = re.sub(r'\b(1girl|1boy|solo)\b', '', val).replace(', ,', ',').strip(', ')
            parts.append(val)
    # Add defaults (expression, pose, scene)
    for key in ['expression', 'pose', 'scene']:
        val = defaults.get(key)
        if val and is_selected('defaults', key):
            parts.append(val)
    # Add hand priority value to main prompt
    if hand_val:
        parts.append(hand_val)
    for key in ['full_body', 'top', 'bottom', 'headwear', 'legwear', 'footwear', 'accessories']:
        val = wardrobe.get(key)
        if val and is_selected('wardrobe', key):
            parts.append(val)
    # Standard character styles
    char_aesthetic = data.get('styles', {}).get('aesthetic')
    if char_aesthetic and is_selected('styles', 'aesthetic'):
        parts.append(f"{char_aesthetic} style")
    # New Styles Gallery logic
    if style_data.get('artist_name') and is_selected('style', 'artist_name'):
        parts.append(f"by {style_data['artist_name']}")
    if style_data.get('artistic_style') and is_selected('style', 'artistic_style'):
        parts.append(style_data['artistic_style'])
    tags = data.get('tags', [])
    if tags and is_selected('special', 'tags'):
        parts.extend(tags)
    lora = data.get('lora', {})
    if lora.get('lora_triggers') and is_selected('lora', 'lora_triggers'):
        parts.append(lora.get('lora_triggers'))
    # 2. Face Prompt: Tag, Eyes, Expression, Headwear, Action details
    face_parts = []
    if char_tag and is_selected('special', 'name'): face_parts.append(char_tag)
    if identity.get('eyes') and is_selected('identity', 'eyes'): face_parts.append(identity.get('eyes'))
    if defaults.get('expression') and is_selected('defaults', 'expression'): face_parts.append(defaults.get('expression'))
    if wardrobe.get('headwear') and is_selected('wardrobe', 'headwear'): face_parts.append(wardrobe.get('headwear'))
    # Add specific Action expression details if available
    if action_data.get('head') and is_selected('action', 'head'): face_parts.append(action_data.get('head'))
    if action_data.get('eyes') and is_selected('action', 'eyes'): face_parts.append(action_data.get('eyes'))
    # 3. Hand Prompt: Hand value (Gloves or Hands), Action details
    hand_parts = [hand_val] if hand_val else []
    if action_data.get('arms') and is_selected('action', 'arms'): hand_parts.append(action_data.get('arms'))
    if action_data.get('hands') and is_selected('action', 'hands'): hand_parts.append(action_data.get('hands'))
    return {
        "main": _dedup_tags(", ".join(parts)),
        "face": _dedup_tags(", ".join(face_parts)),
        "hand": _dedup_tags(", ".join(hand_parts))
    }
def build_extras_prompt(actions, outfits, scenes, styles, detailers):
    """Combine positive prompt text from all selected category items."""
    pieces = []

    def _add_lora(data):
        # Any category item may carry LoRA trigger words to inject.
        triggers = data.get('lora', {}).get('lora_triggers')
        if triggers:
            pieces.append(triggers)

    for item in actions:
        data = item.data
        _add_lora(data)
        pieces.extend(data.get('tags', []))
        action_fields = data.get('action', {})
        for key in ('full_body', 'additional'):
            value = action_fields.get(key)
            if value:
                pieces.append(value)

    for item in outfits:
        data = item.data
        wardrobe = data.get('wardrobe', {})
        for key in ('full_body', 'headwear', 'top', 'bottom', 'legwear',
                    'footwear', 'hands', 'accessories'):
            value = wardrobe.get(key)
            if value:
                pieces.append(value)
        _add_lora(data)
        pieces.extend(data.get('tags', []))

    for item in scenes:
        data = item.data
        scene_fields = data.get('scene', {})
        for key in ('background', 'foreground', 'lighting'):
            value = scene_fields.get(key)
            if value:
                pieces.append(value)
        _add_lora(data)
        pieces.extend(data.get('tags', []))

    for item in styles:
        data = item.data
        style_fields = data.get('style', {})
        if style_fields.get('artist_name'):
            pieces.append(f"by {style_fields['artist_name']}")
        if style_fields.get('artistic_style'):
            pieces.append(style_fields['artistic_style'])
        _add_lora(data)

    for item in detailers:
        data = item.data
        prompt = data.get('prompt', '')
        if isinstance(prompt, list):
            pieces.extend(p for p in prompt if p)
        elif prompt:
            pieces.append(prompt)
        _add_lora(data)

    return ", ".join(p for p in pieces if p)

701
services/sync.py Normal file
View File

@@ -0,0 +1,701 @@
import os
import json
import re
import random
import logging
from flask import current_app
from sqlalchemy.orm.attributes import flag_modified
from models import (
db, Character, Look, Outfit, Action, Style, Scene, Detailer, Checkpoint, Preset
)
logger = logging.getLogger('gaze')
def sync_characters():
    """Mirror JSON files in CHARACTERS_DIR into the Character table.

    Upserts a Character row per *.json file (keyed by character_id),
    clears cover-image paths whose files vanished on disk, and deletes
    rows whose source file no longer exists. Commits once at the end.
    """
    if not os.path.exists(current_app.config['CHARACTERS_DIR']):
        return
    current_ids = []
    for filename in os.listdir(current_app.config['CHARACTERS_DIR']):
        if filename.endswith('.json'):
            file_path = os.path.join(current_app.config['CHARACTERS_DIR'], filename)
            try:
                with open(file_path, 'r') as f:
                    data = json.load(f)
                char_id = data.get('character_id')
                if not char_id:
                    continue
                current_ids.append(char_id)
                # Generate URL-safe slug: remove special characters from character_id
                slug = re.sub(r'[^a-zA-Z0-9_]', '', char_id)
                # Check if character already exists
                character = Character.query.filter_by(character_id=char_id).first()
                name = data.get('character_name', char_id.replace('_', ' ').title())
                if character:
                    character.data = data
                    character.name = name
                    character.slug = slug
                    character.filename = filename
                    # Check if cover image still exists
                    if character.image_path:
                        full_img_path = os.path.join(current_app.config['UPLOAD_FOLDER'], character.image_path)
                        if not os.path.exists(full_img_path):
                            # Use the module logger instead of print()
                            logger.warning("Image missing for %s, clearing path.", character.name)
                            character.image_path = None
                    # Explicitly tell SQLAlchemy the JSON field was modified
                    flag_modified(character, "data")
                else:
                    new_char = Character(
                        character_id=char_id,
                        slug=slug,
                        filename=filename,
                        name=name,
                        data=data
                    )
                    db.session.add(new_char)
            except Exception as e:
                # Name the offending file (was print() with an opaque "(unknown)")
                logger.error("Error importing character file %s: %s", filename, e)
    # Remove characters that are no longer in the folder
    all_characters = Character.query.all()
    for char in all_characters:
        if char.character_id not in current_ids:
            db.session.delete(char)
    db.session.commit()
def sync_outfits():
    """Mirror JSON files in CLOTHING_DIR into the Outfit table.

    Upserts an Outfit row per *.json file (keyed by outfit_id, falling
    back to the filename), clears stale cover-image paths, and deletes
    rows whose source file has been removed. Commits once at the end.
    """
    if not os.path.exists(current_app.config['CLOTHING_DIR']):
        return
    current_ids = []
    for filename in os.listdir(current_app.config['CLOTHING_DIR']):
        if filename.endswith('.json'):
            file_path = os.path.join(current_app.config['CLOTHING_DIR'], filename)
            try:
                with open(file_path, 'r') as f:
                    data = json.load(f)
                outfit_id = data.get('outfit_id') or filename.replace('.json', '')
                current_ids.append(outfit_id)
                # Generate URL-safe slug: remove special characters from outfit_id
                slug = re.sub(r'[^a-zA-Z0-9_]', '', outfit_id)
                # Check if outfit already exists
                outfit = Outfit.query.filter_by(outfit_id=outfit_id).first()
                name = data.get('outfit_name', outfit_id.replace('_', ' ').title())
                if outfit:
                    outfit.data = data
                    outfit.name = name
                    outfit.slug = slug
                    outfit.filename = filename
                    # Check if cover image still exists
                    if outfit.image_path:
                        full_img_path = os.path.join(current_app.config['UPLOAD_FOLDER'], outfit.image_path)
                        if not os.path.exists(full_img_path):
                            # Use the module logger instead of print()
                            logger.warning("Image missing for %s, clearing path.", outfit.name)
                            outfit.image_path = None
                    # Explicitly tell SQLAlchemy the JSON field was modified
                    flag_modified(outfit, "data")
                else:
                    new_outfit = Outfit(
                        outfit_id=outfit_id,
                        slug=slug,
                        filename=filename,
                        name=name,
                        data=data
                    )
                    db.session.add(new_outfit)
            except Exception as e:
                # Name the offending file (was print() with an opaque "(unknown)")
                logger.error("Error importing outfit file %s: %s", filename, e)
    # Remove outfits that are no longer in the folder
    all_outfits = Outfit.query.all()
    for outfit in all_outfits:
        if outfit.outfit_id not in current_ids:
            db.session.delete(outfit)
    db.session.commit()
def ensure_default_outfit():
    """Ensure a default outfit file exists and is registered in the database.

    Checks if data/clothing/default.json exists, creates it with a minimal
    wardrobe structure if missing, and ensures a corresponding Outfit database
    entry exists. Returns True on success, False on any I/O or DB failure.
    """
    clothing_dir = current_app.config['CLOTHING_DIR']
    default_outfit_path = os.path.join(clothing_dir, 'default.json')

    if not os.path.exists(default_outfit_path):
        logger.info("Default outfit file not found at %s, creating it...", default_outfit_path)
        os.makedirs(clothing_dir, exist_ok=True)
        # Minimal all-empty wardrobe template for a brand-new installation.
        template = {
            "outfit_id": "default",
            "outfit_name": "Default",
            "wardrobe": {key: "" for key in (
                "full_body", "headwear", "top", "bottom",
                "legwear", "footwear", "hands", "accessories")},
            "lora": {
                "lora_name": "",
                "lora_weight": 0.8,
                "lora_triggers": ""
            },
            "tags": []
        }
        try:
            with open(default_outfit_path, 'w') as f:
                json.dump(template, f, indent=2)
            logger.info("Created default outfit file at %s", default_outfit_path)
        except Exception as e:
            logger.error("Failed to create default outfit file: %s", e)
            return False

    if Outfit.query.filter_by(outfit_id='default').first() is None:
        logger.info("Default Outfit database entry not found, creating it...")
        # Load the outfit data (either existing or newly created)
        try:
            with open(default_outfit_path, 'r') as f:
                outfit_data = json.load(f)
        except Exception as e:
            logger.error("Failed to read default outfit file: %s", e)
            return False
        try:
            db.session.add(Outfit(
                outfit_id='default',
                slug='default',
                filename='default.json',
                name='Default',
                data=outfit_data
            ))
            db.session.commit()
            logger.info("Created default Outfit database entry")
        except Exception as e:
            logger.error("Failed to create default Outfit database entry: %s", e)
            db.session.rollback()
            return False
    else:
        logger.debug("Default Outfit database entry already exists")

    logger.info("Default outfit verification complete")
    return True
def sync_looks():
    """Mirror JSON files in LOOKS_DIR into the Look table.

    Upserts a Look row per *.json file (keyed by look_id, falling back
    to the filename), silently clears stale cover-image paths, and
    deletes rows whose source file has been removed. Commits at the end.
    """
    if not os.path.exists(current_app.config['LOOKS_DIR']):
        return
    current_ids = []
    for filename in os.listdir(current_app.config['LOOKS_DIR']):
        if filename.endswith('.json'):
            file_path = os.path.join(current_app.config['LOOKS_DIR'], filename)
            try:
                with open(file_path, 'r') as f:
                    data = json.load(f)
                look_id = data.get('look_id') or filename.replace('.json', '')
                current_ids.append(look_id)
                # Generate URL-safe slug from look_id
                slug = re.sub(r'[^a-zA-Z0-9_]', '', look_id)
                look = Look.query.filter_by(look_id=look_id).first()
                name = data.get('look_name', look_id.replace('_', ' ').title())
                # Looks are optionally bound to a character
                character_id = data.get('character_id', None)
                if look:
                    look.data = data
                    look.name = name
                    look.slug = slug
                    look.filename = filename
                    look.character_id = character_id
                    # Clear the cover-image path if the file vanished on disk
                    if look.image_path:
                        full_img_path = os.path.join(current_app.config['UPLOAD_FOLDER'], look.image_path)
                        if not os.path.exists(full_img_path):
                            look.image_path = None
                    # Explicitly tell SQLAlchemy the JSON field was modified
                    flag_modified(look, "data")
                else:
                    new_look = Look(
                        look_id=look_id,
                        slug=slug,
                        filename=filename,
                        name=name,
                        character_id=character_id,
                        data=data
                    )
                    db.session.add(new_look)
            except Exception as e:
                # Name the offending file (was print() with an opaque "(unknown)")
                logger.error("Error importing look file %s: %s", filename, e)
    all_looks = Look.query.all()
    for look in all_looks:
        if look.look_id not in current_ids:
            db.session.delete(look)
    db.session.commit()
def sync_presets():
    """Mirror JSON files in PRESETS_DIR into the Preset table.

    Upserts a Preset row per *.json file (keyed by preset_id, falling
    back to the filename), clears stale cover-image paths, and deletes
    rows whose source file has been removed. Commits once at the end.
    """
    if not os.path.exists(current_app.config['PRESETS_DIR']):
        return
    current_ids = []
    for filename in os.listdir(current_app.config['PRESETS_DIR']):
        if filename.endswith('.json'):
            file_path = os.path.join(current_app.config['PRESETS_DIR'], filename)
            try:
                with open(file_path, 'r') as f:
                    data = json.load(f)
                preset_id = data.get('preset_id') or filename.replace('.json', '')
                current_ids.append(preset_id)
                # Generate URL-safe slug from preset_id
                slug = re.sub(r'[^a-zA-Z0-9_]', '', preset_id)
                preset = Preset.query.filter_by(preset_id=preset_id).first()
                name = data.get('preset_name', preset_id.replace('_', ' ').title())
                if preset:
                    preset.data = data
                    preset.name = name
                    preset.slug = slug
                    preset.filename = filename
                    # Clear the cover-image path if the file vanished on disk
                    if preset.image_path:
                        full_img_path = os.path.join(current_app.config['UPLOAD_FOLDER'], preset.image_path)
                        if not os.path.exists(full_img_path):
                            preset.image_path = None
                    # Explicitly tell SQLAlchemy the JSON field was modified
                    flag_modified(preset, "data")
                else:
                    new_preset = Preset(
                        preset_id=preset_id,
                        slug=slug,
                        filename=filename,
                        name=name,
                        data=data
                    )
                    db.session.add(new_preset)
            except Exception as e:
                # Name the offending file (was print() with an opaque "(unknown)")
                logger.error("Error importing preset file %s: %s", filename, e)
    all_presets = Preset.query.all()
    for preset in all_presets:
        if preset.preset_id not in current_ids:
            db.session.delete(preset)
    db.session.commit()
# ---------------------------------------------------------------------------
# Preset helpers
# ---------------------------------------------------------------------------
# Maps a preset entity type name to its (ORM model class, identifier column
# name) pair, used by _resolve_preset_entity to look records up generically.
# Note 'checkpoint' is keyed by its file path rather than a synthetic ID.
_PRESET_ENTITY_MAP = {
    'character': (Character, 'character_id'),
    'outfit': (Outfit, 'outfit_id'),
    'action': (Action, 'action_id'),
    'style': (Style, 'style_id'),
    'scene': (Scene, 'scene_id'),
    'detailer': (Detailer, 'detailer_id'),
    'look': (Look, 'look_id'),
    'checkpoint': (Checkpoint, 'checkpoint_path'),
}
def _resolve_preset_entity(entity_type, entity_id):
    """Resolve a preset entity_id ('random', specific ID, or None) to an ORM object."""
    if not entity_id:
        return None
    model_class, id_field = _PRESET_ENTITY_MAP[entity_type]
    if entity_id == 'random':
        # Let the database pick an arbitrary row.
        return model_class.query.order_by(db.func.random()).first()
    id_column = getattr(model_class, id_field)
    return model_class.query.filter(id_column == entity_id).first()
def _resolve_preset_fields(preset_data):
"""Convert preset field toggle dicts into a selected_fields list.
Each field value: True = include, False = exclude, 'random' = randomly decide.
Returns a list of 'section::key' strings for fields that are active.
"""
selected = []
char_cfg = preset_data.get('character', {})
fields = char_cfg.get('fields', {})
for key in ['base_specs', 'hair', 'eyes', 'hands', 'arms', 'torso', 'pelvis', 'legs', 'feet', 'extra']:
val = fields.get('identity', {}).get(key, True)
if val == 'random':
val = random.choice([True, False])
if val:
selected.append(f'identity::{key}')
for key in ['expression', 'pose', 'scene']:
val = fields.get('defaults', {}).get(key, False)
if val == 'random':
val = random.choice([True, False])
if val:
selected.append(f'defaults::{key}')
wardrobe_cfg = fields.get('wardrobe', {})
for key in ['full_body', 'headwear', 'top', 'bottom', 'legwear', 'footwear', 'hands', 'gloves', 'accessories']:
val = wardrobe_cfg.get('fields', {}).get(key, True)
if val == 'random':
val = random.choice([True, False])
if val:
selected.append(f'wardrobe::{key}')
# Always include name and lora triggers
selected.append('special::name')
if char_cfg.get('use_lora', True):
selected.append('lora::lora_triggers')
return selected
def sync_actions():
    """Mirror JSON files in ACTIONS_DIR into the Action table.

    Upserts an Action row per *.json file (keyed by action_id, falling
    back to the filename), clears stale cover-image paths, and deletes
    rows whose source file has been removed. Commits once at the end.
    """
    if not os.path.exists(current_app.config['ACTIONS_DIR']):
        return
    current_ids = []
    for filename in os.listdir(current_app.config['ACTIONS_DIR']):
        if filename.endswith('.json'):
            file_path = os.path.join(current_app.config['ACTIONS_DIR'], filename)
            try:
                with open(file_path, 'r') as f:
                    data = json.load(f)
                action_id = data.get('action_id') or filename.replace('.json', '')
                current_ids.append(action_id)
                # Generate URL-safe slug
                slug = re.sub(r'[^a-zA-Z0-9_]', '', action_id)
                # Check if action already exists
                action = Action.query.filter_by(action_id=action_id).first()
                name = data.get('action_name', action_id.replace('_', ' ').title())
                if action:
                    action.data = data
                    action.name = name
                    action.slug = slug
                    action.filename = filename
                    # Check if cover image still exists
                    if action.image_path:
                        full_img_path = os.path.join(current_app.config['UPLOAD_FOLDER'], action.image_path)
                        if not os.path.exists(full_img_path):
                            # Use the module logger instead of print()
                            logger.warning("Image missing for %s, clearing path.", action.name)
                            action.image_path = None
                    flag_modified(action, "data")
                else:
                    new_action = Action(
                        action_id=action_id,
                        slug=slug,
                        filename=filename,
                        name=name,
                        data=data
                    )
                    db.session.add(new_action)
            except Exception as e:
                # Name the offending file (was print() with an opaque "(unknown)")
                logger.error("Error importing action file %s: %s", filename, e)
    # Remove actions that are no longer in the folder
    all_actions = Action.query.all()
    for action in all_actions:
        if action.action_id not in current_ids:
            db.session.delete(action)
    db.session.commit()
def sync_styles():
    """Mirror JSON files in STYLES_DIR into the Style table.

    Upserts a Style row per *.json file (keyed by style_id, falling back
    to the filename), clears stale cover-image paths, and deletes rows
    whose source file has been removed. Commits once at the end.
    """
    if not os.path.exists(current_app.config['STYLES_DIR']):
        return
    current_ids = []
    for filename in os.listdir(current_app.config['STYLES_DIR']):
        if filename.endswith('.json'):
            file_path = os.path.join(current_app.config['STYLES_DIR'], filename)
            try:
                with open(file_path, 'r') as f:
                    data = json.load(f)
                style_id = data.get('style_id') or filename.replace('.json', '')
                current_ids.append(style_id)
                # Generate URL-safe slug
                slug = re.sub(r'[^a-zA-Z0-9_]', '', style_id)
                # Check if style already exists
                style = Style.query.filter_by(style_id=style_id).first()
                name = data.get('style_name', style_id.replace('_', ' ').title())
                if style:
                    style.data = data
                    style.name = name
                    style.slug = slug
                    style.filename = filename
                    # Check if cover image still exists
                    if style.image_path:
                        full_img_path = os.path.join(current_app.config['UPLOAD_FOLDER'], style.image_path)
                        if not os.path.exists(full_img_path):
                            # Use the module logger instead of print()
                            logger.warning("Image missing for %s, clearing path.", style.name)
                            style.image_path = None
                    flag_modified(style, "data")
                else:
                    new_style = Style(
                        style_id=style_id,
                        slug=slug,
                        filename=filename,
                        name=name,
                        data=data
                    )
                    db.session.add(new_style)
            except Exception as e:
                # Name the offending file (was print() with an opaque "(unknown)")
                logger.error("Error importing style file %s: %s", filename, e)
    # Remove styles that are no longer in the folder
    all_styles = Style.query.all()
    for style in all_styles:
        if style.style_id not in current_ids:
            db.session.delete(style)
    db.session.commit()
def sync_detailers():
    """Mirror JSON files in DETAILERS_DIR into the Detailer table.

    Upserts a Detailer row per *.json file (keyed by detailer_id,
    falling back to the filename), clears stale cover-image paths, and
    deletes rows whose source file has been removed. Commits at the end.
    """
    if not os.path.exists(current_app.config['DETAILERS_DIR']):
        return
    current_ids = []
    for filename in os.listdir(current_app.config['DETAILERS_DIR']):
        if filename.endswith('.json'):
            file_path = os.path.join(current_app.config['DETAILERS_DIR'], filename)
            try:
                with open(file_path, 'r') as f:
                    data = json.load(f)
                detailer_id = data.get('detailer_id') or filename.replace('.json', '')
                current_ids.append(detailer_id)
                # Generate URL-safe slug
                slug = re.sub(r'[^a-zA-Z0-9_]', '', detailer_id)
                # Check if detailer already exists
                detailer = Detailer.query.filter_by(detailer_id=detailer_id).first()
                name = data.get('detailer_name', detailer_id.replace('_', ' ').title())
                if detailer:
                    detailer.data = data
                    detailer.name = name
                    detailer.slug = slug
                    detailer.filename = filename
                    # Check if cover image still exists
                    if detailer.image_path:
                        full_img_path = os.path.join(current_app.config['UPLOAD_FOLDER'], detailer.image_path)
                        if not os.path.exists(full_img_path):
                            # Use the module logger instead of print()
                            logger.warning("Image missing for %s, clearing path.", detailer.name)
                            detailer.image_path = None
                    flag_modified(detailer, "data")
                else:
                    new_detailer = Detailer(
                        detailer_id=detailer_id,
                        slug=slug,
                        filename=filename,
                        name=name,
                        data=data
                    )
                    db.session.add(new_detailer)
            except Exception as e:
                # Name the offending file (was print() with an opaque "(unknown)")
                logger.error("Error importing detailer file %s: %s", filename, e)
    # Remove detailers that are no longer in the folder
    all_detailers = Detailer.query.all()
    for detailer in all_detailers:
        if detailer.detailer_id not in current_ids:
            db.session.delete(detailer)
    db.session.commit()
def sync_scenes():
    """Mirror JSON files in SCENES_DIR into the Scene table.

    Upserts a Scene row per *.json file (keyed by scene_id, falling back
    to the filename), clears stale cover-image paths, and deletes rows
    whose source file has been removed. Commits once at the end.
    """
    if not os.path.exists(current_app.config['SCENES_DIR']):
        return
    current_ids = []
    for filename in os.listdir(current_app.config['SCENES_DIR']):
        if filename.endswith('.json'):
            file_path = os.path.join(current_app.config['SCENES_DIR'], filename)
            try:
                with open(file_path, 'r') as f:
                    data = json.load(f)
                scene_id = data.get('scene_id') or filename.replace('.json', '')
                current_ids.append(scene_id)
                # Generate URL-safe slug
                slug = re.sub(r'[^a-zA-Z0-9_]', '', scene_id)
                # Check if scene already exists
                scene = Scene.query.filter_by(scene_id=scene_id).first()
                name = data.get('scene_name', scene_id.replace('_', ' ').title())
                if scene:
                    scene.data = data
                    scene.name = name
                    scene.slug = slug
                    scene.filename = filename
                    # Check if cover image still exists
                    if scene.image_path:
                        full_img_path = os.path.join(current_app.config['UPLOAD_FOLDER'], scene.image_path)
                        if not os.path.exists(full_img_path):
                            # Use the module logger instead of print()
                            logger.warning("Image missing for %s, clearing path.", scene.name)
                            scene.image_path = None
                    flag_modified(scene, "data")
                else:
                    new_scene = Scene(
                        scene_id=scene_id,
                        slug=slug,
                        filename=filename,
                        name=name,
                        data=data
                    )
                    db.session.add(new_scene)
            except Exception as e:
                # Name the offending file (was print() with an opaque "(unknown)")
                logger.error("Error importing scene file %s: %s", filename, e)
    # Remove scenes that are no longer in the folder
    all_scenes = Scene.query.all()
    for scene in all_scenes:
        if scene.scene_id not in current_ids:
            db.session.delete(scene)
    db.session.commit()
def _default_checkpoint_data(checkpoint_path, filename):
"""Return template-default data for a checkpoint with no JSON file."""
name_base = filename.rsplit('.', 1)[0]
return {
"checkpoint_path": checkpoint_path,
"checkpoint_name": filename,
"base_positive": "anime",
"base_negative": "text, logo",
"steps": 25,
"cfg": 5,
"sampler_name": "euler_ancestral",
"vae": "integrated"
}
def sync_checkpoints():
    """Scan model directories and mirror checkpoints into the Checkpoint table.

    Walks the Illustrious/Noob model directories for .safetensors/.ckpt
    files, merges optional per-checkpoint JSON metadata from
    CHECKPOINTS_DIR (falling back to _default_checkpoint_data), upserts
    Checkpoint rows, and deletes rows whose model file has vanished.
    """
    checkpoints_dir = current_app.config.get('CHECKPOINTS_DIR', 'data/checkpoints')
    os.makedirs(checkpoints_dir, exist_ok=True)
    # Load all JSON data files keyed by checkpoint_path
    json_data_by_path = {}
    for filename in os.listdir(checkpoints_dir):
        if filename.endswith('.json') and not filename.endswith('.template'):
            file_path = os.path.join(checkpoints_dir, filename)
            try:
                with open(file_path, 'r') as f:
                    data = json.load(f)
                ckpt_path = data.get('checkpoint_path')
                if ckpt_path:
                    json_data_by_path[ckpt_path] = data
            except Exception as e:
                # Name the offending file (was print() with an opaque "(unknown)")
                logger.error("Error reading checkpoint JSON %s: %s", filename, e)
    current_ids = []
    dirs = [
        (current_app.config.get('ILLUSTRIOUS_MODELS_DIR', ''), 'Illustrious'),
        (current_app.config.get('NOOB_MODELS_DIR', ''), 'Noob'),
    ]
    for dirpath, family in dirs:
        if not dirpath or not os.path.exists(dirpath):
            continue
        for f in sorted(os.listdir(dirpath)):
            if not (f.endswith('.safetensors') or f.endswith('.ckpt')):
                continue
            checkpoint_path = f"{family}/{f}"
            checkpoint_id = checkpoint_path
            # Slug: extension stripped, non-alphanumerics collapsed to '_'
            slug = re.sub(r'[^a-zA-Z0-9_]', '_', checkpoint_path.rsplit('.', 1)[0]).lower().strip('_')
            name_base = f.rsplit('.', 1)[0]
            friendly_name = re.sub(r'[^a-zA-Z0-9]+', ' ', name_base).strip().title()
            current_ids.append(checkpoint_id)
            data = json_data_by_path.get(checkpoint_path,
                                         _default_checkpoint_data(checkpoint_path, f))
            # Prefer the JSON-provided display name; fall back to the filename-derived one
            display_name = data.get('checkpoint_name', f).rsplit('.', 1)[0]
            display_name = re.sub(r'[^a-zA-Z0-9]+', ' ', display_name).strip().title() or friendly_name
            ckpt = Checkpoint.query.filter_by(checkpoint_id=checkpoint_id).first()
            if ckpt:
                ckpt.name = display_name
                ckpt.slug = slug
                ckpt.checkpoint_path = checkpoint_path
                ckpt.data = data
                flag_modified(ckpt, "data")
                # Clear the cover-image path if the file vanished on disk
                if ckpt.image_path:
                    full_img_path = os.path.join(current_app.config['UPLOAD_FOLDER'], ckpt.image_path)
                    if not os.path.exists(full_img_path):
                        ckpt.image_path = None
            else:
                db.session.add(Checkpoint(
                    checkpoint_id=checkpoint_id,
                    slug=slug,
                    name=display_name,
                    checkpoint_path=checkpoint_path,
                    data=data,
                ))
    all_ckpts = Checkpoint.query.all()
    for ckpt in all_ckpts:
        if ckpt.checkpoint_id not in current_ids:
            db.session.delete(ckpt)
    db.session.commit()

342
services/workflow.py Normal file
View File

@@ -0,0 +1,342 @@
import json
import logging
import random
from flask import session
from models import Settings, Checkpoint
from utils import _resolve_lora_weight
from services.prompts import _cross_dedup_prompts
logger = logging.getLogger('gaze')
def _log_workflow_prompts(label, workflow):
    """Log the final assembled ComfyUI prompts in a consistent, readable block.

    label    — short tag identifying the caller/context, shown in the header
    workflow — fully-assembled ComfyUI API-format workflow dict
               (node-id string -> node); this function only reads it.

    Emits a single multi-line INFO record covering model, sampler, LoRA,
    adetailer and prompt configuration. Assumes nodes '3', '4', '5', '6'
    and '7' are always present; other nodes are optional.
    """
    sep = "=" * 72
    active_loras = []
    lora_details = []
    # Collect detailed LoRA information
    for node_id, label_str in [("16", "char/look"), ("17", "outfit"), ("18", "action"), ("19", "style/detail/scene")]:
        if node_id in workflow:
            name = workflow[node_id]["inputs"].get("lora_name", "")
            if name:
                strength_model = workflow[node_id]["inputs"].get("strength_model", "?")
                strength_clip = workflow[node_id]["inputs"].get("strength_clip", "?")
                # Short version for summary
                if isinstance(strength_model, float):
                    active_loras.append(f"{label_str}:{name.split('/')[-1]}@{strength_model:.3f}")
                else:
                    active_loras.append(f"{label_str}:{name.split('/')[-1]}@{strength_model}")
                # Detailed version
                lora_details.append(f" Node {node_id} ({label_str}): {name}")
                lora_details.append(f" strength_model={strength_model}, strength_clip={strength_clip}")
    # Extract VAE information
    vae_info = "(integrated)"
    if '21' in workflow:
        vae_info = workflow['21']['inputs'].get('vae_name', '(custom)')
    # Extract adetailer information
    adetailer_info = []
    for node_id, node_name in [("11", "Face"), ("13", "Hand")]:
        if node_id in workflow:
            adetailer_info.append(f" {node_name} (Node {node_id}): steps={workflow[node_id]['inputs'].get('steps', '?')}, "
                                  f"cfg={workflow[node_id]['inputs'].get('cfg', '?')}, "
                                  f"denoise={workflow[node_id]['inputs'].get('denoise', '?')}")
    face_text = workflow.get('14', {}).get('inputs', {}).get('text', '')
    hand_text = workflow.get('15', {}).get('inputs', {}).get('text', '')
    lines = [
        sep,
        f" WORKFLOW PROMPTS [{label}]",
        sep,
        " MODEL CONFIGURATION:",
        f" Checkpoint : {workflow['4']['inputs'].get('ckpt_name', '(not set)')}",
        f" VAE : {vae_info}",
        "",
        " GENERATION SETTINGS:",
        f" Seed : {workflow['3']['inputs'].get('seed', '(not set)')}",
        f" Resolution : {workflow['5']['inputs'].get('width', '?')} x {workflow['5']['inputs'].get('height', '?')}",
        f" Sampler : {workflow['3']['inputs'].get('sampler_name', '?')} / {workflow['3']['inputs'].get('scheduler', '?')}",
        f" Steps : {workflow['3']['inputs'].get('steps', '?')}",
        f" CFG Scale : {workflow['3']['inputs'].get('cfg', '?')}",
        f" Denoise : {workflow['3']['inputs'].get('denoise', '1.0')}",
    ]
    # Add LoRA details
    if active_loras:
        lines.append("")
        lines.append(" LORA CONFIGURATION:")
        lines.extend(lora_details)
    else:
        lines.append("")
        lines.append(" LORA CONFIGURATION: (none)")
    # Add adetailer details
    if adetailer_info:
        lines.append("")
        lines.append(" ADETAILER CONFIGURATION:")
        lines.extend(adetailer_info)
    # Add prompts
    lines.extend([
        "",
        " PROMPTS:",
        f" [+] Positive : {workflow['6']['inputs'].get('text', '')}",
        f" [-] Negative : {workflow['7']['inputs'].get('text', '')}",
    ])
    if face_text:
        lines.append(f" [F] Face : {face_text}")
    if hand_text:
        lines.append(f" [H] Hand : {hand_text}")
    lines.append(sep)
    logger.info("\n%s", "\n".join(lines))
def _apply_checkpoint_settings(workflow, ckpt_data):
"""Apply checkpoint-specific sampler/prompt/VAE settings to the workflow."""
steps = ckpt_data.get('steps')
cfg = ckpt_data.get('cfg')
sampler_name = ckpt_data.get('sampler_name')
scheduler = ckpt_data.get('scheduler')
base_positive = ckpt_data.get('base_positive', '')
base_negative = ckpt_data.get('base_negative', '')
vae = ckpt_data.get('vae', 'integrated')
# KSampler (node 3)
if steps and '3' in workflow:
workflow['3']['inputs']['steps'] = int(steps)
if cfg and '3' in workflow:
workflow['3']['inputs']['cfg'] = float(cfg)
if sampler_name and '3' in workflow:
workflow['3']['inputs']['sampler_name'] = sampler_name
if scheduler and '3' in workflow:
workflow['3']['inputs']['scheduler'] = scheduler
# Face/hand detailers (nodes 11, 13)
for node_id in ['11', '13']:
if node_id in workflow:
if steps:
workflow[node_id]['inputs']['steps'] = int(steps)
if cfg:
workflow[node_id]['inputs']['cfg'] = float(cfg)
if sampler_name:
workflow[node_id]['inputs']['sampler_name'] = sampler_name
if scheduler:
workflow[node_id]['inputs']['scheduler'] = scheduler
# Prepend base_positive to positive prompts (main + face/hand detailers)
if base_positive:
for node_id in ['6', '14', '15']:
if node_id in workflow:
workflow[node_id]['inputs']['text'] = f"{base_positive}, {workflow[node_id]['inputs']['text']}"
# Append base_negative to negative prompt (shared by main + detailers via node 7)
if base_negative and '7' in workflow:
workflow['7']['inputs']['text'] = f"{workflow['7']['inputs']['text']}, {base_negative}"
# VAE: if not integrated, inject a VAELoader node and rewire
if vae and vae != 'integrated':
workflow['21'] = {
'inputs': {'vae_name': vae},
'class_type': 'VAELoader'
}
if '8' in workflow:
workflow['8']['inputs']['vae'] = ['21', 0]
for node_id in ['11', '13']:
if node_id in workflow:
workflow[node_id]['inputs']['vae'] = ['21', 0]
return workflow
def _get_default_checkpoint():
    """Return (checkpoint_path, checkpoint_data) from the database Settings, session, or fall back to workflow file."""
    ckpt_path = session.get('default_checkpoint')

    if not ckpt_path:
        # No session override — consult the persisted Settings row.
        settings = Settings.query.first()
        if settings and settings.default_checkpoint:
            ckpt_path = settings.default_checkpoint
            logger.debug("Loaded default checkpoint from database: %s", ckpt_path)

    if not ckpt_path:
        # Last resort: whatever the base workflow template has wired in.
        try:
            with open('comfy_workflow.json', 'r') as f:
                template = json.load(f)
            ckpt_path = template.get('4', {}).get('inputs', {}).get('ckpt_name')
            logger.debug("Loaded default checkpoint from workflow file: %s", ckpt_path)
        except Exception:
            pass

    if not ckpt_path:
        return None, None

    ckpt = Checkpoint.query.filter_by(checkpoint_path=ckpt_path).first()
    if ckpt is None:
        # Checkpoint path exists but not in DB - return path with empty data
        return ckpt_path, {}
    return ckpt.checkpoint_path, ckpt.data or {}
def _chain_lora_node(workflow, node_id, lora_data, model_source, clip_source, label, default_weight=None):
    """Wire one LoraLoader node into the model/clip chain, if applicable.

    The node is configured and linked only when ``lora_data`` names a LoRA
    and ``node_id`` exists in the workflow; otherwise the chain is untouched.

    Args:
        workflow: Mutable workflow graph (node-id -> node dict).
        node_id: Workflow node id of the LoraLoader to configure.
        lora_data: Dict with at least ``lora_name`` and optional weight keys
            understood by ``_resolve_lora_weight``.
        model_source / clip_source: ``[node_id, output_index]`` links feeding
            this node (previous LoRA in the chain, or the checkpoint).
        label: Human-readable name used in the debug log line.
        default_weight: Fallback ``lora_weight`` merged under ``lora_data``
            when provided (used for outfit LoRAs).

    Returns:
        tuple: ``(model_source, clip_source)`` for the next node in the
        chain — this node's outputs when it was wired, else the inputs.
    """
    lora_name = lora_data.get('lora_name')
    if not lora_name or node_id not in workflow:
        return model_source, clip_source
    if default_weight is not None:
        # Configured values win over the fallback weight.
        lora_data = {'lora_weight': default_weight, **lora_data}
    weight = _resolve_lora_weight(lora_data)
    inputs = workflow[node_id]['inputs']
    inputs['lora_name'] = lora_name
    inputs['strength_model'] = weight
    inputs['strength_clip'] = weight
    inputs['model'] = model_source
    inputs['clip'] = clip_source
    logger.debug("%s LoRA: %s @ %s", label, lora_name, weight)
    return [node_id, 0], [node_id, 1]


def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_negative=None, outfit=None, action=None, style=None, detailer=None, scene=None, width=None, height=None, checkpoint_data=None, look=None, fixed_seed=None):
    """Populate a ComfyUI workflow graph for a single generation.

    Fills in prompts, checkpoint, the LoRA chain (character -> outfit ->
    action -> style/detailer/scene), seeds, dimensions and per-checkpoint
    settings, then cross-deduplicates positive/negative prompts.

    Args:
        workflow: Mutable workflow graph (node-id -> node dict). Nodes '3'
            (KSampler), '4' (CheckpointLoader) and '6'/'7' (positive/negative
            CLIP encode) are required; '5' (latent size), '11'/'13' (face/hand
            detailer samplers), '14'/'15' (detailer prompts) and '16'-'19'
            (LoRA loaders) are optional and skipped when absent.
        character: Object whose ``data['lora']`` drives the character LoRA
            (node 16); may be ``None``.
        prompts: Dict with 'main', 'face' and 'hand' prompt strings,
            substituted into the ``{{...}}`` placeholders.
        checkpoint: Checkpoint path; falls back to ``_get_default_checkpoint``.
        custom_negative: Extra text appended to the negative prompt.
        outfit / action / style / detailer / scene: Objects whose
            ``data['lora']`` drives nodes 17, 18 and 19. Node 19 priority:
            style > detailer > scene.
        width / height: Latent image dimensions applied to node '5'.
        checkpoint_data: Settings dict for ``_apply_checkpoint_settings``
            (steps, cfg, sampler, base prompts, VAE).
        look: Overrides the character LoRA when present and may prepend a
            look-specific negative prompt.
        fixed_seed: Seed shared by all samplers for reproducible batches
            (e.g. Strengths Gallery); random when ``None``.

    Returns:
        The same workflow dict, mutated in place.

    Raises:
        ValueError: If no checkpoint is given and no default is configured.
    """
    # 1. Update prompts using replacement to preserve embeddings already
    #    present in the template text.
    workflow["6"]["inputs"]["text"] = workflow["6"]["inputs"]["text"].replace("{{POSITIVE_PROMPT}}", prompts["main"])
    if custom_negative:
        workflow["7"]["inputs"]["text"] = f"{workflow['7']['inputs']['text']}, {custom_negative}"
    if "14" in workflow:
        workflow["14"]["inputs"]["text"] = workflow["14"]["inputs"]["text"].replace("{{FACE_PROMPT}}", prompts["face"])
    if "15" in workflow:
        workflow["15"]["inputs"]["text"] = workflow["15"]["inputs"]["text"].replace("{{HAND_PROMPT}}", prompts["hand"])

    # 2. Update checkpoint - always set one, fall back to default if not provided.
    if not checkpoint:
        default_ckpt, default_ckpt_data = _get_default_checkpoint()
        checkpoint = default_ckpt
        if not checkpoint_data:
            checkpoint_data = default_ckpt_data
    if checkpoint:
        workflow["4"]["inputs"]["ckpt_name"] = checkpoint
    else:
        raise ValueError("No checkpoint specified and no default checkpoint configured")

    # 3. LoRA chain - node 16 character, 17 outfit, 18 action, 19
    #    style/detailer/scene. Start with direct checkpoint connections.
    model_source = ["4", 0]
    clip_source = ["4", 1]

    # Look negative prompt (applied before character LoRA).
    if look:
        look_negative = look.data.get('negative', '')
        if look_negative:
            workflow["7"]["inputs"]["text"] = f"{look_negative}, {workflow['7']['inputs']['text']}"

    # Character LoRA (node 16) — look LoRA overrides character LoRA when present.
    if look:
        char_lora_data = look.data.get('lora', {})
    else:
        char_lora_data = character.data.get('lora', {}) if character else {}
    model_source, clip_source = _chain_lora_node(
        workflow, "16", char_lora_data, model_source, clip_source, "Character")

    # Outfit LoRA (node 17) - chains from character LoRA or checkpoint;
    # defaults to weight 0.8 when the outfit specifies none.
    outfit_lora_data = outfit.data.get('lora', {}) if outfit else {}
    model_source, clip_source = _chain_lora_node(
        workflow, "17", outfit_lora_data, model_source, clip_source, "Outfit", default_weight=0.8)

    # Action LoRA (node 18) - chains from previous LoRA or checkpoint.
    action_lora_data = action.data.get('lora', {}) if action else {}
    model_source, clip_source = _chain_lora_node(
        workflow, "18", action_lora_data, model_source, clip_source, "Action")

    # Style/Detailer/Scene LoRA (node 19) - priority: Style > Detailer > Scene
    # (Scene LoRAs are rare but supported).
    target_obj = style or detailer or scene
    style_lora_data = target_obj.data.get('lora', {}) if target_obj else {}
    model_source, clip_source = _chain_lora_node(
        workflow, "19", style_lora_data, model_source, clip_source, "Style/Detailer")

    # Apply connections to all model/clip consumers. Detailer nodes
    # (11/13/14/15) are optional, so guard their existence — consistent with
    # the seed and sampler-sync handling below.
    workflow["3"]["inputs"]["model"] = model_source
    workflow["6"]["inputs"]["clip"] = clip_source
    workflow["7"]["inputs"]["clip"] = clip_source
    for node_id in ("11", "13"):
        if node_id in workflow:
            workflow[node_id]["inputs"]["model"] = model_source
    for node_id in ("11", "13", "14", "15"):
        if node_id in workflow:
            workflow[node_id]["inputs"]["clip"] = clip_source

    # 4. Randomize seeds (or use a fixed seed for reproducible batches like
    #    Strengths Gallery); all samplers share the same seed.
    gen_seed = fixed_seed if fixed_seed is not None else random.randint(1, 10**15)
    workflow["3"]["inputs"]["seed"] = gen_seed
    for node_id in ("11", "13"):
        if node_id in workflow:
            workflow[node_id]["inputs"]["seed"] = gen_seed

    # 5. Set image dimensions.
    if "5" in workflow:
        if width:
            workflow["5"]["inputs"]["width"] = int(width)
        if height:
            workflow["5"]["inputs"]["height"] = int(height)

    # 6. Apply checkpoint-specific settings (steps, cfg, sampler, base
    #    prompts, VAE).
    if checkpoint_data:
        workflow = _apply_checkpoint_settings(workflow, checkpoint_data)

    # 7. Sync sampler/scheduler from the main KSampler to adetailer nodes.
    sampler_name = workflow["3"]["inputs"].get("sampler_name")
    scheduler = workflow["3"]["inputs"].get("scheduler")
    for node_id in ("11", "13"):
        if node_id in workflow:
            if sampler_name:
                workflow[node_id]["inputs"]["sampler_name"] = sampler_name
            if scheduler:
                workflow[node_id]["inputs"]["scheduler"] = scheduler

    # 8. Cross-deduplicate: remove tags shared between positive and negative.
    pos_text, neg_text = _cross_dedup_prompts(
        workflow["6"]["inputs"]["text"],
        workflow["7"]["inputs"]["text"]
    )
    workflow["6"]["inputs"]["text"] = pos_text
    workflow["7"]["inputs"]["text"] = neg_text

    # 9. Final prompt debug — logged after all transformations are complete.
    _log_workflow_prompts("_prepare_workflow", workflow)
    return workflow