diff --git a/CLAUDE.md b/CLAUDE.md index 07ad995..a38b80d 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -30,7 +30,7 @@ services/ generation.py # Shared generation logic (generate_from_preset) routes/ __init__.py # register_routes(app) — imports and calls all route modules - shared.py # Factory functions for common routes (favourite, upload, clone, save_json, etc.) + shared.py # Factory functions for common routes + apply_library_filters() helper characters.py # Character CRUD + generation + outfit management outfits.py # Outfit routes actions.py # Action routes @@ -115,27 +115,29 @@ All category models (except Settings and Checkpoint) share this pattern: ## ComfyUI Workflow Node Map -The workflow (`comfy_workflow.json`) uses string node IDs. These are the critical nodes: +The workflow (`comfy_workflow.json`) uses string node IDs. Named constants are defined in `services/workflow.py`: -| Node | Role | -|------|------| -| `3` | Main KSampler | -| `4` | Checkpoint loader | -| `5` | Empty latent (width/height) | -| `6` | Positive prompt — contains `{{POSITIVE_PROMPT}}` placeholder | -| `7` | Negative prompt | -| `8` | VAE decode | -| `9` | Save image | -| `11` | Face ADetailer | -| `13` | Hand ADetailer | -| `14` | Face detailer prompt — contains `{{FACE_PROMPT}}` placeholder | -| `15` | Hand detailer prompt — contains `{{HAND_PROMPT}}` placeholder | -| `16` | Character LoRA (or Look LoRA when a Look is active) | -| `17` | Outfit LoRA | -| `18` | Action LoRA | -| `19` | Style / Detailer / Scene LoRA (priority: style > detailer > scene) | +| Constant | Node | Role | +|----------|------|------| +| `NODE_KSAMPLER` | `3` | Main KSampler | +| `NODE_CHECKPOINT` | `4` | Checkpoint loader | +| `NODE_LATENT` | `5` | Empty latent (width/height) | +| `NODE_POSITIVE` | `6` | Positive prompt — contains `{{POSITIVE_PROMPT}}` placeholder | +| `NODE_NEGATIVE` | `7` | Negative prompt | +| `NODE_VAE_DECODE` | `8` | VAE decode | +| `NODE_SAVE` | `9` | Save image | +| `NODE_FACE_DETAILER` | 
`11` | Face ADetailer | +| `NODE_HAND_DETAILER` | `13` | Hand ADetailer | +| `NODE_FACE_PROMPT` | `14` | Face detailer prompt — contains `{{FACE_PROMPT}}` placeholder | +| `NODE_HAND_PROMPT` | `15` | Hand detailer prompt — contains `{{HAND_PROMPT}}` placeholder | +| `NODE_LORA_CHAR` | `16` | Character LoRA (or Look LoRA when a Look is active) | +| `NODE_LORA_OUTFIT` | `17` | Outfit LoRA | +| `NODE_LORA_ACTION` | `18` | Action LoRA | +| `NODE_LORA_STYLE` | `19` | Style / Detailer / Scene LoRA (priority: style > detailer > scene) | +| `NODE_LORA_CHAR_B` | `20` | Character LoRA B (second character) | +| `NODE_VAE_LOADER` | `21` | VAE loader | -LoRA nodes chain: `4 → 16 → 17 → 18 → 19`. Unused LoRA nodes are bypassed by pointing `model_source`/`clip_source` directly to the prior node. All model/clip consumers (nodes 3, 6, 7, 11, 13, 14, 15) are wired to the final `model_source`/`clip_source` at the end of `_prepare_workflow`. +LoRA nodes chain: `4 → 16 → 17 → 18 → 19`. Unused LoRA nodes are bypassed by pointing `model_source`/`clip_source` directly to the prior node. All model/clip consumers (nodes 3, 6, 7, 11, 13, 14, 15) are wired to the final `model_source`/`clip_source` at the end of `_prepare_workflow`. Always use the named constants instead of string literals when referencing node IDs. --- @@ -143,12 +145,13 @@ LoRA nodes chain: `4 → 16 → 17 → 18 → 19`. Unused LoRA nodes are bypasse ### `utils.py` — Constants and Pure Helpers -- **`_IDENTITY_KEYS` / `_WARDROBE_KEYS`** — Lists of canonical field names for the `identity` and `wardrobe` sections. Used by `_ensure_character_fields()`. +- **`_BODY_GROUP_KEYS`** — Canonical list of field names shared by both `identity` and `wardrobe` sections: `['base', 'head', 'upper_body', 'lower_body', 'hands', 'feet', 'additional']`. Used by `build_prompt()`, `_ensure_character_fields()`, and `_resolve_preset_fields()`. - **`ALLOWED_EXTENSIONS`** — Permitted upload file extensions. 
- **`_LORA_DEFAULTS`** — Default LoRA directory paths per category. - **`parse_orientation(orientation_str)`** — Converts orientation codes (`1F`, `2F`, `1M1F`, etc.) into Danbooru tags. - **`_resolve_lora_weight(lora_data)`** — Extracts and validates LoRA weight from a lora data dict. - **`allowed_file(filename)`** — Checks file extension against `ALLOWED_EXTENSIONS`. +- **`clean_html_text(html_raw)`** — Strips HTML tags, scripts, styles, and images from raw HTML, returning plain text. Used by bulk_create routes. ### `services/prompts.py` — Prompt Building @@ -181,10 +184,11 @@ Two independent queues with separate worker threads: ### `services/comfyui.py` — ComfyUI HTTP Client -- **`queue_prompt(prompt_workflow, client_id)`** — POSTs workflow to ComfyUI's `/prompt` endpoint. -- **`get_history(prompt_id)`** — Polls ComfyUI for job completion. -- **`get_image(filename, subfolder, folder_type)`** — Retrieves generated image bytes. -- **`_ensure_checkpoint_loaded(checkpoint_path)`** — Forces ComfyUI to load a specific checkpoint. +- **`queue_prompt(prompt_workflow, client_id)`** — POSTs workflow to ComfyUI's `/prompt` endpoint. Validates HTTP response status before parsing JSON; raises `RuntimeError` on non-OK responses. Timeout: 30s. +- **`get_history(prompt_id)`** — Polls ComfyUI for job completion. Timeout: 10s. +- **`get_image(filename, subfolder, folder_type)`** — Retrieves generated image bytes. Timeout: 30s. +- **`get_loaded_checkpoint()`** — Returns the checkpoint path currently loaded in ComfyUI by inspecting the most recent job in `/history`. +- **`_ensure_checkpoint_loaded(checkpoint_path)`** — Forces ComfyUI to unload all models if the desired checkpoint doesn't match what's currently loaded. ### `services/llm.py` — LLM Integration @@ -211,8 +215,10 @@ Two independent queues with separate worker threads: ### `services/mcp.py` — MCP/Docker Lifecycle -- **`ensure_mcp_server_running()`** — Ensures the danbooru-mcp Docker container is running. 
-- **`ensure_character_mcp_server_running()`** — Ensures the character-mcp Docker container is running. +- **`_ensure_repo(compose_dir, repo_url, name)`** — Generic helper: clones a git repo if the directory doesn't exist. +- **`_ensure_server_running(compose_dir, repo_url, container_name, name)`** — Generic helper: ensures a Docker Compose service is running (clones repo if needed, starts container if not running). +- **`ensure_mcp_server_running()`** — Ensures the danbooru-mcp Docker container is running (thin wrapper around `_ensure_server_running`). +- **`ensure_character_mcp_server_running()`** — Ensures the character-mcp Docker container is running (thin wrapper around `_ensure_server_running`). ### Route-local Helpers @@ -239,7 +245,7 @@ Some helpers are defined inside a route module's `register_routes()` since they' "identity": { "base_specs": "", "hair": "", "eyes": "", "hands": "", "arms": "", "torso": "", "pelvis": "", "legs": "", "feet": "", "extra": "" }, "defaults": { "expression": "", "pose": "", "scene": "" }, "wardrobe": { - "default": { "full_body": "", "headwear": "", "top": "", "bottom": "", "legwear": "", "footwear": "", "hands": "", "gloves": "", "accessories": "" } + "default": { "base": "", "head": "", "upper_body": "", "lower_body": "", "hands": "", "feet": "", "additional": "" } }, "styles": { "aesthetic": "", "primary_color": "", "secondary_color": "", "tertiary_color": "" }, "lora": { "lora_name": "Illustrious/Looks/tifa.safetensors", "lora_weight": 0.8, "lora_triggers": "" }, @@ -254,7 +260,7 @@ Some helpers are defined inside a route module's `register_routes()` since they' { "outfit_id": "french_maid_01", "outfit_name": "French Maid", - "wardrobe": { "full_body": "", "headwear": "", "top": "", "bottom": "", "legwear": "", "footwear": "", "hands": "", "accessories": "" }, + "wardrobe": { "base": "", "head": "", "upper_body": "", "lower_body": "", "hands": "", "feet": "", "additional": "" }, "lora": { "lora_name": 
"Illustrious/Clothing/maid.safetensors", "lora_weight": 0.8, "lora_triggers": "" }, "tags": { "outfit_type": "Uniform", "nsfw": false } } @@ -439,7 +445,7 @@ Image retrieval is handled server-side by the `_make_finalize()` callback; there - `static/js/library-toolbar.js` — Library page toolbar (batch generate, clear covers, missing items) - Context processors inject `all_checkpoints`, `default_checkpoint_path`, and `COMFYUI_WS_URL` into every template. The `random_gen_image(category, slug)` template global returns a random image path from `static/uploads///` for use as a fallback cover when `image_path` is not set. - **No `{% block head %}` exists** in layout.html — do not try to use it. -- Generation is async: JS submits the form via AJAX (`X-Requested-With: XMLHttpRequest`), receives a `{"job_id": ...}` response, then polls `/api/queue//status` every ~1.5 seconds until `status == "done"`. The server-side worker handles all ComfyUI polling and image saving via the `_make_finalize()` callback. There are no client-facing finalize HTTP routes. +- Generation is async: JS submits the form via AJAX (`X-Requested-With: XMLHttpRequest`), receives a `{"job_id": ...}` response, then polls `/api/queue//status` every ~1.5 seconds until `status == "done"` or the 5-minute timeout is reached. The server-side worker handles all ComfyUI polling and image saving via the `_make_finalize()` callback. There are no client-facing finalize HTTP routes. - **Batch generation** (library pages): Uses a two-phase pattern: 1. **Queue phase**: All jobs are submitted upfront via sequential fetch calls, collecting job IDs 2. **Poll phase**: All jobs are polled concurrently via `Promise.all()`, updating UI as each completes @@ -501,6 +507,7 @@ All library index pages support query params: - `?favourite=on` — show only favourites - `?nsfw=sfw|nsfw|all` — filter by NSFW status - Results are ordered by `is_favourite DESC, name ASC` (favourites sort first). 
+- Filter logic is shared via `apply_library_filters(query, model_class)` in `routes/shared.py`, which returns `(items, fav, nsfw)`. ### Gallery Image Sidecar Files @@ -609,3 +616,5 @@ Volumes mounted into the app container: - **`_make_finalize` action semantics**: Pass `action=None` when the route should always update the DB cover (e.g. batch generate, checkpoint generate). Pass `action=request.form.get('action')` for routes that support both "preview" (no DB update) and "replace" (update DB). The factory skips the DB write when `action` is truthy and not `"replace"`. - **LLM queue runs without request context**: `_enqueue_task()` callbacks execute in a background thread with only `app.app_context()`. Do not access `flask.request`, `flask.session`, or other request-scoped objects inside `task_fn`. Use `has_request_context()` guard if code is shared between HTTP handlers and background tasks. - **Tags are metadata only**: Tags (`data['tags']`) are never injected into generation prompts. They are purely for UI filtering and search. The old pattern of `parts.extend(data.get('tags', []))` in prompt building has been removed. +- **Path traversal guard on replace cover**: The replace cover route in `routes/shared.py` validates `preview_path` using `os.path.realpath()` + `startswith()` to prevent path traversal attacks. +- **Logging uses lazy % formatting**: All logger calls use `logger.info("msg %s", var)` style, not f-strings. This avoids formatting the string when the log level is disabled. 
diff --git a/app.py b/app.py index ed95ff8..3983382 100644 --- a/app.py +++ b/app.py @@ -69,6 +69,7 @@ if __name__ == '__main__': from sqlalchemy import text os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True) + os.makedirs(app.config['SESSION_FILE_DIR'], exist_ok=True) db.create_all() # --- Helper for safe column additions --- @@ -79,8 +80,11 @@ if __name__ == '__main__': logger.info("Added %s.%s column", table, column) except Exception as e: db.session.rollback() - if 'duplicate column name' not in str(e).lower() and 'already exists' not in str(e).lower(): - logger.debug("Migration note (%s.%s): %s", table, column, e) + err_str = str(e).lower() + if 'duplicate column name' in err_str or 'already exists' in err_str: + pass # Column already exists, expected + else: + logger.warning("Migration failed (%s.%s): %s", table, column, e) # --- All migrations (grouped before syncs) --- _add_column('character', 'active_outfit', "VARCHAR(100) DEFAULT 'default'") @@ -106,7 +110,7 @@ if __name__ == '__main__': _add_column('settings', col_name, col_type) # is_favourite / is_nsfw on all resource tables - for tbl in ['character', 'look', 'outfit', 'action', 'style', 'scene', 'detailer', 'checkpoint']: + for tbl in ['character', 'look', 'outfit', 'action', 'style', 'scene', 'detailer', 'checkpoint', 'preset']: _add_column(tbl, 'is_favourite', 'BOOLEAN DEFAULT 0') _add_column(tbl, 'is_nsfw', 'BOOLEAN DEFAULT 0') diff --git a/data/characters/2b.json b/data/characters/2b.json index c14ef05..f26971b 100644 --- a/data/characters/2b.json +++ b/data/characters/2b.json @@ -16,14 +16,13 @@ "scene": "" }, "wardrobe": { - "full_body": "black_dress, lace-trimmed_dress, gothic_lolita", - "headwear": "blindfold", - "top": "black_dress, cleavage_cutout, feather_trim", - "bottom": "short_dress", - "legwear": "thighhighs", - "footwear": "thigh_boots, black_boots, high_heels", - "hands": "black_gloves", - "accessories": "katana, sword_on_back" + "base": "black_dress, lace-trimmed_dress, 
gothic_lolita", + "head": "blindfold", + "upper_body": "black_dress, cleavage_cutout, feather_trim", + "lower_body": "short_dress", + "additional": "katana, sword_on_back", + "feet": "thighhighs, thigh_boots, black_boots, high_heels", + "hands": "black_gloves" }, "styles": { "aesthetic": "gothic_lolita, science_fiction, dark_atmosphere", diff --git a/data/characters/aisha_clan_clan.json b/data/characters/aisha_clan_clan.json index 67668ef..78112f7 100644 --- a/data/characters/aisha_clan_clan.json +++ b/data/characters/aisha_clan_clan.json @@ -16,14 +16,13 @@ "scene": "space_station" }, "wardrobe": { - "full_body": "off-shoulder_dress, two-tone_dress", - "headwear": "circlet, hair_ring", - "top": "neck_bell, white_collar, long_sleeves, cleavage", - "bottom": "black_belt", - "legwear": "pantyhose, thigh_strap", - "footwear": "", - "hands": "bracelets", - "accessories": "bell" + "base": "off-shoulder_dress, two-tone_dress", + "head": "circlet, hair_ring", + "upper_body": "neck_bell, white_collar, long_sleeves, cleavage", + "lower_body": "black_belt", + "additional": "bell", + "feet": "pantyhose, thigh_strap", + "hands": "bracelets" }, "styles": { "aesthetic": "retro_anime, 1990s_(style), outlaw_star", diff --git a/data/characters/android_21.json b/data/characters/android_21.json index d82dcb7..5503bdb 100644 --- a/data/characters/android_21.json +++ b/data/characters/android_21.json @@ -16,14 +16,13 @@ "scene": "indoors, laboratory" }, "wardrobe": { - "full_body": "lab_coat, dress, checkered_pattern", - "headwear": "", - "top": "lab_coat, dress", - "bottom": "", - "legwear": "thighhighs, black_thighhighs", - "footwear": "high_heels", - "hands": "", - "accessories": "earrings, ring" + "base": "lab_coat, dress, checkered_pattern", + "head": "", + "upper_body": "lab_coat, dress", + "lower_body": "", + "additional": "earrings, ring", + "feet": "thighhighs, black_thighhighs, high_heels", + "hands": "" }, "styles": { "aesthetic": "anime", diff --git 
a/data/characters/becky_blackbell.json b/data/characters/becky_blackbell.json index 6684da6..ff62ac5 100644 --- a/data/characters/becky_blackbell.json +++ b/data/characters/becky_blackbell.json @@ -16,14 +16,13 @@ "scene": "" }, "wardrobe": { - "full_body": "eden_academy_school_uniform, black_dress, gold_trim", - "headwear": "hair_ornament", - "top": "", - "bottom": "", - "legwear": "white_socks", - "footwear": "loafers", - "hands": "", - "accessories": "" + "base": "eden_academy_school_uniform, black_dress, gold_trim", + "head": "hair_ornament", + "upper_body": "", + "lower_body": "", + "additional": "", + "feet": "white_socks, loafers", + "hands": "" }, "styles": { "aesthetic": "anime_style", diff --git a/data/characters/blossom_ppg.json b/data/characters/blossom_ppg.json index 3796d01..3e90181 100644 --- a/data/characters/blossom_ppg.json +++ b/data/characters/blossom_ppg.json @@ -16,14 +16,13 @@ "scene": "cityscape, daytime, sky" }, "wardrobe": { - "full_body": "pink_dress, sleeveless_dress, a-line_dress", - "headwear": "red_hair_bow, oversized_bow", - "top": "", - "bottom": "", - "legwear": "white_leggings, white_tights", - "footwear": "black_shoes, mary_janes", - "hands": "", - "accessories": "black_waist_belt" + "base": "pink_dress, sleeveless_dress, a-line_dress", + "head": "red_hair_bow, oversized_bow", + "upper_body": "", + "lower_body": "", + "additional": "black_waist_belt", + "feet": "white_leggings, white_tights, black_shoes, mary_janes", + "hands": "" }, "styles": { "aesthetic": "modern_cartoon, cel_shading, vibrant", diff --git a/data/characters/bubbles_ppg.json b/data/characters/bubbles_ppg.json index e32ddcf..19655dc 100644 --- a/data/characters/bubbles_ppg.json +++ b/data/characters/bubbles_ppg.json @@ -16,14 +16,13 @@ "scene": "" }, "wardrobe": { - "full_body": "blue_dress", - "headwear": "", - "top": "", - "bottom": "black_belt", - "legwear": "thighhighs, white_socks", - "footwear": "mary_janes", - "hands": "", - "accessories": "" + "base": 
"blue_dress", + "head": "", + "upper_body": "", + "lower_body": "black_belt", + "additional": "", + "feet": "thighhighs, white_socks, mary_janes", + "hands": "" }, "styles": { "aesthetic": "vibrant_colors", diff --git a/data/characters/buttercup_ppg.json b/data/characters/buttercup_ppg.json index 027c12e..2d6ec56 100644 --- a/data/characters/buttercup_ppg.json +++ b/data/characters/buttercup_ppg.json @@ -16,14 +16,13 @@ "scene": "" }, "wardrobe": { - "full_body": "", - "headwear": "", - "top": "green_crop_top, sleeveless", - "bottom": "black_belt, green_shorts", - "legwear": "white_thighhighs", - "footwear": "black_boots", - "hands": "fingerless_gloves", - "accessories": "" + "base": "", + "head": "", + "upper_body": "green_crop_top, sleeveless", + "lower_body": "black_belt, green_shorts", + "additional": "", + "feet": "white_thighhighs, black_boots", + "hands": "fingerless_gloves" }, "styles": { "aesthetic": "high_contrast, vibrant", diff --git a/data/characters/clover_totally_spies.json b/data/characters/clover_totally_spies.json index a795a1b..02e1d0d 100644 --- a/data/characters/clover_totally_spies.json +++ b/data/characters/clover_totally_spies.json @@ -16,14 +16,13 @@ "scene": "" }, "wardrobe": { - "full_body": "red_bodysuit, latex_bodysuit", - "headwear": "", - "top": "", - "bottom": "", - "legwear": "", - "footwear": "boots, high_heels", - "hands": "", - "accessories": "belt, silver_belt" + "base": "red_bodysuit, latex_bodysuit", + "head": "", + "upper_body": "", + "lower_body": "", + "additional": "belt, silver_belt", + "feet": "boots, high_heels", + "hands": "" }, "styles": { "aesthetic": "anime_style, 2000s_(style)", diff --git a/data/characters/hikage_senran_kagura.json b/data/characters/hikage_senran_kagura.json index 9153ee2..a51e9ea 100644 --- a/data/characters/hikage_senran_kagura.json +++ b/data/characters/hikage_senran_kagura.json @@ -16,14 +16,13 @@ "scene": "" }, "wardrobe": { - "full_body": "torn_clothes", - "headwear": "", - "top": 
"yellow_shirt, crop_top", - "bottom": "jeans, torn_jeans, open_fly, loose_belt", - "legwear": "", - "footwear": "boots", - "hands": "arm_belt", - "accessories": "leg_belt" + "base": "torn_clothes", + "head": "", + "upper_body": "yellow_shirt, crop_top", + "lower_body": "jeans, torn_jeans, open_fly, loose_belt", + "additional": "leg_belt", + "feet": "boots", + "hands": "arm_belt" }, "styles": { "aesthetic": "anime, video_game", diff --git a/data/characters/shiki_senran_kagura.json b/data/characters/shiki_senran_kagura.json index 8e260e5..e632b03 100644 --- a/data/characters/shiki_senran_kagura.json +++ b/data/characters/shiki_senran_kagura.json @@ -16,14 +16,13 @@ "scene": "" }, "wardrobe": { - "full_body": "black_dress, frilled_dress, gothic_lolita", - "headwear": "black_hat, mini_hat", - "top": "", - "bottom": "", - "legwear": "thighhighs, black_thighhighs", - "footwear": "black_footwear", - "hands": "", - "accessories": "cross_necklace, scythe" + "base": "black_dress, frilled_dress, gothic_lolita", + "head": "black_hat, mini_hat", + "upper_body": "", + "lower_body": "", + "additional": "cross_necklace, scythe", + "feet": "thighhighs, black_thighhighs, black_footwear", + "hands": "" }, "styles": { "aesthetic": "gothic_lolita", diff --git a/data/characters/starfire_teen_titans.json b/data/characters/starfire_teen_titans.json index 4572a19..abf0ef6 100644 --- a/data/characters/starfire_teen_titans.json +++ b/data/characters/starfire_teen_titans.json @@ -16,14 +16,13 @@ "scene": "starry_sky, space, night" }, "wardrobe": { - "full_body": "", - "headwear": "", - "top": "crop_top", - "bottom": "purple_skirt, miniskirt", - "legwear": "", - "footwear": "thigh_boots, purple_boots", - "hands": "vambraces", - "accessories": "gorget, belt, armlet" + "base": "", + "head": "", + "upper_body": "crop_top", + "lower_body": "purple_skirt, miniskirt", + "additional": "gorget, belt, armlet", + "feet": "thigh_boots, purple_boots", + "hands": "vambraces" }, "styles": { "aesthetic": 
"cartoon, superhero, dc_comics", diff --git a/models.py b/models.py index e74c1e3..aae06e8 100644 --- a/models.py +++ b/models.py @@ -271,7 +271,7 @@ class Checkpoint(db.Model): slug = db.Column(db.String(255), unique=True, nullable=False) name = db.Column(db.String(255), nullable=False) checkpoint_path = db.Column(db.String(255), nullable=False) # e.g. "Illustrious/model.safetensors" - data = db.Column(db.JSON, nullable=True) + data = db.Column(db.JSON, nullable=False, default=dict) image_path = db.Column(db.String(255), nullable=True) is_favourite = db.Column(db.Boolean, default=False) is_nsfw = db.Column(db.Boolean, default=False) @@ -287,6 +287,8 @@ class Preset(db.Model): name = db.Column(db.String(100), nullable=False) data = db.Column(db.JSON, nullable=False) image_path = db.Column(db.String(255), nullable=True) + is_favourite = db.Column(db.Boolean, default=False) + is_nsfw = db.Column(db.Boolean, default=False) def __repr__(self): return f'' diff --git a/routes/actions.py b/routes/actions.py index a7302b6..6524591 100644 --- a/routes/actions.py +++ b/routes/actions.py @@ -15,8 +15,8 @@ from services.prompts import build_prompt, _resolve_character, _ensure_character from services.sync import sync_actions from services.file_io import get_available_loras from services.llm import load_prompt, call_llm -from utils import allowed_file, _LORA_DEFAULTS -from routes.shared import register_common_routes +from utils import allowed_file, _LORA_DEFAULTS, clean_html_text +from routes.shared import register_common_routes, apply_library_filters logger = logging.getLogger('gaze') @@ -26,17 +26,8 @@ def register_routes(app): @app.route('/actions') def actions_index(): - query = Action.query - fav = request.args.get('favourite') - nsfw = request.args.get('nsfw', 'all') - if fav == 'on': - query = query.filter_by(is_favourite=True) - if nsfw == 'sfw': - query = query.filter_by(is_nsfw=False) - elif nsfw == 'nsfw': - query = query.filter_by(is_nsfw=True) - actions = 
query.order_by(Action.is_favourite.desc(), Action.name).all() - return render_template('actions/index.html', actions=actions, favourite_filter=fav or '', nsfw_filter=nsfw) + actions, fav, nsfw = apply_library_filters(Action.query, Action) + return render_template('actions/index.html', actions=actions, favourite_filter=fav, nsfw_filter=nsfw) @app.route('/actions/rescan', methods=['POST']) def rescan_actions(): @@ -228,9 +219,9 @@ def register_routes(app): selected_fields.append(f'identity::{key}') # Add wardrobe fields (unless suppressed) if not suppress_wardrobe: - from utils import _WARDROBE_KEYS + from utils import _BODY_GROUP_KEYS wardrobe = character.get_active_wardrobe() - for key in _WARDROBE_KEYS: + for key in _BODY_GROUP_KEYS: if wardrobe.get(key): selected_fields.append(f'wardrobe::{key}') @@ -302,9 +293,9 @@ def register_routes(app): # Wardrobe (active outfit) — skip if suppressed if not suppress_wardrobe: - from utils import _WARDROBE_KEYS + from utils import _BODY_GROUP_KEYS wardrobe = extra_char.get_active_wardrobe() - for key in _WARDROBE_KEYS: + for key in _BODY_GROUP_KEYS: val = wardrobe.get(key) if val: extra_parts.append(val) @@ -389,11 +380,7 @@ def register_routes(app): try: with open(html_path, 'r', encoding='utf-8', errors='ignore') as hf: html_raw = hf.read() - clean_html = re.sub(r']*>.*?', '', html_raw, flags=re.DOTALL) - clean_html = re.sub(r']*>.*?', '', clean_html, flags=re.DOTALL) - clean_html = re.sub(r']*>', '', clean_html) - clean_html = re.sub(r'<[^>]+>', ' ', clean_html) - html_content = ' '.join(clean_html.split()) + html_content = clean_html_text(html_raw) except Exception: pass diff --git a/routes/characters.py b/routes/characters.py index 4fec0dd..ce22a0c 100644 --- a/routes/characters.py +++ b/routes/characters.py @@ -12,7 +12,7 @@ from services.llm import call_character_mcp_tool, call_llm, load_prompt from services.prompts import build_prompt from services.sync import sync_characters from services.workflow import 
_get_default_checkpoint, _prepare_workflow -from routes.shared import register_common_routes +from routes.shared import register_common_routes, apply_library_filters logger = logging.getLogger('gaze') @@ -22,17 +22,8 @@ def register_routes(app): @app.route('/') def index(): - query = Character.query - fav = request.args.get('favourite') - nsfw = request.args.get('nsfw', 'all') - if fav == 'on': - query = query.filter_by(is_favourite=True) - if nsfw == 'sfw': - query = query.filter_by(is_nsfw=False) - elif nsfw == 'nsfw': - query = query.filter_by(is_nsfw=True) - characters = query.order_by(Character.is_favourite.desc(), Character.name).all() - return render_template('index.html', characters=characters, favourite_filter=fav or '', nsfw_filter=nsfw) + characters, fav, nsfw = apply_library_filters(Character.query, Character) + return render_template('index.html', characters=characters, favourite_filter=fav, nsfw_filter=nsfw) @app.route('/rescan', methods=['POST']) def rescan(): @@ -274,16 +265,16 @@ def register_routes(app): # Fetch reference data from wiki URL if provided wiki_reference = '' if wiki_url: - logger.info(f"Fetching character data from URL: {wiki_url}") + logger.info("Fetching character data from URL: %s", wiki_url) wiki_data = call_character_mcp_tool('get_character_from_url', { 'url': wiki_url, 'name': name, }) if wiki_data: wiki_reference = f"\n\nReference data from wiki:\n{wiki_data}\n\nUse this reference to accurately describe the character's appearance, outfit, and features." 
- logger.info(f"Got wiki reference data ({len(wiki_data)} chars)") + logger.info("Got wiki reference data (%d chars)", len(wiki_data)) else: - logger.warning(f"Failed to fetch wiki data from {wiki_url}") + logger.warning("Failed to fetch wiki data from %s", wiki_url) # Step 1: Generate or select outfit first default_outfit_id = 'default' @@ -352,7 +343,7 @@ Create an outfit JSON with wardrobe fields appropriate for this character.""" db.session.commit() default_outfit_id = outfit_slug - logger.info(f"Generated outfit: {outfit_name} for character {name}") + logger.info("Generated outfit: %s for character %s", outfit_name, name) except Exception as e: logger.exception("Outfit generation error: %s", e) diff --git a/routes/checkpoints.py b/routes/checkpoints.py index 46d26f7..b23c559 100644 --- a/routes/checkpoints.py +++ b/routes/checkpoints.py @@ -14,8 +14,8 @@ from services.prompts import build_prompt, _resolve_character, _ensure_character from services.sync import sync_checkpoints, _default_checkpoint_data from services.file_io import get_available_checkpoints from services.llm import load_prompt, call_llm -from utils import allowed_file -from routes.shared import register_common_routes +from utils import allowed_file, clean_html_text +from routes.shared import register_common_routes, apply_library_filters logger = logging.getLogger('gaze') @@ -58,17 +58,8 @@ def register_routes(app): @app.route('/checkpoints') def checkpoints_index(): - query = Checkpoint.query - fav = request.args.get('favourite') - nsfw = request.args.get('nsfw', 'all') - if fav == 'on': - query = query.filter_by(is_favourite=True) - if nsfw == 'sfw': - query = query.filter_by(is_nsfw=False) - elif nsfw == 'nsfw': - query = query.filter_by(is_nsfw=True) - checkpoints = query.order_by(Checkpoint.is_favourite.desc(), Checkpoint.name).all() - return render_template('checkpoints/index.html', checkpoints=checkpoints, favourite_filter=fav or '', nsfw_filter=nsfw) + checkpoints, fav, nsfw = 
apply_library_filters(Checkpoint.query, Checkpoint) + return render_template('checkpoints/index.html', checkpoints=checkpoints, favourite_filter=fav, nsfw_filter=nsfw) @app.route('/checkpoints/rescan', methods=['POST']) def rescan_checkpoints(): @@ -179,11 +170,7 @@ def register_routes(app): try: with open(html_path, 'r', encoding='utf-8', errors='ignore') as hf: html_raw = hf.read() - clean_html = re.sub(r']*>.*?', '', html_raw, flags=re.DOTALL) - clean_html = re.sub(r']*>.*?', '', clean_html, flags=re.DOTALL) - clean_html = re.sub(r']*>', '', clean_html) - clean_html = re.sub(r'<[^>]+>', ' ', clean_html) - html_content = ' '.join(clean_html.split()) + html_content = clean_html_text(html_raw) except Exception as e: logger.error("Error reading HTML for %s: %s", filename, e) diff --git a/routes/detailers.py b/routes/detailers.py index 2627ee0..af20b34 100644 --- a/routes/detailers.py +++ b/routes/detailers.py @@ -13,8 +13,8 @@ from services.prompts import build_prompt, _resolve_character, _ensure_character from services.sync import sync_detailers from services.file_io import get_available_loras from services.llm import load_prompt, call_llm -from utils import _WARDROBE_KEYS -from routes.shared import register_common_routes +from utils import _BODY_GROUP_KEYS, clean_html_text +from routes.shared import register_common_routes, apply_library_filters logger = logging.getLogger('gaze') @@ -47,7 +47,7 @@ def register_routes(app): selected_fields.append(f'identity::{key}') selected_fields.append('special::name') wardrobe = character.get_active_wardrobe() - for key in _WARDROBE_KEYS: + for key in _BODY_GROUP_KEYS: if wardrobe.get(key): selected_fields.append(f'wardrobe::{key}') selected_fields.extend(['lora::lora_triggers']) @@ -87,17 +87,8 @@ def register_routes(app): @app.route('/detailers') def detailers_index(): - query = Detailer.query - fav = request.args.get('favourite') - nsfw = request.args.get('nsfw', 'all') - if fav == 'on': - query = 
query.filter_by(is_favourite=True) - if nsfw == 'sfw': - query = query.filter_by(is_nsfw=False) - elif nsfw == 'nsfw': - query = query.filter_by(is_nsfw=True) - detailers = query.order_by(Detailer.is_favourite.desc(), Detailer.name).all() - return render_template('detailers/index.html', detailers=detailers, favourite_filter=fav or '', nsfw_filter=nsfw) + detailers, fav, nsfw = apply_library_filters(Detailer.query, Detailer) + return render_template('detailers/index.html', detailers=detailers, favourite_filter=fav, nsfw_filter=nsfw) @app.route('/detailers/rescan', methods=['POST']) def rescan_detailers(): @@ -296,11 +287,7 @@ def register_routes(app): try: with open(html_path, 'r', encoding='utf-8', errors='ignore') as hf: html_raw = hf.read() - clean_html = re.sub(r']*>.*?', '', html_raw, flags=re.DOTALL) - clean_html = re.sub(r']*>.*?', '', clean_html, flags=re.DOTALL) - clean_html = re.sub(r']*>', '', clean_html) - clean_html = re.sub(r'<[^>]+>', ' ', clean_html) - html_content = ' '.join(clean_html.split()) + html_content = clean_html_text(html_raw) except Exception as e: logger.error("Error reading HTML %s: %s", html_filename, e) diff --git a/routes/looks.py b/routes/looks.py index dd3d967..d0a0824 100644 --- a/routes/looks.py +++ b/routes/looks.py @@ -2,6 +2,7 @@ import json import os import re import logging +from utils import clean_html_text from flask import render_template, request, redirect, url_for, flash, session from sqlalchemy.orm.attributes import flag_modified @@ -13,7 +14,7 @@ from services.prompts import build_prompt, _resolve_character, _ensure_character from services.sync import sync_looks from services.file_io import get_available_loras, _count_look_assignments from services.llm import load_prompt, call_llm -from routes.shared import register_common_routes +from routes.shared import register_common_routes, apply_library_filters logger = logging.getLogger('gaze') @@ -57,18 +58,9 @@ def register_routes(app): @app.route('/looks') def 
looks_index(): - query = Look.query - fav = request.args.get('favourite') - nsfw = request.args.get('nsfw', 'all') - if fav == 'on': - query = query.filter_by(is_favourite=True) - if nsfw == 'sfw': - query = query.filter_by(is_nsfw=False) - elif nsfw == 'nsfw': - query = query.filter_by(is_nsfw=True) - looks = query.order_by(Look.is_favourite.desc(), Look.name).all() + looks, fav, nsfw = apply_library_filters(Look.query, Look) look_assignments = _count_look_assignments() - return render_template('looks/index.html', looks=looks, look_assignments=look_assignments, favourite_filter=fav or '', nsfw_filter=nsfw) + return render_template('looks/index.html', looks=looks, look_assignments=look_assignments, favourite_filter=fav, nsfw_filter=nsfw) @app.route('/looks/rescan', methods=['POST']) def rescan_looks(): @@ -320,7 +312,7 @@ Character ID: {character_slug}""" character_data['lora'] = lora_data except Exception as e: - logger.exception(f"LLM character generation error: {e}") + logger.exception("LLM character generation error: %s", e) flash(f'Failed to generate character with AI: {e}', 'error') return redirect(url_for('look_detail', slug=slug)) else: @@ -494,11 +486,7 @@ Character ID: {character_slug}""" try: with open(html_path, 'r', encoding='utf-8', errors='ignore') as hf: html_raw = hf.read() - clean_html = re.sub(r']*>.*?', '', html_raw, flags=re.DOTALL) - clean_html = re.sub(r']*>.*?', '', clean_html, flags=re.DOTALL) - clean_html = re.sub(r']*>', '', clean_html) - clean_html = re.sub(r'<[^>]+>', ' ', clean_html) - html_content = ' '.join(clean_html.split()) + html_content = clean_html_text(html_raw) except Exception as e: logger.error("Error reading HTML %s: %s", html_filename, e) diff --git a/routes/outfits.py b/routes/outfits.py index c07e331..3e6edf1 100644 --- a/routes/outfits.py +++ b/routes/outfits.py @@ -13,9 +13,9 @@ from services.job_queue import _enqueue_job, _make_finalize, _enqueue_task from services.prompts import build_prompt, _resolve_character, 
_ensure_character_fields, _append_background from services.sync import sync_outfits from services.file_io import get_available_loras, _count_outfit_lora_assignments -from utils import allowed_file, _LORA_DEFAULTS +from utils import allowed_file, _LORA_DEFAULTS, clean_html_text from services.llm import load_prompt, call_llm -from routes.shared import register_common_routes +from routes.shared import register_common_routes, apply_library_filters logger = logging.getLogger('gaze') @@ -25,18 +25,9 @@ def register_routes(app): @app.route('/outfits') def outfits_index(): - query = Outfit.query - fav = request.args.get('favourite') - nsfw = request.args.get('nsfw', 'all') - if fav == 'on': - query = query.filter_by(is_favourite=True) - if nsfw == 'sfw': - query = query.filter_by(is_nsfw=False) - elif nsfw == 'nsfw': - query = query.filter_by(is_nsfw=True) - outfits = query.order_by(Outfit.is_favourite.desc(), Outfit.name).all() + outfits, fav, nsfw = apply_library_filters(Outfit.query, Outfit) lora_assignments = _count_outfit_lora_assignments() - return render_template('outfits/index.html', outfits=outfits, lora_assignments=lora_assignments, favourite_filter=fav or '', nsfw_filter=nsfw) + return render_template('outfits/index.html', outfits=outfits, lora_assignments=lora_assignments, favourite_filter=fav, nsfw_filter=nsfw) @app.route('/outfits/rescan', methods=['POST']) def rescan_outfits(): @@ -90,11 +81,7 @@ def register_routes(app): try: with open(html_path, 'r', encoding='utf-8', errors='ignore') as hf: html_raw = hf.read() - clean_html = re.sub(r']*>.*?', '', html_raw, flags=re.DOTALL) - clean_html = re.sub(r']*>.*?', '', clean_html, flags=re.DOTALL) - clean_html = re.sub(r']*>', '', clean_html) - clean_html = re.sub(r'<[^>]+>', ' ', clean_html) - html_content = ' '.join(clean_html.split()) + html_content = clean_html_text(html_raw) except Exception: pass @@ -313,12 +300,12 @@ def register_routes(app): # No explicit field selection (e.g. 
batch generation) — build a selection # that includes identity + wardrobe + name + lora triggers, but NOT character # defaults (expression, pose, scene), so outfit covers stay generic. - from utils import _IDENTITY_KEYS, _WARDROBE_KEYS - for key in _IDENTITY_KEYS: + from utils import _BODY_GROUP_KEYS + for key in _BODY_GROUP_KEYS: if character.data.get('identity', {}).get(key): selected_fields.append(f'identity::{key}') outfit_wardrobe = outfit.data.get('wardrobe', {}) - for key in _WARDROBE_KEYS: + for key in _BODY_GROUP_KEYS: if outfit_wardrobe.get(key): selected_fields.append(f'wardrobe::{key}') selected_fields.append('special::name') diff --git a/routes/presets.py b/routes/presets.py index a887ee2..bf3927b 100644 --- a/routes/presets.py +++ b/routes/presets.py @@ -8,7 +8,7 @@ from sqlalchemy.orm.attributes import flag_modified from services.sync import sync_presets from services.generation import generate_from_preset from services.llm import load_prompt, call_llm -from routes.shared import register_common_routes +from routes.shared import register_common_routes, apply_library_filters logger = logging.getLogger('gaze') @@ -18,8 +18,9 @@ def register_routes(app): @app.route('/presets') def presets_index(): - presets = Preset.query.order_by(Preset.filename).all() - return render_template('presets/index.html', presets=presets) + presets, fav, nsfw = apply_library_filters(Preset.query, Preset) + return render_template('presets/index.html', presets=presets, + favourite_filter=fav, nsfw_filter=nsfw) @app.route('/preset/') def preset_detail(slug): diff --git a/routes/regenerate.py b/routes/regenerate.py index 15f264e..078d002 100644 --- a/routes/regenerate.py +++ b/routes/regenerate.py @@ -63,7 +63,7 @@ def register_routes(app): clean_json = llm_response.replace('```json', '').replace('```', '').strip() new_data = json.loads(clean_json) except Exception as e: - logger.exception(f"Regenerate tags LLM error for {category}/{slug}") + logger.exception("Regenerate tags LLM 
error for %s/%s", category, slug) return {'error': f'LLM error: {str(e)}'}, 500 # Preserve protected fields from original @@ -106,7 +106,7 @@ def register_routes(app): with open(file_path, 'w') as f: json.dump(new_data, f, indent=2) except Exception as e: - logger.warning(f"Could not write {file_path}: {e}") + logger.warning("Could not write %s: %s", file_path, e) migrated += 1 @@ -122,7 +122,7 @@ def register_routes(app): migrated += 1 db.session.commit() - logger.info(f"Migrated {migrated} resources from list tags to dict tags") + logger.info("Migrated %d resources from list tags to dict tags", migrated) return {'success': True, 'migrated': migrated} def _make_regen_task(category, slug, name, system_prompt): diff --git a/routes/scenes.py b/routes/scenes.py index 6c21014..f0f421a 100644 --- a/routes/scenes.py +++ b/routes/scenes.py @@ -13,8 +13,8 @@ from services.prompts import build_prompt, _resolve_character, _ensure_character from services.sync import sync_scenes from services.file_io import get_available_loras from services.llm import load_prompt, call_llm -from routes.shared import register_common_routes -from utils import _WARDROBE_KEYS +from routes.shared import register_common_routes, apply_library_filters +from utils import _BODY_GROUP_KEYS, clean_html_text logger = logging.getLogger('gaze') @@ -24,17 +24,8 @@ def register_routes(app): @app.route('/scenes') def scenes_index(): - query = Scene.query - fav = request.args.get('favourite') - nsfw = request.args.get('nsfw', 'all') - if fav == 'on': - query = query.filter_by(is_favourite=True) - if nsfw == 'sfw': - query = query.filter_by(is_nsfw=False) - elif nsfw == 'nsfw': - query = query.filter_by(is_nsfw=True) - scenes = query.order_by(Scene.is_favourite.desc(), Scene.name).all() - return render_template('scenes/index.html', scenes=scenes, favourite_filter=fav or '', nsfw_filter=nsfw) + scenes, fav, nsfw = apply_library_filters(Scene.query, Scene) + return render_template('scenes/index.html', 
scenes=scenes, favourite_filter=fav, nsfw_filter=nsfw) @app.route('/scenes/rescan', methods=['POST']) def rescan_scenes(): @@ -177,7 +168,7 @@ def register_routes(app): selected_fields.append(f'identity::{key}') selected_fields.append('special::name') wardrobe = character.get_active_wardrobe() - for key in _WARDROBE_KEYS: + for key in _BODY_GROUP_KEYS: if wardrobe.get(key): selected_fields.append(f'wardrobe::{key}') selected_fields.extend(['defaults::scene', 'lora::lora_triggers']) @@ -312,11 +303,7 @@ def register_routes(app): try: with open(html_path, 'r', encoding='utf-8', errors='ignore') as hf: html_raw = hf.read() - clean_html = re.sub(r']*>.*?', '', html_raw, flags=re.DOTALL) - clean_html = re.sub(r']*>.*?', '', clean_html, flags=re.DOTALL) - clean_html = re.sub(r']*>', '', clean_html) - clean_html = re.sub(r'<[^>]+>', ' ', clean_html) - html_content = ' '.join(clean_html.split()) + html_content = clean_html_text(html_raw) except Exception as e: logger.error("Error reading HTML %s: %s", html_filename, e) diff --git a/routes/settings.py b/routes/settings.py index d781dab..4a2fe47 100644 --- a/routes/settings.py +++ b/routes/settings.py @@ -62,7 +62,7 @@ def register_routes(app): db.session.commit() logger.info("Default checkpoint saved to database: %s", checkpoint_path) except Exception as e: - logger.error(f"Failed to persist checkpoint to database: {e}") + logger.error("Failed to persist checkpoint to database: %s", e) db.session.rollback() # Also persist to comfy_workflow.json for backwards compatibility @@ -78,7 +78,7 @@ def register_routes(app): with open(workflow_path, 'w') as f: json.dump(workflow, f, indent=2) except Exception as e: - logger.error(f"Failed to persist checkpoint to workflow file: {e}") + logger.error("Failed to persist checkpoint to workflow file: %s", e) return {'status': 'ok'} diff --git a/routes/shared.py b/routes/shared.py index 8d7de35..65e0d5a 100644 --- a/routes/shared.py +++ b/routes/shared.py @@ -20,6 +20,23 @@ from utils 
import allowed_file logger = logging.getLogger('gaze') +def apply_library_filters(query, model_class): + """Apply standard favourite/NSFW filters and sorting to a library query. + + Returns (items, favourite_filter, nsfw_filter) tuple. + """ + fav = request.args.get('favourite') + nsfw = request.args.get('nsfw', 'all') + if fav == 'on': + query = query.filter_by(is_favourite=True) + if nsfw == 'sfw': + query = query.filter_by(is_nsfw=False) + elif nsfw == 'nsfw': + query = query.filter_by(is_nsfw=True) + items = query.order_by(model_class.is_favourite.desc(), model_class.name).all() + return items, fav or '', nsfw + + # --------------------------------------------------------------------------- # Category configuration registry # --------------------------------------------------------------------------- @@ -237,11 +254,16 @@ def _register_replace_cover_route(app, cfg): def replace_cover(slug): entity = Model.query.filter_by(slug=slug).first_or_404() preview_path = request.form.get('preview_path') - if preview_path and os.path.exists( - os.path.join(current_app.config['UPLOAD_FOLDER'], preview_path)): - entity.image_path = preview_path - db.session.commit() - flash('Cover image updated!') + if preview_path: + full_path = os.path.realpath( + os.path.join(current_app.config['UPLOAD_FOLDER'], preview_path)) + upload_root = os.path.realpath(current_app.config['UPLOAD_FOLDER']) + if full_path.startswith(upload_root + os.sep) and os.path.exists(full_path): + entity.image_path = preview_path + db.session.commit() + flash('Cover image updated!') + else: + flash('Invalid preview path.', 'error') else: flash('No valid preview image selected.', 'error') return redirect(url_for(detail_ep, slug=slug)) diff --git a/routes/styles.py b/routes/styles.py index d91c558..265efa4 100644 --- a/routes/styles.py +++ b/routes/styles.py @@ -14,8 +14,8 @@ from services.prompts import build_prompt, _resolve_character, _ensure_character from services.sync import sync_styles from 
services.file_io import get_available_loras from services.llm import load_prompt, call_llm -from routes.shared import register_common_routes -from utils import _WARDROBE_KEYS +from routes.shared import register_common_routes, apply_library_filters +from utils import _BODY_GROUP_KEYS, clean_html_text logger = logging.getLogger('gaze') @@ -47,7 +47,7 @@ def register_routes(app): selected_fields.append(f'identity::{key}') selected_fields.append('special::name') wardrobe = character.get_active_wardrobe() - for key in _WARDROBE_KEYS: + for key in _BODY_GROUP_KEYS: if wardrobe.get(key): selected_fields.append(f'wardrobe::{key}') selected_fields.extend(['style::artist_name', 'style::artistic_style', 'lora::lora_triggers']) @@ -82,17 +82,8 @@ def register_routes(app): @app.route('/styles') def styles_index(): - query = Style.query - fav = request.args.get('favourite') - nsfw = request.args.get('nsfw', 'all') - if fav == 'on': - query = query.filter_by(is_favourite=True) - if nsfw == 'sfw': - query = query.filter_by(is_nsfw=False) - elif nsfw == 'nsfw': - query = query.filter_by(is_nsfw=True) - styles = query.order_by(Style.is_favourite.desc(), Style.name).all() - return render_template('styles/index.html', styles=styles, favourite_filter=fav or '', nsfw_filter=nsfw) + styles, fav, nsfw = apply_library_filters(Style.query, Style) + return render_template('styles/index.html', styles=styles, favourite_filter=fav, nsfw_filter=nsfw) @app.route('/styles/rescan', methods=['POST']) def rescan_styles(): @@ -323,11 +314,7 @@ def register_routes(app): try: with open(html_path, 'r', encoding='utf-8', errors='ignore') as hf: html_raw = hf.read() - clean_html = re.sub(r']*>.*?', '', html_raw, flags=re.DOTALL) - clean_html = re.sub(r']*>.*?', '', clean_html, flags=re.DOTALL) - clean_html = re.sub(r']*>', '', clean_html) - clean_html = re.sub(r'<[^>]+>', ' ', clean_html) - html_content = ' '.join(clean_html.split()) + html_content = clean_html_text(html_raw) except Exception: pass diff 
--git a/routes/transfer.py b/routes/transfer.py index 23cdf26..daa3d0f 100644 --- a/routes/transfer.py +++ b/routes/transfer.py @@ -244,7 +244,7 @@ Generate a complete {target_category.rstrip('s')} profile with all required fiel new_data[target_name_key] = new_name except Exception as e: - logger.exception(f"LLM transfer error: {e}") + logger.exception("LLM transfer error: %s", e) flash(f'Failed to generate {target_category.rstrip("s")} with AI: {e}') return redirect(url_for('transfer_resource', category=category, slug=slug)) else: @@ -290,7 +290,7 @@ Generate a complete {target_category.rstrip('s')} profile with all required fiel lora_moved = True flash(f'Moved LoRA file to {target_lora_dir}') except Exception as lora_e: - logger.exception(f"LoRA move error: {lora_e}") + logger.exception("LoRA move error: %s", lora_e) flash(f'Warning: Failed to move LoRA file: {lora_e}', 'warning') else: flash(f'Warning: Source LoRA file not found at {abs_source_path}', 'warning') @@ -317,7 +317,7 @@ Generate a complete {target_category.rstrip('s')} profile with all required fiel db.session.delete(resource) flash(f'Removed original {category.rstrip("s")}: {resource_name}') except Exception as rm_e: - logger.exception(f"Error removing original: {rm_e}") + logger.exception("Error removing original: %s", rm_e) flash(f'Warning: Failed to remove original: {rm_e}', 'warning') db.session.commit() @@ -325,7 +325,7 @@ Generate a complete {target_category.rstrip('s')} profile with all required fiel return redirect(url_for(target_config['index_route'], highlight=safe_slug)) except Exception as e: - logger.exception(f"Transfer save error: {e}") + logger.exception("Transfer save error: %s", e) flash(f'Failed to save transferred {target_category.rstrip("s")}: {e}') return redirect(url_for('transfer_resource', category=category, slug=slug)) diff --git a/services/comfyui.py b/services/comfyui.py index 9fd4d8a..515d6b3 100644 --- a/services/comfyui.py +++ b/services/comfyui.py @@ -2,6 +2,7 @@ 
import json import logging import requests from flask import current_app +from services.workflow import NODE_CHECKPOINT logger = logging.getLogger('gaze') @@ -14,9 +15,11 @@ def get_loaded_checkpoint(): if resp.ok: history = resp.json() if history: - latest = max(history.values(), key=lambda j: j.get('status', {}).get('status_str', '')) + # Sort by prompt ID (numeric string) to get the most recent job + latest_id = max(history.keys()) + latest = history[latest_id] nodes = latest.get('prompt', [None, None, {}])[2] - return nodes.get('4', {}).get('inputs', {}).get('ckpt_name') + return nodes.get(NODE_CHECKPOINT, {}).get('inputs', {}).get('ckpt_name') except Exception: pass return None @@ -34,26 +37,27 @@ def _ensure_checkpoint_loaded(checkpoint_path): if resp.ok: history = resp.json() if history: - latest = max(history.values(), key=lambda j: j.get('status', {}).get('status_str', '')) + latest_id = max(history.keys()) + latest = history[latest_id] nodes = latest.get('prompt', [None, None, {}])[2] - loaded_ckpt = nodes.get('4', {}).get('inputs', {}).get('ckpt_name') + loaded_ckpt = nodes.get(NODE_CHECKPOINT, {}).get('inputs', {}).get('ckpt_name') # If the loaded checkpoint matches what we want, no action needed if loaded_ckpt == checkpoint_path: - logger.info(f"Checkpoint {checkpoint_path} already loaded in ComfyUI") + logger.info("Checkpoint %s already loaded in ComfyUI", checkpoint_path) return # Checkpoint doesn't match or couldn't determine - force unload all models - logger.info(f"Forcing ComfyUI to unload models to ensure {checkpoint_path} loads") + logger.info("Forcing ComfyUI to unload models to ensure %s loads", checkpoint_path) requests.post(f'{url}/free', json={'unload_models': True}, timeout=5) except Exception as e: - logger.warning(f"Failed to check/force checkpoint reload: {e}") + logger.warning("Failed to check/force checkpoint reload: %s", e) def queue_prompt(prompt_workflow, client_id=None): """POST a workflow to ComfyUI's /prompt endpoint.""" # 
Ensure the checkpoint in the workflow is loaded in ComfyUI - checkpoint_path = prompt_workflow.get('4', {}).get('inputs', {}).get('ckpt_name') + checkpoint_path = prompt_workflow.get(NODE_CHECKPOINT, {}).get('inputs', {}).get('ckpt_name') _ensure_checkpoint_loaded(checkpoint_path) p = {"prompt": prompt_workflow} @@ -72,7 +76,10 @@ def queue_prompt(prompt_workflow, client_id=None): logger.debug("=" * 80) data = json.dumps(p).encode('utf-8') - response = requests.post(f"{url}/prompt", data=data) + response = requests.post(f"{url}/prompt", data=data, timeout=30) + if not response.ok: + logger.error("ComfyUI returned HTTP %s: %s", response.status_code, response.text[:500]) + raise RuntimeError(f"ComfyUI returned HTTP {response.status_code}") response_json = response.json() # Log the response from ComfyUI @@ -90,7 +97,7 @@ def queue_prompt(prompt_workflow, client_id=None): def get_history(prompt_id): """Poll ComfyUI /history for results of a given prompt_id.""" url = current_app.config['COMFYUI_URL'] - response = requests.get(f"{url}/history/{prompt_id}") + response = requests.get(f"{url}/history/{prompt_id}", timeout=10) history_json = response.json() # Log detailed history response for debugging @@ -128,6 +135,6 @@ def get_image(filename, subfolder, folder_type): data = {"filename": filename, "subfolder": subfolder, "type": folder_type} logger.debug("Fetching image from ComfyUI: filename=%s, subfolder=%s, type=%s", filename, subfolder, folder_type) - response = requests.get(f"{url}/view", params=data) + response = requests.get(f"{url}/view", params=data, timeout=30) logger.debug("Image retrieved: %d bytes (status: %s)", len(response.content), response.status_code) return response.content diff --git a/services/llm.py b/services/llm.py index a4b399e..94bc721 100644 --- a/services/llm.py +++ b/services/llm.py @@ -205,13 +205,13 @@ def call_llm(prompt, system_prompt="You are a creative assistant."): except requests.exceptions.RequestException as e: error_body = "" try: 
error_body = f" - Body: {response.text}" - except: pass + except Exception: pass raise RuntimeError(f"LLM API request failed: {str(e)}{error_body}") from e except (KeyError, IndexError) as e: # Log the raw response to help diagnose the issue raw = "" try: raw = response.text[:500] - except: pass + except Exception: pass logger.warning("Unexpected LLM response format (key=%s). Raw response: %s", e, raw) if format_retries > 0: format_retries -= 1 diff --git a/services/mcp.py b/services/mcp.py index 5be016e..ed28ad3 100644 --- a/services/mcp.py +++ b/services/mcp.py @@ -12,147 +12,83 @@ CHAR_MCP_COMPOSE_DIR = os.path.join(MCP_TOOLS_DIR, 'character-mcp') CHAR_MCP_REPO_URL = 'https://git.liveaodh.com/aodhan/character-mcp.git' -def _ensure_mcp_repo(): - """Clone or update the danbooru-mcp source repository inside tools/. +def _ensure_repo(compose_dir, repo_url, name): + """Clone or update an MCP source repository inside tools/. - - If ``tools/danbooru-mcp/`` does not exist, clone from MCP_REPO_URL. + - If the directory does not exist, clone from repo_url. - If it already exists, run ``git pull`` to fetch the latest changes. Errors are non-fatal. 
""" os.makedirs(MCP_TOOLS_DIR, exist_ok=True) try: - if not os.path.isdir(MCP_COMPOSE_DIR): - logger.info('Cloning danbooru-mcp from %s …', MCP_REPO_URL) + if not os.path.isdir(compose_dir): + logger.info('Cloning %s from %s …', name, repo_url) subprocess.run( - ['git', 'clone', MCP_REPO_URL, MCP_COMPOSE_DIR], + ['git', 'clone', repo_url, compose_dir], timeout=120, check=True, ) - logger.info('danbooru-mcp cloned successfully.') + logger.info('%s cloned successfully.', name) else: - logger.info('Updating danbooru-mcp via git pull …') + logger.info('Updating %s via git pull …', name) subprocess.run( ['git', 'pull'], - cwd=MCP_COMPOSE_DIR, + cwd=compose_dir, timeout=60, check=True, ) - logger.info('danbooru-mcp updated.') + logger.info('%s updated.', name) except FileNotFoundError: - logger.warning('git not found on PATH — danbooru-mcp repo will not be cloned/updated.') + logger.warning('git not found on PATH — %s repo will not be cloned/updated.', name) except subprocess.CalledProcessError as e: - logger.warning('git operation failed for danbooru-mcp: %s', e) + logger.warning('git operation failed for %s: %s', name, e) except subprocess.TimeoutExpired: - logger.warning('git timed out while cloning/updating danbooru-mcp.') + logger.warning('git timed out while cloning/updating %s.', name) except Exception as e: - logger.warning('Could not clone/update danbooru-mcp repo: %s', e) + logger.warning('Could not clone/update %s repo: %s', name, e) + + +def _ensure_server_running(compose_dir, repo_url, container_name, name): + """Ensure an MCP repo is present/up-to-date, then start the Docker + container if it is not already running. + + Uses ``docker compose up -d`` so the image is built automatically on first + run. Errors are non-fatal — the app will still start even if Docker is + unavailable. + + Skipped when ``SKIP_MCP_AUTOSTART=true`` (set by docker-compose, where the + MCP service is managed by compose instead). 
+ """ + if os.environ.get('SKIP_MCP_AUTOSTART', '').lower() == 'true': + logger.info('SKIP_MCP_AUTOSTART set — skipping %s auto-start.', name) + return + _ensure_repo(compose_dir, repo_url, name) + try: + result = subprocess.run( + ['docker', 'ps', '--filter', f'name={container_name}', '--format', '{{.Names}}'], + capture_output=True, text=True, timeout=10, + ) + if container_name in result.stdout: + logger.info('%s container already running.', name) + return + logger.info('Starting %s container via docker compose …', name) + subprocess.run( + ['docker', 'compose', 'up', '-d'], + cwd=compose_dir, + timeout=120, + ) + logger.info('%s container started.', name) + except FileNotFoundError: + logger.warning('docker not found on PATH — %s will not be started automatically.', name) + except subprocess.TimeoutExpired: + logger.warning('docker timed out while starting %s.', name) + except Exception as e: + logger.warning('Could not ensure %s is running: %s', name, e) def ensure_mcp_server_running(): - """Ensure the danbooru-mcp repo is present/up-to-date, then start the - Docker container if it is not already running. - - Uses ``docker compose up -d`` so the image is built automatically on first - run. Errors are non-fatal — the app will still start even if Docker is - unavailable. - - Skipped when ``SKIP_MCP_AUTOSTART=true`` (set by docker-compose, where the - danbooru-mcp service is managed by compose instead). 
- """ - if os.environ.get('SKIP_MCP_AUTOSTART', '').lower() == 'true': - logger.info('SKIP_MCP_AUTOSTART set — skipping danbooru-mcp auto-start.') - return - _ensure_mcp_repo() - try: - result = subprocess.run( - ['docker', 'ps', '--filter', 'name=danbooru-mcp', '--format', '{{.Names}}'], - capture_output=True, text=True, timeout=10, - ) - if 'danbooru-mcp' in result.stdout: - logger.info('danbooru-mcp container already running.') - return - # Container not running — start it via docker compose - logger.info('Starting danbooru-mcp container via docker compose …') - subprocess.run( - ['docker', 'compose', 'up', '-d'], - cwd=MCP_COMPOSE_DIR, - timeout=120, - ) - logger.info('danbooru-mcp container started.') - except FileNotFoundError: - logger.warning('docker not found on PATH — danbooru-mcp will not be started automatically.') - except subprocess.TimeoutExpired: - logger.warning('docker timed out while starting danbooru-mcp.') - except Exception as e: - logger.warning('Could not ensure danbooru-mcp is running: %s', e) - - -def _ensure_character_mcp_repo(): - """Clone or update the character-mcp source repository inside tools/. - - - If ``tools/character-mcp/`` does not exist, clone from CHAR_MCP_REPO_URL. - - If it already exists, run ``git pull`` to fetch the latest changes. - Errors are non-fatal. 
- """ - os.makedirs(MCP_TOOLS_DIR, exist_ok=True) - try: - if not os.path.isdir(CHAR_MCP_COMPOSE_DIR): - logger.info('Cloning character-mcp from %s …', CHAR_MCP_REPO_URL) - subprocess.run( - ['git', 'clone', CHAR_MCP_REPO_URL, CHAR_MCP_COMPOSE_DIR], - timeout=120, check=True, - ) - logger.info('character-mcp cloned successfully.') - else: - logger.info('Updating character-mcp via git pull …') - subprocess.run( - ['git', 'pull'], - cwd=CHAR_MCP_COMPOSE_DIR, - timeout=60, check=True, - ) - logger.info('character-mcp updated.') - except FileNotFoundError: - logger.warning('git not found on PATH — character-mcp repo will not be cloned/updated.') - except subprocess.CalledProcessError as e: - logger.warning('git operation failed for character-mcp: %s', e) - except subprocess.TimeoutExpired: - logger.warning('git timed out while cloning/updating character-mcp.') - except Exception as e: - logger.warning('Could not clone/update character-mcp repo: %s', e) + """Ensure the danbooru-mcp Docker container is running.""" + _ensure_server_running(MCP_COMPOSE_DIR, MCP_REPO_URL, 'danbooru-mcp', 'danbooru-mcp') def ensure_character_mcp_server_running(): - """Ensure the character-mcp repo is present/up-to-date, then start the - Docker container if it is not already running. - - Uses ``docker compose up -d`` so the image is built automatically on first - run. Errors are non-fatal — the app will still start even if Docker is - unavailable. - - Skipped when ``SKIP_MCP_AUTOSTART=true`` (set by docker-compose, where the - character-mcp service is managed by compose instead). 
- """ - if os.environ.get('SKIP_MCP_AUTOSTART', '').lower() == 'true': - logger.info('SKIP_MCP_AUTOSTART set — skipping character-mcp auto-start.') - return - _ensure_character_mcp_repo() - try: - result = subprocess.run( - ['docker', 'ps', '--filter', 'name=character-mcp', '--format', '{{.Names}}'], - capture_output=True, text=True, timeout=10, - ) - if 'character-mcp' in result.stdout: - logger.info('character-mcp container already running.') - return - # Container not running — start it via docker compose - logger.info('Starting character-mcp container via docker compose …') - subprocess.run( - ['docker', 'compose', 'up', '-d'], - cwd=CHAR_MCP_COMPOSE_DIR, - timeout=120, - ) - logger.info('character-mcp container started.') - except FileNotFoundError: - logger.warning('docker not found on PATH — character-mcp will not be started automatically.') - except subprocess.TimeoutExpired: - logger.warning('docker timed out while starting character-mcp.') - except Exception as e: - logger.warning('Could not ensure character-mcp is running: %s', e) + """Ensure the character-mcp Docker container is running.""" + _ensure_server_running(CHAR_MCP_COMPOSE_DIR, CHAR_MCP_REPO_URL, 'character-mcp', 'character-mcp') diff --git a/services/prompts.py b/services/prompts.py index 6dad470..9bcab47 100644 --- a/services/prompts.py +++ b/services/prompts.py @@ -1,6 +1,6 @@ import re from models import db, Character -from utils import _IDENTITY_KEYS, _WARDROBE_KEYS, _BODY_GROUP_KEYS, parse_orientation +from utils import _BODY_GROUP_KEYS, parse_orientation def _dedup_tags(prompt_str): @@ -57,7 +57,7 @@ def _ensure_character_fields(character, selected_fields, include_wardrobe=True, include_defaults — also inject defaults::expression and defaults::pose (for outfit/look previews) """ identity = character.data.get('identity', {}) - for key in _IDENTITY_KEYS: + for key in _BODY_GROUP_KEYS: if identity.get(key): field_key = f'identity::{key}' if field_key not in selected_fields: @@ -72,7 +72,7 
@@ def _ensure_character_fields(character, selected_fields, include_wardrobe=True, selected_fields.append('special::name') if include_wardrobe: wardrobe = character.get_active_wardrobe() - for key in _WARDROBE_KEYS: + for key in _BODY_GROUP_KEYS: if wardrobe.get(key): field_key = f'wardrobe::{key}' if field_key not in selected_fields: diff --git a/services/sync.py b/services/sync.py index 6e4d998..0ed1c20 100644 --- a/services/sync.py +++ b/services/sync.py @@ -193,7 +193,7 @@ def ensure_default_outfit(): "lora_weight": 0.8, "lora_triggers": "" }, - "tags": [] + "tags": {"outfit_type": "Default", "nsfw": False} } try: diff --git a/services/workflow.py b/services/workflow.py index 0168576..c3990c4 100644 --- a/services/workflow.py +++ b/services/workflow.py @@ -9,6 +9,27 @@ from services.prompts import _cross_dedup_prompts logger = logging.getLogger('gaze') +# --------------------------------------------------------------------------- +# ComfyUI workflow node IDs (must match comfy_workflow.json) +# --------------------------------------------------------------------------- +NODE_KSAMPLER = "3" +NODE_CHECKPOINT = "4" +NODE_LATENT = "5" +NODE_POSITIVE = "6" +NODE_NEGATIVE = "7" +NODE_VAE_DECODE = "8" +NODE_SAVE = "9" +NODE_FACE_DETAILER = "11" +NODE_HAND_DETAILER = "13" +NODE_FACE_PROMPT = "14" +NODE_HAND_PROMPT = "15" +NODE_LORA_CHAR = "16" +NODE_LORA_OUTFIT = "17" +NODE_LORA_ACTION = "18" +NODE_LORA_STYLE = "19" +NODE_LORA_CHAR_B = "20" +NODE_VAE_LOADER = "21" + # Node IDs used by DetailerForEach in multi-char mode _SEGS_DETAILER_NODES = ['46', '47', '53', '54'] # Node IDs for per-character CLIP prompts in multi-char mode @@ -22,7 +43,7 @@ def _log_workflow_prompts(label, workflow): lora_details = [] # Collect detailed LoRA information - for node_id, label_str in [("16", "char/look"), ("17", "outfit"), ("18", "action"), ("19", "style/detail/scene"), ("20", "char_b")]: + for node_id, label_str in [(NODE_LORA_CHAR, "char/look"), (NODE_LORA_OUTFIT, "outfit"), 
(NODE_LORA_ACTION, "action"), (NODE_LORA_STYLE, "style/detail/scene"), (NODE_LORA_CHAR_B, "char_b")]: if node_id in workflow: name = workflow[node_id]["inputs"].get("lora_name", "") if name: @@ -41,13 +62,13 @@ def _log_workflow_prompts(label, workflow): # Extract VAE information vae_info = "(integrated)" - if '21' in workflow: - vae_info = workflow['21']['inputs'].get('vae_name', '(custom)') + if NODE_VAE_LOADER in workflow: + vae_info = workflow[NODE_VAE_LOADER]['inputs'].get('vae_name', '(custom)') # Extract adetailer information adetailer_info = [] # Single-char mode: FaceDetailer nodes 11 + 13 - for node_id, node_name in [("11", "Face"), ("13", "Hand")]: + for node_id, node_name in [(NODE_FACE_DETAILER, "Face"), (NODE_HAND_DETAILER, "Hand")]: if node_id in workflow: adetailer_info.append(f" {node_name} (Node {node_id}): steps={workflow[node_id]['inputs'].get('steps', '?')}, " f"cfg={workflow[node_id]['inputs'].get('cfg', '?')}, " @@ -59,24 +80,24 @@ def _log_workflow_prompts(label, workflow): f"cfg={workflow[node_id]['inputs'].get('cfg', '?')}, " f"denoise={workflow[node_id]['inputs'].get('denoise', '?')}") - face_text = workflow.get('14', {}).get('inputs', {}).get('text', '') - hand_text = workflow.get('15', {}).get('inputs', {}).get('text', '') + face_text = workflow.get(NODE_FACE_PROMPT, {}).get('inputs', {}).get('text', '') + hand_text = workflow.get(NODE_HAND_PROMPT, {}).get('inputs', {}).get('text', '') lines = [ sep, f" WORKFLOW PROMPTS [{label}]", sep, " MODEL CONFIGURATION:", - f" Checkpoint : {workflow['4']['inputs'].get('ckpt_name', '(not set)')}", + f" Checkpoint : {workflow[NODE_CHECKPOINT]['inputs'].get('ckpt_name', '(not set)')}", f" VAE : {vae_info}", "", " GENERATION SETTINGS:", - f" Seed : {workflow['3']['inputs'].get('seed', '(not set)')}", - f" Resolution : {workflow['5']['inputs'].get('width', '?')} x {workflow['5']['inputs'].get('height', '?')}", - f" Sampler : {workflow['3']['inputs'].get('sampler_name', '?')} / 
{workflow['3']['inputs'].get('scheduler', '?')}", - f" Steps : {workflow['3']['inputs'].get('steps', '?')}", - f" CFG Scale : {workflow['3']['inputs'].get('cfg', '?')}", - f" Denoise : {workflow['3']['inputs'].get('denoise', '1.0')}", + f" Seed : {workflow[NODE_KSAMPLER]['inputs'].get('seed', '(not set)')}", + f" Resolution : {workflow[NODE_LATENT]['inputs'].get('width', '?')} x {workflow[NODE_LATENT]['inputs'].get('height', '?')}", + f" Sampler : {workflow[NODE_KSAMPLER]['inputs'].get('sampler_name', '?')} / {workflow[NODE_KSAMPLER]['inputs'].get('scheduler', '?')}", + f" Steps : {workflow[NODE_KSAMPLER]['inputs'].get('steps', '?')}", + f" CFG Scale : {workflow[NODE_KSAMPLER]['inputs'].get('cfg', '?')}", + f" Denoise : {workflow[NODE_KSAMPLER]['inputs'].get('denoise', '1.0')}", ] # Add LoRA details @@ -98,8 +119,8 @@ def _log_workflow_prompts(label, workflow): lines.extend([ "", " PROMPTS:", - f" [+] Positive : {workflow['6']['inputs'].get('text', '')}", - f" [-] Negative : {workflow['7']['inputs'].get('text', '')}", + f" [+] Positive : {workflow[NODE_POSITIVE]['inputs'].get('text', '')}", + f" [-] Negative : {workflow[NODE_NEGATIVE]['inputs'].get('text', '')}", ]) if face_text: @@ -128,17 +149,17 @@ def _apply_checkpoint_settings(workflow, ckpt_data): vae = ckpt_data.get('vae', 'integrated') # KSampler (node 3) - if steps and '3' in workflow: - workflow['3']['inputs']['steps'] = int(steps) - if cfg and '3' in workflow: - workflow['3']['inputs']['cfg'] = float(cfg) - if sampler_name and '3' in workflow: - workflow['3']['inputs']['sampler_name'] = sampler_name - if scheduler and '3' in workflow: - workflow['3']['inputs']['scheduler'] = scheduler + if steps and NODE_KSAMPLER in workflow: + workflow[NODE_KSAMPLER]['inputs']['steps'] = int(steps) + if cfg and NODE_KSAMPLER in workflow: + workflow[NODE_KSAMPLER]['inputs']['cfg'] = float(cfg) + if sampler_name and NODE_KSAMPLER in workflow: + workflow[NODE_KSAMPLER]['inputs']['sampler_name'] = sampler_name + if 
scheduler and NODE_KSAMPLER in workflow: + workflow[NODE_KSAMPLER]['inputs']['scheduler'] = scheduler # Face/hand detailers (nodes 11, 13) + multi-char SEGS detailers - for node_id in ['11', '13'] + _SEGS_DETAILER_NODES: + for node_id in [NODE_FACE_DETAILER, NODE_HAND_DETAILER] + _SEGS_DETAILER_NODES: if node_id in workflow: if steps: workflow[node_id]['inputs']['steps'] = int(steps) @@ -151,25 +172,25 @@ def _apply_checkpoint_settings(workflow, ckpt_data): # Prepend base_positive to all positive prompt nodes if base_positive: - for node_id in ['6', '14', '15'] + _SEGS_PROMPT_NODES: + for node_id in [NODE_POSITIVE, NODE_FACE_PROMPT, NODE_HAND_PROMPT] + _SEGS_PROMPT_NODES: if node_id in workflow: workflow[node_id]['inputs']['text'] = f"{base_positive}, {workflow[node_id]['inputs']['text']}" # Append base_negative to negative prompt (shared by main + detailers via node 7) - if base_negative and '7' in workflow: - workflow['7']['inputs']['text'] = f"{workflow['7']['inputs']['text']}, {base_negative}" + if base_negative and NODE_NEGATIVE in workflow: + workflow[NODE_NEGATIVE]['inputs']['text'] = f"{workflow[NODE_NEGATIVE]['inputs']['text']}, {base_negative}" # VAE: if not integrated, inject a VAELoader node and rewire if vae and vae != 'integrated': - workflow['21'] = { + workflow[NODE_VAE_LOADER] = { 'inputs': {'vae_name': vae}, 'class_type': 'VAELoader' } - if '8' in workflow: - workflow['8']['inputs']['vae'] = ['21', 0] - for node_id in ['11', '13'] + _SEGS_DETAILER_NODES: + if NODE_VAE_DECODE in workflow: + workflow[NODE_VAE_DECODE]['inputs']['vae'] = [NODE_VAE_LOADER, 0] + for node_id in [NODE_FACE_DETAILER, NODE_HAND_DETAILER] + _SEGS_DETAILER_NODES: if node_id in workflow: - workflow[node_id]['inputs']['vae'] = ['21', 0] + workflow[node_id]['inputs']['vae'] = [NODE_VAE_LOADER, 0] return workflow @@ -190,7 +211,7 @@ def _get_default_checkpoint(): try: with open('comfy_workflow.json', 'r') as f: workflow = json.load(f) - ckpt_path = workflow.get('4', 
{}).get('inputs', {}).get('ckpt_name') + ckpt_path = workflow.get(NODE_CHECKPOINT, {}).get('inputs', {}).get('ckpt_name') logger.debug("Loaded default checkpoint from workflow file: %s", ckpt_path) except Exception: pass @@ -231,11 +252,11 @@ def _inject_multi_char_detailers(workflow, prompts, model_source, clip_source): Image flow: VAEDecode(8) → PersonA(46) → PersonB(47) → FaceA(53) → FaceB(54) → Hand(13) """ - vae_source = ["4", 2] + vae_source = [NODE_CHECKPOINT, 2] # Remove old single face detailer and its prompt — we replace them - workflow.pop('11', None) - workflow.pop('14', None) + workflow.pop(NODE_FACE_DETAILER, None) + workflow.pop(NODE_FACE_PROMPT, None) # --- Person detection --- workflow['40'] = { @@ -246,7 +267,7 @@ def _inject_multi_char_detailers(workflow, prompts, model_source, clip_source): workflow['41'] = { 'inputs': { 'bbox_detector': ['40', 0], - 'image': ['8', 0], + 'image': [NODE_VAE_DECODE, 0], 'threshold': 0.5, 'dilation': 10, 'crop_factor': 3.0, @@ -313,13 +334,13 @@ def _inject_multi_char_detailers(workflow, prompts, model_source, clip_source): workflow['46'] = { 'inputs': { **_person_base, - 'image': ['8', 0], + 'image': [NODE_VAE_DECODE, 0], 'segs': ['42', 0], 'model': model_source, 'clip': clip_source, 'vae': vae_source, 'positive': ['44', 0], - 'negative': ['7', 0], + 'negative': [NODE_NEGATIVE, 0], }, 'class_type': 'DetailerForEach' } @@ -333,7 +354,7 @@ def _inject_multi_char_detailers(workflow, prompts, model_source, clip_source): 'clip': clip_source, 'vae': vae_source, 'positive': ['45', 0], - 'negative': ['7', 0], + 'negative': [NODE_NEGATIVE, 0], }, 'class_type': 'DetailerForEach' } @@ -413,7 +434,7 @@ def _inject_multi_char_detailers(workflow, prompts, model_source, clip_source): 'clip': clip_source, 'vae': vae_source, 'positive': ['51', 0], - 'negative': ['7', 0], + 'negative': [NODE_NEGATIVE, 0], }, 'class_type': 'DetailerForEach' } @@ -427,29 +448,29 @@ def _inject_multi_char_detailers(workflow, prompts, model_source, 
clip_source): 'clip': clip_source, 'vae': vae_source, 'positive': ['52', 0], - 'negative': ['7', 0], + 'negative': [NODE_NEGATIVE, 0], }, 'class_type': 'DetailerForEach' } # Rewire hand detailer: image input from last face detailer instead of old node 11 - if '13' in workflow: - workflow['13']['inputs']['image'] = ['54', 0] + if NODE_HAND_DETAILER in workflow: + workflow[NODE_HAND_DETAILER]['inputs']['image'] = ['54', 0] logger.debug("Injected multi-char SEGS detailers (nodes 40-54)") def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_negative=None, outfit=None, action=None, style=None, detailer=None, scene=None, width=None, height=None, checkpoint_data=None, look=None, fixed_seed=None, character_b=None): # 1. Update prompts using replacement to preserve embeddings - workflow["6"]["inputs"]["text"] = workflow["6"]["inputs"]["text"].replace("{{POSITIVE_PROMPT}}", prompts["main"]) + workflow[NODE_POSITIVE]["inputs"]["text"] = workflow[NODE_POSITIVE]["inputs"]["text"].replace("{{POSITIVE_PROMPT}}", prompts["main"]) if custom_negative: - workflow["7"]["inputs"]["text"] = f"{custom_negative}, {workflow['7']['inputs']['text']}" + workflow[NODE_NEGATIVE]["inputs"]["text"] = f"{custom_negative}, {workflow[NODE_NEGATIVE]['inputs']['text']}" - if "14" in workflow: - workflow["14"]["inputs"]["text"] = workflow["14"]["inputs"]["text"].replace("{{FACE_PROMPT}}", prompts["face"]) - if "15" in workflow: - workflow["15"]["inputs"]["text"] = workflow["15"]["inputs"]["text"].replace("{{HAND_PROMPT}}", prompts["hand"]) + if NODE_FACE_PROMPT in workflow: + workflow[NODE_FACE_PROMPT]["inputs"]["text"] = workflow[NODE_FACE_PROMPT]["inputs"]["text"].replace("{{FACE_PROMPT}}", prompts["face"]) + if NODE_HAND_PROMPT in workflow: + workflow[NODE_HAND_PROMPT]["inputs"]["text"] = workflow[NODE_HAND_PROMPT]["inputs"]["text"].replace("{{HAND_PROMPT}}", prompts["hand"]) # 2. 
Update Checkpoint - always set one, fall back to default if not provided if not checkpoint: @@ -458,20 +479,20 @@ def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_nega if not checkpoint_data: checkpoint_data = default_ckpt_data if checkpoint: - workflow["4"]["inputs"]["ckpt_name"] = checkpoint + workflow[NODE_CHECKPOINT]["inputs"]["ckpt_name"] = checkpoint else: raise ValueError("No checkpoint specified and no default checkpoint configured") # 3. Handle LoRAs - Node 16 for character, Node 17 for outfit, Node 18 for action, Node 19 for style/detailer # Start with direct checkpoint connections - model_source = ["4", 0] - clip_source = ["4", 1] + model_source = [NODE_CHECKPOINT, 0] + clip_source = [NODE_CHECKPOINT, 1] # Look negative prompt (applied before character LoRA) if look: look_negative = look.data.get('negative', '') if look_negative: - workflow["7"]["inputs"]["text"] = f"{look_negative}, {workflow['7']['inputs']['text']}" + workflow[NODE_NEGATIVE]["inputs"]["text"] = f"{look_negative}, {workflow[NODE_NEGATIVE]['inputs']['text']}" # Character LoRA (Node 16) — look LoRA overrides character LoRA when present if look: @@ -480,47 +501,47 @@ def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_nega char_lora_data = character.data.get('lora', {}) if character else {} char_lora_name = char_lora_data.get('lora_name') - if char_lora_name and "16" in workflow: + if char_lora_name and NODE_LORA_CHAR in workflow: _w16 = _resolve_lora_weight(char_lora_data) - workflow["16"]["inputs"]["lora_name"] = char_lora_name - workflow["16"]["inputs"]["strength_model"] = _w16 - workflow["16"]["inputs"]["strength_clip"] = _w16 - workflow["16"]["inputs"]["model"] = ["4", 0] # From checkpoint - workflow["16"]["inputs"]["clip"] = ["4", 1] # From checkpoint - model_source = ["16", 0] - clip_source = ["16", 1] + workflow[NODE_LORA_CHAR]["inputs"]["lora_name"] = char_lora_name + workflow[NODE_LORA_CHAR]["inputs"]["strength_model"] = _w16 + 
workflow[NODE_LORA_CHAR]["inputs"]["strength_clip"] = _w16 + workflow[NODE_LORA_CHAR]["inputs"]["model"] = [NODE_CHECKPOINT, 0] # From checkpoint + workflow[NODE_LORA_CHAR]["inputs"]["clip"] = [NODE_CHECKPOINT, 1] # From checkpoint + model_source = [NODE_LORA_CHAR, 0] + clip_source = [NODE_LORA_CHAR, 1] logger.debug("Character LoRA: %s @ %s", char_lora_name, _w16) # Outfit LoRA (Node 17) - chains from character LoRA or checkpoint outfit_lora_data = outfit.data.get('lora', {}) if outfit else {} outfit_lora_name = outfit_lora_data.get('lora_name') - if outfit_lora_name and "17" in workflow: + if outfit_lora_name and NODE_LORA_OUTFIT in workflow: _w17 = _resolve_lora_weight({**{'lora_weight': 0.8}, **outfit_lora_data}) - workflow["17"]["inputs"]["lora_name"] = outfit_lora_name - workflow["17"]["inputs"]["strength_model"] = _w17 - workflow["17"]["inputs"]["strength_clip"] = _w17 + workflow[NODE_LORA_OUTFIT]["inputs"]["lora_name"] = outfit_lora_name + workflow[NODE_LORA_OUTFIT]["inputs"]["strength_model"] = _w17 + workflow[NODE_LORA_OUTFIT]["inputs"]["strength_clip"] = _w17 # Chain from character LoRA (node 16) or checkpoint (node 4) - workflow["17"]["inputs"]["model"] = model_source - workflow["17"]["inputs"]["clip"] = clip_source - model_source = ["17", 0] - clip_source = ["17", 1] + workflow[NODE_LORA_OUTFIT]["inputs"]["model"] = model_source + workflow[NODE_LORA_OUTFIT]["inputs"]["clip"] = clip_source + model_source = [NODE_LORA_OUTFIT, 0] + clip_source = [NODE_LORA_OUTFIT, 1] logger.debug("Outfit LoRA: %s @ %s", outfit_lora_name, _w17) # Action LoRA (Node 18) - chains from previous LoRA or checkpoint action_lora_data = action.data.get('lora', {}) if action else {} action_lora_name = action_lora_data.get('lora_name') - if action_lora_name and "18" in workflow: + if action_lora_name and NODE_LORA_ACTION in workflow: _w18 = _resolve_lora_weight(action_lora_data) - workflow["18"]["inputs"]["lora_name"] = action_lora_name - workflow["18"]["inputs"]["strength_model"] = 
_w18 - workflow["18"]["inputs"]["strength_clip"] = _w18 + workflow[NODE_LORA_ACTION]["inputs"]["lora_name"] = action_lora_name + workflow[NODE_LORA_ACTION]["inputs"]["strength_model"] = _w18 + workflow[NODE_LORA_ACTION]["inputs"]["strength_clip"] = _w18 # Chain from previous source - workflow["18"]["inputs"]["model"] = model_source - workflow["18"]["inputs"]["clip"] = clip_source - model_source = ["18", 0] - clip_source = ["18", 1] + workflow[NODE_LORA_ACTION]["inputs"]["model"] = model_source + workflow[NODE_LORA_ACTION]["inputs"]["clip"] = clip_source + model_source = [NODE_LORA_ACTION, 0] + clip_source = [NODE_LORA_ACTION, 1] logger.debug("Action LoRA: %s @ %s", action_lora_name, _w18) # Style/Detailer/Scene LoRA (Node 19) - chains from previous LoRA or checkpoint @@ -529,31 +550,31 @@ def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_nega style_lora_data = target_obj.data.get('lora', {}) if target_obj else {} style_lora_name = style_lora_data.get('lora_name') - if style_lora_name and "19" in workflow: + if style_lora_name and NODE_LORA_STYLE in workflow: _w19 = _resolve_lora_weight(style_lora_data) - workflow["19"]["inputs"]["lora_name"] = style_lora_name - workflow["19"]["inputs"]["strength_model"] = _w19 - workflow["19"]["inputs"]["strength_clip"] = _w19 + workflow[NODE_LORA_STYLE]["inputs"]["lora_name"] = style_lora_name + workflow[NODE_LORA_STYLE]["inputs"]["strength_model"] = _w19 + workflow[NODE_LORA_STYLE]["inputs"]["strength_clip"] = _w19 # Chain from previous source - workflow["19"]["inputs"]["model"] = model_source - workflow["19"]["inputs"]["clip"] = clip_source - model_source = ["19", 0] - clip_source = ["19", 1] + workflow[NODE_LORA_STYLE]["inputs"]["model"] = model_source + workflow[NODE_LORA_STYLE]["inputs"]["clip"] = clip_source + model_source = [NODE_LORA_STYLE, 0] + clip_source = [NODE_LORA_STYLE, 1] logger.debug("Style/Detailer LoRA: %s @ %s", style_lora_name, _w19) # Second character LoRA (Node 20) - for 
multi-character generation if character_b: char_b_lora_data = character_b.data.get('lora', {}) char_b_lora_name = char_b_lora_data.get('lora_name') - if char_b_lora_name and "20" in workflow: + if char_b_lora_name and NODE_LORA_CHAR_B in workflow: _w20 = _resolve_lora_weight(char_b_lora_data) - workflow["20"]["inputs"]["lora_name"] = char_b_lora_name - workflow["20"]["inputs"]["strength_model"] = _w20 - workflow["20"]["inputs"]["strength_clip"] = _w20 - workflow["20"]["inputs"]["model"] = model_source - workflow["20"]["inputs"]["clip"] = clip_source - model_source = ["20", 0] - clip_source = ["20", 1] + workflow[NODE_LORA_CHAR_B]["inputs"]["lora_name"] = char_b_lora_name + workflow[NODE_LORA_CHAR_B]["inputs"]["strength_model"] = _w20 + workflow[NODE_LORA_CHAR_B]["inputs"]["strength_clip"] = _w20 + workflow[NODE_LORA_CHAR_B]["inputs"]["model"] = model_source + workflow[NODE_LORA_CHAR_B]["inputs"]["clip"] = clip_source + model_source = [NODE_LORA_CHAR_B, 0] + clip_source = [NODE_LORA_CHAR_B, 1] logger.debug("Character B LoRA: %s @ %s", char_b_lora_name, _w20) # 3b. Multi-char: inject per-character SEGS detailers (replaces node 11/14) @@ -561,35 +582,35 @@ def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_nega _inject_multi_char_detailers(workflow, prompts, model_source, clip_source) # Apply connections to all model/clip consumers (conditional on node existence) - for nid in ["3", "11", "13"] + _SEGS_DETAILER_NODES: + for nid in [NODE_KSAMPLER, NODE_FACE_DETAILER, NODE_HAND_DETAILER] + _SEGS_DETAILER_NODES: if nid in workflow: workflow[nid]["inputs"]["model"] = model_source - for nid in ["6", "7", "11", "13", "14", "15"] + _SEGS_PROMPT_NODES: + for nid in [NODE_POSITIVE, NODE_NEGATIVE, NODE_FACE_DETAILER, NODE_HAND_DETAILER, NODE_FACE_PROMPT, NODE_HAND_PROMPT] + _SEGS_PROMPT_NODES: if nid in workflow: workflow[nid]["inputs"]["clip"] = clip_source # 4. 
Randomize seeds (or use a fixed seed for reproducible batches like Strengths Gallery) gen_seed = fixed_seed if fixed_seed is not None else random.randint(1, 10**15) - for nid in ["3", "11", "13"] + _SEGS_DETAILER_NODES: + for nid in [NODE_KSAMPLER, NODE_FACE_DETAILER, NODE_HAND_DETAILER] + _SEGS_DETAILER_NODES: if nid in workflow: workflow[nid]["inputs"]["seed"] = gen_seed # 5. Set image dimensions - if "5" in workflow: + if NODE_LATENT in workflow: if width: - workflow["5"]["inputs"]["width"] = int(width) + workflow[NODE_LATENT]["inputs"]["width"] = int(width) if height: - workflow["5"]["inputs"]["height"] = int(height) + workflow[NODE_LATENT]["inputs"]["height"] = int(height) # 6. Apply checkpoint-specific settings (steps, cfg, sampler, base prompts, VAE) if checkpoint_data: workflow = _apply_checkpoint_settings(workflow, checkpoint_data) # 7. Sync sampler/scheduler from main KSampler to adetailer nodes - sampler_name = workflow["3"]["inputs"].get("sampler_name") - scheduler = workflow["3"]["inputs"].get("scheduler") - for node_id in ["11", "13"] + _SEGS_DETAILER_NODES: + sampler_name = workflow[NODE_KSAMPLER]["inputs"].get("sampler_name") + scheduler = workflow[NODE_KSAMPLER]["inputs"].get("scheduler") + for node_id in [NODE_FACE_DETAILER, NODE_HAND_DETAILER] + _SEGS_DETAILER_NODES: if node_id in workflow: if sampler_name: workflow[node_id]["inputs"]["sampler_name"] = sampler_name @@ -598,11 +619,11 @@ def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_nega # 8. Cross-deduplicate: remove tags shared between positive and negative pos_text, neg_text = _cross_dedup_prompts( - workflow["6"]["inputs"]["text"], - workflow["7"]["inputs"]["text"] + workflow[NODE_POSITIVE]["inputs"]["text"], + workflow[NODE_NEGATIVE]["inputs"]["text"] ) - workflow["6"]["inputs"]["text"] = pos_text - workflow["7"]["inputs"]["text"] = neg_text + workflow[NODE_POSITIVE]["inputs"]["text"] = pos_text + workflow[NODE_NEGATIVE]["inputs"]["text"] = neg_text # 9. 
Final prompt debug — logged after all transformations are complete _log_workflow_prompts("_prepare_workflow", workflow) diff --git a/static/js/detail-common.js b/static/js/detail-common.js index bb6e20d..e1c5d66 100644 --- a/static/js/detail-common.js +++ b/static/js/detail-common.js @@ -67,9 +67,15 @@ function initDetailPage(options = {}) { // ----------------------------------------------------------------------- // Job polling // ----------------------------------------------------------------------- - async function waitForJob(jobId) { + async function waitForJob(jobId, maxPollMs = 300000) { return new Promise((resolve, reject) => { + const start = Date.now(); const poll = setInterval(async () => { + if (Date.now() - start > maxPollMs) { + clearInterval(poll); + reject(new Error('Job timed out after ' + Math.round(maxPollMs / 1000) + 's')); + return; + } try { const resp = await fetch(`/api/queue/${jobId}/status`); const data = await resp.json(); diff --git a/static/js/library-toolbar.js b/static/js/library-toolbar.js index dd9daa3..cafdeeb 100644 --- a/static/js/library-toolbar.js +++ b/static/js/library-toolbar.js @@ -28,9 +28,15 @@ document.addEventListener('DOMContentLoaded', () => { const bulkOverwriteBtn = document.getElementById('bulk-overwrite-btn'); // --- Utility: poll a job until done --- - function waitForJob(jobId) { + function waitForJob(jobId, maxPollMs = 300000) { return new Promise((resolve, reject) => { + const start = Date.now(); const poll = setInterval(async () => { + if (Date.now() - start > maxPollMs) { + clearInterval(poll); + reject(new Error('Job timed out after ' + Math.round(maxPollMs / 1000) + 's')); + return; + } try { const resp = await fetch(`/api/queue/${jobId}/status`); const data = await resp.json(); diff --git a/templates/presets/index.html b/templates/presets/index.html index c84fbae..02f5039 100644 --- a/templates/presets/index.html +++ b/templates/presets/index.html @@ -19,6 +19,19 @@ {% endif %} {% endwith %} + +
+
+ + +
+ +
+
{% for preset in presets %}

diff --git a/utils.py b/utils.py
index e203f9c..16614cb 100644
--- a/utils.py
+++ b/utils.py
@@ -12,8 +12,16 @@ _LORA_DEFAULTS = {
 }
 
 _BODY_GROUP_KEYS = ['base', 'head', 'upper_body', 'lower_body', 'hands', 'feet', 'additional']
-_IDENTITY_KEYS = _BODY_GROUP_KEYS
-_WARDROBE_KEYS = _BODY_GROUP_KEYS
+
+
+def clean_html_text(html_raw):
+    """Strip HTML tags, scripts, styles, and images from raw HTML, returning plain text."""
+    import re
+    text = re.sub(r'<script[^>]*>.*?</script>', '', html_raw, flags=re.DOTALL)
+    text = re.sub(r'<style[^>]*>.*?</style>', '', text, flags=re.DOTALL)
+    text = re.sub(r'<img[^>]*>', '', text)
+    text = re.sub(r'<[^>]+>', ' ', text)
+    return ' '.join(text.split())
 
 
 def allowed_file(filename):