diff --git a/app.py b/app.py
index 77f75db..67bf808 100644
--- a/app.py
+++ b/app.py
@@ -1,5 +1,6 @@
 import os
 import json
+import logging
 import time
 import re
 import requests
@@ -42,6 +43,16 @@ app.config['SESSION_PERMANENT'] = False
 db.init_app(app)
 Session(app)
 
+# ---------------------------------------------------------------------------
+# Logging
+# ---------------------------------------------------------------------------
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
+    datefmt='%Y-%m-%d %H:%M:%S',
+)
+logger = logging.getLogger('gaze')
+
 # ---------------------------------------------------------------------------
 # Generation Job Queue
 # ---------------------------------------------------------------------------
@@ -78,6 +89,7 @@ def _enqueue_job(label, workflow, finalize_fn):
     with _job_queue_lock:
         _job_queue.append(job)
         _job_history[job['id']] = job
+        logger.info("Job queued: [%s] %s", job['id'][:8], label)
     _queue_worker_event.set()
     return job
 
@@ -104,6 +116,8 @@ def _queue_worker():
         with _job_queue_lock:
             job['status'] = 'processing'
 
+        logger.info("Job started: [%s] %s", job['id'][:8], job['label'])
+
         try:
             with app.app_context():
                 # Send workflow to ComfyUI
@@ -114,6 +128,7 @@
                 comfy_id = prompt_response['prompt_id']
                 with _job_queue_lock:
                     job['comfy_prompt_id'] = comfy_id
+                logger.info("Job [%s] queued in ComfyUI as %s", job['id'][:8], comfy_id)
 
                 # Poll until done (max ~10 minutes)
                 max_retries = 300
@@ -129,15 +144,17 @@
             if not finished:
                 raise Exception("ComfyUI generation timed out")
 
+            logger.info("Job [%s] generation complete, finalizing...", job['id'][:8])
             # Run the finalize callback (saves image to disk / DB)
             # finalize_fn(comfy_prompt_id, job) — job is passed so callback can store result
             job['finalize_fn'](comfy_id, job)
 
             with _job_queue_lock:
                 job['status'] = 'done'
+            logger.info("Job done: [%s] %s", job['id'][:8], job['label'])
 
         except Exception as e:
-            print(f"[Queue] Job {job['id']} failed: {e}")
+            logger.exception("Job failed: [%s] %s — %s", job['id'][:8], job['label'], e)
             with _job_queue_lock:
                 job['status'] = 'failed'
                 job['error'] = str(e)
@@ -2078,16 +2095,8 @@ def replace_cover_from_preview(slug):
     return redirect(url_for('detail', slug=slug))
 
 def _log_workflow_prompts(label, workflow):
-    """Print the final assembled ComfyUI prompts in a consistent, readable block."""
+    """Log the final assembled ComfyUI prompts in a consistent, readable block."""
     sep = "=" * 72
-    print(f"\n{sep}")
-    print(f" WORKFLOW PROMPTS [{label}]")
-    print(sep)
-    print(f" Checkpoint : {workflow['4']['inputs'].get('ckpt_name', '(not set)')}")
-    print(f" Seed : {workflow['3']['inputs'].get('seed', '(not set)')}")
-    print(f" Resolution : {workflow['5']['inputs'].get('width', '?')} x {workflow['5']['inputs'].get('height', '?')}")
-    print(f" Sampler : {workflow['3']['inputs'].get('sampler_name', '?')} / {workflow['3']['inputs'].get('scheduler', '?')} steps={workflow['3']['inputs'].get('steps', '?')} cfg={workflow['3']['inputs'].get('cfg', '?')}")
-    # LoRA chain summary
     active_loras = []
     for node_id, label_str in [("16", "char/look"), ("17", "outfit"), ("18", "action"), ("19", "style/detail/scene")]:
         if node_id in workflow:
@@ -2095,16 +2104,26 @@
         if name:
             w = workflow[node_id]["inputs"].get("strength_model", "?")
             active_loras.append(f"{label_str}:{name.split('/')[-1]}@{w:.3f}" if isinstance(w, float) else f"{label_str}:{name.split('/')[-1]}@{w}")
-    print(f" LoRAs : {' | '.join(active_loras) if active_loras else '(none)'}")
-    print(f" [+] Positive : {workflow['6']['inputs'].get('text', '')}")
-    print(f" [-] Negative : {workflow['7']['inputs'].get('text', '')}")
     face_text = workflow.get('14', {}).get('inputs', {}).get('text', '')
     hand_text = workflow.get('15', {}).get('inputs', {}).get('text', '')
+    lines = [
+        sep,
+        f" WORKFLOW PROMPTS [{label}]",
+        sep,
+        f" Checkpoint : {workflow['4']['inputs'].get('ckpt_name', '(not set)')}",
+        f" Seed : {workflow['3']['inputs'].get('seed', '(not set)')}",
+        f" Resolution : {workflow['5']['inputs'].get('width', '?')} x {workflow['5']['inputs'].get('height', '?')}",
+        f" Sampler : {workflow['3']['inputs'].get('sampler_name', '?')} / {workflow['3']['inputs'].get('scheduler', '?')} steps={workflow['3']['inputs'].get('steps', '?')} cfg={workflow['3']['inputs'].get('cfg', '?')}",
+        f" LoRAs : {' | '.join(active_loras) if active_loras else '(none)'}",
+        f" [+] Positive : {workflow['6']['inputs'].get('text', '')}",
+        f" [-] Negative : {workflow['7']['inputs'].get('text', '')}",
+    ]
     if face_text:
-        print(f" [F] Face : {face_text}")
+        lines.append(f" [F] Face : {face_text}")
     if hand_text:
-        print(f" [H] Hand : {hand_text}")
-    print(f"{sep}\n")
+        lines.append(f" [H] Hand : {hand_text}")
+    lines.append(sep)
+    logger.info("\n%s", "\n".join(lines))
 
 
 def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_negative=None, outfit=None, action=None, style=None, detailer=None, scene=None, width=None, height=None, checkpoint_data=None, look=None, fixed_seed=None):
@@ -2150,7 +2169,7 @@ def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_nega
         workflow["16"]["inputs"]["clip"] = ["4", 1]  # From checkpoint
         model_source = ["16", 0]
         clip_source = ["16", 1]
-        print(f"Character LoRA: {char_lora_name} @ {_w16}")
+        logger.debug("Character LoRA: %s @ %s", char_lora_name, _w16)
 
     # Outfit LoRA (Node 17) - chains from character LoRA or checkpoint
     outfit_lora_data = outfit.data.get('lora', {}) if outfit else {}
@@ -2166,7 +2185,7 @@ def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_nega
         workflow["17"]["inputs"]["clip"] = clip_source
         model_source = ["17", 0]
         clip_source = ["17", 1]
-        print(f"Outfit LoRA: {outfit_lora_name} @ {_w17}")
+        logger.debug("Outfit LoRA: %s @ %s", outfit_lora_name, _w17)
 
     # Action LoRA (Node 18) - chains from previous LoRA or checkpoint
     action_lora_data = action.data.get('lora', {}) if action else {}
@@ -2182,7 +2201,7 @@ def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_nega
         workflow["18"]["inputs"]["clip"] = clip_source
         model_source = ["18", 0]
         clip_source = ["18", 1]
-        print(f"Action LoRA: {action_lora_name} @ {_w18}")
+        logger.debug("Action LoRA: %s @ %s", action_lora_name, _w18)
 
     # Style/Detailer/Scene LoRA (Node 19) - chains from previous LoRA or checkpoint
     # Priority: Style > Detailer > Scene (Scene LoRAs are rare but supported)
@@ -2200,7 +2219,7 @@ def _prepare_workflow(workflow, character, prompts, checkpoint=None, custom_nega
         workflow["19"]["inputs"]["clip"] = clip_source
         model_source = ["19", 0]
         clip_source = ["19", 1]
-        print(f"Style/Detailer LoRA: {style_lora_name} @ {_w19}")
+        logger.debug("Style/Detailer LoRA: %s @ %s", style_lora_name, _w19)
 
     # Apply connections to all model/clip consumers
     workflow["3"]["inputs"]["model"] = model_source