Add semantic tagging, search, favourite/NSFW filtering, and LLM job queue

Replaces old list-format tags (which duplicated prompt content) with structured
dict tags per category (origin_series, outfit_type, participants, style_type,
scene_type, etc.). Tags are now purely organizational metadata — removed from
the prompt pipeline entirely.

Adds is_favourite and is_nsfw columns to all 8 resource models. Favourite is
DB-only (user preference); NSFW is mirrored in JSON tags for rescan persistence.
All library pages get filter controls and favourites-first sorting.

Introduces a parallel LLM job queue (_enqueue_task + _llm_queue_worker) for
background tag regeneration, with the same status polling UI as ComfyUI jobs.
Fixes call_llm() to use has_request_context() fallback for background threads.

Adds global search (/search) across resources and gallery images, with navbar
search bar. Adds gallery image sidecar JSON for per-image favourite/NSFW metadata.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Aodhan Collins
2026-03-21 03:22:09 +00:00
parent 7d79e626a5
commit 32a73b02f5
72 changed files with 3163 additions and 2212 deletions

View File

@@ -2,7 +2,7 @@ import os
import json
import asyncio
import requests
from flask import request as flask_request
from flask import has_request_context, request as flask_request
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from models import Settings
@@ -77,6 +77,28 @@ def call_mcp_tool(name, arguments):
return json.dumps({"error": str(e)})
async def _run_character_mcp_tool(name, arguments):
    """Run one tool call against the character MCP server over stdio.

    Spawns the dockerised ``character-mcp`` server (with the shared
    character-details cache volume mounted), opens a client session,
    invokes *name* with *arguments*, and returns the text of the first
    content item in the result.
    """
    params = StdioServerParameters(
        command="docker",
        args=[
            "run", "--rm", "-i",
            "-v", "character-cache:/root/.local/share/character_details",
            "character-mcp:latest",
        ],
    )
    async with stdio_client(params) as (reader, writer):
        async with ClientSession(reader, writer) as session:
            await session.initialize()
            outcome = await session.call_tool(name, arguments)
            # NOTE(review): assumes the tool always returns at least one
            # text content item — confirm against the MCP server's contract.
            return outcome.content[0].text
def call_character_mcp_tool(name, arguments):
    """Synchronous wrapper for the async character-MCP tool call.

    Returns the tool's text output on success. Any failure (docker not
    available, server error, malformed result) is printed and collapses
    to ``None`` — callers treat ``None`` as "no data", so this wrapper
    never raises.
    """
    try:
        result = asyncio.run(_run_character_mcp_tool(name, arguments))
    except Exception as e:
        print(f"Character MCP Tool Error: {e}")
        return None
    return result
def load_prompt(filename):
path = os.path.join('data/prompts', filename)
if os.path.exists(path):
@@ -100,7 +122,7 @@ def call_llm(prompt, system_prompt="You are a creative assistant."):
headers = {
"Authorization": f"Bearer {settings.openrouter_api_key}",
"Content-Type": "application/json",
"HTTP-Referer": flask_request.url_root,
"HTTP-Referer": flask_request.url_root if has_request_context() else "http://localhost:5000/",
"X-Title": "Character Browser"
}
model = settings.openrouter_model or 'google/gemini-2.0-flash-001'
@@ -120,7 +142,8 @@ def call_llm(prompt, system_prompt="You are a creative assistant."):
{"role": "user", "content": prompt}
]
max_turns = 10
max_turns = 15
tool_turns_remaining = 8 # stop offering tools after this many tool-calling turns
use_tools = True
format_retries = 3 # retries allowed for unexpected response format
@@ -131,13 +154,13 @@ def call_llm(prompt, system_prompt="You are a creative assistant."):
"messages": messages,
}
# Only add tools if supported/requested
if use_tools:
# Only add tools if supported/requested and we haven't exhausted tool turns
if use_tools and tool_turns_remaining > 0:
data["tools"] = DANBOORU_TOOLS
data["tool_choice"] = "auto"
try:
response = requests.post(url, headers=headers, json=data)
response = requests.post(url, headers=headers, json=data, timeout=120)
# If 400 Bad Request and we were using tools, try once without tools
if response.status_code == 400 and use_tools:
@@ -158,6 +181,7 @@ def call_llm(prompt, system_prompt="You are a creative assistant."):
raise KeyError('message')
if message.get('tool_calls'):
tool_turns_remaining -= 1
messages.append(message)
for tool_call in message['tool_calls']:
name = tool_call['function']['name']
@@ -170,6 +194,8 @@ def call_llm(prompt, system_prompt="You are a creative assistant."):
"name": name,
"content": tool_result
})
if tool_turns_remaining <= 0:
print("Tool turn limit reached — next request will not offer tools")
continue
return message['content']