feat: memory v2, prompt styles, Dream/GAZE integration, Wyoming TTS fix

SQLite + sqlite-vec replace the JSON memory files, adding semantic search,
follow-up injection, privacy levels, and lifecycle management.

Six prompt styles (quick/standard/creative/roleplayer/game-master/storyteller)
with per-style Claude model tiering (Haiku/Sonnet/Opus), temperature control,
and section stripping. Characters can set default style and per-style overrides.

Dream character import and GAZE character linking in the dashboard editor
with auto-populated fields, cover image resolution, and preset assignment.

Bridge: session isolation (conversation_id / 12h satellite buckets),
model routing refactor, PUT/DELETE support, memory REST endpoints.

Dashboard: mobile-responsive sidebar, retry button, style picker in chat,
follow-up banner, memory lifecycle/privacy UI, cloud model options in editor.

Wyoming TTS: upgraded to v1.8.0 for HA 1.7.2 compatibility.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Aodhan Collins
2026-03-24 22:31:04 +00:00
parent c3bae6fdc0
commit 56580a2cb2
34 changed files with 2891 additions and 467 deletions

View File

@@ -37,6 +37,26 @@ from pathlib import Path
import wave
import io
import re
from datetime import datetime, timezone
from urllib.parse import parse_qs
from memory_store import (
init_db as init_memory_db,
retrieve_memories as _retrieve_memories,
get_pending_followups,
auto_resolve_expired_followups,
auto_archive_old_resolved,
increment_surfaced_count,
add_memory as _add_memory,
add_or_merge_memory,
update_memory as _update_memory,
delete_memory as _delete_memory,
list_memories as _list_memories,
search_memories as _search_memories,
resolve_followup,
count_memories,
migrate_from_json,
)
from wyoming.client import AsyncTcpClient
from wyoming.tts import Synthesize, SynthesizeVoice
from wyoming.asr import Transcribe, Transcript
@@ -48,7 +68,7 @@ TIMEOUT_WARM = 120 # Model already loaded in VRAM
TIMEOUT_COLD = 180 # Model needs loading first (~10-20s load + inference)
OLLAMA_PS_URL = "http://localhost:11434/api/ps"
VTUBE_BRIDGE_URL = "http://localhost:8002"
DEFAULT_MODEL = "anthropic/claude-sonnet-4-20250514"
DEFAULT_MODEL = "anthropic/claude-sonnet-4-6"
def _vtube_fire_and_forget(path: str, data: dict):
@@ -85,12 +105,21 @@ SATELLITE_MAP_PATH = Path("/Users/aodhan/homeai-data/satellite-map.json")
MEMORIES_DIR = Path("/Users/aodhan/homeai-data/memories")
ACTIVE_TTS_VOICE_PATH = Path("/Users/aodhan/homeai-data/active-tts-voice.json")
ACTIVE_MODE_PATH = Path("/Users/aodhan/homeai-data/active-mode.json")
ACTIVE_STYLE_PATH = Path("/Users/aodhan/homeai-data/active-prompt-style.json")
PROMPT_STYLES_DIR = Path(__file__).parent / "prompt-styles"
# Cloud provider model mappings for mode routing
# Cloud provider model mappings for mode routing (fallback when style has no model)
CLOUD_MODELS = {
"anthropic": "anthropic/claude-sonnet-4-20250514",
"anthropic": "anthropic/claude-sonnet-4-6",
"openai": "openai/gpt-4o",
}
LOCAL_MODEL = "ollama/qwen3.5:35b-a3b"
# Lock to serialise model-switch + agent-call (openclaw config is global)
_model_lock = threading.Lock()
# Initialize memory database at module load
init_memory_db()
def load_mode() -> dict:
@@ -102,11 +131,56 @@ def load_mode() -> dict:
return {"mode": "private", "cloud_provider": "anthropic", "overrides": {}}
def resolve_model(mode_data: dict) -> str | None:
"""Resolve which model to use based on mode. Returns None for default (private/local)."""
def resolve_model(mode_data: dict) -> str:
"""Resolve which model to use based on mode."""
mode = mode_data.get("mode", "private")
if mode == "private":
return None # Use OpenClaw default (ollama/qwen3.5:35b-a3b)
return mode_data.get("local_model", LOCAL_MODEL)
provider = mode_data.get("cloud_provider", "anthropic")
return CLOUD_MODELS.get(provider, CLOUD_MODELS["anthropic"])
def load_prompt_style(style_id: str) -> dict:
    """Load a prompt style template by ID.

    Args:
        style_id: Style identifier (e.g. "quick", "roleplayer"). Falsy values
            fall back to "standard".

    Returns:
        The parsed style dict from PROMPT_STYLES_DIR/<id>.json, or a neutral
        default dict if the style is unknown or unreadable.
    """
    if not style_id:
        style_id = "standard"
    # Allowlist-sanitize the ID before using it as a filename. The previous
    # replace("/", "_").replace("..", "") approach left traversal residue
    # (e.g. "...." collapses oddly); restricting to [A-Za-z0-9_-] is airtight
    # and still covers every shipped style ID (quick, game-master, ...).
    safe_id = re.sub(r"[^A-Za-z0-9_-]", "_", style_id)
    try:
        style_path = PROMPT_STYLES_DIR / f"{safe_id}.json"
        with open(style_path) as f:
            return json.load(f)
    except Exception:
        # Missing file, bad JSON, etc. — degrade to a minimal default style.
        return {"id": "standard", "name": "Standard", "group": "cloud", "instruction": "", "strip_sections": []}
def load_active_style() -> str:
    """Return the currently active prompt style ID.

    Reads the state file at ACTIVE_STYLE_PATH; any read or parse failure
    (missing file, bad JSON) falls back to "standard".
    """
    try:
        state = json.loads(ACTIVE_STYLE_PATH.read_text())
    except Exception:
        return "standard"
    return state.get("style", "standard")
def resolve_model_for_style(style: dict, mode_data: dict) -> str:
    """Resolve which model to use for a prompt style.

    Priority: private mode forces local -> style 'model' override ->
    style group routing -> mode's cloud provider default.

    Args:
        style: Loaded prompt style dict (keys: "group", "model", ...).
        mode_data: Active mode config (keys: "mode", "local_model",
            "cloud_provider", ...).

    Returns:
        A model identifier string.
    """
    mode = mode_data.get("mode", "private")
    group = style.get("group", "cloud")
    # Privacy guard: private mode NEVER routes to a cloud model, even when the
    # style declares an explicit cloud model. The original condition also
    # required group == "local", which let a cloud-group style leak
    # private-mode traffic (with unfiltered private memories, since is_cloud
    # is computed False in private mode) to a cloud provider.
    if mode == "private":
        return mode_data.get("local_model", LOCAL_MODEL)
    # Per-style model override (e.g. haiku for quick, opus for roleplay)
    style_model = style.get("model")
    if style_model:
        return style_model
    # Group-based routing, then cloud provider fallback from mode config.
    if group == "local":
        return mode_data.get("local_model", LOCAL_MODEL)
    provider = mode_data.get("cloud_provider", "anthropic")
    return CLOUD_MODELS.get(provider, CLOUD_MODELS["anthropic"])
@@ -192,31 +266,44 @@ def load_character(character_id: str = None) -> dict:
return {}
def load_character_prompt(satellite_id: str = None, character_id: str = None) -> str:
def load_character_prompt(satellite_id: str = None, character_id: str = None,
prompt_style: str = None, user_message: str = "",
is_cloud: bool = False) -> str:
"""Load the full system prompt for a character, resolved by satellite or explicit ID.
Builds a rich prompt from system_prompt + profile fields (background, dialogue_style, etc.)."""
Builds a rich prompt from style instruction + system_prompt + profile fields + memories.
The prompt_style controls HOW the character responds (brief, conversational, roleplay, etc.)."""
if not character_id:
character_id = resolve_character_id(satellite_id)
char = load_character(character_id)
if not char:
return ""
# Load prompt style template
style_id = prompt_style or load_active_style()
style = load_prompt_style(style_id)
strip_sections = set(style.get("strip_sections", []))
sections = []
# Core system prompt
# 1. Response style instruction (framing directive — goes first)
instruction = style.get("instruction", "")
if instruction:
sections.append(f"[Response Style: {style.get('name', style_id)}]\n{instruction}")
# 2. Core character identity (system_prompt)
prompt = char.get("system_prompt", "")
if prompt:
sections.append(prompt)
# Character profile fields
# 3. Character profile fields (filtered by style's strip_sections)
profile_parts = []
if char.get("background"):
if "background" not in strip_sections and char.get("background"):
profile_parts.append(f"## Background\n{char['background']}")
if char.get("appearance"):
if "appearance" not in strip_sections and char.get("appearance"):
profile_parts.append(f"## Appearance\n{char['appearance']}")
if char.get("dialogue_style"):
if "dialogue_style" not in strip_sections and char.get("dialogue_style"):
profile_parts.append(f"## Dialogue Style\n{char['dialogue_style']}")
if char.get("skills"):
if "skills" not in strip_sections and char.get("skills"):
skills = char["skills"]
if isinstance(skills, list):
skills_text = ", ".join(skills[:15])
@@ -226,7 +313,18 @@ def load_character_prompt(satellite_id: str = None, character_id: str = None) ->
if profile_parts:
sections.append("[Character Profile]\n" + "\n\n".join(profile_parts))
# Character metadata
# 4. Per-character style overrides (optional customization per style)
style_overrides = char.get("prompt_style_overrides", {}).get(style_id, {})
if style_overrides:
override_parts = []
if style_overrides.get("dialogue_style"):
override_parts.append(f"## Dialogue Style Override\n{style_overrides['dialogue_style']}")
if style_overrides.get("system_prompt_suffix"):
override_parts.append(style_overrides["system_prompt_suffix"])
if override_parts:
sections.append("[Style-Specific Notes]\n" + "\n\n".join(override_parts))
# 5. Character metadata
meta_lines = []
if char.get("display_name"):
meta_lines.append(f"Your name is: {char['display_name']}")
@@ -243,47 +341,86 @@ def load_character_prompt(satellite_id: str = None, character_id: str = None) ->
if meta_lines:
sections.append("[Character Metadata]\n" + "\n".join(meta_lines))
# Memories (personal + general)
personal, general = load_memories(character_id)
# 6. Memories (personal + general, context-aware retrieval)
personal, general, followups = load_memories(character_id, context=user_message, is_cloud=is_cloud)
if personal:
sections.append("[Personal Memories]\n" + "\n".join(f"- {m}" for m in personal))
if general:
sections.append("[General Knowledge]\n" + "\n".join(f"- {m}" for m in general))
# 7. Pending follow-ups (things the character should naturally bring up)
if followups:
followup_lines = [
f"- {fu['follow_up_context']} (from {fu['created_at'][:10]})"
for fu in followups[:3]
]
sections.append(
"[Pending Follow-ups — Bring these up naturally if relevant]\n"
"You have unresolved topics to check on with the user. "
"Weave them into conversation naturally — don't list them. "
"If the user addresses one, use memory-ctl resolve <id> to mark it resolved.\n"
+ "\n".join(followup_lines)
)
return "\n\n".join(sections)
def load_memories(character_id: str) -> tuple[list[str], list[str]]:
"""Load personal (per-character) and general memories.
Returns (personal_contents, general_contents) truncated to fit context budget."""
PERSONAL_BUDGET = 4000 # max chars for personal memories in prompt
GENERAL_BUDGET = 3000 # max chars for general memories in prompt
def _truncate_to_budget(contents: list[str], budget: int) -> list[str]:
"""Truncate a list of strings to fit within a character budget."""
result = []
used = 0
for content in contents:
if used + len(content) > budget:
break
result.append(content)
used += len(content)
return result
def _read_memories(path: Path, budget: int) -> list[str]:
def load_memories(character_id: str, context: str = "", is_cloud: bool = False) -> tuple[list[str], list[str], list[dict]]:
    """Load personal and general memories via semantic + recency retrieval.

    Args:
        character_id: Character whose personal memories to fetch.
        context: Current user message, used for relevance ranking.
        is_cloud: When True, private-level memories are excluded so they
            never reach a cloud model.

    Returns:
        (personal_contents, general_contents, pending_followups).
    """
    personal_budget = 4000
    general_budget = 3000
    # Pre-migration fallback: if SQLite holds no rows for this character or
    # the shared store, read the legacy JSON memory files instead.
    if not (count_memories(character_id) or count_memories("shared")):
        return _load_memories_json_fallback(character_id), [], []
    personal_rows = _retrieve_memories(character_id, context, limit=15,
                                       exclude_private_for_cloud=is_cloud)
    shared_rows = _retrieve_memories("shared", context, limit=10,
                                     exclude_private_for_cloud=is_cloud)
    followups = get_pending_followups(character_id)
    personal = _truncate_to_budget([row["content"] for row in personal_rows], personal_budget)
    general = _truncate_to_budget([row["content"] for row in shared_rows], general_budget)
    return personal, general, followups
def _load_memories_json_fallback(character_id: str) -> list[str]:
"""Legacy JSON fallback for pre-migration state."""
def _read(path: Path, budget: int) -> list[str]:
try:
with open(path) as f:
data = json.load(f)
except Exception:
return []
memories = data.get("memories", [])
# Sort newest first
memories.sort(key=lambda m: m.get("createdAt", ""), reverse=True)
result = []
used = 0
result, used = [], 0
for m in memories:
content = m.get("content", "").strip()
if not content:
continue
if used + len(content) > budget:
if used + len(content) > 4000:
break
result.append(content)
used += len(content)
return result
safe_id = character_id.replace("/", "_")
personal = _read_memories(MEMORIES_DIR / "personal" / f"{safe_id}.json", PERSONAL_BUDGET)
general = _read_memories(MEMORIES_DIR / "general.json", GENERAL_BUDGET)
return personal, general
return _read(MEMORIES_DIR / "personal" / f"{safe_id}.json", 4000)
class OpenClawBridgeHandler(BaseHTTPRequestHandler):
@@ -297,6 +434,7 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
"""Send a JSON response."""
self.send_response(status_code)
self.send_header("Content-Type", "application/json")
self.send_header("Access-Control-Allow-Origin", "*")
self.end_headers()
self.wfile.write(json.dumps(data).encode())
@@ -319,11 +457,17 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
self._handle_stt_request()
return
# Only handle the agent message endpoint
# Agent message endpoint
if parsed_path.path == "/api/agent/message":
self._handle_agent_request()
return
# Memory API: POST /api/memories/...
if parsed_path.path.startswith("/api/memories/"):
parts = parsed_path.path[len("/api/memories/"):].strip("/").split("/")
self._handle_memory_post(parts)
return
self._send_json_response(404, {"error": "Not found"})
def _handle_tts_request(self):
@@ -399,11 +543,29 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
audio_bytes = resp.read()
return audio_bytes, "audio/mpeg"
def do_PUT(self):
    """Route PUT requests; only the memory-update endpoint is supported."""
    route = urlparse(self.path).path
    prefix = "/api/memories/"
    if route.startswith(prefix):
        # Everything after the prefix identifies the memory resource.
        self._handle_memory_put(route[len(prefix):].strip("/").split("/"))
        return
    self._send_json_response(404, {"error": "Not found"})
def do_DELETE(self):
    """Route DELETE requests; only the memory-deletion endpoint is supported."""
    route = urlparse(self.path).path
    prefix = "/api/memories/"
    if route.startswith(prefix):
        # Everything after the prefix identifies the memory resource.
        self._handle_memory_delete(route[len(prefix):].strip("/").split("/"))
        return
    self._send_json_response(404, {"error": "Not found"})
def do_OPTIONS(self):
"""Handle CORS preflight requests."""
self.send_response(204)
self.send_header("Access-Control-Allow-Origin", "*")
self.send_header("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
self.send_header("Access-Control-Allow-Methods", "POST, GET, PUT, DELETE, OPTIONS")
self.send_header("Access-Control-Allow-Headers", "Content-Type")
self.end_headers()
@@ -531,19 +693,55 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
self._send_json_response(200, {"status": "ok", "message": "Wake word received"})
@staticmethod
def _call_openclaw(message: str, agent: str, timeout: int, model: str = None) -> str:
"""Call OpenClaw CLI and return stdout."""
cmd = ["/opt/homebrew/bin/openclaw", "agent", "--message", message, "--agent", agent]
if model:
cmd.extend(["--model", model])
result = subprocess.run(
cmd,
capture_output=True,
text=True,
timeout=timeout,
check=True,
def _config_set(path: str, value: str):
    """Apply a single `openclaw config set` mutation (best-effort, 5s cap)."""
    cmd = ["/opt/homebrew/bin/openclaw", "config", "set", path, value]
    subprocess.run(cmd, capture_output=True, text=True, timeout=5)
return result.stdout.strip()
@staticmethod
def _call_openclaw(message: str, agent: str, timeout: int,
                   model: str = None, session_id: str = None,
                   params: dict = None, thinking: str = None) -> str:
    """Invoke the OpenClaw CLI and return its trimmed stdout.

    The gateway's primary model and per-model temperature live in global
    openclaw config, so overrides are applied under _model_lock and rolled
    back in a finally block once the agent call completes.

    Raises:
        subprocess.TimeoutExpired / subprocess.CalledProcessError on CLI
        failure (propagated to the caller's error handling).
    """
    cmd = ["/opt/homebrew/bin/openclaw", "agent", "--message", message, "--agent", agent]
    if session_id:
        cmd += ["--session-id", session_id]
    if thinking:
        cmd += ["--thinking", thinking]
    with _model_lock:
        if model:
            OpenClawBridgeHandler._config_set(
                "agents.defaults.model.primary", model)
        # Per-style temperature override, keyed by the active model.
        temp_path = None
        if model and params and params.get("temperature") is not None:
            temp_path = f'agents.defaults.models["{model}"].params.temperature'
            OpenClawBridgeHandler._config_set(
                temp_path, str(params["temperature"]))
        try:
            proc = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
                check=True,
            )
            return proc.stdout.strip()
        finally:
            # Roll back global config so unrelated requests see the defaults.
            if model and model != DEFAULT_MODEL:
                OpenClawBridgeHandler._config_set(
                    "agents.defaults.model.primary", DEFAULT_MODEL)
            if temp_path:
                # NOTE(review): assumes 0.5 is the neutral default — confirm.
                OpenClawBridgeHandler._config_set(temp_path, "0.5")
@staticmethod
def _needs_followup(response: str) -> bool:
@@ -588,6 +786,8 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
agent = data.get("agent", "main")
satellite_id = data.get("satellite_id")
explicit_character_id = data.get("character_id")
requested_style = data.get("prompt_style")
conversation_id = data.get("conversation_id")
if not message:
self._send_json_response(400, {"error": "Message is required"})
@@ -598,10 +798,28 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
character_id = explicit_character_id
else:
character_id = resolve_character_id(satellite_id)
system_prompt = load_character_prompt(character_id=character_id)
# Resolve prompt style: explicit > character default > global active
char = load_character(character_id)
style_id = requested_style or char.get("default_prompt_style") or load_active_style()
style = load_prompt_style(style_id)
print(f"[OpenClaw Bridge] Prompt style: {style.get('name', style_id)} ({style.get('group', 'cloud')})")
# Determine if routing to cloud (for privacy filtering)
mode_data = load_mode()
active_model = resolve_model_for_style(style, mode_data)
is_cloud = style.get("group", "cloud") == "cloud" and mode_data.get("mode") != "private"
system_prompt = load_character_prompt(
character_id=character_id, prompt_style=style_id,
user_message=message, is_cloud=is_cloud,
)
# Run lifecycle maintenance (cheap SQL updates)
auto_resolve_expired_followups()
auto_archive_old_resolved()
# Set the active TTS config for the Wyoming server to pick up
char = load_character(character_id)
tts_config = char.get("tts", {})
if tts_config:
set_active_tts_voice(character_id, tts_config)
@@ -616,14 +834,30 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
if system_prompt:
message = f"System Context: {system_prompt}\n\nUser Request: {message}"
# Load mode and resolve model routing
mode_data = load_mode()
model_override = resolve_model(mode_data)
active_model = model_override or DEFAULT_MODEL
if model_override:
print(f"[OpenClaw Bridge] Mode: PUBLIC → {model_override}")
group = style.get("group", "cloud")
print(f"[OpenClaw Bridge] Routing: {group.upper()}{active_model}")
# Resolve session ID for OpenClaw thread isolation
# Dashboard chats: use conversation_id (each "New Chat" = fresh thread)
# Satellites: use rotating 12-hour bucket so old context expires naturally
if conversation_id:
session_id = conversation_id
elif satellite_id:
now = datetime.now(timezone.utc)
half = "am" if now.hour < 12 else "pm"
session_id = f"sat_{satellite_id}_{now.strftime('%Y%m%d')}_{half}"
else:
print(f"[OpenClaw Bridge] Mode: PRIVATE ({active_model})")
# API call with no conversation or satellite — use a transient session
session_id = f"api_{int(datetime.now(timezone.utc).timestamp())}"
print(f"[OpenClaw Bridge] Session: {session_id}")
# Extract style inference params (temperature, etc.) and thinking level
style_params = style.get("params", {})
style_thinking = style.get("thinking")
if style_params:
print(f"[OpenClaw Bridge] Style params: {style_params}")
if style_thinking:
print(f"[OpenClaw Bridge] Thinking: {style_thinking}")
# Check if model is warm to set appropriate timeout
warm = is_model_warm()
@@ -635,7 +869,7 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
# Call OpenClaw CLI (use full path for launchd compatibility)
try:
response_text = self._call_openclaw(message, agent, timeout, model=model_override)
response_text = self._call_openclaw(message, agent, timeout, model=active_model, session_id=session_id, params=style_params, thinking=style_thinking)
# Re-prompt if the model promised to act but didn't call a tool.
# Detect "I'll do X" / "Let me X" responses that lack any result.
@@ -645,11 +879,19 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
"You just said you would do something but didn't actually call the exec tool. "
"Do NOT explain what you will do — call the tool NOW using exec and return the result."
)
response_text = self._call_openclaw(followup, agent, timeout, model=model_override)
response_text = self._call_openclaw(followup, agent, timeout, model=active_model, session_id=session_id, params=style_params, thinking=style_thinking)
# Increment surfaced_count on follow-ups that were injected into prompt
try:
followups = get_pending_followups(character_id)
for fu in followups[:3]:
increment_surfaced_count(fu["id"])
except Exception as e:
print(f"[OpenClaw Bridge] Follow-up tracking error: {e}")
# Signal avatar: idle (TTS handler will override to 'speaking' if voice is used)
_vtube_fire_and_forget("/expression", {"event": "idle"})
self._send_json_response(200, {"response": response_text, "model": active_model})
self._send_json_response(200, {"response": response_text, "model": active_model, "prompt_style": style_id})
except subprocess.TimeoutExpired:
self._send_json_response(504, {"error": f"OpenClaw command timed out after {timeout}s (model was {'warm' if warm else 'cold'})"})
except subprocess.CalledProcessError as e:
@@ -660,18 +902,174 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
except Exception as e:
self._send_json_response(500, {"error": str(e)})
def do_GET(self):
"""Handle GET requests (health check)."""
parsed_path = urlparse(self.path)
# ------------------------------------------------------------------
# Memory REST API
# ------------------------------------------------------------------
if parsed_path.path == "/status" or parsed_path.path == "/":
def _read_json_body(self) -> dict | None:
    """Read and parse the request's JSON body.

    Returns the parsed object, or None after sending a 400 response for an
    empty or malformed body (callers just bail out on None).
    """
    length = int(self.headers.get("Content-Length", 0))
    if not length:
        self._send_json_response(400, {"error": "Empty body"})
        return None
    try:
        return json.loads(self.rfile.read(length).decode())
    except json.JSONDecodeError:
        self._send_json_response(400, {"error": "Invalid JSON"})
        return None
def _handle_memory_get(self, path_parts: list[str], query_params: dict):
    """Handle GET /api/memories/...

    Routes:
      GET /api/memories/general                — list shared memories
      GET /api/memories/:characterId/followups — pending follow-ups
      GET /api/memories/:characterId[?q=...]   — list or search personal memories

    Query params: limit, offset, type, lifecycle, category, q.
    """
    def _first(key, default=None):
        # parse_qs wraps every value in a list; unwrap the first entry.
        return query_params.get(key, [default])[0]

    def _int_param(key, default):
        # Malformed numeric params fall back to the default instead of
        # letting int() raise and produce an unhandled 500.
        try:
            return int(_first(key, str(default)))
        except (TypeError, ValueError):
            return default

    limit = _int_param("limit", 50)
    offset = _int_param("offset", 0)
    memory_type = _first("type")
    lifecycle = _first("lifecycle")
    category = _first("category")

    # GET /api/memories/general — shared (character-independent) memories
    if len(path_parts) == 1 and path_parts[0] == "general":
        memories = _list_memories("shared", memory_type=memory_type,
                                  lifecycle_state=lifecycle, category=category,
                                  limit=limit, offset=offset)
        self._send_json_response(200, {"memories": memories})
        return

    if len(path_parts) < 1:
        self._send_json_response(400, {"error": "Character ID required"})
        return
    char_id = path_parts[0]

    # GET /api/memories/:characterId/followups
    if len(path_parts) == 2 and path_parts[1] == "followups":
        self._send_json_response(200, {"followups": get_pending_followups(char_id)})
        return

    # GET /api/memories/:characterId — optional full-text search via ?q=
    query = _first("q")
    if query:
        memories = _search_memories(char_id, query, memory_type=memory_type, limit=limit)
    else:
        memories = _list_memories(char_id, memory_type=memory_type,
                                  lifecycle_state=lifecycle, category=category,
                                  limit=limit, offset=offset)
    self._send_json_response(200, {"memories": memories, "characterId": char_id})
def _handle_memory_post(self, path_parts: list[str]):
    """Handle POST /api/memories/...

    Routes:
      POST /api/memories/migrate          — one-shot JSON -> SQLite migration
      POST /api/memories/:memoryId/resolve — mark a follow-up resolved
      POST /api/memories/general          — add a shared memory
      POST /api/memories/:characterId     — add a personal memory
    """
    data = self._read_json_body()
    if data is None:
        return

    # POST /api/memories/migrate
    if len(path_parts) == 1 and path_parts[0] == "migrate":
        self._send_json_response(200, migrate_from_json())
        return

    # POST /api/memories/:memoryId/resolve
    if len(path_parts) == 2 and path_parts[1] == "resolve":
        ok = resolve_followup(path_parts[0])
        self._send_json_response(200 if ok else 404,
                                 {"ok": ok, "id": path_parts[0]})
        return

    # POST /api/memories/general — add general memory.
    # NOTE: "general" and "migrate" shadow any character with those IDs.
    if len(path_parts) == 1 and path_parts[0] == "general":
        # `or ""` tolerates an explicit JSON null content value, which
        # previously crashed on None.strip() and returned a 500.
        content = (data.get("content") or "").strip()
        if not content:
            self._send_json_response(400, {"error": "content is required"})
            return
        mem = add_or_merge_memory(
            character_id="shared",
            content=content,
            memory_type=data.get("memory_type"),
            category=data.get("category", "other"),
            importance=data.get("importance"),
            privacy_level=data.get("privacy_level"),
            tags=data.get("tags"),
            source=data.get("source", "dashboard"),
        )
        self._send_json_response(200, {"ok": True, "memory": mem})
        return

    # POST /api/memories/:characterId — add personal memory
    if len(path_parts) == 1:
        char_id = path_parts[0]
        content = (data.get("content") or "").strip()
        if not content:
            self._send_json_response(400, {"error": "content is required"})
            return
        mem = add_or_merge_memory(
            character_id=char_id,
            content=content,
            memory_type=data.get("memory_type"),
            category=data.get("category", "other"),
            importance=data.get("importance"),
            privacy_level=data.get("privacy_level"),
            tags=data.get("tags"),
            follow_up_due=data.get("follow_up_due"),
            follow_up_context=data.get("follow_up_context"),
            source=data.get("source", "dashboard"),
        )
        self._send_json_response(200, {"ok": True, "memory": mem})
        return

    self._send_json_response(404, {"error": "Not found"})
def _handle_memory_put(self, path_parts: list[str]):
    """Handle PUT /api/memories/:memoryId — update fields of a memory."""
    if len(path_parts) != 1:
        self._send_json_response(400, {"error": "Memory ID required"})
        return
    data = self._read_json_body()
    if data is None:
        return
    try:
        # Body keys are forwarded straight into the update call. Unknown
        # keys (or a non-dict JSON body such as a list) raise TypeError,
        # which previously escaped as an unhandled 500.
        mem = _update_memory(path_parts[0], **data)
    except TypeError:
        self._send_json_response(400, {"error": "Invalid update fields"})
        return
    if mem:
        self._send_json_response(200, {"ok": True, "memory": mem})
    else:
        self._send_json_response(404, {"error": "Memory not found"})
def _handle_memory_delete(self, path_parts: list[str]):
    """Handle DELETE /api/memories/:memoryId."""
    if len(path_parts) != 1:
        self._send_json_response(400, {"error": "Memory ID required"})
        return
    memory_id = path_parts[0]
    deleted = _delete_memory(memory_id)
    status = 200 if deleted else 404
    self._send_json_response(status, {"ok": deleted, "id": memory_id})
# ------------------------------------------------------------------
# HTTP method dispatchers
# ------------------------------------------------------------------
def do_GET(self):
"""Handle GET requests."""
parsed_path = urlparse(self.path)
path = parsed_path.path
if path == "/status" or path == "/":
self._send_json_response(200, {
"status": "ok",
"service": "OpenClaw HTTP Bridge",
"version": "1.0.0"
"version": "2.0.0"
})
else:
self._send_json_response(404, {"error": "Not found"})
return
# Memory API: GET /api/memories/...
if path.startswith("/api/memories/"):
parts = path[len("/api/memories/"):].strip("/").split("/")
query_params = parse_qs(parsed_path.query)
self._handle_memory_get(parts, query_params)
return
self._send_json_response(404, {"error": "Not found"})
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):