feat: Music Assistant, Claude primary LLM, model tag in chat, setup.sh rewrite
- Deploy Music Assistant on Pi (10.0.0.199:8095) with host networking for Chromecast mDNS discovery, Spotify + SMB library support
- Switch primary LLM from Ollama to Claude Sonnet 4 (Anthropic API); local models remain as fallback
- Add model info tag under each assistant message in dashboard chat, persisted in conversation JSON
- Rewrite homeai-agent/setup.sh: loads .env, injects API keys into plists, symlinks plists to ~/Library/LaunchAgents/, smoke-tests services
- Update install_service() in common.sh to use symlinks instead of copies
- Open UFW ports on Pi for Music Assistant (8095, 8097, 8927)
- Add ANTHROPIC_API_KEY to openclaw + bridge launchd plists

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -48,6 +48,7 @@ TIMEOUT_WARM = 120 # Model already loaded in VRAM
|
||||
# Timeout when the model still has to be loaded into VRAM first
# (~10-20 s load on top of inference).
TIMEOUT_COLD = 180

# Local service endpoints used by the bridge.
OLLAMA_PS_URL = "http://localhost:11434/api/ps"
VTUBE_BRIDGE_URL = "http://localhost:8002"

# Model reported to clients when no mode-based override applies.
DEFAULT_MODEL = "anthropic/claude-sonnet-4-20250514"
|
||||
|
||||
|
||||
def _vtube_fire_and_forget(path: str, data: dict):
|
||||
@@ -83,6 +84,31 @@ CHARACTERS_DIR = Path("/Users/aodhan/homeai-data/characters")
|
||||
SATELLITE_MAP_PATH = Path("/Users/aodhan/homeai-data/satellite-map.json")
|
||||
MEMORIES_DIR = Path("/Users/aodhan/homeai-data/memories")
|
||||
ACTIVE_TTS_VOICE_PATH = Path("/Users/aodhan/homeai-data/active-tts-voice.json")
|
||||
ACTIVE_MODE_PATH = Path("/Users/aodhan/homeai-data/active-mode.json")
|
||||
|
||||
# Cloud provider model mappings for mode routing.
# Keys are provider names as stored in the mode config; values are the
# fully-qualified model identifiers handed to OpenClaw.
CLOUD_MODELS = dict(
    anthropic="anthropic/claude-sonnet-4-20250514",
    openai="openai/gpt-4o",
)
|
||||
|
||||
|
||||
def load_mode(path=None) -> dict:
    """Load the public/private mode configuration.

    Args:
        path: Optional override for the config file location; defaults to
            the module-level ACTIVE_MODE_PATH. Exposed mainly for testing.

    Returns:
        The parsed mode dict. On a missing/unreadable file or malformed
        JSON, falls back to a safe private-mode default so the bridge
        keeps running with local models.
    """
    if path is None:
        path = ACTIVE_MODE_PATH
    try:
        with open(path) as f:
            return json.load(f)
    except (OSError, ValueError):
        # OSError: file missing or unreadable; ValueError: malformed JSON
        # (json.JSONDecodeError subclasses ValueError). Narrower than the
        # previous bare `except Exception`, which also hid genuine bugs.
        return {"mode": "private", "cloud_provider": "anthropic", "overrides": {}}
|
||||
|
||||
|
||||
def resolve_model(mode_data: dict) -> str | None:
|
||||
"""Resolve which model to use based on mode. Returns None for default (private/local)."""
|
||||
mode = mode_data.get("mode", "private")
|
||||
if mode == "private":
|
||||
return None # Use OpenClaw default (ollama/qwen3.5:35b-a3b)
|
||||
provider = mode_data.get("cloud_provider", "anthropic")
|
||||
return CLOUD_MODELS.get(provider, CLOUD_MODELS["anthropic"])
|
||||
|
||||
|
||||
def clean_text_for_tts(text: str) -> str:
|
||||
@@ -505,10 +531,13 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
|
||||
self._send_json_response(200, {"status": "ok", "message": "Wake word received"})
|
||||
|
||||
@staticmethod
|
||||
def _call_openclaw(message: str, agent: str, timeout: int) -> str:
|
||||
def _call_openclaw(message: str, agent: str, timeout: int, model: str = None) -> str:
|
||||
"""Call OpenClaw CLI and return stdout."""
|
||||
cmd = ["/opt/homebrew/bin/openclaw", "agent", "--message", message, "--agent", agent]
|
||||
if model:
|
||||
cmd.extend(["--model", model])
|
||||
result = subprocess.run(
|
||||
["/opt/homebrew/bin/openclaw", "agent", "--message", message, "--agent", agent],
|
||||
cmd,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=timeout,
|
||||
@@ -587,6 +616,15 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
|
||||
if system_prompt:
|
||||
message = f"System Context: {system_prompt}\n\nUser Request: {message}"
|
||||
|
||||
# Load mode and resolve model routing
|
||||
mode_data = load_mode()
|
||||
model_override = resolve_model(mode_data)
|
||||
active_model = model_override or DEFAULT_MODEL
|
||||
if model_override:
|
||||
print(f"[OpenClaw Bridge] Mode: PUBLIC → {model_override}")
|
||||
else:
|
||||
print(f"[OpenClaw Bridge] Mode: PRIVATE ({active_model})")
|
||||
|
||||
# Check if model is warm to set appropriate timeout
|
||||
warm = is_model_warm()
|
||||
timeout = TIMEOUT_WARM if warm else TIMEOUT_COLD
|
||||
@@ -597,7 +635,7 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
|
||||
|
||||
# Call OpenClaw CLI (use full path for launchd compatibility)
|
||||
try:
|
||||
response_text = self._call_openclaw(message, agent, timeout)
|
||||
response_text = self._call_openclaw(message, agent, timeout, model=model_override)
|
||||
|
||||
# Re-prompt if the model promised to act but didn't call a tool.
|
||||
# Detect "I'll do X" / "Let me X" responses that lack any result.
|
||||
@@ -607,11 +645,11 @@ class OpenClawBridgeHandler(BaseHTTPRequestHandler):
|
||||
"You just said you would do something but didn't actually call the exec tool. "
|
||||
"Do NOT explain what you will do — call the tool NOW using exec and return the result."
|
||||
)
|
||||
response_text = self._call_openclaw(followup, agent, timeout)
|
||||
response_text = self._call_openclaw(followup, agent, timeout, model=model_override)
|
||||
|
||||
# Signal avatar: idle (TTS handler will override to 'speaking' if voice is used)
|
||||
_vtube_fire_and_forget("/expression", {"event": "idle"})
|
||||
self._send_json_response(200, {"response": response_text})
|
||||
self._send_json_response(200, {"response": response_text, "model": active_model})
|
||||
except subprocess.TimeoutExpired:
|
||||
self._send_json_response(504, {"error": f"OpenClaw command timed out after {timeout}s (model was {'warm' if warm else 'cold'})"})
|
||||
except subprocess.CalledProcessError as e:
|
||||
|
||||
Reference in New Issue
Block a user