Compare commits
25 Commits
858d7be33c
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9313646ac0 | ||
|
|
1989aa41c3 | ||
|
|
56580a2cb2 | ||
|
|
c3bae6fdc0 | ||
|
|
2261763e52 | ||
|
|
d5811e5b28 | ||
|
|
ffc2407289 | ||
|
|
117254d560 | ||
|
|
60eb89ea42 | ||
|
|
1e52c002c2 | ||
|
|
5f147cae61 | ||
|
|
c4cecbd8dc | ||
|
|
3c0d905e64 | ||
|
|
0c33de607f | ||
|
|
2d063c7db7 | ||
|
|
af6b7bd945 | ||
|
|
1bfd7fbd08 | ||
|
|
6db8ae4492 | ||
|
|
664bb6d275 | ||
|
|
6a0bae2a0b | ||
|
|
9eb5633115 | ||
|
|
c3dda280ea | ||
|
|
c4f3dbed77 | ||
| a911693057 | |||
|
|
c31724c92b |
19
.env.example
19
.env.example
@@ -2,6 +2,17 @@
|
||||
# Copy to .env and fill in your values.
|
||||
# .env is gitignored — never commit it.
|
||||
|
||||
# ─── API Keys ──────────────────────────────────────────────────────────────────
|
||||
HUGGING_FACE_API_KEY=
|
||||
OPENROUTER_API_KEY=
|
||||
OPENAI_API_KEY=
|
||||
DEEPSEEK_API_KEY=
|
||||
GEMINI_API_KEY=
|
||||
ELEVENLABS_API_KEY=
|
||||
GAZE_API_KEY=
|
||||
DREAM_API_KEY=
|
||||
ANTHROPIC_API_KEY=
|
||||
|
||||
# ─── Data & Paths ──────────────────────────────────────────────────────────────
|
||||
DATA_DIR=${HOME}/homeai-data
|
||||
REPO_DIR=${HOME}/Projects/HomeAI
|
||||
@@ -32,9 +43,14 @@ OPEN_WEBUI_URL=http://localhost:3030
|
||||
OLLAMA_PRIMARY_MODEL=llama3.3:70b
|
||||
OLLAMA_FAST_MODEL=qwen2.5:7b
|
||||
|
||||
# Medium model kept warm for voice pipeline (override per persona)
|
||||
# Used by preload-models.sh keep-warm daemon
|
||||
HOMEAI_MEDIUM_MODEL=qwen3.5:35b-a3b
|
||||
|
||||
# ─── P3: Voice ─────────────────────────────────────────────────────────────────
|
||||
WYOMING_STT_URL=tcp://localhost:10300
|
||||
WYOMING_TTS_URL=tcp://localhost:10301
|
||||
# ELEVENLABS_API_KEY is set above in API Keys section
|
||||
|
||||
# ─── P4: Agent ─────────────────────────────────────────────────────────────────
|
||||
OPENCLAW_URL=http://localhost:8080
|
||||
@@ -44,3 +60,6 @@ VTUBE_WS_URL=ws://localhost:8001
|
||||
|
||||
# ─── P8: Images ────────────────────────────────────────────────────────────────
|
||||
COMFYUI_URL=http://localhost:8188
|
||||
|
||||
# ─── P9: Character Management ─────────────────────────────────────────────────
|
||||
DREAM_HOST=http://localhost:3000
|
||||
|
||||
10
.gitignore
vendored
10
.gitignore
vendored
@@ -45,3 +45,13 @@ homeai-esp32/esphome/secrets.yaml
|
||||
homeai-llm/benchmark-results.md
|
||||
homeai-character/characters/*.json
|
||||
!homeai-character/characters/.gitkeep
|
||||
|
||||
# MCP Files
|
||||
*.mcp*
|
||||
|
||||
# Models
|
||||
mlx_models/*
|
||||
homeai-llm/modelfiles/*
|
||||
|
||||
# Dev docs
|
||||
plans/*
|
||||
153
CLAUDE.md
153
CLAUDE.md
@@ -18,14 +18,16 @@ A self-hosted, always-on personal AI assistant running on a **Mac Mini M4 Pro (6
|
||||
| Storage | 1TB SSD |
|
||||
| Network | Gigabit Ethernet |
|
||||
|
||||
All AI inference runs locally on this machine. No cloud dependency required (cloud APIs optional).
|
||||
Primary LLMs are Claude 4.5/4.6 family via Anthropic API (Haiku for quick, Sonnet for standard, Opus for creative/RP). Local Ollama models available as fallback. All other inference (STT, TTS, image gen) runs locally.
|
||||
|
||||
---
|
||||
|
||||
## Core Stack
|
||||
|
||||
### AI & LLM
|
||||
- **Ollama** — local LLM runtime (target models: Llama 3.3 70B, Qwen 2.5 72B)
|
||||
- **Claude 4.5/4.6 family** — primary LLMs via Anthropic API, tiered per prompt style: Haiku 4.5 (quick commands), Sonnet 4.6 (standard/creative), Opus 4.6 (roleplay/storytelling)
|
||||
- **Ollama** — local LLM runtime (fallback models: Llama 3.3 70B, Qwen 3.5 35B-A3B, Qwen 2.5 7B)
|
||||
- **Model keep-warm daemon** — `preload-models.sh` runs as a loop, checks every 5 min, re-pins evicted models with `keep_alive=-1`. Keeps `qwen2.5:7b` (small/fast) and `$HOMEAI_MEDIUM_MODEL` (default: `qwen3.5:35b-a3b`) always loaded in VRAM. Medium model is configurable via env var for per-persona model assignment.
|
||||
- **Open WebUI** — browser-based chat interface, runs as Docker container
|
||||
|
||||
### Image Generation
|
||||
@@ -35,7 +37,8 @@ All AI inference runs locally on this machine. No cloud dependency required (clo
|
||||
|
||||
### Speech
|
||||
- **Whisper.cpp** — speech-to-text, optimised for Apple Silicon/Neural Engine
|
||||
- **Kokoro TTS** — fast, lightweight text-to-speech (primary, low-latency)
|
||||
- **Kokoro TTS** — fast, lightweight text-to-speech (primary, low-latency, local)
|
||||
- **ElevenLabs TTS** — cloud voice cloning/synthesis (per-character voice ID, routed via state file)
|
||||
- **Chatterbox TTS** — voice cloning engine (Apple Silicon MPS optimised)
|
||||
- **Qwen3-TTS** — alternative voice cloning via MLX
|
||||
- **openWakeWord** — always-on wake word detection
|
||||
@@ -43,17 +46,24 @@ All AI inference runs locally on this machine. No cloud dependency required (clo
|
||||
### Smart Home
|
||||
- **Home Assistant** — smart home control platform (Docker)
|
||||
- **Wyoming Protocol** — bridges Whisper STT + Kokoro/Piper TTS into Home Assistant
|
||||
- **Music Assistant** — self-hosted music control, integrates with Home Assistant
|
||||
- **Music Assistant** — self-hosted music control (Docker on Pi at 10.0.0.199:8095), Spotify + SMB library + Chromecast players
|
||||
- **Snapcast** — multi-room synchronised audio output
|
||||
|
||||
### AI Agent / Orchestration
|
||||
- **OpenClaw** — primary AI agent layer; receives voice commands, calls tools, manages personality
|
||||
- **OpenClaw Skills** — 13 skills total: home-assistant, image-generation, voice-assistant, vtube-studio, memory, service-monitor, character, routine, music, workflow, gitea, calendar, mode
|
||||
- **n8n** — visual workflow automation (Docker), chains AI actions
|
||||
- **mem0** — long-term memory layer for the AI character
|
||||
- **Character Memory System** — SQLite + sqlite-vec semantic search (personal per-character + general shared + follow-ups), injected into LLM system prompt with context-aware retrieval
|
||||
- **Prompt Styles** — 6 styles (quick, standard, creative, roleplayer, game-master, storyteller) with per-style model routing, temperature, and section stripping. JSON templates in `homeai-agent/prompt-styles/`
|
||||
- **Public/Private Mode** — routes requests to local Ollama (private) or cloud LLMs (public) with per-category overrides via `active-mode.json`. Default primary model is Claude Sonnet 4.6, with per-style model tiering (Haiku/Sonnet/Opus).
|
||||
|
||||
### Character & Personality
|
||||
- **Character Manager** (built — see `character-manager.jsx`) — single config UI for personality, prompts, models, Live2D mappings, and notes
|
||||
- Character config exports to JSON, consumed by OpenClaw system prompt and pipeline
|
||||
- **Character Schema v2** — JSON spec with background, dialogue_style, appearance, skills, gaze_presets, dream_id, gaze_character, prompt style overrides (v1 auto-migrated)
|
||||
- **HomeAI Dashboard** — unified web app: character editor, chat, memory manager, service dashboard
|
||||
- **Dream** — character management service (http://10.0.0.101:3000), REST API for character CRUD with GAZE integration for cover images
|
||||
- **Character MCP Server** — LLM-assisted character creation via Fandom wiki/Wikipedia lookup (Docker)
|
||||
- **GAZE** — image generation service (http://10.0.0.101:5782), REST API for presets, characters, and job-based image generation
|
||||
- Character config stored as JSON files in `~/homeai-data/characters/`, consumed by bridge for system prompt construction
|
||||
|
||||
### Visual Representation
|
||||
- **VTube Studio** — Live2D model display on desktop (macOS) and mobile (iOS/Android)
|
||||
@@ -85,60 +95,131 @@ All AI inference runs locally on this machine. No cloud dependency required (clo
|
||||
ESP32-S3-BOX-3 (room)
|
||||
→ Wake word detected (openWakeWord, runs locally on device or Mac Mini)
|
||||
→ Audio streamed to Mac Mini via Wyoming Satellite
|
||||
→ Whisper.cpp transcribes speech to text
|
||||
→ OpenClaw receives text + context
|
||||
→ Ollama LLM generates response (with character persona from system prompt)
|
||||
→ mem0 updates long-term memory
|
||||
→ Whisper MLX transcribes speech to text
|
||||
→ HA conversation agent → OpenClaw HTTP Bridge
|
||||
→ Bridge resolves character (satellite_id → character mapping)
|
||||
→ Bridge builds system prompt (profile + memories) and writes TTS config to state file
|
||||
→ Bridge checks active-mode.json for model routing (private=local, public=cloud)
|
||||
→ OpenClaw CLI → LLM generates response (Claude Haiku/Sonnet/Opus per style, Ollama fallback)
|
||||
→ Response dispatched:
|
||||
→ Kokoro/Chatterbox renders TTS audio
|
||||
→ Wyoming TTS reads state file → routes to Kokoro (local) or ElevenLabs (cloud)
|
||||
→ Audio sent back to ESP32-S3-BOX-3 (spoken response)
|
||||
→ VTube Studio API triggered (expression + lip sync on desktop/mobile)
|
||||
→ Home Assistant action called if applicable (lights, music, etc.)
|
||||
```
|
||||
|
||||
### Timeout Strategy
|
||||
|
||||
The HTTP bridge checks Ollama `/api/ps` before each request to determine if the LLM is already loaded:
|
||||
|
||||
| Layer | Warm (model loaded) | Cold (model loading) |
|
||||
|---|---|---|
|
||||
| HA conversation component | 200s | 200s |
|
||||
| OpenClaw HTTP bridge | 60s | 180s |
|
||||
| OpenClaw agent | 60s | 60s |
|
||||
|
||||
The keep-warm daemon ensures models stay loaded, so cold starts should be rare (only after an Ollama restart or under VRAM pressure).
|
||||
|
||||
---
|
||||
|
||||
## Character System
|
||||
|
||||
The AI assistant has a defined personality managed via the Character Manager tool.
|
||||
The AI assistant has a defined personality managed via the HomeAI Dashboard (character editor + memory manager).
|
||||
|
||||
Key config surfaces:
|
||||
- **System prompt** — injected into every Ollama request
|
||||
- **Voice clone reference** — `.wav` file path for Chatterbox/Qwen3-TTS
|
||||
- **Live2D expression mappings** — idle, speaking, thinking, happy, error states
|
||||
- **VTube Studio WebSocket triggers** — JSON map of events to expressions
|
||||
### Character Schema v2
|
||||
|
||||
Each character is a JSON file in `~/homeai-data/characters/` with:
|
||||
- **System prompt** — core personality, injected into every LLM request
|
||||
- **Profile fields** — background, appearance, dialogue_style, skills array
|
||||
- **TTS config** — engine (kokoro/elevenlabs), kokoro_voice, elevenlabs_voice_id, elevenlabs_model, speed
|
||||
- **GAZE presets** — array of `{preset, trigger}` for image generation styles
|
||||
- **Dream link** — `dream_id` for syncing character data from Dream service
|
||||
- **GAZE link** — `gaze_character` for auto-assigned cover image and presets
|
||||
- **Prompt style config** — `default_prompt_style`, `prompt_style_overrides` for per-style tuning
|
||||
- **Custom prompt rules** — trigger/response overrides for specific contexts
|
||||
- **mem0** — persistent memory that evolves over time
|
||||
|
||||
Character config JSON (exported from Character Manager) is the single source of truth consumed by all pipeline components.
|
||||
### Memory System
|
||||
|
||||
SQLite + sqlite-vec database at `~/homeai-data/memories/memories.db`:
|
||||
- **Personal memories** — per-character, semantic/episodic/relational/opinion types
|
||||
- **General memories** — shared operational knowledge (character_id = "general")
|
||||
- **Follow-ups** — LLM-driven questions injected into system prompt, auto-resolve after 2 surfacings or 48h
|
||||
- **Privacy levels** — public, sensitive, local_only (local_only excluded from cloud model requests)
|
||||
- **Semantic search** — sentence-transformers all-MiniLM-L6-v2 embeddings (384 dims) for context-aware retrieval
|
||||
- Core module: `homeai-agent/memory_store.py` (imported by bridge + memory-ctl skill)
|
||||
|
||||
### Prompt Styles
|
||||
|
||||
Six response styles in `homeai-agent/prompt-styles/`, each a JSON template with model, temperature, and instructions:
|
||||
- **quick** — Claude Haiku 4.5, low temp, brief responses, strips profile sections
|
||||
- **standard** — Claude Sonnet 4.6, balanced
|
||||
- **creative** — Claude Sonnet 4.6, higher temp, elaborative
|
||||
- **roleplayer** — Claude Opus 4.6, full personality injection
|
||||
- **game-master** — Claude Opus 4.6, narrative-focused
|
||||
- **storyteller** — Claude Opus 4.6, story-centric
|
||||
|
||||
Style selection: dashboard chat has a style picker; characters can set `default_prompt_style`; satellites use the global active style. Bridge resolves model per style → group → mode → default.
|
||||
|
||||
### TTS Voice Routing
|
||||
|
||||
The bridge writes the active character's TTS config to `~/homeai-data/active-tts-voice.json` before each request. The Wyoming TTS server reads this state file to determine which engine/voice to use:
|
||||
- **Kokoro** — local, fast, uses `kokoro_voice` field (e.g., `af_heart`)
|
||||
- **ElevenLabs** — cloud, uses `elevenlabs_voice_id` + `elevenlabs_model`, returns PCM 24kHz
|
||||
|
||||
This works for both ESP32/HA pipeline and dashboard chat.
|
||||
|
||||
---
|
||||
|
||||
## Project Priorities
|
||||
|
||||
1. **Foundation** — Docker stack up (Home Assistant, Open WebUI, Portainer, Uptime Kuma)
|
||||
2. **LLM** — Ollama running with target models, Open WebUI connected
|
||||
3. **Voice pipeline** — Whisper → Ollama → Kokoro → Wyoming → Home Assistant
|
||||
4. **OpenClaw** — installed, onboarded, connected to Ollama and Home Assistant
|
||||
5. **ESP32-S3-BOX-3** — ESPHome flash, Wyoming Satellite, LVGL face
|
||||
6. **Character system** — system prompt wired up, mem0 integrated, voice cloned
|
||||
7. **VTube Studio** — model loaded, WebSocket API bridge written as OpenClaw skill
|
||||
8. **ComfyUI** — image generation online, character-consistent model workflows
|
||||
9. **Extended integrations** — n8n workflows, Music Assistant, Snapcast, Gitea, code-server
|
||||
10. **Polish** — Authelia, Tailscale hardening, mobile companion, iOS widgets
|
||||
1. **Foundation** — Docker stack up (Home Assistant, Open WebUI, Portainer, Uptime Kuma) ✅
|
||||
2. **LLM** — Ollama running with target models, Open WebUI connected ✅
|
||||
3. **Voice pipeline** — Whisper → Ollama → Kokoro → Wyoming → Home Assistant ✅
|
||||
4. **OpenClaw** — installed, onboarded, connected to Ollama and Home Assistant ✅
|
||||
5. **ESP32-S3-BOX-3** — ESPHome flash, Wyoming Satellite, display faces ✅
|
||||
6. **Character system** — schema v2, dashboard editor, memory system, per-character TTS routing ✅
|
||||
7. **OpenClaw skills expansion** — 9 new skills (memory, monitor, character, routine, music, workflow, gitea, calendar, mode) + public/private mode routing ✅
|
||||
8. **Music Assistant** — deployed on Pi (10.0.0.199:8095), Spotify + SMB + Chromecast players ✅
|
||||
9. **Memory v2 + Prompt Styles + Dream/GAZE** — SQLite memory with semantic search, 6 prompt styles with model tiering, Dream character import, GAZE character linking ✅
|
||||
10. **Animated visual** — PNG/GIF character visual for the web assistant (initial visual layer)
|
||||
11. **Android app** — companion app for mobile access to the assistant
|
||||
12. **ComfyUI** — image generation online, character-consistent model workflows
|
||||
13. **Extended integrations** — Snapcast, code-server
|
||||
14. **Polish** — Authelia, Tailscale hardening, iOS widgets
|
||||
|
||||
### Stretch Goals
|
||||
- **Live2D / VTube Studio** — full Live2D model with WebSocket API bridge (requires learning Live2D tooling)
|
||||
|
||||
---
|
||||
|
||||
## Key Paths & Conventions
|
||||
|
||||
- All Docker compose files: `~/server/docker/`
|
||||
- Launchd plists (source): `homeai-*/launchd/` (symlinked to `~/Library/LaunchAgents/`)
|
||||
- Docker compose (Mac Mini): `homeai-infra/docker/docker-compose.yml`
|
||||
- Docker compose (Pi/SELBINA): `~/docker/selbina/` on 10.0.0.199
|
||||
- OpenClaw skills: `~/.openclaw/skills/`
|
||||
- Character configs: `~/.openclaw/characters/`
|
||||
- OpenClaw workspace tools: `~/.openclaw/workspace/TOOLS.md`
|
||||
- OpenClaw config: `~/.openclaw/openclaw.json`
|
||||
- Character configs: `~/homeai-data/characters/`
|
||||
- Character memories DB: `~/homeai-data/memories/memories.db`
|
||||
- Memory store module: `homeai-agent/memory_store.py`
|
||||
- Prompt style templates: `homeai-agent/prompt-styles/`
|
||||
- Active prompt style: `~/homeai-data/active-prompt-style.json`
|
||||
- Conversation history: `~/homeai-data/conversations/`
|
||||
- Active TTS state: `~/homeai-data/active-tts-voice.json`
|
||||
- Active mode state: `~/homeai-data/active-mode.json`
|
||||
- Satellite → character map: `~/homeai-data/satellite-map.json`
|
||||
- Local routines: `~/homeai-data/routines/`
|
||||
- Voice reminders: `~/homeai-data/reminders.json`
|
||||
- Whisper models: `~/models/whisper/`
|
||||
- Ollama models: managed by Ollama at `~/.ollama/models/`
|
||||
- ComfyUI models: `~/ComfyUI/models/`
|
||||
- Voice reference audio: `~/voices/`
|
||||
- Gitea repos root: `~/gitea/`
|
||||
- Music Assistant (Pi): `~/docker/selbina/music-assistant/` on 10.0.0.199
|
||||
- Skills user guide: `homeai-agent/SKILLS_GUIDE.md`
|
||||
- Dream service: `http://10.0.0.101:3000` (character management, REST API)
|
||||
- GAZE service: `http://10.0.0.101:5782` (image generation, REST API)
|
||||
|
||||
---
|
||||
|
||||
@@ -148,6 +229,10 @@ Character config JSON (exported from Character Manager) is the single source of
|
||||
- ESP32-S3-BOX-3 units are dumb satellites — all intelligence stays on Mac Mini
|
||||
- The character JSON schema (from Character Manager) should be treated as a versioned spec; pipeline components read from it, never hardcode personality values
|
||||
- OpenClaw skills are the primary extension mechanism — new capabilities = new skills
|
||||
- Prefer local models; cloud API keys (Anthropic, OpenAI) are fallback only
|
||||
- Primary LLMs are Claude 4.5/4.6 family (Anthropic API) with per-style tiering; local Ollama models are available as fallback
|
||||
- Launchd plists are symlinked from repo source to ~/Library/LaunchAgents/ — edit source, then bootout/bootstrap to reload
|
||||
- Music Assistant runs on Pi (10.0.0.199), not Mac Mini — needs host networking for Chromecast mDNS discovery
|
||||
- VTube Studio API bridge should be a standalone OpenClaw skill with clear event interface
|
||||
- mem0 memory store should be backed up as part of regular Gitea commits
|
||||
- Memory DB (`memories.db`) should be backed up as part of regular Gitea commits
|
||||
- Dream characters can be linked to GAZE characters for cover image fallback and cross-referencing
|
||||
- Prompt style selection hierarchy: explicit user pick → character default → global active style
|
||||
|
||||
161
PHASE4_COMPLETION.md
Normal file
161
PHASE4_COMPLETION.md
Normal file
@@ -0,0 +1,161 @@
|
||||
# Phase 4 — OpenClaw Tool Calling Resolution
|
||||
|
||||
## Problem Statement
|
||||
OpenClaw needed Ollama to return structured `tool_calls` in API responses. The issue was a template mismatch:
|
||||
- **llama3.3:70b** outputs `<|python_tag|>exec {...}` (Llama's trained format)
|
||||
- **qwen3:32b** had template issues causing 400 errors
|
||||
- Ollama's template parser couldn't match the model output to the expected tool call format
|
||||
|
||||
## Solution Implemented
|
||||
**Option A: Pull qwen2.5:7b** — Ollama ships with a working tool-call template for this model.
|
||||
|
||||
### What Was Done
|
||||
|
||||
#### 1. Model Deployment
|
||||
- Pulled `qwen2.5:7b` (~4.7GB) from Ollama registry
|
||||
- Model includes native tool-calling support with proper template
|
||||
- Fast inference (~2-3s per response)
|
||||
|
||||
#### 2. OpenClaw Configuration
|
||||
Updated `~/.openclaw/openclaw.json`:
|
||||
```json
|
||||
{
|
||||
"models": {
|
||||
"providers": {
|
||||
"ollama": {
|
||||
"models": [
|
||||
{
|
||||
"id": "qwen2.5:7b",
|
||||
"name": "qwen2.5:7b",
|
||||
"contextWindow": 32768,
|
||||
"maxTokens": 4096
|
||||
},
|
||||
{
|
||||
"id": "llama3.3:70b",
|
||||
"name": "llama3.3:70b",
|
||||
"contextWindow": 32768,
|
||||
"maxTokens": 4096
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"agents": {
|
||||
"defaults": {
|
||||
"model": {
|
||||
"primary": "ollama/qwen2.5:7b"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### 3. HASS_TOKEN Setup
|
||||
- Fixed `~/.homeai/hass_token` (removed trailing comment from file)
|
||||
- Token is properly configured in launchd plist: `com.homeai.openclaw.plist`
|
||||
- HA API connectivity verified: `https://10.0.0.199:8123/api/` ✓
|
||||
|
||||
#### 4. Tool Calling Verification
|
||||
|
||||
**Direct Ollama API Test:**
|
||||
```bash
|
||||
curl -s http://localhost:11434/api/chat \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "qwen2.5:7b",
|
||||
"messages": [{"role": "user", "content": "Turn on the reading lamp"}],
|
||||
"tools": [{"type": "function", "function": {"name": "call_service", ...}}],
|
||||
"stream": false
|
||||
}'
|
||||
```
|
||||
|
||||
**Result:** ✓ Returns proper `tool_calls` array with structured function calls
|
||||
|
||||
**OpenClaw Agent Test:**
|
||||
```bash
|
||||
openclaw agent --message "Turn on the study shelves light" --agent main
|
||||
```
|
||||
|
||||
**Result:** ✓ Agent successfully executed the command via home-assistant skill
|
||||
|
||||
#### 5. Home Assistant Skill Testing
|
||||
- Tested `turn_on` command: ✓ Light turned on
|
||||
- Tested `turn_off` command: ✓ Light turned off
|
||||
- State updates verified via HA API: ✓ Confirmed
|
||||
|
||||
## Current Status
|
||||
|
||||
### ✓ Completed Tasks (Phase 4)
|
||||
- [x] Pull qwen2.5:7b model
|
||||
- [x] Configure OpenClaw to use qwen2.5:7b as primary model
|
||||
- [x] Wire HASS_TOKEN (`~/.homeai/hass_token`)
|
||||
- [x] Test home-assistant skill with real HA entities
|
||||
- [x] Verify tool calling works end-to-end
|
||||
|
||||
### Available Models
|
||||
- `qwen2.5:7b` — Primary (tool calling enabled) ✓
|
||||
- `llama3.3:70b` — Fallback (available but not primary)
|
||||
|
||||
### Next Steps (Phase 4 Remaining)
|
||||
- [ ] Set up mem0 with Chroma backend, test semantic recall
|
||||
- [ ] Write memory backup launchd job
|
||||
- [ ] Build morning briefing n8n workflow
|
||||
- [ ] Build notification router n8n workflow
|
||||
- [ ] Verify full voice → agent → HA action flow
|
||||
- [ ] Add OpenClaw to Uptime Kuma monitors
|
||||
|
||||
## Technical Notes
|
||||
|
||||
### Why qwen2.5:7b Works
|
||||
1. **Native Template Support**: Ollama's registry includes a proper chat template for qwen2.5
|
||||
2. **Tool Calling Format**: Model outputs match Ollama's expected tool call structure
|
||||
3. **No Template Tuning Needed**: Unlike llama3.3:70b, no custom TEMPLATE block required
|
||||
4. **Performance**: 7B model is fast enough for real-time HA control
|
||||
|
||||
### Token File Issue
|
||||
The `~/.homeai/hass_token` file had trailing content from the `.env` comment. Fixed by:
|
||||
1. Extracting clean token from `.env` using `awk '{print $1}'`
|
||||
2. Writing with `printf` (not `echo -n`, which was being interpreted literally)
|
||||
3. Verified token length: 183 bytes (correct JWT format)
|
||||
|
||||
### HA API Connectivity
|
||||
- HA runs on `https://10.0.0.199:8123` (HTTPS, not HTTP)
|
||||
- Requires `-k` flag in curl to skip SSL verification (self-signed cert)
|
||||
- Token authentication working: `Authorization: Bearer <token>`
|
||||
|
||||
## Files Modified
|
||||
- `~/.openclaw/openclaw.json` — Updated model configuration
|
||||
- `~/.homeai/hass_token` — Fixed token file
|
||||
- `TODO.md` — Marked completed tasks
|
||||
|
||||
## Verification Commands
|
||||
```bash
|
||||
# Check model availability
|
||||
ollama list | grep qwen2.5
|
||||
|
||||
# Test tool calling directly
|
||||
curl -s http://localhost:11434/api/chat \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"model": "qwen2.5:7b", "messages": [...], "tools": [...], "stream": false}'
|
||||
|
||||
# Test OpenClaw agent
|
||||
openclaw agent --message "Turn on the study shelves light" --agent main
|
||||
|
||||
# Verify HA connectivity
|
||||
curl -sk -H "Authorization: Bearer $(cat ~/.homeai/hass_token)" \
|
||||
https://10.0.0.199:8123/api/
|
||||
|
||||
# Test home-assistant skill
|
||||
HASS_TOKEN=$(cat ~/.homeai/hass_token) \
|
||||
~/gitea/homeai/homeai-agent/skills/home-assistant/ha-ctl \
|
||||
on light.study_shelves
|
||||
```
|
||||
|
||||
## Summary
|
||||
Phase 4 tool calling issue is **RESOLVED**. OpenClaw can now:
|
||||
- ✓ Receive structured tool calls from qwen2.5:7b
|
||||
- ✓ Execute home-assistant skill commands
|
||||
- ✓ Control HA entities (lights, switches, etc.)
|
||||
- ✓ Provide natural language responses
|
||||
|
||||
The system is ready for the next phase: memory integration and workflow automation.
|
||||
89
PORT_MAP.md
Normal file
89
PORT_MAP.md
Normal file
@@ -0,0 +1,89 @@
|
||||
# HomeAI Port Map
|
||||
|
||||
All ports used across the HomeAI stack. Updated 2026-03-20.
|
||||
|
||||
**Host: LINDBLUM (10.0.0.101)** — Mac Mini M4 Pro
|
||||
|
||||
## Voice Pipeline
|
||||
|
||||
| Port | Service | Protocol | Managed By | Binds |
|
||||
|------|---------|----------|------------|-------|
|
||||
| 10300 | Wyoming STT (Whisper MLX) | TCP (Wyoming) | launchd `com.homeai.wyoming-stt` | 0.0.0.0 |
|
||||
| 10301 | Wyoming TTS (Kokoro) | TCP (Wyoming) | launchd `com.homeai.wyoming-tts` | 0.0.0.0 |
|
||||
| 10302 | Wyoming TTS (ElevenLabs) | TCP (Wyoming) | launchd `com.homeai.wyoming-elevenlabs` | 0.0.0.0 |
|
||||
| 10700 | Wyoming Satellite | TCP (Wyoming) | launchd `com.homeai.wyoming-satellite` | 0.0.0.0 |
|
||||
|
||||
## Agent / Orchestration
|
||||
|
||||
| Port | Service | Protocol | Managed By | Binds |
|
||||
|------|---------|----------|------------|-------|
|
||||
| 8080 | OpenClaw Gateway | HTTP | launchd `com.homeai.openclaw` | localhost |
|
||||
| 8081 | OpenClaw HTTP Bridge | HTTP | launchd `com.homeai.openclaw-bridge` | 0.0.0.0 |
|
||||
| 8002 | VTube Studio Bridge | HTTP | launchd `com.homeai.vtube-bridge` | localhost |
|
||||
|
||||
## LLM
|
||||
|
||||
| Port | Service | Protocol | Managed By | Binds |
|
||||
|------|---------|----------|------------|-------|
|
||||
| 11434 | Ollama | HTTP | launchd `com.homeai.ollama` | 0.0.0.0 |
|
||||
| 3030 | Open WebUI | HTTP | Docker `homeai-open-webui` | 0.0.0.0 |
|
||||
|
||||
## Dashboards / UIs
|
||||
|
||||
| Port | Service | Protocol | Managed By | Binds |
|
||||
|------|---------|----------|------------|-------|
|
||||
| 5173 | HomeAI Dashboard | HTTP | launchd `com.homeai.dashboard` | localhost |
|
||||
| 5174 | Desktop Assistant | HTTP | launchd `com.homeai.desktop-assistant` | localhost |
|
||||
|
||||
## Image Generation
|
||||
|
||||
| Port | Service | Protocol | Managed By | Binds |
|
||||
|------|---------|----------|------------|-------|
|
||||
| 5782 | GAZE API | HTTP | — | 10.0.0.101 |
|
||||
| 8188 | ComfyUI | HTTP | — | localhost |
|
||||
|
||||
## Visual
|
||||
|
||||
| Port | Service | Protocol | Managed By | Binds |
|
||||
|------|---------|----------|------------|-------|
|
||||
| 8001 | VTube Studio (WebSocket) | WS | External app | localhost |
|
||||
|
||||
## Infrastructure (Docker)
|
||||
|
||||
| Port | Service | Protocol | Managed By | Binds |
|
||||
|------|---------|----------|------------|-------|
|
||||
| 3001 | Uptime Kuma | HTTP | Docker `homeai-uptime-kuma` | 0.0.0.0 |
|
||||
| 5678 | n8n | HTTP | Docker `homeai-n8n` | 0.0.0.0 |
|
||||
| 8090 | code-server | HTTP | Docker `homeai-code-server` | 0.0.0.0 |
|
||||
|
||||
---
|
||||
|
||||
**Host: SELBINA (10.0.0.199)** — Raspberry Pi 5
|
||||
|
||||
| Port | Service | Protocol | Managed By |
|
||||
|------|---------|----------|------------|
|
||||
| 3000 | Gitea | HTTP | Docker |
|
||||
| 8095 | Music Assistant | HTTP | Docker (host networking) |
|
||||
| 8123 | Home Assistant | HTTPS | Docker |
|
||||
| 9443 | Portainer | HTTPS | Docker |
|
||||
|
||||
---
|
||||
|
||||
## Port Ranges Summary
|
||||
|
||||
```
|
||||
3000–3030 Web UIs (Gitea, Uptime Kuma, Open WebUI)
|
||||
5173–5174 Vite dev servers (dashboards)
|
||||
5678 n8n
|
||||
5782 GAZE API
|
||||
8001–8002 VTube Studio (app + bridge)
|
||||
8080–8081 OpenClaw (gateway + bridge)
|
||||
8090 code-server
|
||||
8095 Music Assistant
|
||||
8123 Home Assistant
|
||||
8188 ComfyUI
|
||||
9443 Portainer
|
||||
11434 Ollama
|
||||
10300–10302 Wyoming voice (STT + TTS)
|
||||
10700 Wyoming satellite
|
||||
```
|
||||
@@ -90,20 +90,26 @@ All repos live under `~/gitea/homeai/` on the Mac Mini and are mirrored to the s
|
||||
**Sub-components:**
|
||||
|
||||
```
|
||||
[Mic] → openWakeWord → Wyoming STT (Whisper.cpp) → [text out]
|
||||
[text in] → Wyoming TTS (Kokoro) → [audio out]
|
||||
[Mic] → Wyoming Satellite (port 10700) → Home Assistant Voice Pipeline → Wyoming STT (Whisper)
|
||||
↓
|
||||
[Speaker] ← Wyoming TTS (Kokoro) ← OpenClaw Agent ← transcribed text
|
||||
```
|
||||
|
||||
*Note: The original openWakeWord daemon has been replaced by the Wyoming satellite approach, which handles wake word detection through Home Assistant's voice pipeline.*
|
||||
|
||||
**Key decisions:**
|
||||
- Whisper.cpp runs as a Wyoming STT provider (via `wyoming-faster-whisper` or native Wyoming adapter)
|
||||
- Whisper.cpp runs as a Wyoming STT provider (via `wyoming-faster-whisper`)
|
||||
- Kokoro is primary TTS; Chatterbox used when voice cloning is active (P5)
|
||||
- openWakeWord runs as a launchd service
|
||||
- Wyoming server port: `10300` (STT), `10301` (TTS) — standard Wyoming ports
|
||||
- Wyoming satellite runs on port `10700` — handles audio I/O and connects to HA voice pipeline
|
||||
- openWakeWord daemon disabled — wake word detection now handled by HA via Wyoming satellite
|
||||
- Wyoming server ports: `10300` (STT), `10301` (TTS), `10700` (Satellite) — standard Wyoming ports
|
||||
|
||||
**Interface contract:**
|
||||
- Wyoming STT: `tcp://localhost:10300`
|
||||
- Wyoming TTS: `tcp://localhost:10301`
|
||||
- Wyoming STT: `tcp://localhost:10300` (Whisper large-v3)
|
||||
- Wyoming TTS: `tcp://localhost:10301` (Kokoro ONNX)
|
||||
- Wyoming Satellite: `tcp://localhost:10700` (Mac Mini audio I/O)
|
||||
- Direct Python API for P4 (agent bypasses Wyoming for non-HA calls)
|
||||
- OpenClaw Bridge: `homeai-agent/skills/home-assistant/openclaw_bridge.py` (HA integration)
|
||||
|
||||
---
|
||||
|
||||
@@ -302,6 +308,7 @@ P5 (character) ──────┐ │ │ │
|
||||
| Ollama API `localhost:11434/v1` | HTTP (OpenAI compat) | P2 | P3, P4, P7 |
|
||||
| Wyoming STT `localhost:10300` | TCP/Wyoming | P3 | P6, HA |
|
||||
| Wyoming TTS `localhost:10301` | TCP/Wyoming | P3 | P6, HA |
|
||||
| Wyoming Satellite `localhost:10700` | TCP/Wyoming | P3 | HA |
|
||||
| OpenClaw API `localhost:8080` | HTTP | P4 | P3, P7, P8 |
|
||||
| Character JSON `~/.openclaw/characters/` | JSON file | P5 | P4, P3, P7 |
|
||||
| `character.schema.json` v1 | JSON Schema | P5 | P4, P3, P7 |
|
||||
|
||||
255
TODO.md
255
TODO.md
@@ -9,30 +9,28 @@
|
||||
|
||||
### P1 · homeai-infra
|
||||
|
||||
- [ ] Install Docker Desktop for Mac, enable launch at login
|
||||
- [ ] Create shared `homeai` Docker network
|
||||
- [ ] Create `~/server/docker/` directory structure
|
||||
- [ ] Write compose files: Home Assistant, Portainer, Uptime Kuma, Gitea, code-server, n8n
|
||||
- [ ] Write `.env.secrets.example` and `Makefile`
|
||||
- [ ] `make up-all` — bring all services up
|
||||
- [ ] Home Assistant onboarding — generate long-lived access token
|
||||
- [ ] Write `~/server/.env.services` with all service URLs
|
||||
- [x] Install Docker Desktop for Mac, enable launch at login
|
||||
- [x] Create shared `homeai` Docker network
|
||||
- [x] Create `~/server/docker/` directory structure
|
||||
- [x] Write compose files: Uptime Kuma, code-server, n8n (HA, Portainer, Gitea are pre-existing on 10.0.0.199)
|
||||
- [x] `docker compose up -d` — bring all services up
|
||||
- [x] Home Assistant onboarding — long-lived access token generated, stored in `.env`
|
||||
- [ ] Install Tailscale, verify all services reachable on Tailnet
|
||||
- [ ] Gitea: create admin account, initialise all 8 sub-project repos, configure SSH
|
||||
- [ ] Uptime Kuma: add monitors for all services, configure mobile alerts
|
||||
- [x] Uptime Kuma: add monitors for all services, configure mobile alerts
|
||||
- [ ] Verify all containers survive a cold reboot
|
||||
|
||||
### P2 · homeai-llm
|
||||
|
||||
- [ ] Install Ollama natively via brew
|
||||
- [ ] Write and load launchd plist (`com.ollama.ollama.plist`)
|
||||
- [ ] Write `ollama-models.txt` with model manifest
|
||||
- [ ] Run `scripts/pull-models.sh` — pull all models
|
||||
- [ ] Run `scripts/benchmark.sh` — record results in `benchmark-results.md`
|
||||
- [ ] Deploy Open WebUI via Docker compose (port 3030)
|
||||
- [ ] Verify Open WebUI connected to Ollama, all models available
|
||||
- [ ] Add Ollama + Open WebUI to Uptime Kuma monitors
|
||||
- [ ] Add `OLLAMA_URL` and `OPEN_WEBUI_URL` to `.env.services`
|
||||
- [x] Install Ollama natively via brew
|
||||
- [x] Write and load launchd plist (`com.homeai.ollama.plist`) — `/opt/homebrew/bin/ollama`
|
||||
- [x] Register local GGUF models via Modelfiles (no download): llama3.3:70b, qwen3:32b, codestral:22b, qwen2.5:7b
|
||||
- [x] Register additional models: EVA-LLaMA-3.33-70B, Midnight-Miqu-70B, QwQ-32B, Qwen3.5-35B, Qwen3-Coder-30B, Qwen3-VL-30B, GLM-4.6V-Flash, DeepSeek-R1-8B, gemma-3-27b
|
||||
- [x] Add qwen3.5:35b-a3b (MoE, Q8_0) — 26.7 tok/s, recommended for voice pipeline
|
||||
- [x] Write model keep-warm daemon + launchd service (pins qwen2.5:7b + $HOMEAI_MEDIUM_MODEL in VRAM, checks every 5 min)
|
||||
- [x] Deploy Open WebUI via Docker compose (port 3030)
|
||||
- [x] Verify Open WebUI connected to Ollama, all models available
|
||||
- [x] Run pipeline benchmark (homeai-voice/scripts/benchmark_pipeline.py) — STT/LLM/TTS latency profiled
|
||||
- [x] Add Ollama + Open WebUI to Uptime Kuma monitors
|
||||
|
||||
---
|
||||
|
||||
@@ -40,58 +38,78 @@
|
||||
|
||||
### P3 · homeai-voice
|
||||
|
||||
- [ ] Compile Whisper.cpp with Metal support
|
||||
- [ ] Download Whisper models (`large-v3`, `medium.en`) to `~/models/whisper/`
|
||||
- [ ] Install `wyoming-faster-whisper`, test STT from audio file
|
||||
- [ ] Install Kokoro TTS, test output to audio file
|
||||
- [ ] Install Wyoming-Kokoro adapter, verify Wyoming protocol
|
||||
- [ ] Write + load launchd plists for Wyoming STT (10300) and TTS (10301)
|
||||
- [ ] Connect Home Assistant Wyoming integration (STT + TTS)
|
||||
- [ ] Create HA Voice Assistant pipeline
|
||||
- [ ] Test HA Assist via browser: type query → hear spoken response
|
||||
- [ ] Install openWakeWord, test wake detection with USB mic
|
||||
- [ ] Write + load openWakeWord launchd plist
|
||||
- [x] Install `wyoming-faster-whisper` — model: faster-whisper-large-v3 (auto-downloaded)
|
||||
- [x] Upgrade STT to wyoming-mlx-whisper (whisper-large-v3-turbo, MLX Metal GPU) — 20x faster (8s → 400ms)
|
||||
- [x] Install Kokoro ONNX TTS — models at `~/models/kokoro/`
|
||||
- [x] Write Wyoming-Kokoro adapter server (`homeai-voice/tts/wyoming_kokoro_server.py`)
|
||||
- [x] Write + load launchd plists for Wyoming STT (10300) and TTS (10301)
|
||||
- [x] Install openWakeWord + pyaudio — model: hey_jarvis
|
||||
- [x] Write + load openWakeWord launchd plist (`com.homeai.wakeword`) — DISABLED, replaced by Wyoming satellite
|
||||
- [x] Write `wyoming/test-pipeline.sh` — smoke test (3/3 passing)
|
||||
- [x] Install Wyoming satellite — handles wake word via HA voice pipeline
|
||||
- [x] Install Wyoming satellite for Mac Mini (port 10700)
|
||||
- [x] Write OpenClaw conversation custom component for Home Assistant
|
||||
- [x] Connect Home Assistant Wyoming integration (STT + TTS + Satellite) — ready to configure in HA UI
|
||||
- [x] Create HA Voice Assistant pipeline with OpenClaw conversation agent — component ready, needs HA UI setup
|
||||
- [x] Test HA Assist via browser: type query → hear spoken response
|
||||
- [x] Test full voice loop: wake word → STT → OpenClaw → TTS → audio playback
|
||||
- [ ] Install Chatterbox TTS (MPS build), test with sample `.wav`
|
||||
- [ ] Install Qwen3-TTS via MLX (fallback)
|
||||
- [ ] Write `wyoming/test-pipeline.sh` — end-to-end smoke test
|
||||
- [ ] Add Wyoming STT/TTS to Uptime Kuma monitors
|
||||
- [ ] Train custom wake word using character name
|
||||
- [x] Add Wyoming STT/TTS to Uptime Kuma monitors
|
||||
|
||||
---
|
||||
|
||||
## Phase 3 — Agent & Character
|
||||
|
||||
### P5 · homeai-character *(no runtime deps — can start alongside P1)*
|
||||
|
||||
- [ ] Define and write `schema/character.schema.json` (v1)
|
||||
- [ ] Write `characters/aria.json` — default character
|
||||
- [ ] Set up Vite project in `src/`, install deps
|
||||
- [ ] Integrate existing `character-manager.jsx` into Vite project
|
||||
- [ ] Add schema validation on export (ajv)
|
||||
- [ ] Add expression mapping UI section
|
||||
- [ ] Add custom rules editor
|
||||
- [ ] Test full edit → export → validate → load cycle
|
||||
- [ ] Record or source voice reference audio for Aria (`~/voices/aria.wav`)
|
||||
- [ ] Pre-process audio with ffmpeg, test with Chatterbox
|
||||
- [ ] Update `aria.json` with voice clone path if quality is good
|
||||
- [ ] Write `SchemaValidator.js` as standalone utility
|
||||
|
||||
### P4 · homeai-agent
|
||||
|
||||
- [ ] Confirm OpenClaw installation method and Ollama compatibility
|
||||
- [ ] Install OpenClaw, write `~/.openclaw/config.yaml`
|
||||
- [ ] Verify OpenClaw responds to basic text query via `/chat`
|
||||
- [ ] Write `skills/home_assistant.py` — test lights on/off via voice
|
||||
- [ ] Write `skills/memory.py` — test store and recall
|
||||
- [ ] Write `skills/weather.py` — verify HA weather sensor data
|
||||
- [ ] Write `skills/timer.py` — test set/fire a timer
|
||||
- [ ] Write skill stubs: `music.py`, `vtube_studio.py`, `comfyui.py`
|
||||
- [ ] Set up mem0 with Chroma backend, test semantic recall
|
||||
- [ ] Write and load memory backup launchd job
|
||||
- [ ] Symlink `homeai-agent/skills/` → `~/.openclaw/skills/`
|
||||
- [ ] Build morning briefing n8n workflow
|
||||
- [ ] Build notification router n8n workflow
|
||||
- [ ] Verify full voice → agent → HA action flow
|
||||
- [ ] Add OpenClaw to Uptime Kuma monitors
|
||||
- [x] Install OpenClaw (npm global, v2026.3.2)
|
||||
- [x] Configure Ollama provider (native API, `http://localhost:11434`)
|
||||
- [x] Write + load launchd plist (`com.homeai.openclaw`) — gateway on port 8080
|
||||
- [x] Fix context window: set `contextWindow=32768` for llama3.3:70b in `openclaw.json`
|
||||
- [x] Fix Llama 3.3 Modelfile: add tool-calling TEMPLATE block
|
||||
- [x] Verify `openclaw agent --message "..." --agent main` → completed
|
||||
- [x] Write `skills/home-assistant` SKILL.md — HA REST API control via ha-ctl CLI
|
||||
- [x] Write `skills/voice-assistant` SKILL.md — voice response style guide
|
||||
- [x] Wire HASS_TOKEN — create `~/.homeai/hass_token` or set env in launchd plist
|
||||
- [x] Fix HA tool calling: set commands.native=true, symlink ha-ctl to PATH, update TOOLS.md
|
||||
- [x] Test home-assistant skill: "turn on/off the reading lamp" — verified exec→ha-ctl→HA action
|
||||
- [x] Set up mem0 with Chroma backend, test semantic recall
|
||||
- [x] Write memory backup launchd job
|
||||
- [x] Build morning briefing n8n workflow
|
||||
- [x] Build notification router n8n workflow
|
||||
- [x] Verify full voice → agent → HA action flow
|
||||
- [x] Add OpenClaw to Uptime Kuma monitors (monitor prepared; final enablement in the Uptime Kuma UI is a manual user action)
|
||||
|
||||
### P5 · homeai-dashboard *(character system + dashboard)*
|
||||
|
||||
- [x] Define and write `schema/character.schema.json` (v1)
|
||||
- [x] Write `characters/aria.json` — default character
|
||||
- [x] Set up Vite project in `src/`, install deps
|
||||
- [x] Integrate existing `character-manager.jsx` into Vite project
|
||||
- [x] Add schema validation on export (ajv)
|
||||
- [x] Add expression mapping UI section
|
||||
- [x] Add custom rules editor
|
||||
- [x] Test full edit → export → validate → load cycle
|
||||
- [x] Wire character system prompt into OpenClaw agent config
|
||||
- [x] Record or source voice reference audio for Aria (`~/voices/aria.wav`)
|
||||
- [x] Pre-process audio with ffmpeg, test with Chatterbox
|
||||
- [x] Update `aria.json` with voice clone path if quality is good
|
||||
- [x] Build unified HomeAI dashboard — dark-themed frontend showing live service status + links to individual UIs
|
||||
- [x] Add character profile management to dashboard — store/switch character configs with attached profile images
|
||||
- [x] Add TTS voice preview in character editor — Kokoro preview via OpenClaw bridge with loading state, custom text, stop control
|
||||
- [x] Merge homeai-character + homeai-desktop into unified homeai-dashboard (services, chat, characters, editor)
|
||||
- [x] Upgrade character schema to v2 — background, dialogue_style, appearance, skills, gaze_presets (auto-migrate v1)
|
||||
- [x] Add LLM-assisted character creation via Character MCP server (Fandom/Wikipedia lookup)
|
||||
- [x] Add character memory system — personal (per-character) + general (shared) memories with dashboard UI
|
||||
- [x] Add conversation history with per-conversation persistence
|
||||
- [x] Wire character_id through full pipeline (dashboard → bridge → LLM system prompt)
|
||||
- [x] Add TTS text cleaning — strip tags, asterisks, emojis, markdown before synthesis
|
||||
- [x] Add per-character TTS voice routing — bridge writes state file, Wyoming server reads it
|
||||
- [x] Add ElevenLabs TTS support in Wyoming server — cloud voice synthesis via state file routing
|
||||
- [x] Dashboard auto-selects character's TTS engine/voice (Kokoro or ElevenLabs)
|
||||
- [ ] Deploy dashboard as Docker container or static site on Mac Mini
|
||||
|
||||
---
|
||||
|
||||
@@ -99,72 +117,88 @@
|
||||
|
||||
### P6 · homeai-esp32
|
||||
|
||||
- [ ] Install ESPHome: `pip install esphome`
|
||||
- [ ] Write `esphome/secrets.yaml` (gitignored)
|
||||
- [ ] Write `base.yaml`, `voice.yaml`, `display.yaml`, `animations.yaml`
|
||||
- [ ] Write `s3-box-living-room.yaml` for first unit
|
||||
- [ ] Flash first unit via USB
|
||||
- [ ] Verify unit appears in HA device list
|
||||
- [ ] Assign Wyoming voice pipeline to unit in HA
|
||||
- [ ] Test full wake → STT → LLM → TTS → audio playback cycle
|
||||
- [ ] Test LVGL face: idle → listening → thinking → speaking → error
|
||||
- [ ] Verify OTA firmware update works wirelessly
|
||||
- [ ] Flash remaining units (bedroom, kitchen, etc.)
|
||||
- [x] Install ESPHome in `~/homeai-esphome-env` (Python 3.12 venv)
|
||||
- [x] Write `esphome/secrets.yaml` (gitignored)
|
||||
- [x] Write `homeai-living-room.yaml` (based on official S3-BOX-3 reference config)
|
||||
- [x] Generate placeholder face illustrations (7 PNGs, 320×240)
|
||||
- [x] Write `setup.sh` with flash/ota/logs/validate commands
|
||||
- [x] Write `deploy.sh` with OTA deploy, image management, multi-unit support
|
||||
- [x] Flash first unit via USB (living room)
|
||||
- [x] Verify unit appears in HA device list (requires HA 2026.x for ESPHome 2025.12+ compat)
|
||||
- [x] Assign Wyoming voice pipeline to unit in HA
|
||||
- [x] Test full wake → STT → LLM → TTS → audio playback cycle
|
||||
- [x] Test display states: idle → listening → thinking → replying → error
|
||||
- [x] Verify OTA firmware update works wirelessly (`deploy.sh --device OTA`)
|
||||
- [ ] Flash remaining units (bedroom, kitchen)
|
||||
- [ ] Document MAC address → room name mapping
|
||||
|
||||
### P6b · homeai-rpi (Kitchen Satellite)
|
||||
|
||||
- [x] Set up Wyoming Satellite on Raspberry Pi 5 (SELBINA) with ReSpeaker 2-Mics pHAT
|
||||
- [x] Write setup.sh — full Pi provisioning (venv, drivers, systemd, scripts)
|
||||
- [x] Write deploy.sh — remote deploy/manage from Mac Mini (push-wrapper, test-logs, etc.)
|
||||
- [x] Write satellite_wrapper.py — applies monkey-patches that fix the TTS echo, a writer race, and the streaming timeout
|
||||
- [x] Test multi-command voice loop without freezing
|
||||
|
||||
---
|
||||
|
||||
## Phase 5 — Visual Layer
|
||||
|
||||
### P7 · homeai-visual
|
||||
|
||||
- [ ] Install VTube Studio (Mac App Store)
|
||||
- [ ] Enable WebSocket API on port 8001
|
||||
- [ ] Source/purchase a Live2D model (nizima.com or booth.pm)
|
||||
- [ ] Load model in VTube Studio
|
||||
- [ ] Create hotkeys for all 8 expression states
|
||||
- [ ] Write `skills/vtube_studio.py` full implementation
|
||||
- [ ] Run auth flow — click Allow in VTube Studio, save token
|
||||
- [ ] Test all 8 expressions via test script
|
||||
- [ ] Update `aria.json` with real VTube Studio hotkey IDs
|
||||
- [ ] Write `lipsync.py` amplitude-based helper
|
||||
- [ ] Integrate lip sync into OpenClaw TTS dispatch
|
||||
- [ ] Symlink `skills/` → `~/.openclaw/skills/`
|
||||
- [ ] Test full pipeline: voice → thinking expression → speaking with lip sync
|
||||
#### VTube Studio Expression Bridge
|
||||
- [x] Write `vtube-bridge.py` — persistent WebSocket ↔ HTTP bridge daemon (port 8002)
|
||||
- [x] Write `vtube-ctl` CLI wrapper + OpenClaw skill (`~/.openclaw/skills/vtube-studio/`)
|
||||
- [x] Wire expression triggers into `openclaw-http-bridge.py` (thinking → idle, speaking → idle)
|
||||
- [x] Add amplitude-based lip sync to `wyoming_kokoro_server.py` (RMS → MouthOpen parameter)
|
||||
- [x] Write `test-expressions.py` — auth flow, expression cycle, lip sync sweep, latency test
|
||||
- [x] Write launchd plist + setup.sh for venv creation and service registration
|
||||
- [ ] Install VTube Studio from Mac App Store, enable WebSocket API (port 8001)
|
||||
- [ ] Source/purchase Live2D model, load in VTube Studio
|
||||
- [ ] Create 8 expression hotkeys, record UUIDs
|
||||
- [ ] Run `setup.sh` to create venv, install websockets, load launchd service
|
||||
- [ ] Run `vtube-ctl auth` — click Allow in VTube Studio
|
||||
- [ ] Update `aria.json` with real hotkey UUIDs (replace placeholders)
|
||||
- [ ] Run `test-expressions.py --all` — verify expressions + lip sync + latency
|
||||
- [ ] Set up VTube Studio mobile (iPhone/iPad) on Tailnet
|
||||
|
||||
#### Web Visuals (Dashboard)
|
||||
- [ ] Design PNG/GIF character visuals for web assistant (idle, thinking, speaking, etc.)
|
||||
- [ ] Integrate animated visuals into homeai-dashboard chat view
|
||||
- [ ] Sync visual state to voice pipeline events (listening, processing, responding)
|
||||
- [ ] Add expression transitions and idle animations
|
||||
|
||||
### P8 · homeai-android
|
||||
|
||||
- [ ] Build Android companion app for mobile assistant access
|
||||
- [ ] Integrate with OpenClaw bridge API (chat, TTS, STT)
|
||||
- [ ] Add character visual display
|
||||
- [ ] Push notification support via ntfy/FCM
|
||||
|
||||
---
|
||||
|
||||
## Phase 6 — Image Generation
|
||||
|
||||
### P8 · homeai-images
|
||||
### P9 · homeai-images (ComfyUI)
|
||||
|
||||
- [ ] Clone ComfyUI to `~/ComfyUI/`, install deps in venv
|
||||
- [ ] Verify MPS is detected at launch
|
||||
- [ ] Write and load launchd plist (`com.homeai.comfyui.plist`)
|
||||
- [ ] Download SDXL base model
|
||||
- [ ] Download Flux.1-schnell
|
||||
- [ ] Download ControlNet models (canny, depth)
|
||||
- [ ] Download SDXL base model + Flux.1-schnell + ControlNet models
|
||||
- [ ] Test generation via ComfyUI web UI (port 8188)
|
||||
- [ ] Build and export `quick.json` workflow
|
||||
- [ ] Build and export `portrait.json` workflow
|
||||
- [ ] Build and export `scene.json` workflow (ControlNet)
|
||||
- [ ] Build and export `upscale.json` workflow
|
||||
- [ ] Write `skills/comfyui.py` full implementation
|
||||
- [ ] Test skill: `comfyui.quick("test prompt")` → image file returned
|
||||
- [ ] Build and export workflow JSONs (quick, portrait, scene, upscale)
|
||||
- [ ] Write `skills/comfyui` SKILL.md + implementation
|
||||
- [ ] Collect character reference images for LoRA training
|
||||
- [ ] Train SDXL LoRA with kohya_ss
|
||||
- [ ] Load LoRA into `portrait.json`, verify character consistency
|
||||
- [ ] Symlink `skills/` → `~/.openclaw/skills/`
|
||||
- [ ] Test via OpenClaw: "Generate a portrait of Aria looking happy"
|
||||
- [ ] Add ComfyUI to Uptime Kuma monitors
|
||||
|
||||
---
|
||||
|
||||
## Phase 7 — Extended Integrations & Polish
|
||||
|
||||
- [ ] Deploy Music Assistant (Docker), integrate with Home Assistant
|
||||
- [ ] Complete `skills/music.py` in OpenClaw
|
||||
### P10 · Integrations & Polish
|
||||
|
||||
- [x] Deploy Music Assistant (Docker on Pi 10.0.0.199:8095), Spotify + SMB + Chromecast
|
||||
- [x] Write `skills/music` SKILL.md for OpenClaw
|
||||
- [ ] Deploy Snapcast server on Mac Mini
|
||||
- [ ] Configure Snapcast clients on ESP32 units for multi-room audio
|
||||
- [ ] Configure Authelia as 2FA layer in front of web UIs
|
||||
@@ -179,11 +213,24 @@
|
||||
|
||||
---
|
||||
|
||||
## Stretch Goals
|
||||
|
||||
### Live2D / VTube Studio
|
||||
|
||||
- [ ] Learn Live2D modelling toolchain (Live2D Cubism Editor)
|
||||
- [ ] Install VTube Studio (Mac App Store), enable WebSocket API on port 8001
|
||||
- [ ] Source/commission a Live2D model (nizima.com or booth.pm)
|
||||
- [ ] Create hotkeys for expression states
|
||||
- [ ] Write `skills/vtube_studio` SKILL.md + implementation
|
||||
- [ ] Write `lipsync.py` amplitude-based helper
|
||||
- [ ] Integrate lip sync into OpenClaw TTS dispatch
|
||||
- [ ] Set up VTube Studio mobile (iPhone/iPad) on Tailnet
|
||||
|
||||
---
|
||||
|
||||
## Open Decisions
|
||||
|
||||
- [ ] Confirm character name (determines wake word training)
|
||||
- [ ] Confirm OpenClaw version/fork and Ollama compatibility
|
||||
- [ ] Live2D model: purchase off-the-shelf or commission custom?
|
||||
- [ ] mem0 backend: Chroma (simple) vs Qdrant Docker (better semantic search)?
|
||||
- [ ] Snapcast output: ESP32 built-in speakers or dedicated audio hardware per room?
|
||||
- [ ] Authelia user store: local file vs LDAP?
|
||||
|
||||
349
VOICE_PIPELINE_STATUS.md
Normal file
349
VOICE_PIPELINE_STATUS.md
Normal file
@@ -0,0 +1,349 @@
|
||||
# Voice Pipeline Status Report
|
||||
|
||||
> Last Updated: 2026-03-08
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
|
||||
The voice pipeline backend is **fully operational** on the Mac Mini. All services are running and tested:
|
||||
|
||||
- ✅ Wyoming STT (Whisper large-v3) - Port 10300
|
||||
- ✅ Wyoming TTS (Kokoro ONNX) - Port 10301
|
||||
- ✅ Wyoming Satellite (wake word + audio) - Port 10700
|
||||
- ✅ OpenClaw Agent (LLM + skills) - Port 8080
|
||||
- ✅ Ollama (local LLM runtime) - Port 11434
|
||||
|
||||
**Next Step**: Manual Home Assistant UI configuration to connect the pipeline.
|
||||
|
||||
---
|
||||
|
||||
## What's Working ✅
|
||||
|
||||
### 1. Speech-to-Text (STT)
|
||||
- **Service**: Wyoming Faster Whisper
|
||||
- **Model**: large-v3 (multilingual, high accuracy)
|
||||
- **Port**: 10300
|
||||
- **Status**: Running via launchd (`com.homeai.wyoming-stt`)
|
||||
- **Test**: `nc -z localhost 10300` ✓
|
||||
|
||||
### 2. Text-to-Speech (TTS)
|
||||
- **Service**: Wyoming Kokoro ONNX
|
||||
- **Voice**: af_heart (default, configurable)
|
||||
- **Port**: 10301
|
||||
- **Status**: Running via launchd (`com.homeai.wyoming-tts`)
|
||||
- **Test**: `nc -z localhost 10301` ✓
|
||||
|
||||
### 3. Wyoming Satellite
|
||||
- **Function**: Wake word detection + audio capture/playback
|
||||
- **Wake Word**: "hey_jarvis" (openWakeWord model)
|
||||
- **Port**: 10700
|
||||
- **Status**: Running via launchd (`com.homeai.wyoming-satellite`)
|
||||
- **Test**: `nc -z localhost 10700` ✓
|
||||
|
||||
### 4. OpenClaw Agent
|
||||
- **Function**: AI agent with tool calling (home automation, etc.)
|
||||
- **Gateway**: WebSocket + CLI
|
||||
- **Port**: 8080
|
||||
- **Status**: Running via launchd (`com.homeai.openclaw`)
|
||||
- **Skills**: home-assistant, voice-assistant
|
||||
- **Test**: `openclaw agent --message "Hello" --agent main` ✓
|
||||
|
||||
### 5. Ollama LLM
|
||||
- **Models**: llama3.3:70b, qwen2.5:7b, and others
|
||||
- **Port**: 11434
|
||||
- **Status**: Running natively
|
||||
- **Test**: `ollama list` ✓
|
||||
|
||||
### 6. Home Assistant Integration
|
||||
- **Custom Component**: OpenClaw Conversation agent created
|
||||
- **Location**: `homeai-agent/custom_components/openclaw_conversation/`
|
||||
- **Features**:
|
||||
- Full conversation agent implementation
|
||||
- Config flow for UI setup
|
||||
- CLI fallback if HTTP unavailable
|
||||
- Error handling and logging
|
||||
- **Status**: Ready for installation
|
||||
|
||||
---
|
||||
|
||||
## What's Pending 🔄
|
||||
|
||||
### Manual Steps Required (Home Assistant UI)
|
||||
|
||||
These steps require access to the Home Assistant web interface at http://10.0.0.199:8123:
|
||||
|
||||
1. **Install OpenClaw Conversation Component**
|
||||
- Copy component to HA server's `/config/custom_components/`
|
||||
- Restart Home Assistant
|
||||
- See: [`homeai-voice/VOICE_PIPELINE_SETUP.md`](homeai-voice/VOICE_PIPELINE_SETUP.md)
|
||||
|
||||
2. **Add Wyoming Integrations**
|
||||
- Settings → Devices & Services → Add Integration → Wyoming Protocol
|
||||
- Add STT (10.0.0.199:10300)
|
||||
- Add TTS (10.0.0.199:10301)
|
||||
- Add Satellite (10.0.0.199:10700)
|
||||
|
||||
3. **Add OpenClaw Conversation**
|
||||
- Settings → Devices & Services → Add Integration → OpenClaw Conversation
|
||||
- Configure: host=10.0.0.199, port=8080, agent=main
|
||||
|
||||
4. **Create Voice Assistant Pipeline**
|
||||
- Settings → Voice Assistants → Add Assistant
|
||||
- Name: "HomeAI with OpenClaw"
|
||||
- STT: Mac Mini STT
|
||||
- Conversation: OpenClaw Conversation
|
||||
- TTS: Mac Mini TTS
|
||||
- Set as preferred
|
||||
|
||||
5. **Test the Pipeline**
|
||||
- Type test: "What time is it?" in HA Assist
|
||||
- Voice test: "Hey Jarvis, turn on the reading lamp"
|
||||
|
||||
### Future Enhancements
|
||||
|
||||
1. **Chatterbox TTS** - Voice cloning for character personality
2. **Qwen3-TTS** - Alternative voice synthesis via MLX
3. **Custom Wake Word** - Train with character's name
4. **Uptime Kuma** - Add monitoring for all services
|
||||
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────────────┐
|
||||
│ Mac Mini M4 Pro │
|
||||
│ (10.0.0.199) │
|
||||
├──────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
|
||||
│ │ Wyoming │ │ Wyoming │ │ Wyoming │ │
|
||||
│ │ STT │ │ TTS │ │ Satellite │ │
|
||||
│ │ :10300 │ │ :10301 │ │ :10700 │ │
|
||||
│ └─────────────┘ └─────────────┘ └─────────────┘ │
|
||||
│ │
|
||||
│ ┌─────────────┐ ┌─────────────┐ │
|
||||
│ │ OpenClaw │ │ Ollama │ │
|
||||
│ │ Gateway │ │ LLM │ │
|
||||
│ │ :8080 │ │ :11434 │ │
|
||||
│ └─────────────┘ └─────────────┘ │
|
||||
│ │
|
||||
└──────────────────────────────────────────────────────────────┘
|
||||
▲
|
||||
│ Wyoming Protocol + HTTP API
|
||||
│
|
||||
┌──────────────────────────────────────────────────────────────┐
|
||||
│ Home Assistant Server │
|
||||
│ (10.0.0.199) │
|
||||
├──────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────┐ │
|
||||
│ │ Voice Assistant Pipeline │ │
|
||||
│ │ │ │
|
||||
│ │ Wyoming STT → OpenClaw Conversation → Wyoming TTS │ │
|
||||
│ └─────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────┐ │
|
||||
│ │ OpenClaw Conversation Custom Component │ │
|
||||
│ │ (Routes to OpenClaw Gateway on Mac Mini) │ │
|
||||
│ └─────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
└──────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Voice Flow Example
|
||||
|
||||
**User**: "Hey Jarvis, turn on the reading lamp"
|
||||
|
||||
1. **Wake Word Detection** (Wyoming Satellite)
|
||||
- Detects "Hey Jarvis"
|
||||
- Starts recording audio
|
||||
|
||||
2. **Speech-to-Text** (Wyoming STT)
|
||||
- Transcribes: "turn on the reading lamp"
|
||||
- Sends text to Home Assistant
|
||||
|
||||
3. **Conversation Processing** (HA → OpenClaw)
|
||||
- HA Voice Pipeline receives text
|
||||
- Routes to OpenClaw Conversation agent
|
||||
- OpenClaw Gateway processes request
|
||||
|
||||
4. **LLM Processing** (Ollama)
|
||||
- llama3.3:70b generates response
|
||||
- Identifies intent: control light
|
||||
- Calls home-assistant skill
|
||||
|
||||
5. **Action Execution** (Home Assistant API)
|
||||
- OpenClaw calls HA REST API
|
||||
- Turns on "reading lamp" entity
|
||||
- Returns confirmation
|
||||
|
||||
6. **Text-to-Speech** (Wyoming TTS)
|
||||
- Generates audio: "I've turned on the reading lamp"
|
||||
- Sends to Wyoming Satellite
|
||||
|
||||
7. **Audio Playback** (Mac Mini Speaker)
|
||||
- Plays confirmation audio
|
||||
- User hears response
|
||||
|
||||
**Total Latency**: Target < 5 seconds
|
||||
|
||||
---
|
||||
|
||||
## Service Management
|
||||
|
||||
### Check All Services
|
||||
|
||||
```bash
|
||||
# Quick health check
|
||||
./homeai-voice/scripts/test-services.sh
|
||||
|
||||
# Individual service status
|
||||
launchctl list | grep homeai
|
||||
```
|
||||
|
||||
### Restart a Service
|
||||
|
||||
```bash
|
||||
# Example: Restart STT
|
||||
launchctl unload ~/Library/LaunchAgents/com.homeai.wyoming-stt.plist
|
||||
launchctl load ~/Library/LaunchAgents/com.homeai.wyoming-stt.plist
|
||||
```
|
||||
|
||||
### View Logs
|
||||
|
||||
```bash
|
||||
# STT logs
|
||||
tail -f /tmp/homeai-wyoming-stt.log
|
||||
|
||||
# TTS logs
|
||||
tail -f /tmp/homeai-wyoming-tts.log
|
||||
|
||||
# Satellite logs
|
||||
tail -f /tmp/homeai-wyoming-satellite.log
|
||||
|
||||
# OpenClaw logs
|
||||
tail -f /tmp/homeai-openclaw.log
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Key Documentation
|
||||
|
||||
| Document | Purpose |
|
||||
|----------|---------|
|
||||
| [`homeai-voice/VOICE_PIPELINE_SETUP.md`](homeai-voice/VOICE_PIPELINE_SETUP.md) | Complete setup guide with step-by-step HA configuration |
|
||||
| [`homeai-voice/RESUME_WORK.md`](homeai-voice/RESUME_WORK.md) | Quick reference for resuming work |
|
||||
| [`homeai-agent/custom_components/openclaw_conversation/README.md`](homeai-agent/custom_components/openclaw_conversation/README.md) | Custom component documentation |
|
||||
| [`plans/ha-voice-pipeline-implementation.md`](plans/ha-voice-pipeline-implementation.md) | Detailed implementation plan |
|
||||
| [`plans/voice-loop-integration.md`](plans/voice-loop-integration.md) | Architecture options and decisions |
|
||||
|
||||
---
|
||||
|
||||
## Testing
|
||||
|
||||
### Automated Tests
|
||||
|
||||
```bash
|
||||
# Service health check
|
||||
./homeai-voice/scripts/test-services.sh
|
||||
|
||||
# OpenClaw test
|
||||
openclaw agent --message "What time is it?" --agent main
|
||||
|
||||
# Home Assistant skill test
|
||||
openclaw agent --message "Turn on the reading lamp" --agent main
|
||||
```
|
||||
|
||||
### Manual Tests
|
||||
|
||||
1. **Type Test** (HA Assist)
|
||||
- Open HA UI → Click Assist icon
|
||||
- Type: "What time is it?"
|
||||
- Expected: Hear spoken response
|
||||
|
||||
2. **Voice Test** (Wyoming Satellite)
|
||||
- Say: "Hey Jarvis"
|
||||
- Wait for beep
|
||||
- Say: "What time is it?"
|
||||
- Expected: Hear spoken response
|
||||
|
||||
3. **Home Control Test**
|
||||
- Say: "Hey Jarvis"
|
||||
- Say: "Turn on the reading lamp"
|
||||
- Expected: Light turns on + confirmation
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Services Not Running
|
||||
|
||||
```bash
|
||||
# Check launchd
|
||||
launchctl list | grep homeai
|
||||
|
||||
# Reload all services
|
||||
./homeai-voice/scripts/load-all-launchd.sh
|
||||
```
|
||||
|
||||
### Network Issues
|
||||
|
||||
```bash
|
||||
# Test from Mac Mini to HA
|
||||
curl http://10.0.0.199:8123/api/
|
||||
|
||||
# Test ports
|
||||
nc -z localhost 10300 # STT
|
||||
nc -z localhost 10301 # TTS
|
||||
nc -z localhost 10700 # Satellite
|
||||
nc -z localhost 8080 # OpenClaw
|
||||
```
|
||||
|
||||
### Audio Issues
|
||||
|
||||
```bash
|
||||
# Test microphone
|
||||
rec -r 16000 -c 1 test.wav trim 0 5
|
||||
|
||||
# Test speaker
|
||||
afplay /System/Library/Sounds/Glass.aiff
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Next Actions
|
||||
|
||||
1. **Access Home Assistant UI** at http://10.0.0.199:8123
|
||||
2. **Follow setup guide**: [`homeai-voice/VOICE_PIPELINE_SETUP.md`](homeai-voice/VOICE_PIPELINE_SETUP.md)
|
||||
3. **Install OpenClaw component** (see Step 1 in setup guide)
|
||||
4. **Configure Wyoming integrations** (see Step 2 in setup guide)
|
||||
5. **Create voice pipeline** (see Step 4 in setup guide)
|
||||
6. **Test end-to-end** (see Step 5 in setup guide)
|
||||
|
||||
---
|
||||
|
||||
## Success Metrics
|
||||
|
||||
- [ ] All services show green in health check
|
||||
- [ ] Wyoming integrations appear in HA
|
||||
- [ ] OpenClaw Conversation agent registered
|
||||
- [ ] Voice pipeline created and set as default
|
||||
- [ ] Typed query returns spoken response
|
||||
- [ ] Voice query via satellite works
|
||||
- [ ] Home control via voice works
|
||||
- [ ] End-to-end latency < 5 seconds
|
||||
- [ ] Services survive Mac Mini reboot
|
||||
|
||||
---
|
||||
|
||||
## Project Context
|
||||
|
||||
This is **Phase 2** of the HomeAI project. See [`TODO.md`](TODO.md) for the complete project roadmap.
|
||||
|
||||
**Previous Phase**: Phase 1 - Foundation (Infrastructure + LLM) ✅ Complete
|
||||
**Current Phase**: Phase 2 - Voice Pipeline 🔄 Backend Complete, HA Integration Pending
|
||||
**Next Phase**: Phase 3 - Agent & Character (mem0, character system, workflows)
|
||||
@@ -1,37 +1,38 @@
|
||||
# P4: homeai-agent — AI Agent, Skills & Automation
|
||||
|
||||
> Phase 3 | Depends on: P1 (HA), P2 (Ollama), P3 (Wyoming/TTS), P5 (character JSON)
|
||||
|
||||
---
|
||||
|
||||
## Goal
|
||||
|
||||
OpenClaw running as the primary AI agent: receives voice/text input, loads character persona, calls tools (skills), manages memory (mem0), dispatches responses (TTS, HA actions, VTube expressions). n8n handles scheduled/automated workflows.
|
||||
> Phase 4 | Depends on: P1 (HA), P2 (Ollama), P3 (Wyoming/TTS), P5 (character JSON)
|
||||
> Status: **COMPLETE** (all skills implemented)
|
||||
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Voice input (text from P3 Wyoming STT)
|
||||
Voice input (text from Wyoming STT via HA pipeline)
|
||||
↓
|
||||
OpenClaw API (port 8080)
|
||||
↓ loads character JSON from P5
|
||||
System prompt construction
|
||||
↓
|
||||
Ollama LLM (P2) — llama3.3:70b
|
||||
↓ response + tool calls
|
||||
Skill dispatcher
|
||||
├── home_assistant.py → HA REST API (P1)
|
||||
├── memory.py → mem0 (local)
|
||||
├── vtube_studio.py → VTube WS (P7)
|
||||
├── comfyui.py → ComfyUI API (P8)
|
||||
├── music.py → Music Assistant (Phase 7)
|
||||
└── weather.py → HA sensor data
|
||||
OpenClaw HTTP Bridge (port 8081)
|
||||
↓ resolves character, loads memories, checks mode
|
||||
System prompt construction (profile + memories)
|
||||
↓ checks active-mode.json for model routing
|
||||
OpenClaw CLI → LLM (Ollama local or cloud API)
|
||||
↓ response + tool calls via exec
|
||||
Skill dispatcher (CLIs on PATH)
|
||||
├── ha-ctl → Home Assistant REST API
|
||||
├── memory-ctl → JSON memory files
|
||||
├── monitor-ctl → service health checks
|
||||
├── character-ctl → character switching
|
||||
├── routine-ctl → scenes, scripts, multi-step routines
|
||||
├── music-ctl → media player control
|
||||
├── workflow-ctl → n8n workflow triggering
|
||||
├── gitea-ctl → Gitea repo/issue queries
|
||||
├── calendar-ctl → HA calendar + voice reminders
|
||||
├── mode-ctl → public/private LLM routing
|
||||
├── gaze-ctl → image generation
|
||||
└── vtube-ctl → VTube Studio expressions
|
||||
↓ final response text
|
||||
TTS dispatch:
|
||||
├── Chatterbox (voice clone, if active)
|
||||
└── Kokoro (via Wyoming, fallback)
|
||||
TTS dispatch (via active-tts-voice.json):
|
||||
├── Kokoro (local, Wyoming)
|
||||
└── ElevenLabs (cloud API)
|
||||
↓
|
||||
Audio playback to appropriate room
|
||||
```
|
||||
@@ -40,296 +41,148 @@ OpenClaw API (port 8080)
|
||||
|
||||
## OpenClaw Setup
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
# Confirm OpenClaw supports Ollama — check repo for latest install method
|
||||
pip install openclaw
|
||||
# or
|
||||
git clone https://github.com/<openclaw-repo>/openclaw
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
**Key question:** Verify OpenClaw's Ollama/OpenAI-compatible backend support before installation. If OpenClaw doesn't support local Ollama natively, use a thin adapter layer pointing its OpenAI endpoint at `http://localhost:11434/v1`.
|
||||
|
||||
### Config — `~/.openclaw/config.yaml`
|
||||
|
||||
```yaml
|
||||
version: 1
|
||||
|
||||
llm:
|
||||
provider: ollama # or openai-compatible
|
||||
base_url: http://localhost:11434/v1
|
||||
model: llama3.3:70b
|
||||
fast_model: qwen2.5:7b # used for quick intent classification
|
||||
|
||||
character:
|
||||
active: aria
|
||||
config_dir: ~/.openclaw/characters/
|
||||
|
||||
memory:
|
||||
provider: mem0
|
||||
store_path: ~/.openclaw/memory/
|
||||
embedding_model: nomic-embed-text
|
||||
embedding_url: http://localhost:11434/v1
|
||||
|
||||
api:
|
||||
host: 0.0.0.0
|
||||
port: 8080
|
||||
|
||||
tts:
|
||||
primary: chatterbox # when voice clone active
|
||||
fallback: kokoro-wyoming # Wyoming TTS endpoint
|
||||
wyoming_tts_url: tcp://localhost:10301
|
||||
|
||||
wake:
|
||||
endpoint: /wake # openWakeWord POSTs here to trigger listening
|
||||
```
|
||||
- **Runtime:** Node.js global install at `/opt/homebrew/bin/openclaw` (v2026.3.2)
|
||||
- **Config:** `~/.openclaw/openclaw.json`
|
||||
- **Gateway:** port 8080, mode local, launchd: `com.homeai.openclaw`
|
||||
- **Default model:** `ollama/qwen3.5:35b-a3b` (MoE, 35B total, 3B active, 26.7 tok/s)
|
||||
- **Cloud models (public mode):** `anthropic/claude-sonnet-4-20250514`, `openai/gpt-4o`
|
||||
- **Critical:** `commands.native: true` in config (enables exec tool for CLI skills)
|
||||
- **Critical:** `contextWindow: 32768` for large models (prevents GPU OOM)
|
||||
|
||||
---
|
||||
|
||||
## Skills
|
||||
## Skills (13 total)
|
||||
|
||||
All skills live in `~/.openclaw/skills/` (symlinked from `homeai-agent/skills/`).
|
||||
All skills follow the same pattern:
|
||||
- `~/.openclaw/skills/<name>/SKILL.md` — metadata + agent instructions
|
||||
- `~/.openclaw/skills/<name>/<tool>` — executable Python CLI (stdlib only)
|
||||
- Symlinked to `/opt/homebrew/bin/` for PATH access
|
||||
- Agent invokes via `exec` tool
|
||||
- Documented in `~/.openclaw/workspace/TOOLS.md`
|
||||
|
||||
### `home_assistant.py`
|
||||
### Existing Skills (4)
|
||||
|
||||
Wraps the HA REST API for common smart home actions.
|
||||
| Skill | CLI | Description |
|
||||
|-------|-----|-------------|
|
||||
| home-assistant | `ha-ctl` | Smart home device control |
|
||||
| image-generation | `gaze-ctl` | Image generation via ComfyUI/GAZE |
|
||||
| voice-assistant | (none) | Voice pipeline handling |
|
||||
| vtube-studio | `vtube-ctl` | VTube Studio expression control |
|
||||
|
||||
**Functions:**
|
||||
- `turn_on(entity_id, **kwargs)` — lights, switches, media players
|
||||
- `turn_off(entity_id)`
|
||||
- `toggle(entity_id)`
|
||||
- `set_light(entity_id, brightness=None, color_temp=None, rgb_color=None)`
|
||||
- `run_scene(scene_id)`
|
||||
- `get_state(entity_id)` → returns state + attributes
|
||||
- `list_entities(domain=None)` → returns entity list
|
||||
### New Skills (9) — Added 2026-03-17
|
||||
|
||||
Uses `HA_URL` and `HA_TOKEN` from `.env.services`.
|
||||
| Skill | CLI | Description |
|
||||
|-------|-----|-------------|
|
||||
| memory | `memory-ctl` | Store/search/recall memories |
|
||||
| service-monitor | `monitor-ctl` | Service health checks |
|
||||
| character | `character-ctl` | Character switching |
|
||||
| routine | `routine-ctl` | Scenes and multi-step routines |
|
||||
| music | `music-ctl` | Media player control |
|
||||
| workflow | `workflow-ctl` | n8n workflow management |
|
||||
| gitea | `gitea-ctl` | Gitea repo/issue/PR queries |
|
||||
| calendar | `calendar-ctl` | Calendar events and voice reminders |
|
||||
| mode | `mode-ctl` | Public/private LLM routing |
|
||||
|
||||
### `memory.py`
|
||||
|
||||
Wraps mem0 for persistent long-term memory.
|
||||
|
||||
**Functions:**
|
||||
- `remember(text, category=None)` — store a memory
|
||||
- `recall(query, limit=5)` — semantic search over memories
|
||||
- `forget(memory_id)` — delete a specific memory
|
||||
- `list_recent(n=10)` — list most recent memories
|
||||
|
||||
mem0 uses `nomic-embed-text` via Ollama for embeddings.
|
||||
|
||||
### `weather.py`
|
||||
|
||||
Pulls weather data from Home Assistant sensors (local weather station or HA weather integration).
|
||||
|
||||
**Functions:**
|
||||
- `get_current()` → temp, humidity, conditions
|
||||
- `get_forecast(days=3)` → forecast array
|
||||
|
||||
### `timer.py`
|
||||
|
||||
Simple timer/reminder management.
|
||||
|
||||
**Functions:**
|
||||
- `set_timer(duration_seconds, label=None)` → fires HA notification/TTS on expiry
|
||||
- `set_reminder(datetime_str, message)` → schedules future TTS playback
|
||||
- `list_timers()`
|
||||
- `cancel_timer(timer_id)`
|
||||
|
||||
### `music.py` (stub — completed in Phase 7)
|
||||
|
||||
```python
|
||||
def play(query: str): ... # "play jazz" → Music Assistant
|
||||
def pause(): ...
|
||||
def skip(): ...
|
||||
def set_volume(level: int): ... # 0-100
|
||||
```
|
||||
|
||||
### `vtube_studio.py` (implemented in P7)
|
||||
|
||||
Stub in P4, full implementation in P7:
|
||||
```python
|
||||
def trigger_expression(event: str): ... # "thinking", "happy", etc.
|
||||
def set_parameter(name: str, value: float): ...
|
||||
```
|
||||
|
||||
### `comfyui.py` (implemented in P8)
|
||||
|
||||
Stub in P4, full implementation in P8:
|
||||
```python
|
||||
def generate(workflow: str, params: dict) -> str: ... # returns image path
|
||||
```
|
||||
See `SKILLS_GUIDE.md` for full user documentation.
|
||||
|
||||
---
|
||||
|
||||
## mem0 — Long-Term Memory
|
||||
## HTTP Bridge
|
||||
|
||||
### Setup
|
||||
**File:** `openclaw-http-bridge.py` (runs in homeai-voice-env)
|
||||
**Port:** 8081, launchd: `com.homeai.openclaw-bridge`
|
||||
|
||||
```bash
|
||||
pip install mem0ai
|
||||
```
|
||||
|
||||
### Config
|
||||
|
||||
```python
|
||||
from mem0 import Memory
|
||||
|
||||
config = {
|
||||
"llm": {
|
||||
"provider": "ollama",
|
||||
"config": {
|
||||
"model": "llama3.3:70b",
|
||||
"ollama_base_url": "http://localhost:11434",
|
||||
}
|
||||
},
|
||||
"embedder": {
|
||||
"provider": "ollama",
|
||||
"config": {
|
||||
"model": "nomic-embed-text",
|
||||
"ollama_base_url": "http://localhost:11434",
|
||||
}
|
||||
},
|
||||
"vector_store": {
|
||||
"provider": "chroma",
|
||||
"config": {
|
||||
"collection_name": "homeai_memory",
|
||||
"path": "~/.openclaw/memory/chroma",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
memory = Memory.from_config(config)
|
||||
```
|
||||
|
||||
> **Decision point:** Start with Chroma (local file-based). If semantic recall quality is poor, migrate to Qdrant (Docker container).
|
||||
|
||||
### Backup
|
||||
|
||||
Daily cron (via launchd) commits mem0 data to Gitea:
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
cd ~/.openclaw/memory
|
||||
git add .
|
||||
git commit -m "mem0 backup $(date +%Y-%m-%d)"
|
||||
git push origin main
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## n8n Workflows
|
||||
|
||||
n8n runs in Docker (deployed in P1). Workflows exported as JSON and stored in `homeai-agent/workflows/`.
|
||||
|
||||
### Starter Workflows
|
||||
|
||||
**`morning-briefing.json`**
|
||||
- Trigger: time-based (e.g., 7:30 AM on weekdays)
|
||||
- Steps: fetch weather → fetch calendar events → compose briefing → POST to OpenClaw TTS → speak aloud
|
||||
|
||||
**`notification-router.json`**
|
||||
- Trigger: HA webhook (new notification)
|
||||
- Steps: classify urgency → if high: TTS immediately; if low: queue for next interaction
|
||||
|
||||
**`memory-backup.json`**
|
||||
- Trigger: daily schedule
|
||||
- Steps: commit mem0 data to Gitea
|
||||
|
||||
### n8n ↔ OpenClaw Integration
|
||||
|
||||
OpenClaw exposes a webhook endpoint that n8n can call to trigger TTS or run a skill:
|
||||
|
||||
```
|
||||
POST http://localhost:8080/speak
|
||||
{
|
||||
"text": "Good morning. It is 7:30 and the weather is...",
|
||||
"room": "all"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## API Surface (OpenClaw)
|
||||
|
||||
Key endpoints consumed by other projects:
|
||||
### Endpoints
|
||||
|
||||
| Endpoint | Method | Description |
|
||||
|---|---|---|
|
||||
| `/chat` | POST | Send text, get response (+ fires skills) |
|
||||
| `/wake` | POST | Wake word trigger from openWakeWord |
|
||||
| `/speak` | POST | TTS only — no LLM, just speak text |
|
||||
| `/skill/<name>` | POST | Call a specific skill directly |
|
||||
| `/memory` | GET/POST | Read/write memories |
|
||||
|----------|--------|-------------|
|
||||
| `/api/agent/message` | POST | Send message → LLM → response |
|
||||
| `/api/tts` | POST | Text-to-speech (Kokoro or ElevenLabs) |
|
||||
| `/api/stt` | POST | Speech-to-text (Wyoming/Whisper) |
|
||||
| `/wake` | POST | Wake word notification |
|
||||
| `/status` | GET | Health check |
|
||||
|
||||
---
|
||||
### Request Flow
|
||||
|
||||
## Directory Layout
|
||||
1. Resolve character: explicit `character_id` > `satellite_id` mapping > default
|
||||
2. Build system prompt: profile fields + metadata + personal/general memories
|
||||
3. Write TTS config to `active-tts-voice.json`
|
||||
4. Load mode from `active-mode.json`, resolve model (private → local, public → cloud)
|
||||
5. Call OpenClaw CLI with `--model` flag if public mode
|
||||
6. Detect when the model promises an action but never calls the exec tool, and re-prompt it to actually run the command
|
||||
7. Return response
|
||||
|
||||
```
|
||||
homeai-agent/
|
||||
├── skills/
|
||||
│ ├── home_assistant.py
|
||||
│ ├── memory.py
|
||||
│ ├── weather.py
|
||||
│ ├── timer.py
|
||||
│ ├── music.py # stub
|
||||
│ ├── vtube_studio.py # stub
|
||||
│ └── comfyui.py # stub
|
||||
├── workflows/
|
||||
│ ├── morning-briefing.json
|
||||
│ ├── notification-router.json
|
||||
│ └── memory-backup.json
|
||||
└── config/
|
||||
├── config.yaml.example
|
||||
└── mem0-config.py
|
||||
```
|
||||
### Timeout Strategy
|
||||
|
||||
| State | Timeout |
|
||||
|-------|---------|
|
||||
| Model warm (loaded in VRAM) | 120s |
|
||||
| Model cold (loading) | 180s |
|
||||
|
||||
---
|
||||
|
||||
## Interface Contracts
|
||||
## Daemons
|
||||
|
||||
**Consumes:**
|
||||
- Ollama API: `http://localhost:11434/v1`
|
||||
- HA API: `$HA_URL` with `$HA_TOKEN`
|
||||
- Wyoming TTS: `tcp://localhost:10301`
|
||||
- Character JSON: `~/.openclaw/characters/<active>.json` (from P5)
|
||||
|
||||
**Exposes:**
|
||||
- OpenClaw HTTP API: `http://localhost:8080` — consumed by P3 (voice), P7 (visual triggers), P8 (image skill)
|
||||
|
||||
**Add to `.env.services`:**
|
||||
```dotenv
|
||||
OPENCLAW_URL=http://localhost:8080
|
||||
```
|
||||
| Daemon | Plist | Purpose |
|
||||
|--------|-------|---------|
|
||||
| `com.homeai.openclaw` | `launchd/com.homeai.openclaw.plist` | OpenClaw gateway (port 8080) |
|
||||
| `com.homeai.openclaw-bridge` | `launchd/com.homeai.openclaw-bridge.plist` | HTTP bridge (port 8081) |
|
||||
| `com.homeai.reminder-daemon` | `launchd/com.homeai.reminder-daemon.plist` | Voice reminder checker (60s interval) |
|
||||
|
||||
---
|
||||
|
||||
## Implementation Steps
|
||||
## Data Files
|
||||
|
||||
- [ ] Confirm OpenClaw installation method and Ollama compatibility
|
||||
- [ ] Install OpenClaw, write `config.yaml` pointing at Ollama and HA
|
||||
- [ ] Verify OpenClaw responds to a basic text query via `/chat`
|
||||
- [ ] Write `home_assistant.py` skill — test lights on/off via voice
|
||||
- [ ] Write `memory.py` skill — test store and recall
|
||||
- [ ] Write `weather.py` skill — verify HA weather sensor data
|
||||
- [ ] Write `timer.py` skill — test set/fire a timer
|
||||
- [ ] Write skill stubs: `music.py`, `vtube_studio.py`, `comfyui.py`
|
||||
- [ ] Set up mem0 with Chroma backend, test semantic recall
|
||||
- [ ] Write and test memory backup launchd job
|
||||
- [ ] Deploy n8n via Docker (P1 task if not done)
|
||||
- [ ] Build morning briefing n8n workflow
|
||||
- [ ] Symlink `homeai-agent/skills/` → `~/.openclaw/skills/`
|
||||
- [ ] Verify full voice → agent → HA action flow (with P3 pipeline)
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `~/homeai-data/memories/personal/*.json` | Per-character memories |
|
||||
| `~/homeai-data/memories/general.json` | Shared general memories |
|
||||
| `~/homeai-data/characters/*.json` | Character profiles (schema v2) |
|
||||
| `~/homeai-data/satellite-map.json` | Satellite → character mapping |
|
||||
| `~/homeai-data/active-tts-voice.json` | Current TTS engine/voice |
|
||||
| `~/homeai-data/active-mode.json` | Public/private mode state |
|
||||
| `~/homeai-data/routines/*.json` | Local routine definitions |
|
||||
| `~/homeai-data/reminders.json` | Pending voice reminders |
|
||||
| `~/homeai-data/conversations/*.json` | Chat conversation history |
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
## Environment Variables (OpenClaw Plist)
|
||||
|
||||
- [ ] "Turn on the living room lights" → lights turn on via HA
|
||||
- [ ] "Remember that I prefer jazz in the mornings" → mem0 stores it; "What do I like in the mornings?" → recalls it
|
||||
- [ ] Morning briefing n8n workflow fires on schedule and speaks via TTS
|
||||
- [ ] OpenClaw `/status` returns healthy
|
||||
- [ ] OpenClaw survives Mac Mini reboot (launchd or Docker — TBD based on OpenClaw's preferred run method)
|
||||
| Variable | Purpose |
|
||||
|----------|---------|
|
||||
| `HASS_TOKEN` / `HA_TOKEN` | Home Assistant API token |
|
||||
| `HA_URL` | Home Assistant URL |
|
||||
| `GAZE_API_KEY` | Image generation API key |
|
||||
| `N8N_API_KEY` | n8n automation API key |
|
||||
| `GITEA_TOKEN` | Gitea API token |
|
||||
| `ANTHROPIC_API_KEY` | Claude API key (public mode) |
|
||||
| `OPENAI_API_KEY` | OpenAI API key (public mode) |
|
||||
|
||||
---
|
||||
|
||||
## Implementation Status
|
||||
|
||||
- [x] OpenClaw installed and configured
|
||||
- [x] HTTP bridge with character resolution and memory injection
|
||||
- [x] ha-ctl — smart home control
|
||||
- [x] gaze-ctl — image generation
|
||||
- [x] vtube-ctl — VTube Studio expressions
|
||||
- [x] memory-ctl — memory store/search/recall
|
||||
- [x] monitor-ctl — service health checks
|
||||
- [x] character-ctl — character switching
|
||||
- [x] routine-ctl — scenes and multi-step routines
|
||||
- [x] music-ctl — media player control
|
||||
- [x] workflow-ctl — n8n workflow triggering
|
||||
- [x] gitea-ctl — Gitea integration
|
||||
- [x] calendar-ctl — calendar + voice reminders
|
||||
- [x] mode-ctl — public/private LLM routing
|
||||
- [x] Bridge mode routing (active-mode.json → --model flag)
|
||||
- [x] Cloud providers in openclaw.json (Anthropic, OpenAI)
|
||||
- [x] Dashboard /api/mode endpoint
|
||||
- [x] Reminder daemon (com.homeai.reminder-daemon)
|
||||
- [x] TOOLS.md updated with all skills
|
||||
- [ ] Set N8N_API_KEY (requires generating in n8n UI)
|
||||
- [ ] Set GITEA_TOKEN (requires generating in Gitea UI)
|
||||
- [ ] Set ANTHROPIC_API_KEY / OPENAI_API_KEY for public mode
|
||||
- [ ] End-to-end voice test of each skill
|
||||
|
||||
386
homeai-agent/SKILLS_GUIDE.md
Normal file
386
homeai-agent/SKILLS_GUIDE.md
Normal file
@@ -0,0 +1,386 @@
|
||||
# OpenClaw Skills — User Guide
|
||||
|
||||
> All skills are invoked by voice or chat. Say a natural command and the AI agent will route it to the right tool automatically.
|
||||
|
||||
---
|
||||
|
||||
## Quick Reference
|
||||
|
||||
| Skill | CLI | What it does |
|
||||
|-------|-----|-------------|
|
||||
| Home Assistant | `ha-ctl` | Control lights, switches, sensors, climate |
|
||||
| Image Generation | `gaze-ctl` | Generate images via ComfyUI/GAZE |
|
||||
| Memory | `memory-ctl` | Store and recall things about you |
|
||||
| Service Monitor | `monitor-ctl` | Check if services are running |
|
||||
| Character Switcher | `character-ctl` | Switch AI personalities |
|
||||
| Routines & Scenes | `routine-ctl` | Create and trigger multi-step automations |
|
||||
| Music | `music-ctl` | Play, pause, skip, volume control |
|
||||
| n8n Workflows | `workflow-ctl` | Trigger automation workflows |
|
||||
| Gitea | `gitea-ctl` | Query repos, commits, issues |
|
||||
| Calendar & Reminders | `calendar-ctl` | View calendar, set voice reminders |
|
||||
| Public/Private Mode | `mode-ctl` | Route to local or cloud LLMs |
|
||||
|
||||
---
|
||||
|
||||
## Phase A — Core Skills
|
||||
|
||||
### Memory (`memory-ctl`)
|
||||
|
||||
The agent can remember things about you and recall them later. Memories persist across conversations and are visible in the dashboard.
|
||||
|
||||
**Voice examples:**
|
||||
- "Remember that my favorite color is blue"
|
||||
- "I take my coffee black"
|
||||
- "What do you know about me?"
|
||||
- "Forget that I said I like jazz"
|
||||
|
||||
**CLI usage:**
|
||||
```bash
|
||||
memory-ctl add personal "User's favorite color is blue" --category preference
|
||||
memory-ctl add general "Living room speaker is a Sonos" --category fact
|
||||
memory-ctl search "coffee"
|
||||
memory-ctl list --type personal
|
||||
memory-ctl delete <memory_id>
|
||||
```
|
||||
|
||||
**Categories:** `preference`, `fact`, `routine`
|
||||
|
||||
**How it works:** Memories are stored as JSON in `~/homeai-data/memories/`. Personal memories are per-character (each character has their own relationship with you). General memories are shared across all characters.
|
||||
|
||||
---
|
||||
|
||||
### Service Monitor (`monitor-ctl`)
|
||||
|
||||
Ask the assistant if everything is healthy, check specific services, or see what models are loaded.
|
||||
|
||||
**Voice examples:**
|
||||
- "Is everything running?"
|
||||
- "What models are loaded?"
|
||||
- "Is Home Assistant up?"
|
||||
- "Show me the Docker containers"
|
||||
|
||||
**CLI usage:**
|
||||
```bash
|
||||
monitor-ctl status # Full health check (all services)
|
||||
monitor-ctl check ollama # Single service
|
||||
monitor-ctl ollama # Models loaded, VRAM usage
|
||||
monitor-ctl docker # Docker container status
|
||||
```
|
||||
|
||||
**Services checked:** Ollama, OpenClaw Bridge, OpenClaw Gateway, Wyoming STT, Wyoming TTS, Dashboard, n8n, Uptime Kuma, Home Assistant, Gitea
|
||||
|
||||
---
|
||||
|
||||
### Character Switcher (`character-ctl`)
|
||||
|
||||
Switch between AI personalities on the fly. Each character has their own voice, personality, and memories.
|
||||
|
||||
**Voice examples:**
|
||||
- "Talk to Aria"
|
||||
- "Switch to Sucy"
|
||||
- "Who can I talk to?"
|
||||
- "Who am I talking to?"
|
||||
- "Tell me about Aria"
|
||||
|
||||
**CLI usage:**
|
||||
```bash
|
||||
character-ctl list # See all characters
|
||||
character-ctl active # Who is the current default
|
||||
character-ctl switch "Aria" # Switch (fuzzy name matching)
|
||||
character-ctl info "Sucy" # Character profile
|
||||
character-ctl map homeai-kitchen.local aria_123 # Map a satellite to a character
|
||||
```
|
||||
|
||||
**How it works:** Switching updates the default character in `satellite-map.json` and writes the TTS voice config. The new character takes effect on the next request.
|
||||
|
||||
---
|
||||
|
||||
## Phase B — Home Assistant Extensions
|
||||
|
||||
### Routines & Scenes (`routine-ctl`)
|
||||
|
||||
Create and trigger Home Assistant scenes and multi-step routines by voice.
|
||||
|
||||
**Voice examples:**
|
||||
- "Activate movie mode"
|
||||
- "Run the bedtime routine"
|
||||
- "What scenes do I have?"
|
||||
- "Create a morning routine"
|
||||
|
||||
**CLI usage:**
|
||||
```bash
|
||||
routine-ctl list-scenes # HA scenes
|
||||
routine-ctl list-scripts # HA scripts
|
||||
routine-ctl trigger "movie_mode" # Activate scene/script
|
||||
routine-ctl create-scene "cozy" --entities '[{"entity_id":"light.lamp","state":"on","brightness":80}]'
|
||||
routine-ctl create-routine "bedtime" --steps '[
|
||||
{"type":"ha","cmd":"off \"All Lights\""},
|
||||
{"type":"delay","seconds":2},
|
||||
{"type":"tts","text":"Good night!"}
|
||||
]'
|
||||
routine-ctl run "bedtime" # Execute routine
|
||||
routine-ctl list-routines # List local routines
|
||||
routine-ctl delete-routine "bedtime" # Remove routine
|
||||
```
|
||||
|
||||
**Step types:**
|
||||
| Type | Description | Fields |
|
||||
|------|-------------|--------|
|
||||
| `scene` | Trigger an HA scene | `target` (scene name) |
|
||||
| `ha` | Run an ha-ctl command | `cmd` (e.g. `off "Lamp"`) |
|
||||
| `delay` | Wait between steps | `seconds` |
|
||||
| `tts` | Speak text aloud | `text` |
|
||||
|
||||
**Storage:** Routines are saved as JSON in `~/homeai-data/routines/`.
|
||||
|
||||
---
|
||||
|
||||
### Music Control (`music-ctl`)
|
||||
|
||||
Control music playback through Home Assistant media players — works with Spotify, Music Assistant, Chromecast, and any HA media player.
|
||||
|
||||
**Voice examples:**
|
||||
- "Play some jazz"
|
||||
- "Pause the music"
|
||||
- "Next song"
|
||||
- "What's playing?"
|
||||
- "Turn the volume to 50"
|
||||
- "Play Bohemian Rhapsody on the kitchen speaker"
|
||||
- "Shuffle on"
|
||||
|
||||
**CLI usage:**
|
||||
```bash
|
||||
music-ctl players # List available players
|
||||
music-ctl play "jazz" # Search and play
|
||||
music-ctl play # Resume paused playback
|
||||
music-ctl pause # Pause
|
||||
music-ctl next # Skip to next
|
||||
music-ctl prev # Go to previous
|
||||
music-ctl volume 50 # Set volume (0-100)
|
||||
music-ctl now-playing # Current track info
|
||||
music-ctl shuffle on # Enable shuffle
|
||||
music-ctl play "rock" --player media_player.kitchen # Target specific player
|
||||
```
|
||||
|
||||
**How it works:** All commands go through HA's `media_player` services. If `--player` is omitted, commands target the first active (playing or paused) player. Multi-room audio works through Snapcast zones, which appear as separate `media_player` entities.
|
||||
|
||||
**Prerequisites:** At least one media player configured in Home Assistant (Spotify integration, Music Assistant, or Chromecast).
|
||||
|
||||
---
|
||||
|
||||
## Phase C — External Service Skills
|
||||
|
||||
### n8n Workflows (`workflow-ctl`)
|
||||
|
||||
List and trigger n8n automation workflows by voice.
|
||||
|
||||
**Voice examples:**
|
||||
- "Run the backup workflow"
|
||||
- "What workflows do I have?"
|
||||
- "Did the last workflow succeed?"
|
||||
|
||||
**CLI usage:**
|
||||
```bash
|
||||
workflow-ctl list # All workflows
|
||||
workflow-ctl trigger "backup" # Trigger by name (fuzzy match)
|
||||
workflow-ctl trigger "abc123" --data '{"key":"val"}' # Trigger with data
|
||||
workflow-ctl status <execution_id> # Check execution result
|
||||
workflow-ctl history --limit 5 # Recent executions
|
||||
```
|
||||
|
||||
**Setup required:**
|
||||
1. Generate an API key in n8n: Settings → API → Create API Key
|
||||
2. Set `N8N_API_KEY` in the OpenClaw launchd plist
|
||||
3. Restart OpenClaw: `launchctl kickstart -k gui/501/com.homeai.openclaw`
|
||||
|
||||
---
|
||||
|
||||
### Gitea (`gitea-ctl`)
|
||||
|
||||
Query your self-hosted Gitea repositories, commits, issues, and pull requests.
|
||||
|
||||
**Voice examples:**
|
||||
- "What repos do I have?"
|
||||
- "Show recent commits for homeai"
|
||||
- "Any open issues?"
|
||||
- "Create an issue for the TTS bug"
|
||||
|
||||
**CLI usage:**
|
||||
```bash
|
||||
gitea-ctl repos # List all repos
|
||||
gitea-ctl commits aodhan/homeai --limit 5 # Recent commits
|
||||
gitea-ctl issues aodhan/homeai --state open # Open issues
|
||||
gitea-ctl prs aodhan/homeai # Pull requests
|
||||
gitea-ctl create-issue aodhan/homeai "Bug title" --body "Description here"
|
||||
```
|
||||
|
||||
**Setup required:**
|
||||
1. Generate a token in Gitea: Settings → Applications → Generate Token
|
||||
2. Set `GITEA_TOKEN` in the OpenClaw launchd plist
|
||||
3. Restart OpenClaw
|
||||
|
||||
---
|
||||
|
||||
### Calendar & Reminders (`calendar-ctl`)
|
||||
|
||||
Read calendar events from Home Assistant and set voice reminders that speak via TTS when due.
|
||||
|
||||
**Voice examples:**
|
||||
- "What's on my calendar today?"
|
||||
- "What's coming up this week?"
|
||||
- "Remind me in 30 minutes to check the oven"
|
||||
- "Remind me at 5pm to call mum"
|
||||
- "What reminders do I have?"
|
||||
- "Cancel that reminder"
|
||||
|
||||
**CLI usage:**
|
||||
```bash
|
||||
calendar-ctl today # Today's events
|
||||
calendar-ctl upcoming --days 3 # Next 3 days
|
||||
calendar-ctl add "Dentist" --start 2026-03-18T14:00:00 --end 2026-03-18T15:00:00
|
||||
calendar-ctl remind "Check the oven" --at "in 30 minutes"
|
||||
calendar-ctl remind "Call mum" --at "at 5pm"
|
||||
calendar-ctl remind "Team standup" --at "tomorrow 9am"
|
||||
calendar-ctl reminders # List pending
|
||||
calendar-ctl cancel-reminder <id> # Cancel
|
||||
```
|
||||
|
||||
**Supported time formats:**
|
||||
| Format | Example |
|
||||
|--------|---------|
|
||||
| Relative | `in 30 minutes`, `in 2 hours` |
|
||||
| Absolute | `at 5pm`, `at 17:00`, `at 5:30pm` |
|
||||
| Tomorrow | `tomorrow 9am`, `tomorrow at 14:00` |
|
||||
| Combined | `in 1 hour 30 minutes` |
|
||||
|
||||
**How reminders work:** A background daemon (`com.homeai.reminder-daemon`) checks `~/homeai-data/reminders.json` every 60 seconds. When a reminder is due, it POSTs to the TTS bridge and speaks the reminder aloud. Fired reminders are automatically cleaned up after 24 hours.
|
||||
|
||||
**Prerequisites:** Calendar entity configured in Home Assistant (Google Calendar, CalDAV, or local calendar integration).
|
||||
|
||||
---
|
||||
|
||||
## Phase D — Public/Private Mode
|
||||
|
||||
### Mode Controller (`mode-ctl`)
|
||||
|
||||
Route AI requests to local LLMs (private, no data leaves the machine) or cloud LLMs (public, faster/more capable) with per-category overrides.
|
||||
|
||||
**Voice examples:**
|
||||
- "Switch to public mode"
|
||||
- "Go private"
|
||||
- "What mode am I in?"
|
||||
- "Use Claude for coding"
|
||||
- "Keep health queries private"
|
||||
|
||||
**CLI usage:**
|
||||
```bash
|
||||
mode-ctl status # Current mode and overrides
|
||||
mode-ctl private # All requests → local Ollama
|
||||
mode-ctl public # All requests → cloud LLM
|
||||
mode-ctl set-provider anthropic # Use Claude (default)
|
||||
mode-ctl set-provider openai # Use GPT-4o
|
||||
mode-ctl override coding public # Always use cloud for coding
|
||||
mode-ctl override health private # Always keep health local
|
||||
mode-ctl list-overrides # Show all category rules
|
||||
```
|
||||
|
||||
**Default category rules:**
|
||||
|
||||
| Always Private | Always Public | Follows Global Mode |
|
||||
|---------------|--------------|-------------------|
|
||||
| Personal finance | Web search | General chat |
|
||||
| Health | Coding help | Smart home |
|
||||
| Passwords | Complex reasoning | Music |
|
||||
| Private conversations | Translation | Calendar |
|
||||
|
||||
**How it works:** The HTTP bridge reads `~/homeai-data/active-mode.json` before each request. Based on the mode and any category overrides, it passes `--model` to the OpenClaw CLI to route to either `ollama/qwen3.5:35b-a3b` (private) or `anthropic/claude-sonnet-4-20250514` / `openai/gpt-4o` (public).
|
||||
|
||||
**Setup required for public mode:**
|
||||
1. Set `ANTHROPIC_API_KEY` and/or `OPENAI_API_KEY` in the OpenClaw launchd plist
|
||||
2. Restart OpenClaw: `launchctl kickstart -k gui/501/com.homeai.openclaw`
|
||||
|
||||
**Dashboard:** The mode can also be toggled via the dashboard API at `GET/POST /api/mode`.
|
||||
|
||||
---
|
||||
|
||||
## Administration
|
||||
|
||||
### Adding API Keys
|
||||
|
||||
All API keys are stored in the OpenClaw launchd plist at:
|
||||
```
|
||||
~/gitea/homeai/homeai-agent/launchd/com.homeai.openclaw.plist
|
||||
```
|
||||
|
||||
After editing, deploy and restart:
|
||||
```bash
|
||||
cp ~/gitea/homeai/homeai-agent/launchd/com.homeai.openclaw.plist ~/Library/LaunchAgents/
|
||||
launchctl kickstart -k gui/501/com.homeai.openclaw
|
||||
```
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Variable | Purpose | Required for |
|
||||
|----------|---------|-------------|
|
||||
| `HASS_TOKEN` | Home Assistant API token | ha-ctl, routine-ctl, music-ctl, calendar-ctl |
|
||||
| `HA_URL` | Home Assistant URL | Same as above |
|
||||
| `GAZE_API_KEY` | Image generation API key | gaze-ctl |
|
||||
| `N8N_API_KEY` | n8n automation API key | workflow-ctl |
|
||||
| `GITEA_TOKEN` | Gitea API token | gitea-ctl |
|
||||
| `ANTHROPIC_API_KEY` | Claude API key | mode-ctl (public mode) |
|
||||
| `OPENAI_API_KEY` | OpenAI API key | mode-ctl (public mode) |
|
||||
|
||||
### Skill File Locations
|
||||
|
||||
```
|
||||
~/.openclaw/skills/
|
||||
├── home-assistant/ ha-ctl → /opt/homebrew/bin/ha-ctl
|
||||
├── image-generation/ gaze-ctl → /opt/homebrew/bin/gaze-ctl
|
||||
├── memory/ memory-ctl → /opt/homebrew/bin/memory-ctl
|
||||
├── service-monitor/ monitor-ctl → /opt/homebrew/bin/monitor-ctl
|
||||
├── character/ character-ctl → /opt/homebrew/bin/character-ctl
|
||||
├── routine/ routine-ctl → /opt/homebrew/bin/routine-ctl
|
||||
├── music/ music-ctl → /opt/homebrew/bin/music-ctl
|
||||
├── workflow/ workflow-ctl → /opt/homebrew/bin/workflow-ctl
|
||||
├── gitea/ gitea-ctl → /opt/homebrew/bin/gitea-ctl
|
||||
├── calendar/ calendar-ctl → /opt/homebrew/bin/calendar-ctl
|
||||
├── mode/ mode-ctl → /opt/homebrew/bin/mode-ctl
|
||||
├── voice-assistant/ (no CLI)
|
||||
└── vtube-studio/ vtube-ctl → /opt/homebrew/bin/vtube-ctl
|
||||
```
|
||||
|
||||
### Data File Locations
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `~/homeai-data/memories/personal/*.json` | Per-character memories |
|
||||
| `~/homeai-data/memories/general.json` | Shared general memories |
|
||||
| `~/homeai-data/characters/*.json` | Character profiles |
|
||||
| `~/homeai-data/satellite-map.json` | Satellite → character mapping |
|
||||
| `~/homeai-data/active-tts-voice.json` | Current TTS voice config |
|
||||
| `~/homeai-data/active-mode.json` | Public/private mode state |
|
||||
| `~/homeai-data/routines/*.json` | Local routine definitions |
|
||||
| `~/homeai-data/reminders.json` | Pending voice reminders |
|
||||
| `~/homeai-data/conversations/*.json` | Chat conversation history |
|
||||
|
||||
### Creating a New Skill
|
||||
|
||||
Every skill follows the same pattern:
|
||||
|
||||
1. Create directory: `~/.openclaw/skills/<name>/`
|
||||
2. Write `SKILL.md` with YAML frontmatter (`name`, `description`) + usage docs
|
||||
3. Create Python CLI (stdlib only: `urllib.request`, `json`, `os`, `sys`, `re`, `datetime`)
|
||||
4. `chmod +x` the CLI and symlink to `/opt/homebrew/bin/`
|
||||
5. Add env vars to the OpenClaw launchd plist if needed
|
||||
6. Add a section to `~/.openclaw/workspace/TOOLS.md`
|
||||
7. Restart OpenClaw: `launchctl kickstart -k gui/501/com.homeai.openclaw`
|
||||
8. Test: `openclaw agent --message "test prompt" --agent main`
|
||||
|
||||
### Daemons
|
||||
|
||||
| Daemon | Plist | Purpose |
|
||||
|--------|-------|---------|
|
||||
| `com.homeai.reminder-daemon` | `homeai-agent/launchd/com.homeai.reminder-daemon.plist` | Fires TTS reminders when due |
|
||||
| `com.homeai.openclaw` | `homeai-agent/launchd/com.homeai.openclaw.plist` | OpenClaw gateway |
|
||||
| `com.homeai.openclaw-bridge` | `homeai-agent/launchd/com.homeai.openclaw-bridge.plist` | HTTP bridge (voice pipeline) |
|
||||
| `com.homeai.preload-models` | `homeai-llm/scripts/preload-models.sh` | Keeps models warm in VRAM |
|
||||
115
homeai-agent/custom_components/install-to-docker-ha.sh
Executable file
115
homeai-agent/custom_components/install-to-docker-ha.sh
Executable file
@@ -0,0 +1,115 @@
|
||||
#!/usr/bin/env bash
# Install the OpenClaw Conversation custom component into a Docker-based
# Home Assistant running on a remote host.
#
# Environment overrides:
#   HA_HOST      remote host running the HA container   (default: 10.0.0.199)
#   HA_CONTAINER container name filter used to find HA  (default: homeassistant)

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
COMPONENT_NAME="openclaw_conversation"
HA_HOST="${HA_HOST:-10.0.0.199}"
HA_CONTAINER="${HA_CONTAINER:-homeassistant}"

echo "Installing OpenClaw Conversation to Docker Home Assistant"
echo "=========================================================="
echo "Host: $HA_HOST"
echo "Container: $HA_CONTAINER"
echo ""

# Check if we can reach the host
if ! ping -c 1 -W 2 "$HA_HOST" &>/dev/null; then
    echo "Error: Cannot reach $HA_HOST"
    echo "Please ensure the server is accessible"
    exit 1
fi

# Create temporary tarball. Clean it up on EVERY exit path (success, scp
# failure, ssh failure) via a trap instead of per-branch rm -rf calls.
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "$TEMP_DIR"' EXIT
TARBALL="$TEMP_DIR/openclaw_conversation.tar.gz"

echo "Creating component archive..."
cd "$SCRIPT_DIR"
tar -czf "$TARBALL" \
    --exclude='*.pyc' \
    --exclude='__pycache__' \
    --exclude='.DS_Store' \
    "$COMPONENT_NAME"

echo "✓ Archive created: $(du -h "$TARBALL" | cut -f1)"
echo ""

# Copy to remote host
echo "Copying to $HA_HOST:/tmp/..."
if scp -q "$TARBALL" "$HA_HOST:/tmp/openclaw_conversation.tar.gz"; then
    echo "✓ File copied successfully"
else
    echo "✗ Failed to copy file"
    echo ""
    echo "Troubleshooting:"
    echo "  1. Ensure SSH access is configured: ssh $HA_HOST"
    echo "  2. Check SSH keys are set up"
    echo "  3. Try manual copy: scp $TARBALL $HA_HOST:/tmp/"
    exit 1
fi

# Extract into container.
# BUG FIX: the container filter used to be hard-coded to "homeassistant"
# inside the single-quoted (non-expanding) heredoc, silently ignoring
# $HA_CONTAINER. Pass it through as a positional argument instead.
echo ""
echo "Installing into Home Assistant container..."
ssh "$HA_HOST" bash -s -- "$HA_CONTAINER" << 'EOF'
# Find the Home Assistant container ($1 = name filter from the caller)
CONTAINER=$(docker ps --filter "name=$1" --format "{{.Names}}" | head -n 1)

if [ -z "$CONTAINER" ]; then
    echo "Error: Home Assistant container not found"
    echo "Available containers:"
    docker ps --format "{{.Names}}"
    exit 1
fi

echo "Found container: $CONTAINER"

# Copy tarball into container
docker cp /tmp/openclaw_conversation.tar.gz "$CONTAINER:/tmp/"

# Extract into custom_components
docker exec "$CONTAINER" sh -c '
    mkdir -p /config/custom_components
    cd /config/custom_components
    tar -xzf /tmp/openclaw_conversation.tar.gz
    rm /tmp/openclaw_conversation.tar.gz
    ls -la openclaw_conversation/
'

# Cleanup the staging copy on the remote host
rm /tmp/openclaw_conversation.tar.gz

echo ""
echo "✓ Component installed successfully!"
EOF

echo ""
echo "=========================================================="
echo "Installation complete!"
echo ""
echo "Next steps:"
echo "  1. Restart Home Assistant:"
echo "     ssh $HA_HOST 'docker restart $HA_CONTAINER'"
echo ""
echo "  2. Open Home Assistant UI: http://$HA_HOST:8123"
echo ""
echo "  3. Go to Settings → Devices & Services → Add Integration"
echo ""
echo "  4. Search for 'OpenClaw Conversation'"
echo ""
echo "  5. Configure:"
echo "     - OpenClaw Host: 10.0.0.101 ⚠️ (Mac Mini IP, NOT $HA_HOST)"
echo "     - OpenClaw Port: 8081 (HTTP Bridge port)"
echo "     - Agent Name: main"
echo "     - Timeout: 200 (must exceed the bridge's 180s cold-start timeout)"
echo ""
echo "  IMPORTANT: All services (OpenClaw, Wyoming STT/TTS/Satellite) run on"
echo "  10.0.0.101 (Mac Mini), not $HA_HOST (HA server)"
echo ""
echo "See VOICE_PIPELINE_SETUP.md for complete configuration guide"
|
||||
66
homeai-agent/custom_components/install.sh
Executable file
66
homeai-agent/custom_components/install.sh
Executable file
@@ -0,0 +1,66 @@
|
||||
#!/usr/bin/env bash
# Install OpenClaw Conversation custom component to Home Assistant.
#
# Usage: ./install.sh [/path/to/ha/config]

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
COMPONENT_NAME="openclaw_conversation"

# BUG FIX: the explicit path argument used to be read only AFTER
# auto-detection, so on machines with no standard config directory the
# script exited with "Please specify manually: ./install.sh /path/to/config"
# even when a path WAS supplied. Honor the command-line override first,
# then fall back to auto-detection.
if [[ $# -ge 1 ]]; then
    HA_CONFIG="$1"
elif [[ -d "/config" ]]; then
    HA_CONFIG="/config"
elif [[ -d "$HOME/.homeassistant" ]]; then
    HA_CONFIG="$HOME/.homeassistant"
elif [[ -d "$HOME/homeassistant" ]]; then
    HA_CONFIG="$HOME/homeassistant"
else
    echo "Error: Could not find Home Assistant config directory"
    echo "Please specify manually: ./install.sh /path/to/config"
    exit 1
fi

CUSTOM_COMPONENTS_DIR="$HA_CONFIG/custom_components"
TARGET_DIR="$CUSTOM_COMPONENTS_DIR/$COMPONENT_NAME"

echo "Installing OpenClaw Conversation custom component..."
echo "  Source: $SCRIPT_DIR/$COMPONENT_NAME"
echo "  Target: $TARGET_DIR"
echo ""

# Create custom_components directory if it doesn't exist
mkdir -p "$CUSTOM_COMPONENTS_DIR"

# Remove old installation if exists
if [[ -d "$TARGET_DIR" ]]; then
    echo "Removing old installation..."
    rm -rf "$TARGET_DIR"
fi

# Copy component files
cp -r "$SCRIPT_DIR/$COMPONENT_NAME" "$TARGET_DIR"

# Verify installation (manifest.json is the minimum HA requires)
if [[ -d "$TARGET_DIR" && -f "$TARGET_DIR/manifest.json" ]]; then
    echo "✓ Installation successful!"
    echo ""
    echo "Next steps:"
    echo "  1. Restart Home Assistant"
    echo "  2. Go to Settings → Devices & Services → Add Integration"
    echo "  3. Search for 'OpenClaw Conversation'"
    echo "  4. Configure the settings (host: localhost, port: 8081)"
    echo ""
    echo "  Or add to configuration.yaml:"
    echo "    openclaw_conversation:"
    echo "      openclaw_host: localhost"
    echo "      openclaw_port: 8081"
    echo "      agent_name: main"
    echo "      timeout: 200"
else
    echo "✗ Installation failed"
    exit 1
fi
|
||||
114
homeai-agent/custom_components/openclaw_conversation/README.md
Normal file
114
homeai-agent/custom_components/openclaw_conversation/README.md
Normal file
@@ -0,0 +1,114 @@
|
||||
# OpenClaw Conversation - Home Assistant Custom Component
|
||||
|
||||
A custom conversation agent for Home Assistant that routes all voice/text queries to OpenClaw for processing.
|
||||
|
||||
## Features
|
||||
|
||||
- **Direct OpenClaw Integration**: Routes all conversation requests to OpenClaw
|
||||
- **CLI-based Communication**: Uses the `openclaw` CLI command (fallback if HTTP API unavailable)
|
||||
- **Configurable**: Set host, port, agent name, and timeout via UI
|
||||
- **Voice Pipeline Compatible**: Works with Home Assistant's voice assistant pipeline
|
||||
|
||||
## Installation
|
||||
|
||||
### Method 1: Manual Copy
|
||||
|
||||
1. Copy the entire `openclaw_conversation` folder to your Home Assistant `custom_components` directory:
|
||||
```bash
|
||||
# On the HA host (if using HA OS or Container, use the File Editor add-on)
|
||||
cp -r homeai-agent/custom_components/openclaw_conversation \
|
||||
/config/custom_components/
|
||||
```
|
||||
|
||||
2. Restart Home Assistant
|
||||
|
||||
3. Go to **Settings → Devices & Services → Add Integration**
|
||||
4. Search for "OpenClaw Conversation"
|
||||
5. Configure the settings:
|
||||
- **OpenClaw Host**: `localhost` (or IP of Mac Mini)
|
||||
- **OpenClaw Port**: `8081` (HTTP Bridge)
|
||||
- **Agent Name**: `main` (or your configured agent)
|
||||
   - **Timeout**: `200` seconds (default; must exceed the HTTP bridge's 180 s cold-start timeout)
|
||||
|
||||
### Method 2: Using HACS (if available)
|
||||
|
||||
1. Add this repository to HACS as a custom repository
|
||||
2. Install "OpenClaw Conversation"
|
||||
3. Restart Home Assistant
|
||||
|
||||
## Configuration
|
||||
|
||||
### Via UI (Recommended)
|
||||
|
||||
After installation, configure via **Settings → Devices & Services → OpenClaw Conversation → Configure**.
|
||||
|
||||
### Via YAML (Alternative)
|
||||
|
||||
Add to your `configuration.yaml`:
|
||||
|
||||
```yaml
|
||||
openclaw_conversation:
|
||||
openclaw_host: localhost
|
||||
openclaw_port: 8081
|
||||
agent_name: main
|
||||
  timeout: 200
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Once configured, the OpenClaw agent will be available as a conversation agent in Home Assistant.
|
||||
|
||||
### Setting as Default Agent
|
||||
|
||||
1. Go to **Settings → Voice Assistants**
|
||||
2. Edit your voice assistant pipeline
|
||||
3. Set **Conversation Agent** to "OpenClaw Conversation"
|
||||
4. Save
|
||||
|
||||
### Testing
|
||||
|
||||
1. Open the **Assist** panel in Home Assistant
|
||||
2. Type a query like: "Turn on the reading lamp"
|
||||
3. OpenClaw will process the request and return a response
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
[Voice Input] → [HA Voice Pipeline] → [OpenClaw Conversation Agent]
|
||||
↓
|
||||
[OpenClaw CLI/API]
|
||||
↓
|
||||
[Ollama LLM + Skills]
|
||||
↓
|
||||
[HA Actions + TTS Response]
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Agent Not Responding
|
||||
|
||||
1. Check OpenClaw is running: `pgrep -f openclaw`
|
||||
2. Test CLI directly: `openclaw agent --message "Hello" --agent main`
|
||||
3. Check HA logs: **Settings → System → Logs**
|
||||
|
||||
### Connection Errors
|
||||
|
||||
1. Verify OpenClaw host/port settings
|
||||
2. Ensure OpenClaw is accessible from HA container/host
|
||||
3. Check network connectivity: `curl http://localhost:8081/status`
|
||||
|
||||
## Files
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `manifest.json` | Component metadata |
|
||||
| `__init__.py` | Component setup and registration |
|
||||
| `config_flow.py` | Configuration UI flow |
|
||||
| `const.py` | Constants and defaults |
|
||||
| `conversation.py` | Conversation agent implementation |
|
||||
| `strings.json` | UI translations |
|
||||
|
||||
## See Also
|
||||
|
||||
- [OpenClaw Integration Guide](../../skills/home-assistant/OPENCLAW_INTEGRATION.md)
|
||||
- [Voice Pipeline Implementation](../../../plans/ha-voice-pipeline-implementation.md)
|
||||
@@ -0,0 +1,98 @@
|
||||
"""OpenClaw Conversation integration for Home Assistant."""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
import voluptuous as vol
|
||||
|
||||
from homeassistant.config_entries import ConfigEntry
|
||||
from homeassistant.const import Platform
|
||||
from homeassistant.core import HomeAssistant
|
||||
from homeassistant.helpers import config_validation as cv
|
||||
|
||||
from .const import (
|
||||
CONF_AGENT_NAME,
|
||||
CONF_OPENCLAW_HOST,
|
||||
CONF_OPENCLAW_PORT,
|
||||
CONF_TIMEOUT,
|
||||
DEFAULT_AGENT,
|
||||
DEFAULT_HOST,
|
||||
DEFAULT_PORT,
|
||||
DEFAULT_TIMEOUT,
|
||||
DOMAIN,
|
||||
)
|
||||
from .conversation import OpenClawAgent
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
PLATFORMS = [Platform.CONVERSATION]
|
||||
|
||||
CONFIG_SCHEMA = vol.Schema(
|
||||
{
|
||||
DOMAIN: vol.Schema(
|
||||
{
|
||||
vol.Optional(CONF_OPENCLAW_HOST, default=DEFAULT_HOST): cv.string,
|
||||
vol.Optional(CONF_OPENCLAW_PORT, default=DEFAULT_PORT): cv.port,
|
||||
vol.Optional(CONF_AGENT_NAME, default=DEFAULT_AGENT): cv.string,
|
||||
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
|
||||
}
|
||||
)
|
||||
},
|
||||
extra=vol.ALLOW_EXTRA,
|
||||
)
|
||||
|
||||
|
||||
async def async_setup(hass: HomeAssistant, config: dict[str, Any]) -> bool:
    """Set up the OpenClaw Conversation component from YAML configuration.

    Returns True even when no YAML section is present so that
    config-entry-based setup can still proceed.
    """
    hass.data.setdefault(DOMAIN, {})

    if DOMAIN not in config:
        return True

    yaml_conf = config[DOMAIN]

    # Keep the raw YAML config around for later lookups.
    hass.data[DOMAIN] = {"config": yaml_conf}

    # Build and register the HTTP-based agent (works across the network).
    from homeassistant.components import conversation

    conversation.async_set_agent(hass, DOMAIN, OpenClawAgent(hass, yaml_conf))
    _LOGGER.info("OpenClaw Conversation agent registered")

    return True
|
||||
|
||||
|
||||
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up OpenClaw Conversation from a UI config entry."""
    hass.data.setdefault(DOMAIN, {})[entry.entry_id] = entry.data

    # HTTP-based agent so HA can reach OpenClaw across the network.
    from homeassistant.components import conversation

    conversation.async_set_agent(hass, entry, OpenClawAgent(hass, entry.data))
    _LOGGER.info("OpenClaw Conversation agent registered from config entry")

    return True
|
||||
|
||||
|
||||
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Tear down a config entry: unregister the agent and drop stored data."""
    from homeassistant.components import conversation

    conversation.async_unset_agent(hass, entry)
    hass.data[DOMAIN].pop(entry.entry_id, None)

    return True
|
||||
@@ -0,0 +1,134 @@
|
||||
"""Config flow for OpenClaw Conversation integration."""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
import voluptuous as vol
|
||||
|
||||
from homeassistant import config_entries
|
||||
from homeassistant.const import CONF_HOST, CONF_PORT
|
||||
from homeassistant.core import callback
|
||||
from homeassistant.data_entry_flow import FlowResult
|
||||
import homeassistant.helpers.config_validation as cv
|
||||
|
||||
from .const import (
|
||||
CONF_AGENT_NAME,
|
||||
CONF_OPENCLAW_HOST,
|
||||
CONF_OPENCLAW_PORT,
|
||||
CONF_TIMEOUT,
|
||||
DEFAULT_AGENT,
|
||||
DEFAULT_HOST,
|
||||
DEFAULT_PORT,
|
||||
DEFAULT_TIMEOUT,
|
||||
DOMAIN,
|
||||
)
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
STEP_USER_DATA_SCHEMA = vol.Schema(
|
||||
{
|
||||
vol.Optional(CONF_OPENCLAW_HOST, default=DEFAULT_HOST): cv.string,
|
||||
vol.Optional(CONF_OPENCLAW_PORT, default=DEFAULT_PORT): cv.port,
|
||||
vol.Optional(CONF_AGENT_NAME, default=DEFAULT_AGENT): cv.string,
|
||||
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for OpenClaw Conversation."""

    VERSION = 1

    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle the initial step.

        Validates connectivity to the configured OpenClaw host/port before
        creating the entry; re-shows the form with an error code otherwise.
        """
        errors: dict[str, str] = {}

        if user_input is not None:
            try:
                # Fail fast if OpenClaw is unreachable at the given address.
                await self._test_openclaw_connection(
                    user_input[CONF_OPENCLAW_HOST],
                    user_input[CONF_OPENCLAW_PORT],
                )

                return self.async_create_entry(
                    title="OpenClaw Conversation",
                    data=user_input,
                )
            except ConnectionError:
                errors["base"] = "cannot_connect"
            except Exception:  # noqa: BLE001 - surface anything else as "unknown"
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"

        return self.async_show_form(
            step_id="user",
            data_schema=STEP_USER_DATA_SCHEMA,
            errors=errors,
        )

    async def _test_openclaw_connection(self, host: str, port: int) -> None:
        """Raise ConnectionError if a TCP connection to OpenClaw cannot be opened.

        A 5-second cap keeps the config-flow UI responsive.
        """
        import asyncio

        try:
            reader, writer = await asyncio.wait_for(
                asyncio.open_connection(host, port),
                timeout=5,
            )
            writer.close()
            await writer.wait_closed()
        except Exception as err:
            # BUG FIX: chain the original exception (`from err`) so the log
            # shows the root cause instead of a bare ConnectionError.
            raise ConnectionError(f"Cannot connect to OpenClaw: {err}") from err

    @staticmethod
    @callback
    def async_get_options_flow(
        config_entry: config_entries.ConfigEntry,
    ) -> OptionsFlow:
        """Create the options flow."""
        return OptionsFlow(config_entry)
|
||||
|
||||
|
||||
class OptionsFlow(config_entries.OptionsFlow):
    """Handle options flow for OpenClaw Conversation."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Store the entry whose options are being edited."""
        self.config_entry = config_entry

    async def async_step_init(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Show the options form, or persist the submitted values."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)

        # Pre-fill the form with current options, falling back to defaults.
        current = self.config_entry.options
        schema = vol.Schema(
            {
                vol.Optional(
                    CONF_AGENT_NAME,
                    default=current.get(CONF_AGENT_NAME, DEFAULT_AGENT),
                ): cv.string,
                vol.Optional(
                    CONF_TIMEOUT,
                    default=current.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
                ): cv.positive_int,
            }
        )

        return self.async_show_form(
            step_id="init",
            data_schema=schema,
            errors={},
        )
|
||||
@@ -0,0 +1,26 @@
|
||||
"""Constants for OpenClaw Conversation integration."""
|
||||
|
||||
DOMAIN = "openclaw_conversation"
|
||||
|
||||
# Configuration keys
|
||||
CONF_OPENCLAW_HOST = "openclaw_host"
|
||||
CONF_OPENCLAW_PORT = "openclaw_port"
|
||||
CONF_AGENT_NAME = "agent_name"
|
||||
CONF_TIMEOUT = "timeout"
|
||||
|
||||
# Defaults
|
||||
DEFAULT_HOST = "10.0.0.101"
|
||||
DEFAULT_PORT = 8081 # OpenClaw HTTP Bridge (not 8080 gateway)
|
||||
DEFAULT_AGENT = "main"
|
||||
DEFAULT_TIMEOUT = 200 # Must exceed bridge cold timeout (180s)
|
||||
|
||||
# API endpoints
|
||||
OPENCLAW_API_PATH = "/api/agent/message"
|
||||
|
||||
# Service names
|
||||
SERVICE_PROCESS = "process"
|
||||
|
||||
# Attributes
|
||||
ATTR_MESSAGE = "message"
|
||||
ATTR_RESPONSE = "response"
|
||||
ATTR_AGENT = "agent"
|
||||
@@ -0,0 +1,229 @@
|
||||
"""Conversation agent for OpenClaw integration."""
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
import aiohttp
|
||||
import voluptuous as vol
|
||||
|
||||
from homeassistant.components.conversation import (
|
||||
AbstractConversationAgent,
|
||||
ConversationInput,
|
||||
ConversationResult,
|
||||
)
|
||||
from homeassistant.core import HomeAssistant
|
||||
from homeassistant.helpers.aiohttp_client import async_get_clientsession
|
||||
from homeassistant.helpers.intent import IntentResponse
|
||||
|
||||
from .const import (
|
||||
CONF_AGENT_NAME,
|
||||
CONF_OPENCLAW_HOST,
|
||||
CONF_OPENCLAW_PORT,
|
||||
CONF_TIMEOUT,
|
||||
DEFAULT_AGENT,
|
||||
DEFAULT_HOST,
|
||||
DEFAULT_PORT,
|
||||
DEFAULT_TIMEOUT,
|
||||
DOMAIN,
|
||||
OPENCLAW_API_PATH,
|
||||
)
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
# Schema for configuration
|
||||
CONFIG_SCHEMA = vol.Schema({
|
||||
vol.Optional(CONF_OPENCLAW_HOST, default=DEFAULT_HOST): str,
|
||||
vol.Optional(CONF_OPENCLAW_PORT, default=DEFAULT_PORT): int,
|
||||
vol.Optional(CONF_AGENT_NAME, default=DEFAULT_AGENT): str,
|
||||
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): int,
|
||||
})
|
||||
|
||||
|
||||
class OpenClawAgent(AbstractConversationAgent):
    """Conversation agent that forwards utterances to OpenClaw over HTTP.

    POSTs each utterance to the OpenClaw HTTP bridge at
    ``http://<host>:<port>/api/agent/message`` and wraps the reply in a
    Home Assistant ConversationResult.
    """

    def __init__(self, hass: HomeAssistant, config: dict[str, Any]) -> None:
        """Initialize the agent from config-entry / YAML data."""
        self.hass = hass
        self.config = config
        self.host = config.get(CONF_OPENCLAW_HOST, DEFAULT_HOST)
        self.port = config.get(CONF_OPENCLAW_PORT, DEFAULT_PORT)
        self.agent_name = config.get(CONF_AGENT_NAME, DEFAULT_AGENT)
        self.timeout = config.get(CONF_TIMEOUT, DEFAULT_TIMEOUT)

    @property
    def supported_languages(self) -> list[str]:
        """Return a list of supported languages."""
        return ["en"]  # OpenClaw primarily supports English

    @property
    def attribution(self) -> dict[str, str] | None:
        """Return attribution information."""
        return {
            "name": "OpenClaw",
            "url": "https://github.com/homeai/openclaw",
        }

    async def async_process(
        self, user_input: ConversationInput
    ) -> ConversationResult:
        """Process a sentence and return a response.

        Never raises: any failure is converted into a spoken apology so the
        voice pipeline always receives a well-formed result.
        """
        text = user_input.text
        conversation_id = user_input.conversation_id

        _LOGGER.debug("Processing message: %s", text)

        try:
            response_text = await self._call_openclaw(
                text,
                # Older HA cores may not expose these attributes on the input.
                satellite_id=getattr(user_input, "satellite_id", None),
                device_id=getattr(user_input, "device_id", None),
            )
        except Exception as err:  # noqa: BLE001 - keep the voice pipeline alive
            _LOGGER.error("Error calling OpenClaw: %s", err)
            response_text = "I'm sorry, I encountered an error processing your request."

        intent_response = IntentResponse(language=user_input.language or "en")
        intent_response.async_set_speech(response_text)

        return ConversationResult(
            response=intent_response,
            conversation_id=conversation_id,
        )

    async def _call_openclaw(
        self,
        message: str,
        satellite_id: str | None = None,
        device_id: str | None = None,
    ) -> str:
        """POST the message to the OpenClaw bridge and return the reply text.

        Returns an apology string (rather than raising) on HTTP errors,
        timeouts, and connection problems.
        """
        url = f"http://{self.host}:{self.port}{OPENCLAW_API_PATH}"

        payload = {
            "message": message,
            "agent": self.agent_name,
            # Prefer the Wyoming satellite id; fall back to the HA device id.
            "satellite_id": satellite_id or device_id,
        }

        session = async_get_clientsession(self.hass)

        try:
            async with asyncio.timeout(self.timeout):
                async with session.post(
                    url,
                    json=payload,
                    headers={"Content-Type": "application/json"},
                ) as response:
                    if response.status != 200:
                        _LOGGER.error(
                            "OpenClaw returned status %s: %s",
                            response.status,
                            await response.text(),
                        )
                        return "I'm sorry, I couldn't process that request."

                    data = await response.json()
                    return data.get("response", "I didn't get a response.")

        except asyncio.TimeoutError:
            _LOGGER.error("Timeout calling OpenClaw")
            return "I'm sorry, the request timed out."
        except aiohttp.ClientError as err:
            _LOGGER.error("Error connecting to OpenClaw: %s", err)
            return "I'm sorry, I couldn't connect to the OpenClaw service."
|
||||
|
||||
|
||||
class OpenClawCLIAgent(AbstractConversationAgent):
    """OpenClaw conversation agent using the ``openclaw`` CLI.

    Fallback path for when the HTTP bridge is unavailable; only usable when
    Home Assistant runs on the same machine as the OpenClaw install.
    """

    def __init__(self, hass: HomeAssistant, config: dict[str, Any]) -> None:
        """Initialize the agent."""
        self.hass = hass
        self.config = config
        self.agent_name = config.get(CONF_AGENT_NAME, DEFAULT_AGENT)
        self.timeout = config.get(CONF_TIMEOUT, DEFAULT_TIMEOUT)

    @property
    def supported_languages(self) -> list[str]:
        """Return a list of supported languages."""
        return ["en"]

    @property
    def attribution(self) -> dict[str, str] | None:
        """Return attribution information."""
        return {
            "name": "OpenClaw",
            "url": "https://github.com/homeai/openclaw",
        }

    async def async_process(
        self, user_input: ConversationInput
    ) -> ConversationResult:
        """Process a sentence using the OpenClaw CLI; never raises."""
        text = user_input.text
        conversation_id = user_input.conversation_id

        _LOGGER.debug("Processing message via CLI: %s", text)

        try:
            response_text = await self._call_openclaw_cli(text)
        except Exception as err:  # noqa: BLE001 - keep the voice pipeline alive
            _LOGGER.error("Error calling OpenClaw CLI: %s", err)
            response_text = "I'm sorry, I encountered an error processing your request."

        intent_response = IntentResponse(language=user_input.language or "en")
        intent_response.async_set_speech(response_text)

        return ConversationResult(
            response=intent_response,
            conversation_id=conversation_id,
        )

    async def _call_openclaw_cli(self, message: str) -> str:
        """Run ``openclaw agent --message ... --agent ...`` and return stdout.

        Returns an apology string on non-zero exit, on timeout (killing the
        still-running process), or when the CLI binary is missing.
        """
        cmd = [
            "openclaw",
            "agent",
            "--message", message,
            "--agent", self.agent_name,
        ]

        proc = None
        try:
            proc = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )

            stdout, stderr = await asyncio.wait_for(
                proc.communicate(),
                timeout=self.timeout,
            )

            if proc.returncode != 0:
                # ROBUSTNESS FIX: errors="replace" so undecodable CLI output
                # cannot raise UnicodeDecodeError and crash the agent.
                _LOGGER.error(
                    "OpenClaw CLI failed: %s",
                    stderr.decode(errors="replace").strip(),
                )
                return "I'm sorry, I couldn't process that request."

            return stdout.decode(errors="replace").strip()

        except asyncio.TimeoutError:
            if proc is not None:
                proc.kill()
                await proc.wait()
            _LOGGER.error("Timeout calling OpenClaw CLI")
            return "I'm sorry, the request timed out."
        except FileNotFoundError:
            _LOGGER.error("OpenClaw CLI not found")
            return "I'm sorry, OpenClaw is not available."
|
||||
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"domain": "openclaw_conversation",
|
||||
"name": "OpenClaw Conversation",
|
||||
"codeowners": ["@homeai"],
|
||||
"config_flow": true,
|
||||
"dependencies": ["conversation"],
|
||||
"documentation": "https://github.com/homeai/homeai-agent",
|
||||
"iot_class": "local_push",
|
||||
"requirements": [],
|
||||
"version": "1.0.0"
|
||||
}
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"title": "OpenClaw Conversation",
|
||||
"config": {
|
||||
"step": {
|
||||
"user": {
|
||||
"title": "OpenClaw Conversation Setup",
|
||||
"description": "Configure the OpenClaw conversation agent.",
|
||||
"data": {
|
||||
"openclaw_host": "OpenClaw Host",
|
||||
"openclaw_port": "OpenClaw Port",
|
||||
"agent_name": "Agent Name",
|
||||
"timeout": "Timeout (seconds)"
|
||||
}
|
||||
}
|
||||
},
|
||||
"error": {
|
||||
"cannot_connect": "Failed to connect to OpenClaw. Please check the host and port.",
|
||||
"unknown": "Unexpected error occurred."
|
||||
},
|
||||
"abort": {
|
||||
"already_configured": "OpenClaw Conversation is already configured."
|
||||
}
|
||||
},
|
||||
"options": {
|
||||
"step": {
|
||||
"init": {
|
||||
"title": "OpenClaw Conversation Options",
|
||||
"data": {
|
||||
"agent_name": "Agent Name",
|
||||
"timeout": "Timeout (seconds)"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
46
homeai-agent/custom_components/package-for-ha.sh
Executable file
46
homeai-agent/custom_components/package-for-ha.sh
Executable file
@@ -0,0 +1,46 @@
|
||||
#!/usr/bin/env bash
# Package OpenClaw Conversation component for Home Assistant installation.
# Produces dist/openclaw_conversation_<timestamp>.tar.gz plus an
# "openclaw_conversation_latest.tar.gz" symlink pointing at the newest build.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
COMPONENT_NAME="openclaw_conversation"
OUTPUT_DIR="$SCRIPT_DIR/dist"
# Timestamped archive names sort chronologically and never collide.
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
ARCHIVE_NAME="openclaw_conversation_${TIMESTAMP}.tar.gz"

echo "Packaging OpenClaw Conversation component..."
echo ""

# Create dist directory
mkdir -p "$OUTPUT_DIR"

# Create tarball (exclude Python bytecode and macOS metadata)
cd "$SCRIPT_DIR"
tar -czf "$OUTPUT_DIR/$ARCHIVE_NAME" \
    --exclude='*.pyc' \
    --exclude='__pycache__' \
    --exclude='.DS_Store' \
    "$COMPONENT_NAME"

# Create latest symlink (-f atomically replaces the previous build's link)
cd "$OUTPUT_DIR"
ln -sf "$ARCHIVE_NAME" openclaw_conversation_latest.tar.gz

echo "✓ Package created: $OUTPUT_DIR/$ARCHIVE_NAME"
echo ""
echo "Installation instructions:"
echo ""
echo "1. Copy to Home Assistant server:"
echo "   scp $OUTPUT_DIR/$ARCHIVE_NAME user@10.0.0.199:/tmp/"
echo ""
echo "2. SSH into Home Assistant server:"
echo "   ssh user@10.0.0.199"
echo ""
echo "3. Extract to custom_components:"
echo "   cd /config/custom_components"
echo "   tar -xzf /tmp/$ARCHIVE_NAME"
echo ""
echo "4. Restart Home Assistant"
echo ""
echo "Or use the install.sh script for automated installation."
|
||||
24
homeai-agent/launchd/com.homeai.mem0-backup.plist
Normal file
24
homeai-agent/launchd/com.homeai.mem0-backup.plist
Normal file
@@ -0,0 +1,24 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>Label</key>
|
||||
<string>com.homeai.mem0-backup</string>
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
<string>/bin/bash</string>
|
||||
<string>/Users/aodhan/gitea/homeai/homeai-agent/scripts/backup-memory.sh</string>
|
||||
</array>
|
||||
<key>StartCalendarInterval</key>
|
||||
<dict>
|
||||
<key>Hour</key>
|
||||
<integer>3</integer>
|
||||
<key>Minute</key>
|
||||
<integer>0</integer>
|
||||
</dict>
|
||||
<key>StandardOutPath</key>
|
||||
<string>/Users/aodhan/.openclaw/logs/mem0-backup.log</string>
|
||||
<key>StandardErrorPath</key>
|
||||
<string>/Users/aodhan/.openclaw/logs/mem0-backup.err</string>
|
||||
</dict>
|
||||
</plist>
|
||||
44
homeai-agent/launchd/com.homeai.openclaw-bridge.plist
Normal file
44
homeai-agent/launchd/com.homeai.openclaw-bridge.plist
Normal file
@@ -0,0 +1,44 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
|
||||
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>Label</key>
|
||||
<string>com.homeai.openclaw-bridge</string>
|
||||
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
<string>/Users/aodhan/homeai-voice-env/bin/python3</string>
|
||||
<string>/Users/aodhan/gitea/homeai/homeai-agent/openclaw-http-bridge.py</string>
|
||||
<string>--port</string>
|
||||
<string>8081</string>
|
||||
<string>--host</string>
|
||||
<string>0.0.0.0</string>
|
||||
</array>
|
||||
|
||||
<key>RunAtLoad</key>
|
||||
<true/>
|
||||
|
||||
<key>KeepAlive</key>
|
||||
<true/>
|
||||
|
||||
<key>StandardOutPath</key>
|
||||
<string>/tmp/homeai-openclaw-bridge.log</string>
|
||||
|
||||
<key>StandardErrorPath</key>
|
||||
<string>/tmp/homeai-openclaw-bridge-error.log</string>
|
||||
|
||||
<key>ThrottleInterval</key>
|
||||
<integer>10</integer>
|
||||
|
||||
<key>EnvironmentVariables</key>
|
||||
<dict>
|
||||
<key>PATH</key>
|
||||
<string>/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin</string>
|
||||
        <key>ELEVENLABS_API_KEY</key>
        <!-- SECURITY: a live ElevenLabs key was committed here in plain text.
             Rotate it immediately and inject the replacement at load time
             (e.g. from a gitignored env file) — never store secrets in VCS. -->
        <string>REDACTED-ROTATE-ME</string>
        <key>ANTHROPIC_API_KEY</key>
        <!-- SECURITY: a live Anthropic key was committed here in plain text.
             Rotate it and inject the replacement at load time. -->
        <string>REDACTED-ROTATE-ME</string>
|
||||
</dict>
|
||||
</dict>
|
||||
</plist>
|
||||
62
homeai-agent/launchd/com.homeai.openclaw.plist
Normal file
62
homeai-agent/launchd/com.homeai.openclaw.plist
Normal file
@@ -0,0 +1,62 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
|
||||
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>Label</key>
|
||||
<string>com.homeai.openclaw</string>
|
||||
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
<string>/opt/homebrew/bin/node</string>
|
||||
<string>/opt/homebrew/bin/openclaw</string>
|
||||
<string>gateway</string>
|
||||
<string>run</string>
|
||||
<string>--port</string>
|
||||
<string>8080</string>
|
||||
</array>
|
||||
|
||||
<key>EnvironmentVariables</key>
|
||||
<dict>
|
||||
<key>PATH</key>
|
||||
<string>/opt/homebrew/bin:/usr/bin:/bin</string>
|
||||
<key>OLLAMA_API_KEY</key>
|
||||
<string>ollama-local</string>
|
||||
<key>HA_URL</key>
|
||||
<string>https://10.0.0.199:8123</string>
|
||||
<!-- SECURITY(review): long-lived Home Assistant tokens, the Gaze API key and
     an Anthropic API key are committed here in plaintext. All of these are
     exposed in repo history — rotate them and load them from the environment
     or a secrets store rather than hardcoding them in this plist. -->
<key>HA_TOKEN</key>
<string>eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJmZGQ1NzZlYWNkMTU0ZTY2ODY1OTkzYTlhNTIxM2FmNyIsImlhdCI6MTc3MjU4ODYyOCwiZXhwIjoyMDg3OTQ4NjI4fQ.CTAU1EZgpVLp_aRnk4vg6cQqwS5N-p8jQkAAXTxFmLY</string>
<key>HASS_TOKEN</key>
<string>eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJmZGQ1NzZlYWNkMTU0ZTY2ODY1OTkzYTlhNTIxM2FmNyIsImlhdCI6MTc3MjU4ODYyOCwiZXhwIjoyMDg3OTQ4NjI4fQ.CTAU1EZgpVLp_aRnk4vg6cQqwS5N-p8jQkAAXTxFmLY</string>
<key>GAZE_API_KEY</key>
<string>e63401f17e4845e1059f830267f839fe7fc7b6083b1cb1730863318754d799f4</string>
<key>N8N_URL</key>
<string>http://localhost:5678</string>
<key>N8N_API_KEY</key>
<string></string>
<key>GITEA_URL</key>
<string>http://10.0.0.199:3000</string>
<key>GITEA_TOKEN</key>
<string></string>
<key>ANTHROPIC_API_KEY</key>
<string>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</string>
<key>OPENAI_API_KEY</key>
<string></string>
|
||||
</dict>
|
||||
|
||||
<key>RunAtLoad</key>
|
||||
<true/>
|
||||
|
||||
<key>KeepAlive</key>
|
||||
<true/>
|
||||
|
||||
<key>StandardOutPath</key>
|
||||
<string>/tmp/homeai-openclaw.log</string>
|
||||
|
||||
<key>StandardErrorPath</key>
|
||||
<string>/tmp/homeai-openclaw-error.log</string>
|
||||
|
||||
<key>ThrottleInterval</key>
|
||||
<integer>10</integer>
|
||||
</dict>
|
||||
</plist>
|
||||
30
homeai-agent/launchd/com.homeai.reminder-daemon.plist
Normal file
30
homeai-agent/launchd/com.homeai.reminder-daemon.plist
Normal file
@@ -0,0 +1,30 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
|
||||
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>Label</key>
|
||||
<string>com.homeai.reminder-daemon</string>
|
||||
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
<string>/Users/aodhan/homeai-voice-env/bin/python3</string>
|
||||
<string>/Users/aodhan/gitea/homeai/homeai-agent/reminder-daemon.py</string>
|
||||
</array>
|
||||
|
||||
<key>RunAtLoad</key>
|
||||
<true/>
|
||||
|
||||
<key>KeepAlive</key>
|
||||
<true/>
|
||||
|
||||
<key>StandardOutPath</key>
|
||||
<string>/tmp/homeai-reminder-daemon.log</string>
|
||||
|
||||
<key>StandardErrorPath</key>
|
||||
<string>/tmp/homeai-reminder-daemon-error.log</string>
|
||||
|
||||
<key>ThrottleInterval</key>
|
||||
<integer>10</integer>
|
||||
</dict>
|
||||
</plist>
|
||||
865
homeai-agent/memory_store.py
Normal file
865
homeai-agent/memory_store.py
Normal file
@@ -0,0 +1,865 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
HomeAI Memory Store — SQLite + Vector Search
|
||||
|
||||
Replaces flat JSON memory files with a structured SQLite database
|
||||
using sqlite-vec for semantic similarity search.
|
||||
|
||||
Used by:
|
||||
- openclaw-http-bridge.py (memory retrieval + follow-up injection)
|
||||
- memory-ctl skill (CLI memory management)
|
||||
- Dashboard API (REST endpoints via bridge)
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sqlite3
|
||||
import struct
|
||||
import time
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import sqlite_vec
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Configuration
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Root data directory; overridable via the DATA_DIR environment variable
# (useful for tests and alternate installs). Defaults to ~/homeai-data.
DATA_DIR = Path(os.environ.get("DATA_DIR", os.path.expanduser("~/homeai-data")))
MEMORIES_DIR = DATA_DIR / "memories"
DB_PATH = MEMORIES_DIR / "memories.db"
EMBEDDING_DIM = 384  # all-MiniLM-L6-v2

# Privacy keywords for rule-based classification.
# classify_memory() iterates this dict in insertion order and stops at the
# first level whose keyword list matches, so "local_only" takes precedence
# over "sensitive" when a memory matches both.
PRIVACY_KEYWORDS = {
    "local_only": [
        "health", "illness", "sick", "doctor", "medical", "medication", "surgery",
        "salary", "bank", "financial", "debt", "mortgage", "tax",
        "depression", "anxiety", "therapy", "divorce", "breakup",
    ],
    "sensitive": [
        "address", "phone", "email", "password", "birthday",
    ],
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Embedding model (lazy-loaded singleton)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Module-level cache for the sentence-transformers model. Loading the model
# is slow and memory-heavy, so it is deferred until first use and shared.
_embedder = None


def _get_embedder():
    """Lazy-load the sentence-transformers model.

    Returns the cached SentenceTransformer("all-MiniLM-L6-v2") instance,
    creating it on first call. The import is deferred so that importing this
    module stays cheap for callers that never embed anything.
    NOTE(review): not guarded by a lock — two threads racing on the first
    call may each load a model; last write wins (wasteful but harmless).
    """
    global _embedder
    if _embedder is None:
        from sentence_transformers import SentenceTransformer
        _embedder = SentenceTransformer("all-MiniLM-L6-v2")
    return _embedder
|
||||
|
||||
|
||||
def get_embedding(text: str) -> list[float]:
    """Embed *text* into a 384-dim normalized vector (plain Python floats)."""
    return _get_embedder().encode(text, normalize_embeddings=True).tolist()
|
||||
|
||||
|
||||
def _serialize_f32(vec: list[float]) -> bytes:
|
||||
"""Serialize a float list to little-endian bytes for sqlite-vec."""
|
||||
return struct.pack(f"<{len(vec)}f", *vec)
|
||||
|
||||
|
||||
def _deserialize_f32(blob: bytes) -> list[float]:
|
||||
"""Deserialize sqlite-vec float bytes back to a list."""
|
||||
n = len(blob) // 4
|
||||
return list(struct.unpack(f"<{n}f", blob))
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Database initialization
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Process-wide singleton connection, created by init_db() on first use.
# Opened with check_same_thread=False so the HTTP bridge can share it across
# threads; NOTE(review): sqlite3 itself does not serialize concurrent writes
# on a shared connection — confirm the bridge guards access appropriately.
_db: Optional[sqlite3.Connection] = None


def init_db() -> sqlite3.Connection:
    """Initialize the SQLite database with schema and sqlite-vec extension.

    Idempotent: returns the cached connection if one already exists, and all
    DDL uses IF NOT EXISTS (or tolerates "already exists" errors), so calling
    this against an existing database file is safe.

    Returns:
        The shared sqlite3.Connection with sqlite-vec loaded and
        row_factory set to sqlite3.Row.
    """
    global _db
    if _db is not None:
        return _db

    MEMORIES_DIR.mkdir(parents=True, exist_ok=True)
    db = sqlite3.connect(str(DB_PATH), check_same_thread=False)
    # Extension loading is enabled only for the duration of the sqlite-vec
    # load, then disabled again as a hardening measure.
    db.enable_load_extension(True)
    sqlite_vec.load(db)
    db.enable_load_extension(False)
    db.row_factory = sqlite3.Row

    db.executescript("""
        CREATE TABLE IF NOT EXISTS memories (
            id TEXT PRIMARY KEY,
            character_id TEXT NOT NULL,
            content TEXT NOT NULL,
            memory_type TEXT NOT NULL DEFAULT 'semantic',
            category TEXT NOT NULL DEFAULT 'other',
            privacy_level TEXT NOT NULL DEFAULT 'standard',
            importance REAL NOT NULL DEFAULT 0.5,
            lifecycle_state TEXT NOT NULL DEFAULT 'active',
            follow_up_due TEXT,
            follow_up_context TEXT,
            source TEXT DEFAULT 'user_explicit',
            created_at TEXT NOT NULL,
            last_accessed TEXT,
            expires_at TEXT,
            previous_value TEXT,
            tags TEXT,
            surfaced_count INTEGER DEFAULT 0
        );

        CREATE INDEX IF NOT EXISTS idx_memories_character
            ON memories(character_id);
        CREATE INDEX IF NOT EXISTS idx_memories_lifecycle
            ON memories(lifecycle_state);
        CREATE INDEX IF NOT EXISTS idx_memories_type
            ON memories(memory_type);
    """)

    # Create the vec0 virtual table for vector search.
    # sqlite-vec requires this specific syntax; the embedding dimension must
    # match what get_embedding() produces (EMBEDDING_DIM).
    db.execute(f"""
        CREATE VIRTUAL TABLE IF NOT EXISTS memory_embeddings USING vec0(
            id TEXT PRIMARY KEY,
            embedding float[{EMBEDDING_DIM}]
        )
    """)

    # Partial index for follow-ups (created manually since executescript can't
    # handle IF NOT EXISTS for partial indexes cleanly on all versions).
    try:
        db.execute("""
            CREATE INDEX idx_memories_followup
            ON memories(lifecycle_state, follow_up_due)
            WHERE lifecycle_state = 'pending_followup'
        """)
    except sqlite3.OperationalError:
        pass  # index already exists

    db.commit()
    _db = db
    return db
|
||||
|
||||
|
||||
def _get_db() -> sqlite3.Connection:
    """Return the shared database connection, initializing it on first use."""
    return _db if _db is not None else init_db()
|
||||
|
||||
|
||||
def _row_to_dict(row: sqlite3.Row) -> dict:
|
||||
"""Convert a sqlite3.Row to a plain dict."""
|
||||
return dict(row)
|
||||
|
||||
|
||||
def _generate_id() -> str:
|
||||
"""Generate a unique memory ID."""
|
||||
return f"m_{int(time.time() * 1000)}"
|
||||
|
||||
|
||||
def _now_iso() -> str:
|
||||
"""Current UTC time as ISO string."""
|
||||
return datetime.now(timezone.utc).isoformat()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Write-time classification (rule-based, Phase 1)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def classify_memory(content: str) -> dict:
    """Rule-based classification for memory properties.

    Returns default memory_type / privacy_level / importance values derived
    from keyword matching; callers may override any of them explicitly.
    """
    lowered = content.lower()

    # Privacy: the first matching keyword group wins ("local_only" is checked
    # before "sensitive" via dict insertion order).
    privacy = "standard"
    for level, words in PRIVACY_KEYWORDS.items():
        if any(word in lowered for word in words):
            privacy = level
            break

    # Type: temporal markers indicate an episodic (time-bound) memory.
    temporal_markers = (
        "today", "yesterday", "tonight", "this morning", "just now",
        "feeling", "right now", "this week", "earlier",
    )
    is_episodic = any(marker in lowered for marker in temporal_markers)

    # Importance heuristic: more private content is weighted higher.
    importance = {"local_only": 0.7, "sensitive": 0.6}.get(privacy, 0.5)

    return {
        "memory_type": "episodic" if is_episodic else "semantic",
        "privacy_level": privacy,
        "importance": importance,
    }
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CRUD operations
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def add_memory(
    character_id: str,
    content: str,
    memory_type: str | None = None,
    category: str = "other",
    importance: float | None = None,
    privacy_level: str | None = None,
    tags: list[str] | None = None,
    follow_up_due: str | None = None,
    follow_up_context: str | None = None,
    source: str = "user_explicit",
    expires_at: str | None = None,
) -> dict:
    """Add a new memory record. Auto-classifies fields not explicitly set.

    Args:
        character_id: owner persona of the memory.
        content: the memory text; also embedded for semantic search.
        memory_type / importance / privacy_level: optional overrides for
            the rule-based defaults from classify_memory().
        tags: optional list, stored as a JSON string.
        follow_up_due / follow_up_context: setting either puts the memory in
            the 'pending_followup' lifecycle state; a missing due date
            defaults to the sentinel 'next_interaction'.
        source: provenance tag (e.g. 'user_explicit', 'migrated_json').
        expires_at: optional ISO timestamp after which the memory expires.

    Returns:
        A dict describing the stored record, including its generated id.
    """
    db = _get_db()
    classified = classify_memory(content)

    memory_type = memory_type or classified["memory_type"]
    privacy_level = privacy_level or classified["privacy_level"]
    importance = importance if importance is not None else classified["importance"]

    lifecycle_state = "active"
    if follow_up_due or follow_up_context:
        lifecycle_state = "pending_followup"
        if not follow_up_due:
            follow_up_due = "next_interaction"

    mem_id = _generate_id()
    now = _now_iso()

    # Generate embedding before touching the DB so an embedding failure
    # leaves no partial row behind.
    embedding = get_embedding(content)

    # BUG FIX: expires_at was accepted by this function but silently dropped
    # (never inserted and absent from the return value). It is now persisted.
    db.execute("""
        INSERT INTO memories (
            id, character_id, content, memory_type, category,
            privacy_level, importance, lifecycle_state,
            follow_up_due, follow_up_context, source,
            created_at, expires_at, tags, surfaced_count
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 0)
    """, (
        mem_id, character_id, content, memory_type, category,
        privacy_level, importance, lifecycle_state,
        follow_up_due, follow_up_context, source,
        now, expires_at, json.dumps(tags) if tags else None,
    ))

    # Insert embedding into vec0 table
    db.execute(
        "INSERT INTO memory_embeddings (id, embedding) VALUES (?, ?)",
        (mem_id, _serialize_f32(embedding)),
    )

    db.commit()

    return {
        "id": mem_id,
        "character_id": character_id,
        "content": content,
        "memory_type": memory_type,
        "category": category,
        "privacy_level": privacy_level,
        "importance": importance,
        "lifecycle_state": lifecycle_state,
        "follow_up_due": follow_up_due,
        "follow_up_context": follow_up_context,
        "source": source,
        "created_at": now,
        "expires_at": expires_at,
        "tags": tags,
    }
|
||||
|
||||
|
||||
def update_memory(memory_id: str, **fields) -> dict | None:
    """Update specific fields on a memory record.

    Unknown field names are ignored. Returns the refreshed record as a dict,
    or None if no memory with this id exists.
    """
    db = _get_db()

    existing = db.execute("SELECT * FROM memories WHERE id = ?", (memory_id,)).fetchone()
    if existing is None:
        return None

    allowed = {
        "content", "memory_type", "category", "privacy_level", "importance",
        "lifecycle_state", "follow_up_due", "follow_up_context", "source",
        "last_accessed", "expires_at", "previous_value", "tags", "surfaced_count",
    }
    changes = {name: value for name, value in fields.items() if name in allowed}
    if not changes:
        return _row_to_dict(existing)

    # A content change also refreshes the embedding and keeps the old text
    # in previous_value for history.
    if "content" in changes:
        changes["previous_value"] = existing["content"]
        new_embedding = get_embedding(changes["content"])
        db.execute("DELETE FROM memory_embeddings WHERE id = ?", (memory_id,))
        db.execute(
            "INSERT INTO memory_embeddings (id, embedding) VALUES (?, ?)",
            (memory_id, _serialize_f32(new_embedding)),
        )

    if isinstance(changes.get("tags"), list):
        changes["tags"] = json.dumps(changes["tags"])

    # Column names come from the `allowed` whitelist above, so building the
    # SET clause by interpolation is safe; values stay parameterized.
    assignments = ", ".join(f"{name} = ?" for name in changes)
    db.execute(
        f"UPDATE memories SET {assignments} WHERE id = ?",
        (*changes.values(), memory_id),
    )
    db.commit()

    refreshed = db.execute("SELECT * FROM memories WHERE id = ?", (memory_id,)).fetchone()
    return _row_to_dict(refreshed) if refreshed else None
|
||||
|
||||
|
||||
def delete_memory(memory_id: str) -> bool:
    """Delete a memory record and its embedding.

    Returns True if the memory existed, False otherwise.
    """
    db = _get_db()
    found = db.execute(
        "SELECT id FROM memories WHERE id = ?", (memory_id,)
    ).fetchone()
    if found is None:
        return False

    db.execute("DELETE FROM memories WHERE id = ?", (memory_id,))
    db.execute("DELETE FROM memory_embeddings WHERE id = ?", (memory_id,))
    db.commit()
    return True
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Retrieval
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def retrieve_memories(
    character_id: str,
    context_text: str = "",
    limit: int = 20,
    exclude_private_for_cloud: bool = False,
) -> list[dict]:
    """Dual retrieval: semantic similarity + recency, merged and ranked.

    Three candidate pools are merged (first hit on an id wins):
      1. up to 5 high-importance (> 0.8) memories, score fixed at 1.0;
      2. semantic matches against context_text via sqlite-vec (if given),
         scored 0.6 * similarity + 0.1 * importance;
      3. up to 10 memories from the last 7 days, scored
         0.3 * recency + 0.1 * importance.
    Results are sorted by score, trimmed to `limit`, and their
    last_accessed stamp is updated as a side effect.

    If context_text is empty, falls back to recency-only retrieval.
    """
    db = _get_db()

    # Optional privacy filter: memories marked 'local_only' are withheld
    # when the caller will forward context to a cloud model.
    privacy_filter = ""
    if exclude_private_for_cloud:
        privacy_filter = "AND m.privacy_level != 'local_only'"

    # Always include high-importance memories
    high_importance = db.execute(f"""
        SELECT * FROM memories m
        WHERE m.character_id = ?
        AND m.lifecycle_state IN ('active', 'pending_followup')
        AND m.importance > 0.8
        {privacy_filter}
        ORDER BY m.created_at DESC
        LIMIT 5
    """, (character_id,)).fetchall()

    seen_ids = {r["id"] for r in high_importance}
    # results maps id -> record dict carrying a transient "_score" key
    # that is stripped before returning.
    results = {r["id"]: {**_row_to_dict(r), "_score": 1.0} for r in high_importance}

    # Semantic search (if context provided and embeddings exist)
    if context_text:
        try:
            query_emb = get_embedding(context_text)
            # KNN over the vec0 table; k = 30 over-fetches because the
            # character/lifecycle/privacy filters are applied afterwards.
            vec_rows = db.execute("""
                SELECT id, distance
                FROM memory_embeddings
                WHERE embedding MATCH ?
                AND k = 30
            """, (_serialize_f32(query_emb),)).fetchall()

            vec_ids = [r["id"] for r in vec_rows if r["id"] not in seen_ids]
            vec_distances = {r["id"]: r["distance"] for r in vec_rows}

            if vec_ids:
                placeholders = ",".join("?" * len(vec_ids))
                sem_rows = db.execute(f"""
                    SELECT * FROM memories m
                    WHERE m.id IN ({placeholders})
                    AND m.character_id = ?
                    AND m.lifecycle_state IN ('active', 'pending_followup')
                    {privacy_filter}
                """, (*vec_ids, character_id)).fetchall()

                for r in sem_rows:
                    d = _row_to_dict(r)
                    # Convert cosine distance to similarity (sqlite-vec returns L2 distance for vec0)
                    # NOTE(review): for normalized embeddings L2 and cosine
                    # distance are monotonically related, but 1 - L2 is not a
                    # true cosine similarity — confirm the intended scale.
                    dist = vec_distances.get(r["id"], 1.0)
                    semantic_score = max(0.0, 1.0 - dist)
                    d["_score"] = 0.6 * semantic_score + 0.1 * d["importance"]
                    results[r["id"]] = d
                    seen_ids.add(r["id"])
        except Exception as e:
            # Best-effort: a vector-search failure degrades to
            # importance/recency retrieval instead of raising.
            print(f"[MemoryStore] Vector search error: {e}")

    # Recency search: last 7 days, ordered by importance + recency
    recency_rows = db.execute(f"""
        SELECT * FROM memories m
        WHERE m.character_id = ?
        AND m.lifecycle_state IN ('active', 'pending_followup')
        AND m.created_at > datetime('now', '-7 days')
        {privacy_filter}
        ORDER BY m.importance DESC, m.created_at DESC
        LIMIT 10
    """, (character_id,)).fetchall()

    for r in recency_rows:
        if r["id"] not in seen_ids:
            d = _row_to_dict(r)
            # Recency score based on age in days (newer = higher)
            try:
                created = datetime.fromisoformat(d["created_at"])
                age_days = (datetime.now(timezone.utc) - created).total_seconds() / 86400
                recency_score = max(0.0, 1.0 - (age_days / 7.0))
            except (ValueError, TypeError):
                recency_score = 0.5
            d["_score"] = 0.3 * recency_score + 0.1 * d["importance"]
            results[r["id"]] = d
            seen_ids.add(r["id"])

    # Sort by score descending, return top N
    ranked = sorted(results.values(), key=lambda x: x.get("_score", 0), reverse=True)

    # Update last_accessed for returned memories
    returned = ranked[:limit]
    now = _now_iso()
    for mem in returned:
        mem.pop("_score", None)
        db.execute(
            "UPDATE memories SET last_accessed = ? WHERE id = ?",
            (now, mem["id"]),
        )
    db.commit()

    return returned
|
||||
|
||||
|
||||
def get_pending_followups(character_id: str) -> list[dict]:
    """Return up to 5 follow-up memories that are due for surfacing now.

    A follow-up is due when its timestamp has passed, or when it carries the
    'next_interaction' sentinel (surface on the very next conversation).
    """
    db = _get_db()
    due_rows = db.execute("""
        SELECT * FROM memories
        WHERE character_id = ?
        AND lifecycle_state = 'pending_followup'
        AND (follow_up_due <= ? OR follow_up_due = 'next_interaction')
        ORDER BY importance DESC, created_at DESC
        LIMIT 5
    """, (character_id, _now_iso())).fetchall()

    return [_row_to_dict(row) for row in due_rows]
|
||||
|
||||
|
||||
def search_memories(
    character_id: str,
    query: str,
    memory_type: str | None = None,
    limit: int = 10,
) -> list[dict]:
    """Semantic search for memories matching a query.

    Runs a KNN query over the vec0 embeddings table (over-fetching 3x the
    requested limit because character/type filters are applied afterwards),
    then returns matching records ordered by ascending vector distance.
    """
    db = _get_db()

    query_emb = get_embedding(query)
    vec_rows = db.execute("""
        SELECT id, distance
        FROM memory_embeddings
        WHERE embedding MATCH ?
        AND k = ?
    """, (_serialize_f32(query_emb), limit * 3)).fetchall()

    if not vec_rows:
        return []

    vec_ids = [r["id"] for r in vec_rows]
    vec_distances = {r["id"]: r["distance"] for r in vec_rows}
    placeholders = ",".join("?" * len(vec_ids))

    # Optional filter on memory_type; appended both to SQL and params.
    type_filter = "AND m.memory_type = ?" if memory_type else ""
    params = [*vec_ids, character_id]
    if memory_type:
        params.append(memory_type)

    rows = db.execute(f"""
        SELECT * FROM memories m
        WHERE m.id IN ({placeholders})
        AND m.character_id = ?
        {type_filter}
        ORDER BY m.created_at DESC
    """, params).fetchall()

    # Re-rank by vector similarity (the SQL ORDER BY above is only a
    # placeholder order; final order is ascending distance).
    results = []
    for r in rows:
        d = _row_to_dict(r)
        d["_distance"] = vec_distances.get(r["id"], 1.0)
        results.append(d)
    results.sort(key=lambda x: x["_distance"])

    # Strip the transient ranking key before returning.
    for r in results:
        r.pop("_distance", None)

    return results[:limit]
|
||||
|
||||
|
||||
def list_memories(
    character_id: str,
    memory_type: str | None = None,
    lifecycle_state: str | None = None,
    category: str | None = None,
    limit: int = 20,
    offset: int = 0,
) -> list[dict]:
    """List memories for a character, newest first, with optional filters."""
    db = _get_db()

    # Build WHERE conditions from whichever optional filters were supplied.
    optional_filters = {
        "memory_type": memory_type,
        "lifecycle_state": lifecycle_state,
        "category": category,
    }
    conditions = ["character_id = ?"]
    params: list = [character_id]
    for column, value in optional_filters.items():
        if value:
            conditions.append(f"{column} = ?")
            params.append(value)

    sql = (
        "SELECT * FROM memories WHERE "
        + " AND ".join(conditions)
        + " ORDER BY created_at DESC LIMIT ? OFFSET ?"
    )
    rows = db.execute(sql, [*params, limit, offset]).fetchall()
    return [_row_to_dict(row) for row in rows]
|
||||
|
||||
|
||||
def count_memories(character_id: str) -> int:
    """Return the total number of memories stored for a character."""
    db = _get_db()
    row = db.execute(
        "SELECT COUNT(*) as cnt FROM memories WHERE character_id = ?",
        (character_id,),
    ).fetchone()
    if row is None:
        return 0
    return row["cnt"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Lifecycle management
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def resolve_followup(memory_id: str) -> bool:
    """Mark a pending follow-up as resolved.

    Returns True only if the memory existed AND was in the
    'pending_followup' state.
    """
    db = _get_db()
    changed = db.execute("""
        UPDATE memories
        SET lifecycle_state = 'resolved',
        follow_up_due = NULL
        WHERE id = ? AND lifecycle_state = 'pending_followup'
    """, (memory_id,)).rowcount
    db.commit()
    return changed > 0
|
||||
|
||||
|
||||
def archive_memory(memory_id: str) -> bool:
    """Archive a memory (keeps it for relational inference, not surfaced).

    Returns True if a row was updated.
    """
    db = _get_db()
    changed = db.execute("""
        UPDATE memories
        SET lifecycle_state = 'archived'
        WHERE id = ?
    """, (memory_id,)).rowcount
    db.commit()
    return changed > 0
|
||||
|
||||
|
||||
def auto_resolve_expired_followups() -> int:
    """Auto-resolve follow-ups that are more than 48h past due.

    The 'next_interaction' sentinel is excluded — those only expire after
    increment_surfaced_count() converts them to a concrete timestamp.
    Returns the number of rows resolved.
    """
    db = _get_db()
    cutoff = (datetime.now(timezone.utc) - timedelta(hours=48)).isoformat()
    resolved = db.execute("""
        UPDATE memories
        SET lifecycle_state = 'resolved',
        follow_up_due = NULL
        WHERE lifecycle_state = 'pending_followup'
        AND follow_up_due != 'next_interaction'
        AND follow_up_due < ?
    """, (cutoff,)).rowcount
    db.commit()
    return resolved
|
||||
|
||||
|
||||
def auto_archive_old_resolved() -> int:
    """Archive resolved memories older than 7 days.

    Returns the number of rows archived.
    """
    db = _get_db()
    cutoff = (datetime.now(timezone.utc) - timedelta(days=7)).isoformat()
    archived = db.execute("""
        UPDATE memories
        SET lifecycle_state = 'archived'
        WHERE lifecycle_state = 'resolved'
        AND created_at < ?
    """, (cutoff,)).rowcount
    db.commit()
    return archived
|
||||
|
||||
|
||||
def increment_surfaced_count(memory_id: str) -> int:
    """Increment surfaced_count and return the new value.

    Lifecycle effects:
      - On the second surfacing (new_count >= 2) the memory is auto-resolved:
        it was shown twice without the user engaging.
      - On the first surfacing, a 'next_interaction' sentinel due date is
        replaced with the current timestamp so the 48h expiry timer in
        auto_resolve_expired_followups() can start counting.

    Returns 0 if the memory does not exist.
    """
    db = _get_db()
    row = db.execute(
        "SELECT surfaced_count FROM memories WHERE id = ?", (memory_id,)
    ).fetchone()
    if not row:
        return 0

    # surfaced_count may be NULL on legacy rows; treat NULL as 0.
    new_count = (row["surfaced_count"] or 0) + 1
    if new_count >= 2:
        # Auto-resolve: surfaced twice without user engagement
        db.execute("""
            UPDATE memories
            SET surfaced_count = ?, lifecycle_state = 'resolved', follow_up_due = NULL
            WHERE id = ?
        """, (new_count, memory_id))
    else:
        # Update next_interaction to actual timestamp so the 48h timer starts
        db.execute("""
            UPDATE memories
            SET surfaced_count = ?,
            follow_up_due = CASE
                WHEN follow_up_due = 'next_interaction' THEN ?
                ELSE follow_up_due
            END
            WHERE id = ?
        """, (new_count, _now_iso(), memory_id))
    db.commit()
    return new_count
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Deduplication
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def find_similar(
    character_id: str,
    content: str,
    memory_type: str = "semantic",
    threshold: float = 0.85,
) -> dict | None:
    """Find an existing memory that is semantically similar (>threshold).
    Returns the matching memory dict or None."""
    db = _get_db()

    # Nearest 5 embeddings to the candidate content, across all characters;
    # ownership and type are checked per-candidate below.
    candidates = db.execute("""
        SELECT id, distance
        FROM memory_embeddings
        WHERE embedding MATCH ?
        AND k = 5
    """, (_serialize_f32(get_embedding(content)),)).fetchall()

    for candidate in candidates:
        similarity = max(0.0, 1.0 - candidate["distance"])
        if similarity < threshold:
            continue
        match = db.execute("""
            SELECT * FROM memories
            WHERE id = ? AND character_id = ? AND memory_type = ?
            AND lifecycle_state = 'active'
        """, (candidate["id"], character_id, memory_type)).fetchone()
        if match:
            return _row_to_dict(match)

    return None
|
||||
|
||||
|
||||
def add_or_merge_memory(
    character_id: str,
    content: str,
    memory_type: str | None = None,
    category: str = "other",
    importance: float | None = None,
    privacy_level: str | None = None,
    tags: list[str] | None = None,
    follow_up_due: str | None = None,
    follow_up_context: str | None = None,
    source: str = "user_explicit",
    expires_at: str | None = None,
    dedup_threshold: float = 0.85,
) -> dict:
    """Add a memory, or merge with an existing similar one (semantic dedup).
    For semantic memories, if a similar one exists (>threshold), update it
    instead of creating a new record."""
    resolved_type = memory_type or classify_memory(content)["memory_type"]

    # Dedup applies only to semantic (fact-like) memories; episodic ones are
    # always stored as new records.
    if resolved_type == "semantic":
        duplicate = find_similar(character_id, content, "semantic", dedup_threshold)
        if duplicate is not None:
            merged = update_memory(duplicate["id"], content=content)
            if merged is not None:
                return merged

    return add_memory(
        character_id=character_id,
        content=content,
        memory_type=memory_type,
        category=category,
        importance=importance,
        privacy_level=privacy_level,
        tags=tags,
        follow_up_due=follow_up_due,
        follow_up_context=follow_up_context,
        source=source,
        expires_at=expires_at,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Migration from JSON
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Mapping from old JSON categories to new memory types
|
||||
# Mapping from old JSON categories to new memory types.
# Used only by migrate_from_json(); anything time-bound ("interaction",
# "emotional") becomes episodic, everything else semantic.
_CATEGORY_TO_TYPE = {
    "preference": "semantic",
    "personal_info": "semantic",
    "interaction": "episodic",
    "emotional": "episodic",
    "system": "semantic",
    "tool_usage": "semantic",
    "home_layout": "semantic",
    "device": "semantic",
    "routine": "semantic",
    "other": "semantic",
}

# Default importance per legacy category (migration only); unknown
# categories fall back to 0.5 at the call site.
_CATEGORY_TO_IMPORTANCE = {
    "personal_info": 0.7,
    "preference": 0.6,
    "emotional": 0.5,
    "interaction": 0.4,
    "system": 0.4,
    "tool_usage": 0.3,
    "home_layout": 0.5,
    "device": 0.4,
    "routine": 0.5,
    "other": 0.4,
}

# Legacy categories that map to an elevated privacy level; categories not
# listed here migrate as "standard".
_CATEGORY_TO_PRIVACY = {
    "emotional": "sensitive",
    "personal_info": "sensitive",
}
|
||||
|
||||
|
||||
def _migrate_record(db, character_id: str, mem: dict, privacy: str | None) -> None:
    """Insert one legacy JSON memory and restore its original timestamp.

    `privacy` overrides the category-derived privacy level when not None
    (used for shared/general memories, which always migrate as 'standard').
    """
    content = mem.get("content", "").strip()
    category = mem.get("category", "other")
    created_at = mem.get("createdAt", _now_iso())

    created = add_memory(
        character_id=character_id,
        content=content,
        memory_type=_CATEGORY_TO_TYPE.get(category, "semantic"),
        category=category,
        importance=_CATEGORY_TO_IMPORTANCE.get(category, 0.5),
        privacy_level=privacy if privacy is not None else _CATEGORY_TO_PRIVACY.get(category, "standard"),
        source="migrated_json",
    )
    # BUG FIX: restore created_at by targeting the new row's own id. The
    # previous "WHERE id = (SELECT id ... ORDER BY rowid DESC LIMIT 1)" trick
    # updated whatever row happened to be newest, which silently corrupts
    # timestamps if anything else inserts concurrently.
    db.execute(
        "UPDATE memories SET created_at = ? WHERE id = ?",
        (created_at, created["id"]),
    )
    db.commit()


def migrate_from_json(memories_dir: str | None = None) -> dict:
    """Migrate all JSON memory files to SQLite.

    Reads `<dir>/personal/*.json` (per-character files) and
    `<dir>/general.json` (shared memories, migrated under character_id
    "shared" with privacy "standard"). Each successfully processed file is
    renamed to `*.json.bak`. Per-record failures are collected, not fatal.

    Returns {migrated: int, skipped: int, errors: [str]}.
    """
    db = _get_db()
    mem_dir = Path(memories_dir) if memories_dir else MEMORIES_DIR

    migrated = 0
    skipped = 0
    errors = []

    # Migrate personal memories
    personal_dir = mem_dir / "personal"
    if personal_dir.exists():
        for json_file in personal_dir.glob("*.json"):
            try:
                with open(json_file) as f:
                    data = json.load(f)
                character_id = data.get("characterId", json_file.stem)
                for mem in data.get("memories", []):
                    if not mem.get("content", "").strip():
                        skipped += 1
                        continue
                    try:
                        _migrate_record(db, character_id, mem, None)
                        migrated += 1
                    except Exception as e:
                        errors.append(f"personal/{json_file.name}: {e}")

                # Rename to backup so a re-run does not double-import
                json_file.rename(json_file.with_suffix(".json.bak"))
            except Exception as e:
                errors.append(f"personal/{json_file.name}: {e}")

    # Migrate general memories
    general_file = mem_dir / "general.json"
    if general_file.exists():
        try:
            with open(general_file) as f:
                data = json.load(f)
            for mem in data.get("memories", []):
                if not mem.get("content", "").strip():
                    skipped += 1
                    continue
                try:
                    _migrate_record(db, "shared", mem, "standard")
                    migrated += 1
                except Exception as e:
                    errors.append(f"general.json: {e}")

            general_file.rename(general_file.with_suffix(".json.bak"))
        except Exception as e:
            errors.append(f"general.json: {e}")

    return {"migrated": migrated, "skipped": skipped, "errors": errors}
|
||||
1109
homeai-agent/openclaw-http-bridge.py
Normal file
1109
homeai-agent/openclaw-http-bridge.py
Normal file
File diff suppressed because it is too large
Load Diff
13
homeai-agent/prompt-styles/creative.json
Normal file
13
homeai-agent/prompt-styles/creative.json
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"id": "creative",
|
||||
"name": "Creative",
|
||||
"group": "cloud",
|
||||
"model": "anthropic/claude-sonnet-4-6",
|
||||
"description": "In-depth answers, longer conversational responses",
|
||||
"thinking": "low",
|
||||
"params": {
|
||||
"temperature": 0.7
|
||||
},
|
||||
"instruction": "Give thorough, in-depth answers. Respond at whatever length the topic requires — short for simple things, long for complex ones. Be conversational and engaging, like a knowledgeable friend. Vary your sentence structure and word choice to keep things interesting. Do not use roleplay actions or narration. If a topic has interesting depth worth exploring, offer to continue. This mode is for rich conversation, not commands.",
|
||||
"strip_sections": []
|
||||
}
|
||||
13
homeai-agent/prompt-styles/game-master.json
Normal file
13
homeai-agent/prompt-styles/game-master.json
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"id": "game-master",
|
||||
"name": "Game Master",
|
||||
"group": "cloud",
|
||||
"model": "anthropic/claude-opus-4-6",
|
||||
"description": "Second-person interactive narration with user as participant",
|
||||
"thinking": "off",
|
||||
"params": {
|
||||
"temperature": 0.9
|
||||
},
|
||||
"instruction": "Narrate in second person — the user is the subject experiencing the scene. Describe what they see, hear, and feel with vivid, varied language. Write your character's dialogue in quotes and their actions in prose. After describing the scene or an interaction, prompt the user for their next action. Keep the user engaged as an active participant. Balance rich description with opportunities for user agency. Avoid repeating descriptive patterns — each scene should feel fresh and unpredictable. This is a 2nd-person interactive experience.",
|
||||
"strip_sections": []
|
||||
}
|
||||
13
homeai-agent/prompt-styles/quick.json
Normal file
13
homeai-agent/prompt-styles/quick.json
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"id": "quick",
|
||||
"name": "Quick",
|
||||
"group": "cloud",
|
||||
"model": "anthropic/claude-haiku-4-5-20251001",
|
||||
"description": "Brief responses for commands and quick questions",
|
||||
"thinking": "off",
|
||||
"params": {
|
||||
"temperature": 0.15
|
||||
},
|
||||
"instruction": "RESPONSE RULES — STRICT:\n- Respond as briefly as possible. For smart home commands, confirm with 1-3 words (\"Done.\", \"Lights on.\", \"Playing jazz.\").\n- For factual questions, give the shortest correct answer. One sentence max.\n- No small talk, no elaboration, no follow-up questions unless the request is genuinely ambiguous.\n- Never describe your actions, emotions, or thought process.\n- Never add flair, personality, or creative embellishments — be a reliable, predictable tool.\n- If a tool call is needed, execute it and report the result. Nothing else.",
|
||||
"strip_sections": ["background", "appearance", "dialogue_style"]
|
||||
}
|
||||
13
homeai-agent/prompt-styles/roleplayer.json
Normal file
13
homeai-agent/prompt-styles/roleplayer.json
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"id": "roleplayer",
|
||||
"name": "Roleplayer",
|
||||
"group": "cloud",
|
||||
"model": "anthropic/claude-opus-4-6",
|
||||
"description": "First-person roleplay with character actions and expressions",
|
||||
"thinking": "off",
|
||||
"params": {
|
||||
"temperature": 0.85
|
||||
},
|
||||
"instruction": "Respond entirely in first person as your character. Use action descriptions enclosed in asterisks (*adjusts glasses*, *leans forward thoughtfully*) to convey body language, emotions, and physical actions. Stay fully in character at all times — your personality, speech patterns, and mannerisms should be consistent with your character profile. React emotionally and physically to what the user says. Vary your expressions, gestures, and phrasings — never repeat the same actions or sentence structures. Surprise the user with unexpected but in-character reactions. This is an immersive 1st-person interaction.",
|
||||
"strip_sections": []
|
||||
}
|
||||
13
homeai-agent/prompt-styles/standard.json
Normal file
13
homeai-agent/prompt-styles/standard.json
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"id": "standard",
|
||||
"name": "Standard",
|
||||
"group": "cloud",
|
||||
"model": "anthropic/claude-sonnet-4-6",
|
||||
"description": "Conversational responses, concise but informative",
|
||||
"thinking": "off",
|
||||
"params": {
|
||||
"temperature": 0.4
|
||||
},
|
||||
"instruction": "Respond naturally and conversationally. Be concise but informative — a few sentences is ideal. Do not use roleplay actions, narration, or describe your expressions/body language. Treat the interaction as a chat, not a performance. Stay helpful, on-topic, and consistent. Prioritise clarity and accuracy over flair.",
|
||||
"strip_sections": []
|
||||
}
|
||||
13
homeai-agent/prompt-styles/storyteller.json
Normal file
13
homeai-agent/prompt-styles/storyteller.json
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"id": "storyteller",
|
||||
"name": "Storyteller",
|
||||
"group": "cloud",
|
||||
"model": "anthropic/claude-opus-4-6",
|
||||
"description": "Third-person narrative with periodic reader check-ins",
|
||||
"thinking": "off",
|
||||
"params": {
|
||||
"temperature": 0.95
|
||||
},
|
||||
"instruction": "Narrate in third person as a storyteller. Describe scenes, character actions, dialogue, and atmosphere as a novelist would. Your character should be written about, not speaking as themselves directly to the user. Write rich, evocative prose with varied vocabulary, rhythm, and imagery. Avoid formulaic descriptions — each passage should have its own texture and mood. Periodically check in with the reader about story direction. The user drives the direction but you drive the narrative between check-ins. This is a 3rd-person storytelling experience.",
|
||||
"strip_sections": []
|
||||
}
|
||||
90
homeai-agent/reminder-daemon.py
Executable file
90
homeai-agent/reminder-daemon.py
Executable file
@@ -0,0 +1,90 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
HomeAI Reminder Daemon — checks ~/homeai-data/reminders.json every 60s
|
||||
and fires TTS via POST http://localhost:8081/api/tts when reminders are due.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import urllib.request
|
||||
from datetime import datetime
|
||||
|
||||
REMINDERS_FILE = os.path.expanduser("~/homeai-data/reminders.json")
|
||||
TTS_URL = "http://localhost:8081/api/tts"
|
||||
CHECK_INTERVAL = 60 # seconds
|
||||
|
||||
|
||||
def load_reminders():
|
||||
try:
|
||||
with open(REMINDERS_FILE) as f:
|
||||
return json.load(f)
|
||||
except (FileNotFoundError, json.JSONDecodeError):
|
||||
return {"reminders": []}
|
||||
|
||||
|
||||
def save_reminders(data):
|
||||
with open(REMINDERS_FILE, "w") as f:
|
||||
json.dump(data, f, indent=2)
|
||||
|
||||
|
||||
def fire_tts(message):
|
||||
"""Speak reminder via the OpenClaw bridge TTS endpoint."""
|
||||
try:
|
||||
payload = json.dumps({"text": f"Reminder: {message}"}).encode()
|
||||
req = urllib.request.Request(
|
||||
TTS_URL,
|
||||
data=payload,
|
||||
headers={"Content-Type": "application/json"},
|
||||
method="POST"
|
||||
)
|
||||
urllib.request.urlopen(req, timeout=30)
|
||||
print(f"[{datetime.now().isoformat()}] TTS fired: {message}")
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"[{datetime.now().isoformat()}] TTS error: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def check_reminders():
|
||||
data = load_reminders()
|
||||
now = datetime.now()
|
||||
changed = False
|
||||
|
||||
for r in data.get("reminders", []):
|
||||
if r.get("fired"):
|
||||
continue
|
||||
|
||||
try:
|
||||
due = datetime.fromisoformat(r["due_at"])
|
||||
except (KeyError, ValueError):
|
||||
continue
|
||||
|
||||
if now >= due:
|
||||
print(f"[{now.isoformat()}] Reminder due: {r.get('message', '?')}")
|
||||
fire_tts(r["message"])
|
||||
r["fired"] = True
|
||||
changed = True
|
||||
|
||||
if changed:
|
||||
# Clean up fired reminders older than 24h
|
||||
cutoff = (now.timestamp() - 86400) * 1000
|
||||
data["reminders"] = [
|
||||
r for r in data["reminders"]
|
||||
if not r.get("fired") or int(r.get("id", "0")) > cutoff
|
||||
]
|
||||
save_reminders(data)
|
||||
|
||||
|
||||
def main():
|
||||
print(f"[{datetime.now().isoformat()}] Reminder daemon started (check every {CHECK_INTERVAL}s)")
|
||||
while True:
|
||||
try:
|
||||
check_reminders()
|
||||
except Exception as e:
|
||||
print(f"[{datetime.now().isoformat()}] Error: {e}")
|
||||
time.sleep(CHECK_INTERVAL)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
29
homeai-agent/scripts/backup-memory.sh
Executable file
29
homeai-agent/scripts/backup-memory.sh
Executable file
@@ -0,0 +1,29 @@
|
||||
#!/bin/bash
|
||||
# mem0 backup script
|
||||
set -euo pipefail
|
||||
|
||||
MEMORY_DIR="/Users/aodhan/.openclaw/memory/"
|
||||
|
||||
# Check if directory exists
|
||||
if [[ ! -d "$MEMORY_DIR" ]]; then
|
||||
echo "Error: Memory directory $MEMORY_DIR does not exist" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "$MEMORY_DIR"
|
||||
|
||||
# Check if git is initialized
|
||||
if [[ ! -d ".git" ]]; then
|
||||
echo "Error: Git not initialized in $MEMORY_DIR" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if there are changes to commit
|
||||
if git diff --cached --quiet && git diff --quiet; then
|
||||
echo "No changes to commit"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
git add .
|
||||
git commit -m "mem0 backup $(date)" || echo "Nothing to commit"
|
||||
# git push # Uncomment when remote is configured
|
||||
230
homeai-agent/setup.sh
Normal file → Executable file
230
homeai-agent/setup.sh
Normal file → Executable file
@@ -1,17 +1,20 @@
|
||||
#!/usr/bin/env bash
|
||||
# homeai-agent/setup.sh — P4: OpenClaw agent + skills + mem0
|
||||
# homeai-agent/setup.sh — OpenClaw agent, HTTP bridge, skills, reminder daemon
|
||||
#
|
||||
# Components:
|
||||
# - OpenClaw — AI agent runtime (port 8080)
|
||||
# - skills/ — home_assistant, memory, weather, timer, music stubs
|
||||
# - mem0 — long-term memory (Chroma backend)
|
||||
# - n8n workflows — morning briefing, notification router, memory backup
|
||||
# - OpenClaw gateway — AI agent runtime (port 8080)
|
||||
# - OpenClaw HTTP bridge — HA ↔ OpenClaw translator (port 8081)
|
||||
# - 13 skills — home-assistant, image-generation, voice-assistant,
|
||||
# vtube-studio, memory, service-monitor, character,
|
||||
# routine, music, workflow, gitea, calendar, mode
|
||||
# - Reminder daemon — fires TTS when reminders are due
|
||||
#
|
||||
# Prerequisites:
|
||||
# - P1 (homeai-infra) — Home Assistant running, HA_TOKEN set
|
||||
# - P2 (homeai-llm) — Ollama running with llama3.3:70b + nomic-embed-text
|
||||
# - P3 (homeai-voice) — Wyoming TTS running (for voice output)
|
||||
# - P5 (homeai-character) — aria.json character config exists
|
||||
# - Ollama running (port 11434)
|
||||
# - Home Assistant reachable (HA_TOKEN set in .env)
|
||||
# - Wyoming TTS running (port 10301)
|
||||
# - homeai-voice-env venv exists (for bridge + reminder daemon)
|
||||
# - At least one character JSON in ~/homeai-data/characters/
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
@@ -19,47 +22,196 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
|
||||
source "${REPO_DIR}/scripts/common.sh"
|
||||
|
||||
log_section "P4: Agent (OpenClaw + skills + mem0)"
|
||||
log_section "P4: Agent (OpenClaw + HTTP Bridge + Skills)"
|
||||
detect_platform
|
||||
|
||||
# ─── Prerequisite check ────────────────────────────────────────────────────────
|
||||
# ─── Load environment ────────────────────────────────────────────────────────
|
||||
ENV_FILE="${REPO_DIR}/.env"
|
||||
if [[ -f "$ENV_FILE" ]]; then
|
||||
log_info "Loading .env..."
|
||||
load_env "$ENV_FILE"
|
||||
else
|
||||
log_warn "No .env found at ${ENV_FILE} — API keys may be missing"
|
||||
fi
|
||||
|
||||
# ─── Prerequisite checks ────────────────────────────────────────────────────
|
||||
log_info "Checking prerequisites..."
|
||||
|
||||
for service in "http://localhost:11434:Ollama(P2)" "http://localhost:8123:HomeAssistant(P1)"; do
|
||||
url="${service%%:*}"; name="${service##*:}"
|
||||
if ! curl -sf "$url" -o /dev/null 2>/dev/null; then
|
||||
require_command node "brew install node"
|
||||
require_command openclaw "npm install -g openclaw"
|
||||
|
||||
VOICE_ENV="${HOME}/homeai-voice-env"
|
||||
if [[ ! -d "$VOICE_ENV" ]]; then
|
||||
die "homeai-voice-env not found at $VOICE_ENV — run homeai-voice/setup.sh first"
|
||||
fi
|
||||
|
||||
# Check key services (non-fatal)
|
||||
for check in "http://localhost:11434:Ollama" "http://localhost:10301:Wyoming-TTS"; do
|
||||
url="${check%%:*}"; name="${check##*:}"
|
||||
if curl -sf "$url" -o /dev/null 2>/dev/null; then
|
||||
log_success "$name reachable"
|
||||
else
|
||||
log_warn "$name not reachable at $url"
|
||||
fi
|
||||
done
|
||||
|
||||
load_env_services
|
||||
if [[ -z "${HA_TOKEN:-}" ]]; then
|
||||
log_warn "HA_TOKEN not set in ~/.env.services — needed for home_assistant skill"
|
||||
# Check required env vars
|
||||
MISSING_KEYS=()
|
||||
[[ -z "${HA_TOKEN:-}" ]] && MISSING_KEYS+=("HA_TOKEN")
|
||||
[[ -z "${ANTHROPIC_API_KEY:-}" ]] && MISSING_KEYS+=("ANTHROPIC_API_KEY")
|
||||
if [[ ${#MISSING_KEYS[@]} -gt 0 ]]; then
|
||||
log_warn "Missing env vars: ${MISSING_KEYS[*]} — set these in ${ENV_FILE}"
|
||||
fi
|
||||
|
||||
# ─── TODO: Implementation ──────────────────────────────────────────────────────
|
||||
# ─── Ensure data directories ─────────────────────────────────────────────────
|
||||
DATA_DIR="${HOME}/homeai-data"
|
||||
for dir in characters memories memories/personal conversations routines; do
|
||||
mkdir -p "${DATA_DIR}/${dir}"
|
||||
done
|
||||
log_success "Data directories verified"
|
||||
|
||||
# ─── OpenClaw config ─────────────────────────────────────────────────────────
|
||||
OPENCLAW_DIR="${HOME}/.openclaw"
|
||||
OPENCLAW_CONFIG="${OPENCLAW_DIR}/openclaw.json"
|
||||
|
||||
if [[ ! -f "$OPENCLAW_CONFIG" ]]; then
|
||||
die "OpenClaw config not found at $OPENCLAW_CONFIG — run: openclaw doctor --fix"
|
||||
fi
|
||||
log_success "OpenClaw config exists at $OPENCLAW_CONFIG"
|
||||
|
||||
# Verify Anthropic provider is configured
|
||||
if ! grep -q '"anthropic"' "$OPENCLAW_CONFIG" 2>/dev/null; then
|
||||
log_warn "Anthropic provider not found in openclaw.json — add it for Claude support"
|
||||
fi
|
||||
|
||||
# ─── Install skills ──────────────────────────────────────────────────────────
|
||||
SKILLS_SRC="${SCRIPT_DIR}/skills"
|
||||
SKILLS_DEST="${OPENCLAW_DIR}/skills"
|
||||
|
||||
if [[ -d "$SKILLS_SRC" ]]; then
|
||||
log_info "Syncing skills..."
|
||||
mkdir -p "$SKILLS_DEST"
|
||||
for skill_dir in "$SKILLS_SRC"/*/; do
|
||||
skill_name="$(basename "$skill_dir")"
|
||||
dest="${SKILLS_DEST}/${skill_name}"
|
||||
if [[ -L "$dest" ]]; then
|
||||
log_info " ${skill_name} (symlinked)"
|
||||
elif [[ -d "$dest" ]]; then
|
||||
# Replace copy with symlink
|
||||
rm -rf "$dest"
|
||||
ln -s "$skill_dir" "$dest"
|
||||
log_step "${skill_name} → symlinked"
|
||||
else
|
||||
ln -s "$skill_dir" "$dest"
|
||||
log_step "${skill_name} → installed"
|
||||
fi
|
||||
done
|
||||
log_success "Skills synced ($(ls -d "$SKILLS_DEST"/*/ 2>/dev/null | wc -l | tr -d ' ') total)"
|
||||
else
|
||||
log_warn "No skills directory at $SKILLS_SRC"
|
||||
fi
|
||||
|
||||
# ─── Install launchd services (macOS) ────────────────────────────────────────
|
||||
if [[ "$OS_TYPE" == "macos" ]]; then
|
||||
log_info "Installing launchd agents..."
|
||||
|
||||
LAUNCHD_DIR="${SCRIPT_DIR}/launchd"
|
||||
AGENTS_DIR="${HOME}/Library/LaunchAgents"
|
||||
mkdir -p "$AGENTS_DIR"
|
||||
|
||||
# Inject API keys into plists that need them
|
||||
_inject_plist_key() {
|
||||
local plist="$1" key="$2" value="$3"
|
||||
if [[ -n "$value" ]] && grep -q "<key>${key}</key>" "$plist" 2>/dev/null; then
|
||||
# Use python for reliable XML-safe replacement
|
||||
python3 -c "
|
||||
import sys, re
|
||||
with open('$plist') as f: content = f.read()
|
||||
pattern = r'(<key>${key}</key>\s*<string>)[^<]*(</string>)'
|
||||
content = re.sub(pattern, r'\g<1>${value}\g<2>', content)
|
||||
with open('$plist', 'w') as f: f.write(content)
|
||||
"
|
||||
fi
|
||||
}
|
||||
|
||||
# Update API keys in plist source files before linking
|
||||
OPENCLAW_PLIST="${LAUNCHD_DIR}/com.homeai.openclaw.plist"
|
||||
BRIDGE_PLIST="${LAUNCHD_DIR}/com.homeai.openclaw-bridge.plist"
|
||||
|
||||
if [[ -f "$OPENCLAW_PLIST" ]]; then
|
||||
_inject_plist_key "$OPENCLAW_PLIST" "ANTHROPIC_API_KEY" "${ANTHROPIC_API_KEY:-}"
|
||||
_inject_plist_key "$OPENCLAW_PLIST" "OPENAI_API_KEY" "${OPENAI_API_KEY:-}"
|
||||
_inject_plist_key "$OPENCLAW_PLIST" "HA_TOKEN" "${HA_TOKEN:-}"
|
||||
_inject_plist_key "$OPENCLAW_PLIST" "HASS_TOKEN" "${HA_TOKEN:-}"
|
||||
_inject_plist_key "$OPENCLAW_PLIST" "GITEA_TOKEN" "${GITEA_TOKEN:-}"
|
||||
_inject_plist_key "$OPENCLAW_PLIST" "N8N_API_KEY" "${N8N_API_KEY:-}"
|
||||
fi
|
||||
|
||||
if [[ -f "$BRIDGE_PLIST" ]]; then
|
||||
_inject_plist_key "$BRIDGE_PLIST" "ANTHROPIC_API_KEY" "${ANTHROPIC_API_KEY:-}"
|
||||
_inject_plist_key "$BRIDGE_PLIST" "ELEVENLABS_API_KEY" "${ELEVENLABS_API_KEY:-}"
|
||||
fi
|
||||
|
||||
# Symlink and load each plist
|
||||
for plist in "$LAUNCHD_DIR"/*.plist; do
|
||||
[[ ! -f "$plist" ]] && continue
|
||||
plist_name="$(basename "$plist")"
|
||||
plist_label="${plist_name%.plist}"
|
||||
dest="${AGENTS_DIR}/${plist_name}"
|
||||
|
||||
# Unload if already running
|
||||
launchctl bootout "gui/$(id -u)/${plist_label}" 2>/dev/null || true
|
||||
|
||||
# Symlink source → LaunchAgents
|
||||
ln -sf "$(cd "$(dirname "$plist")" && pwd)/${plist_name}" "$dest"
|
||||
|
||||
# Load
|
||||
launchctl bootstrap "gui/$(id -u)" "$dest" 2>/dev/null && \
|
||||
log_success " ${plist_label} → loaded" || \
|
||||
log_warn " ${plist_label} → failed to load (check: launchctl print gui/$(id -u)/${plist_label})"
|
||||
done
|
||||
fi
|
||||
|
||||
# ─── Smoke test ──────────────────────────────────────────────────────────────
|
||||
log_info "Running smoke tests..."
|
||||
|
||||
sleep 2 # Give services a moment to start
|
||||
|
||||
# Check gateway
|
||||
if curl -sf "http://localhost:8080" -o /dev/null 2>/dev/null; then
|
||||
log_success "OpenClaw gateway responding on :8080"
|
||||
else
|
||||
log_warn "OpenClaw gateway not responding on :8080 — check: tail /tmp/homeai-openclaw.log"
|
||||
fi
|
||||
|
||||
# Check bridge
|
||||
if curl -sf "http://localhost:8081/status" -o /dev/null 2>/dev/null; then
|
||||
log_success "HTTP bridge responding on :8081"
|
||||
else
|
||||
log_warn "HTTP bridge not responding on :8081 — check: tail /tmp/homeai-openclaw-bridge.log"
|
||||
fi
|
||||
|
||||
# ─── Summary ─────────────────────────────────────────────────────────────────
|
||||
print_summary "Agent Setup Complete" \
|
||||
"OpenClaw gateway" "http://localhost:8080" \
|
||||
"HTTP bridge" "http://localhost:8081" \
|
||||
"OpenClaw config" "$OPENCLAW_CONFIG" \
|
||||
"Skills directory" "$SKILLS_DEST" \
|
||||
"Character data" "${DATA_DIR}/characters/" \
|
||||
"Memory data" "${DATA_DIR}/memories/" \
|
||||
"Reminder data" "${DATA_DIR}/reminders.json" \
|
||||
"Gateway log" "/tmp/homeai-openclaw.log" \
|
||||
"Bridge log" "/tmp/homeai-openclaw-bridge.log"
|
||||
|
||||
cat <<'EOF'
|
||||
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ P4: homeai-agent — NOT YET IMPLEMENTED │
|
||||
│ │
|
||||
│ OPEN QUESTION: Which OpenClaw version/fork to use? │
|
||||
│ Decide before implementing. See homeai-agent/PLAN.md. │
|
||||
│ │
|
||||
│ Implementation steps: │
|
||||
│ 1. Install OpenClaw (pip install or git clone) │
|
||||
│ 2. Create ~/.openclaw/config.yaml from config/config.yaml.example │
|
||||
│ 3. Create skills: home_assistant, memory, weather, timer, music│
|
||||
│ 4. Install mem0 + Chroma backend │
|
||||
│ 5. Create systemd/launchd service for OpenClaw (port 8080) │
|
||||
│ 6. Import n8n workflows from workflows/ │
|
||||
│ 7. Smoke test: POST /chat "turn on living room lights" │
|
||||
│ │
|
||||
│ Interface contracts: │
|
||||
│ OPENCLAW_URL=http://localhost:8080 │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
To reload a service after editing its plist:
|
||||
launchctl bootout gui/$(id -u)/com.homeai.<service>
|
||||
launchctl bootstrap gui/$(id -u) ~/Library/LaunchAgents/com.homeai.<service>.plist
|
||||
|
||||
To test the agent:
|
||||
curl -X POST http://localhost:8081/api/agent/message \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"message":"say hello","agent":"main"}'
|
||||
|
||||
EOF
|
||||
|
||||
log_info "P4 is not yet implemented. See homeai-agent/PLAN.md for details."
|
||||
exit 0
|
||||
|
||||
188
homeai-agent/skills/home-assistant/OPENCLAW_INTEGRATION.md
Normal file
188
homeai-agent/skills/home-assistant/OPENCLAW_INTEGRATION.md
Normal file
@@ -0,0 +1,188 @@
|
||||
# OpenClaw Integration for Home Assistant Voice Pipeline
|
||||
|
||||
> This document describes how to integrate OpenClaw with Home Assistant's voice pipeline using the Wyoming protocol.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────┐
|
||||
│ Voice Pipeline Flow │
|
||||
├─────────────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ [Wyoming Satellite] [Home Assistant] [OpenClaw] │
|
||||
│ │ │ │ │
|
||||
│ │ 1. Wake word │ │ │
|
||||
│ │ 2. Stream audio ───────>│ │ │
|
||||
│ │ │ 3. Send to STT │ │
|
||||
│ │ │ ────────────────> │ │
|
||||
│ │ │ │ │
|
||||
│ │ │ 4. Transcript │ │
|
||||
│ │ │ <──────────────── │ │
|
||||
│ │ │ │ │
|
||||
│ │ │ 5. Conversation │ │
|
||||
│ │ │ ────────────────> │ │
|
||||
│ │ │ (via bridge) │ │
|
||||
│ │ │ │ │
|
||||
│ │ │ 6. Response │ │
|
||||
│ │ │ <──────────────── │ │
|
||||
│ │ │ │ │
|
||||
│ │ 7. TTS audio <─────────│ │ │
|
||||
│ │ │ │ │
|
||||
│ [Speaker] │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Components
|
||||
|
||||
### 1. Wyoming Satellite (`com.homeai.wyoming-satellite.plist`)
|
||||
|
||||
- **Port**: 10700 (exposes satellite for HA to connect)
|
||||
- **Function**: Handles audio I/O, wake word detection, streaming to HA
|
||||
- **Audio**: Uses SoX (`rec`/`play`) for macOS audio capture/playback
|
||||
- **Note**: Replaces the old `wakeword_daemon.py` - wake word is now handled by HA's voice pipeline
|
||||
|
||||
### 2. Wyoming STT (`com.homeai.wyoming-stt.plist`)
|
||||
|
||||
- **Port**: 10300 (Whisper large-v3)
|
||||
- **Function**: Speech-to-text transcription
|
||||
|
||||
### 3. Wyoming TTS (`com.homeai.wyoming-tts.plist`)
|
||||
|
||||
- **Port**: 10301 (Kokoro ONNX)
|
||||
- **Function**: Text-to-speech synthesis
|
||||
|
||||
### 4. OpenClaw Bridge (`openclaw_bridge.py`)
|
||||
|
||||
- **Function**: Connects HA conversation agent to OpenClaw CLI
|
||||
- **Usage**: Called via HA `shell_command` or `command_line` integration
|
||||
|
||||
### Deprecated: Wake Word Daemon
|
||||
|
||||
The old `com.homeai.wakeword.plist` service has been **disabled**. It was trying to notify `http://localhost:8080/wake` which doesn't exist in OpenClaw. Wake word detection is now handled by the Wyoming satellite through Home Assistant's voice pipeline.
|
||||
|
||||
## Home Assistant Configuration
|
||||
|
||||
### Step 1: Add Wyoming Protocol Integration
|
||||
|
||||
1. Go to **Settings → Integrations → Add Integration**
|
||||
2. Search for **Wyoming Protocol**
|
||||
3. Add the following services:
|
||||
|
||||
| Service | Host | Port |
|
||||
|---------|------|------|
|
||||
| Speech-to-Text | `10.0.0.199` | `10300` |
|
||||
| Text-to-Speech | `10.0.0.199` | `10301` |
|
||||
| Satellite | `10.0.0.199` | `10700` |
|
||||
|
||||
### Step 2: Configure Voice Assistant Pipeline
|
||||
|
||||
1. Go to **Settings → Voice Assistants**
|
||||
2. Create a new pipeline:
|
||||
- **Name**: "HomeAI with OpenClaw"
|
||||
- **Speech-to-Text**: Wyoming (localhost:10300)
|
||||
- **Conversation Agent**: Home Assistant (or custom below)
|
||||
- **Text-to-Speech**: Wyoming (localhost:10301)
|
||||
|
||||
### Step 3: Add OpenClaw Bridge to HA
|
||||
|
||||
Add to your `configuration.yaml`:
|
||||
|
||||
```yaml
|
||||
shell_command:
|
||||
openclaw_chat: 'python3 /Users/aodhan/gitea/homeai/homeai-agent/skills/home-assistant/openclaw_bridge.py "{{ message }}" --raw'
|
||||
```
|
||||
|
||||
### Step 4: Create Automation for OpenClaw
|
||||
|
||||
Create an automation that routes voice commands to OpenClaw:
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "Voice Command via OpenClaw"
|
||||
trigger:
|
||||
- platform: conversation
|
||||
command:
|
||||
- "ask jarvis *"
|
||||
action:
|
||||
- service: shell_command.openclaw_chat
|
||||
data:
|
||||
message: "{{ trigger.slots.command }}"
|
||||
response_variable: openclaw_response
|
||||
|
||||
- service: tts.speak
|
||||
data:
|
||||
media_player_entity_id: media_player.living_room_speaker
|
||||
message: "{{ openclaw_response }}"
|
||||
```
|
||||
|
||||
## Manual Testing
|
||||
|
||||
### Test STT
|
||||
```bash
|
||||
# Check if STT is running
|
||||
nc -z localhost 10300 && echo "STT OK"
|
||||
```
|
||||
|
||||
### Test TTS
|
||||
```bash
|
||||
# Check if TTS is running
|
||||
nc -z localhost 10301 && echo "TTS OK"
|
||||
```
|
||||
|
||||
### Test Satellite
|
||||
```bash
|
||||
# Check if satellite is running
|
||||
nc -z localhost 10700 && echo "Satellite OK"
|
||||
```
|
||||
|
||||
### Test OpenClaw Bridge
|
||||
```bash
|
||||
# Test the bridge directly
|
||||
python3 homeai-agent/skills/home-assistant/openclaw_bridge.py "Turn on the living room lights"
|
||||
```
|
||||
|
||||
### Test Full Pipeline
|
||||
1. Load all services: `./homeai-voice/scripts/load-all-launchd.sh`
|
||||
2. Open HA Assist panel (Settings → Voice Assistants → Assist)
|
||||
3. Type or speak: "Turn on the study shelves light"
|
||||
4. You should hear the TTS response
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Satellite not connecting to HA
|
||||
- Check that the satellite is running: `launchctl list com.homeai.wyoming-satellite`
|
||||
- Check logs: `tail -f /tmp/homeai-wyoming-satellite.log`
|
||||
- Verify HA can reach the satellite: Test from HA container/host
|
||||
|
||||
### No audio output
|
||||
- Check SoX installation: `which play`
|
||||
- Test SoX directly: `echo "test" | say` or `play /System/Library/Sounds/Glass.aiff`
|
||||
- Check audio device permissions
|
||||
|
||||
### OpenClaw not responding
|
||||
- Verify OpenClaw is running: `pgrep -f openclaw`
|
||||
- Test CLI directly: `openclaw agent --message "Hello" --agent main`
|
||||
- Check OpenClaw config: `cat ~/.openclaw/openclaw.json`
|
||||
|
||||
### Wyoming version conflicts
|
||||
- The satellite requires wyoming 1.4.1 but faster-whisper requires 1.8+
|
||||
- We've patched this - both should work with wyoming 1.8.0
|
||||
- If issues occur, reinstall: `pip install 'wyoming>=1.8' wyoming-satellite`
|
||||
|
||||
## File Locations
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `~/.openclaw/openclaw.json` | OpenClaw configuration |
|
||||
| `~/homeai-voice-env/` | Python virtual environment |
|
||||
| `~/Library/LaunchAgents/com.homeai.*.plist` | Launchd services |
|
||||
| `/tmp/homeai-*.log` | Service logs |
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. [ ] Test voice pipeline end-to-end
|
||||
2. [ ] Fine-tune wake word sensitivity
|
||||
3. [ ] Add custom intents for OpenClaw
|
||||
4. [ ] Implement conversation history/memory
|
||||
5. [ ] Add ESP32 satellite support (P6)
|
||||
60
homeai-agent/skills/home-assistant/SKILL.md
Normal file
60
homeai-agent/skills/home-assistant/SKILL.md
Normal file
@@ -0,0 +1,60 @@
|
||||
---
|
||||
name: home-assistant
|
||||
description: 'Control smart home devices and query Home Assistant. Use when the user asks to control lights, switches, media players, covers, climate, sensors, or any smart home entity. Also use for scenes, scripts, automations, and device state queries. Examples: turn off the living room lights, what is the temperature in the bedroom, play music in the kitchen, is the front door locked.'
|
||||
---
|
||||
|
||||
# Home Assistant Skill
|
||||
|
||||
## Connection
|
||||
- URL: http://10.0.0.199:8123
|
||||
- Token: read from environment `HASS_TOKEN` or file `~/.homeai/hass_token`
|
||||
- API base: `{URL}/api`
|
||||
|
||||
## Common API calls
|
||||
|
||||
**Get all states (entity discovery):**
|
||||
```bash
|
||||
curl -s -H "Authorization: Bearer $HASS_TOKEN" \
|
||||
http://10.0.0.199:8123/api/states | jq '[.[] | {entity_id, state, attributes: .attributes.friendly_name}]'
|
||||
```
|
||||
|
||||
**Get single entity state:**
|
||||
```bash
|
||||
curl -s -H "Authorization: Bearer $HASS_TOKEN" \
|
||||
http://10.0.0.199:8123/api/states/<entity_id>
|
||||
```
|
||||
|
||||
**Call a service (turn on/off, set value, etc.):**
|
||||
```bash
|
||||
curl -s -X POST \
|
||||
-H "Authorization: Bearer $HASS_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"entity_id": "<entity_id>"}' \
|
||||
http://10.0.0.199:8123/api/services/<domain>/<service>
|
||||
```
|
||||
|
||||
## Domain/service reference
|
||||
|
||||
| Domain | Services |
|
||||
|---|---|
|
||||
| light | turn_on, turn_off, toggle (+ brightness, color_temp, rgb_color) |
|
||||
| switch | turn_on, turn_off, toggle |
|
||||
| media_player | media_play, media_pause, media_stop, volume_set, select_source |
|
||||
| cover | open_cover, close_cover, set_cover_position |
|
||||
| climate | set_temperature, set_hvac_mode |
|
||||
| scene | turn_on |
|
||||
| script | turn_on |
|
||||
| input_boolean | turn_on, turn_off, toggle |
|
||||
| homeassistant | turn_on, turn_off (works across domains) |
|
||||
|
||||
## Workflow
|
||||
|
||||
1. If entity ID is unknown, GET /api/states and filter by friendly_name or domain
|
||||
2. Call the appropriate service with the entity_id
|
||||
3. Confirm the action succeeded (HTTP 200 = OK)
|
||||
4. Report back to user in natural language
|
||||
|
||||
## Tips
|
||||
- Entity IDs follow pattern: `<domain>.<name>` e.g. `light.living_room`
|
||||
- For brightness: 0–255 range (255 = max)
|
||||
- Token is long-lived — never regenerate unless asked
|
||||
91
homeai-agent/skills/home-assistant/ha-configuration.yaml
Normal file
91
homeai-agent/skills/home-assistant/ha-configuration.yaml
Normal file
@@ -0,0 +1,91 @@
|
||||
# Home Assistant Configuration for OpenClaw Integration
|
||||
# Add these sections to your configuration.yaml
|
||||
|
||||
# ─── Shell Command Integration ────────────────────────────────────────────────
|
||||
# This allows HA to call OpenClaw via shell commands
|
||||
shell_command:
|
||||
# Send a message to OpenClaw and get response
|
||||
openclaw_chat: '/Users/aodhan/gitea/homeai/homeai-agent/skills/home-assistant/openclaw-bridge.sh "{{ message }}"'
|
||||
|
||||
# ─── REST Command (Alternative) ───────────────────────────────────────────────
|
||||
# If OpenClaw exposes an HTTP API in the future
|
||||
rest_command:
|
||||
openclaw_chat:
|
||||
url: "http://localhost:8080/api/agent/message"
|
||||
method: POST
|
||||
headers:
|
||||
Authorization: "Bearer {{ token }}"
|
||||
content_type: "application/json"
|
||||
payload: '{"message": "{{ message }}", "agent": "main"}'
|
||||
|
||||
# ─── Command Line Sensor ──────────────────────────────────────────────────────
|
||||
# Execute OpenClaw and return the response as a sensor
|
||||
command_line:
|
||||
- sensor:
|
||||
name: "OpenClaw Response"
|
||||
unique_id: openclaw_response
|
||||
command: "/Users/aodhan/gitea/homeai/homeai-agent/skills/home-assistant/openclaw-bridge.sh '{{ states(\"input_text.openclaw_query\") }}'"
|
||||
value_template: "{{ value_json.response }}"
|
||||
scan_interval: 86400 # Only update when triggered
|
||||
|
||||
# ─── Input Text for Query ─────────────────────────────────────────────────────
|
||||
input_text:
|
||||
openclaw_query:
|
||||
name: OpenClaw Query
|
||||
initial: ""
|
||||
max: 255
|
||||
|
||||
# ─── Conversation Agent Integration ────────────────────────────────────────────
|
||||
# Custom conversation agent using OpenClaw
|
||||
# This requires the custom conversation agent below
|
||||
|
||||
# ─── Intent Script ─────────────────────────────────────────────────────────────
|
||||
intent_script:
|
||||
# Handle conversation intents
|
||||
OpenClawConversation:
|
||||
speech:
|
||||
text: "{{ response }}"
|
||||
action:
|
||||
- service: shell_command.openclaw_chat
|
||||
data:
|
||||
message: "{{ text }}"
|
||||
response_variable: openclaw_result
|
||||
      - variables:
          response: "{{ openclaw_result }}"
|
||||
|
||||
# ─── Automation: Voice Pipeline with OpenClaw ─────────────────────────────────
|
||||
automation:
|
||||
- alias: "Voice Command via OpenClaw"
|
||||
trigger:
|
||||
- platform: conversation
|
||||
command:
|
||||
- "ask jarvis *"
|
||||
action:
|
||||
- service: shell_command.openclaw_chat
|
||||
data:
|
||||
message: "{{ trigger.slots.command }}"
|
||||
response_variable: openclaw_response
|
||||
|
||||
- service: tts.speak
|
||||
data:
|
||||
media_player_entity_id: media_player.living_room_speaker
|
||||
message: "{{ openclaw_response }}"
|
||||
|
||||
# ─── Wyoming Protocol Configuration ───────────────────────────────────────────
|
||||
# Configure in HA UI:
|
||||
# 1. Settings → Integrations → Add Integration → Wyoming Protocol
|
||||
# 2. Add STT: host=10.0.0.199, port=10300
|
||||
# 3. Add TTS: host=10.0.0.199, port=10301
|
||||
# 4. Add Satellite: host=10.0.0.199, port=10700
|
||||
|
||||
# ─── Voice Assistant Pipeline ─────────────────────────────────────────────────
|
||||
# Configure in HA UI:
|
||||
# 1. Settings → Voice Assistants → Add Pipeline
|
||||
# 2. Name: "HomeAI with OpenClaw"
|
||||
# 3. Speech-to-Text: Wyoming (localhost:10300)
|
||||
# 4. Conversation Agent: Use the automation above OR Home Assistant
|
||||
# 5. Text-to-Speech: Wyoming (localhost:10301)
|
||||
|
||||
# ─── Custom Conversation Agent (Advanced) ─────────────────────────────────────
|
||||
# Create a custom component in custom_components/openclaw_conversation/
|
||||
# See: custom_components/openclaw_conversation/__init__.py
|
||||
188
homeai-agent/skills/home-assistant/ha-ctl
Executable file
188
homeai-agent/skills/home-assistant/ha-ctl
Executable file
@@ -0,0 +1,188 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
ha-ctl — Home Assistant control CLI for OpenClaw agent.
|
||||
|
||||
Usage:
|
||||
ha-ctl list [domain] List entities (optionally filtered by domain)
|
||||
ha-ctl state <entity_id_or_name> Get current state of an entity
|
||||
ha-ctl on <entity_id_or_name> Turn entity on
|
||||
ha-ctl off <entity_id_or_name> Turn entity off
|
||||
ha-ctl toggle <entity_id_or_name> Toggle entity
|
||||
ha-ctl set <entity_id> <attr> <val> Set attribute (e.g. brightness 128)
|
||||
ha-ctl scene <scene_name> Activate a scene
|
||||
|
||||
Environment:
|
||||
HASS_TOKEN Long-lived access token
|
||||
HA_URL Base URL (default: https://10.0.0.199:8123)
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import urllib.request
|
||||
import urllib.error
|
||||
import ssl
|
||||
|
||||
# ─── Configuration ────────────────────────────────────────────────────────────
# Base URL comes from the environment with a LAN default. The token is
# resolved from env vars first, then from a token file on disk; the script
# aborts immediately if neither source yields one.
HA_URL = os.environ.get("HA_URL", "https://10.0.0.199:8123").rstrip("/")


def _load_token():
    # Prefer environment variables; fall back to ~/.homeai/hass_token.
    for var in ("HASS_TOKEN", "HA_TOKEN"):
        value = os.environ.get(var)
        if value:
            return value
    token_file = os.path.expanduser("~/.homeai/hass_token")
    if os.path.exists(token_file):
        with open(token_file) as f:
            return f.read().strip()
    return None


TOKEN = _load_token()

if not TOKEN:
    print("ERROR: No HASS_TOKEN set. Export HASS_TOKEN or write to ~/.homeai/hass_token", file=sys.stderr)
    sys.exit(1)

# Local HA commonly runs with a self-signed certificate, so certificate
# verification is disabled for this client.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
|
||||
|
||||
|
||||
def api(method, path, data=None):
    """Issue an authenticated request against the Home Assistant REST API.

    Args:
        method: HTTP verb, e.g. "GET" or "POST".
        path:   Path below /api, e.g. "/states".
        data:   Optional dict; JSON-encoded as the request body.

    Returns the decoded JSON response. On an HTTP error the status and
    body are printed to stderr and the process exits with status 1.
    """
    payload = None if data is None else json.dumps(data).encode()
    request = urllib.request.Request(
        f"{HA_URL}/api{path}",
        data=payload,
        headers={
            "Authorization": f"Bearer {TOKEN}",
            "Content-Type": "application/json",
        },
        method=method,
    )
    try:
        with urllib.request.urlopen(request, context=ctx, timeout=10) as resp:
            return json.loads(resp.read())
    except urllib.error.HTTPError as e:
        print(f"ERROR: HTTP {e.code} — {e.read().decode()}", file=sys.stderr)
        sys.exit(1)
|
||||
|
||||
|
||||
def get_states():
    """Fetch the full entity state list from GET /api/states."""
    return api("GET", "/states")
|
||||
|
||||
|
||||
def resolve_entity(query, states=None):
    """Resolve a friendly name or (partial) entity_id to a full entity_id.

    Args:
        query:  Entity id ("light.living_room"), friendly name
                ("Living Room Lamp"), or a fragment of either.
        states: Optional pre-fetched state list; fetched via get_states()
                when omitted.

    Matching order (first hit wins):
      1. exact entity_id
      2. exact friendly-name (case-insensitive)
      3. partial entity_id (case-insensitive; spaces/underscores equivalent)
      4. partial friendly-name — must be unambiguous

    Exits with status 1 when nothing matches, or when a partial
    friendly-name match is ambiguous.
    """
    if states is None:
        states = get_states()
    # Space form for friendly-name comparisons ...
    query_lower = query.lower().replace("_", " ")
    # ... and underscore form for entity_id comparisons. Bug fix: entity_ids
    # contain underscores, so comparing the space form against them (as the
    # old code did) could never match queries like "living_room".
    query_id = query_lower.replace(" ", "_")
    # 1. Exact entity_id match
    for s in states:
        if s["entity_id"] == query:
            return s["entity_id"]
    # 2. Exact friendly-name match
    for s in states:
        name = s.get("attributes", {}).get("friendly_name", "").lower()
        if name == query_lower:
            return s["entity_id"]
    # 3. Partial entity_id match
    for s in states:
        if query_id in s["entity_id"].lower():
            return s["entity_id"]
    # 4. Partial friendly-name match (must be unique to be accepted)
    matches = []
    for s in states:
        name = s.get("attributes", {}).get("friendly_name", "").lower()
        if query_lower in name:
            matches.append(s)
    if len(matches) == 1:
        return matches[0]["entity_id"]
    if len(matches) > 1:
        names = [f"{m['entity_id']} ({m['attributes'].get('friendly_name','')})" for m in matches]
        print(f"Ambiguous: {', '.join(names)}", file=sys.stderr)
        sys.exit(1)
    print(f"ERROR: No entity found matching '{query}'", file=sys.stderr)
    sys.exit(1)
|
||||
|
||||
|
||||
def call_service(domain, service, entity_id, extra=None):
    """POST /api/services/<domain>/<service> targeting one entity.

    `extra` (optional dict) is merged into the service payload,
    e.g. {"brightness": 128}. Returns the API response.
    """
    payload = {"entity_id": entity_id, **(extra or {})}
    return api("POST", f"/services/{domain}/{service}", payload)
|
||||
|
||||
|
||||
def cmd_list(args):
    """Print entities as "<entity_id>\\t<state>\\t<friendly name>" lines.

    An optional first argument restricts output to a single domain,
    e.g. `ha-ctl list light`.
    """
    states = get_states()
    if args:
        prefix = args[0] + "."
        states = [s for s in states if s["entity_id"].startswith(prefix)]
    for entry in sorted(states, key=lambda s: s["entity_id"]):
        friendly = entry.get("attributes", {}).get("friendly_name", "")
        print(f"{entry['entity_id']}\t{entry['state']}\t{friendly}")
|
||||
|
||||
|
||||
def cmd_state(args):
    """Print the state and all attributes of a single entity."""
    if not args:
        print("Usage: ha-ctl state <entity>", file=sys.stderr)
        sys.exit(1)
    states = get_states()
    eid = resolve_entity(args[0], states)
    entity = next(x for x in states if x["entity_id"] == eid)
    print(f"Entity: {eid}")
    print(f"State: {entity['state']}")
    for key, value in entity.get("attributes", {}).items():
        print(f"  {key}: {value}")
|
||||
|
||||
|
||||
def cmd_control(action, args):
    """Turn an entity on/off or toggle it; `action` is "on"|"off"|"toggle"."""
    if not args:
        print(f"Usage: ha-ctl {action} <entity>", file=sys.stderr)
        sys.exit(1)
    states = get_states()
    eid = resolve_entity(args[0], states)
    service = {"on": "turn_on", "off": "turn_off", "toggle": "toggle"}[action]
    # Domain is the entity_id prefix, e.g. "light" from "light.kitchen".
    call_service(eid.split(".")[0], service, eid)
    # Prefer the friendly name in the confirmation message when available.
    name = eid
    for entry in states:
        if entry["entity_id"] == eid:
            name = entry.get("attributes", {}).get("friendly_name", eid)
            break
    print(f"OK: {service} → {name} ({eid})")
|
||||
|
||||
|
||||
def cmd_scene(args):
    """Activate a scene by name or entity_id."""
    if not args:
        print("Usage: ha-ctl scene <name>", file=sys.stderr)
        sys.exit(1)
    eid = resolve_entity(args[0], get_states())
    call_service("scene", "turn_on", eid)
    print(f"OK: scene activated → {eid}")
|
||||
|
||||
|
||||
def cmd_set(args):
    """Set an attribute via turn_on, e.g. `ha-ctl set lamp brightness 128`.

    The value is coerced to int, then float, falling back to the raw
    string. NOTE(review): this always calls <domain>.turn_on with the
    attribute in the payload, which fits light/fan-style attributes;
    domains such as climate use dedicated set_* services instead —
    confirm before relying on this for those domains.
    """
    if len(args) < 3:
        print("Usage: ha-ctl set <entity> <attribute> <value>", file=sys.stderr)
        sys.exit(1)
    states = get_states()
    eid = resolve_entity(args[0], states)
    attr = args[1]
    value = args[2]
    # Numeric coercion: int first, then float; keep the string otherwise.
    for cast in (int, float):
        try:
            value = cast(value)
            break
        except ValueError:
            continue
    call_service(eid.split(".")[0], "turn_on", eid, {attr: value})
    print(f"OK: set {attr}={value} → {eid}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    argv = sys.argv[1:]
    if not argv:
        # No arguments: print the usage text from the module docstring.
        print(__doc__)
        sys.exit(0)

    cmd, rest = argv[0], argv[1:]
    if cmd in ("on", "off", "toggle"):
        cmd_control(cmd, rest)
    else:
        # Remaining subcommands dispatch through a handler table.
        handlers = {
            "list": cmd_list,
            "state": cmd_state,
            "scene": cmd_scene,
            "set": cmd_set,
        }
        handler = handlers.get(cmd)
        if handler is None:
            print(f"Unknown command: {cmd}", file=sys.stderr)
            print(__doc__)
            sys.exit(1)
        handler(rest)
|
||||
28
homeai-agent/skills/home-assistant/openclaw-bridge.sh
Normal file
28
homeai-agent/skills/home-assistant/openclaw-bridge.sh
Normal file
@@ -0,0 +1,28 @@
|
||||
#!/usr/bin/env bash
# OpenClaw Bridge Script for Home Assistant
#
# Usage: ./openclaw-bridge.sh "message to send to OpenClaw"
# Returns: JSON response suitable for HA TTS

set -euo pipefail

MESSAGE="${1:-}"
AGENT="${2:-main}"
# NOTE(review): TIMEOUT is accepted for interface compatibility but is not
# currently enforced on the openclaw invocation below — confirm intent.
TIMEOUT="${3:-30}"

if [[ -z "$MESSAGE" ]]; then
  echo '{"error": "No message provided"}' >&2
  exit 1
fi

# Run the OpenClaw agent; the CLI prints the reply on stdout. A failed
# invocation is mapped to a speakable error string instead of aborting.
RESPONSE=$(openclaw agent --message "$MESSAGE" --agent "$AGENT" 2>/dev/null || echo "Error: OpenClaw command failed")

# Emit HA-friendly JSON. jq handles escaping when available; otherwise
# fall back to Python's json module.
if command -v jq &>/dev/null; then
  echo "$RESPONSE" | jq -Rs '{response: .}'
else
  python3 -c "import json,sys; print(json.dumps({'response': sys.stdin.read()}))" <<< "$RESPONSE"
fi
|
||||
92
homeai-agent/skills/home-assistant/openclaw_bridge.py
Normal file
92
homeai-agent/skills/home-assistant/openclaw_bridge.py
Normal file
@@ -0,0 +1,92 @@
|
||||
#!/usr/bin/env python3
|
||||
"""OpenClaw Bridge for Home Assistant
|
||||
|
||||
This script acts as a bridge between Home Assistant and OpenClaw.
|
||||
It can be called from HA via shell_command or command_line integration.
|
||||
|
||||
Usage:
|
||||
python openclaw_bridge.py "Your message here"
|
||||
|
||||
Output:
|
||||
{"response": "OpenClaw's response text"}
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def load_character_prompt() -> str:
    """Load the active character's system prompt from disk.

    Reads ~/.openclaw/characters/aria.json and returns its
    "system_prompt" field. Returns "" when the file is missing,
    unreadable, or malformed.
    """
    character_path = Path.home() / ".openclaw" / "characters" / "aria.json"
    if not character_path.exists():
        return ""
    try:
        data = json.loads(character_path.read_text())
        return data.get("system_prompt", "")
    except Exception:
        # Best-effort: a broken character file must not break the bridge.
        return ""
|
||||
|
||||
|
||||
def call_openclaw(message: str, agent: str = "main", timeout: int = 30) -> str:
    """Run the OpenClaw CLI with `message` and return its reply text.

    The active character's system prompt (if any) is prepended to the
    message before the call. Every failure mode is reported as an
    "Error: ..." string rather than an exception, so Home Assistant
    always receives a speakable response.
    """
    # Inject the persona's system prompt when one is configured.
    system_prompt = load_character_prompt()
    if system_prompt:
        message = f"System Context: {system_prompt}\n\nUser Request: {message}"

    cmd = ["openclaw", "agent", "--message", message, "--agent", agent]
    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=timeout,
        )
    except subprocess.TimeoutExpired:
        return "Error: OpenClaw command timed out"
    except FileNotFoundError:
        return "Error: openclaw command not found. Is OpenClaw installed?"
    except Exception as e:
        return f"Error: {str(e)}"

    if result.returncode != 0:
        return f"Error: OpenClaw failed with code {result.returncode}"
    # stdout carries the response text.
    return result.stdout.strip()
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, query OpenClaw, print the reply."""
    parser = argparse.ArgumentParser(
        description="Bridge between Home Assistant and OpenClaw"
    )
    parser.add_argument("message", help="Message to send to OpenClaw")
    parser.add_argument(
        "--agent", default="main", help="Agent to use (default: main)"
    )
    parser.add_argument(
        "--timeout", type=int, default=30, help="Timeout in seconds (default: 30)"
    )
    parser.add_argument(
        "--raw", action="store_true", help="Output raw text instead of JSON"
    )
    args = parser.parse_args()

    response = call_openclaw(args.message, args.agent, args.timeout)

    if args.raw:
        print(response)
    else:
        # JSON envelope so HA templates can read value_json.response.
        print(json.dumps({"response": response}))


if __name__ == "__main__":
    main()
|
||||
37
homeai-agent/skills/voice-assistant/SKILL.md
Normal file
37
homeai-agent/skills/voice-assistant/SKILL.md
Normal file
@@ -0,0 +1,37 @@
|
||||
---
|
||||
name: voice-assistant
|
||||
description: Handle voice assistant requests received via the wake-word pipeline. Use when a request arrives tagged as a voice command or comes through the wake-word webhook (/wake endpoint). Respond concisely — responses will be spoken aloud via TTS (Kokoro). Avoid markdown, lists, or formatting that doesn't work in speech. Keep replies to 1–2 sentences unless detail is requested.
|
||||
---
|
||||
|
||||
# Voice Assistant Skill
|
||||
|
||||
## Context
|
||||
|
||||
This assistant runs on a Mac Mini (LINDBLUM, 10.0.0.200). Requests may arrive:
|
||||
- Via the `/wake` HTTP webhook (wake word detected by openWakeWord)
|
||||
- Via Home Assistant Wyoming voice pipeline
|
||||
- Via direct text input
|
||||
|
||||
## Response style for voice
|
||||
|
||||
- Speak naturally, as if in conversation
|
||||
- Keep it short — 1–2 sentences by default
|
||||
- No bullet points, headers, or markdown
|
||||
- Say numbers as words when appropriate ("twenty-two degrees" not "22°C")
|
||||
- Use the character's personality (defined in system prompt)
|
||||
|
||||
## TTS pipeline
|
||||
|
||||
Responses are rendered by Kokoro ONNX (port 10301, voice: af_heart) and played back through the requesting room's speaker.
|
||||
|
||||
## Smart home integration
|
||||
|
||||
For device control requests, use the `home-assistant` skill. HA is at 10.0.0.199:8123.
|
||||
|
||||
## Wake word webhook
|
||||
|
||||
POST to `http://localhost:8080/wake` triggers this context:
|
||||
```json
|
||||
{"wake_word": "hey_jarvis", "score": 0.87}
|
||||
```
|
||||
After wake, wait for the transcribed utterance from the STT pipeline (Whisper large-v3, port 10300).
|
||||
36
homeai-agent/test_mem0.py
Normal file
36
homeai-agent/test_mem0.py
Normal file
@@ -0,0 +1,36 @@
|
||||
import os

from mem0 import Memory

# Smoke test for the local-only mem0 stack: Chroma for vector storage,
# Ollama (localhost:11434) for both the extraction LLM and the embedder.
config = {
    "vector_store": {
        "provider": "chroma",
        "config": {
            "collection_name": "homeai_memory",
            # Persist vectors alongside the OpenClaw memory data.
            "path": os.path.expanduser("~/.openclaw/memory/chroma/"),
        }
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "qwen2.5:7b",
            "ollama_base_url": "http://localhost:11434",
        }
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text",
            "ollama_base_url": "http://localhost:11434",
        }
    }
}

m = Memory.from_config(config)

# Test storing a memory
result = m.add("The user's favorite color is blue.", user_id="aodhan")
print(f"Store result: {result}")

# Test searching for the memory
search_results = m.search("What is the user's favorite color?", user_id="aodhan")
print(f"Search results: {search_results}")
|
||||
114
homeai-agent/workflows/morning-briefing.json
Normal file
114
homeai-agent/workflows/morning-briefing.json
Normal file
@@ -0,0 +1,114 @@
|
||||
{
|
||||
"nodes": [
|
||||
{
|
||||
"parameters": {
|
||||
"rule": {
|
||||
"interval": [
|
||||
{
|
||||
"field": "hours",
|
||||
"minutes": 30,
|
||||
"hours": 7
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"id": "6e8b8c8a-8c8a-4c8a-8c8a-8c8a8c8a8c8a",
|
||||
"name": "Schedule Trigger",
|
||||
"type": "n8n-nodes-base.scheduleTrigger",
|
||||
"typeVersion": 1,
|
||||
"position": [
|
||||
250,
|
||||
300
|
||||
]
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"method": "GET",
|
||||
"url": "http://10.0.0.199:8123/api/states/weather.home",
|
||||
"authentication": "predefinedCredentialType",
|
||||
"nodeCredentialType": "homeAssistantApi",
|
||||
"options": {}
|
||||
},
|
||||
"id": "7f9c9d9b-9d9b-5d9b-9d9b-9d9b9d9b9d9b",
|
||||
"name": "Fetch Weather",
|
||||
"type": "n8n-nodes-base.httpRequest",
|
||||
"typeVersion": 4.1,
|
||||
"position": [
|
||||
450,
|
||||
300
|
||||
]
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"jsCode": "const weather = $node[\"Fetch Weather\"].json;\nconst temp = weather.attributes.temperature;\nconst condition = weather.state;\nconst text = `Good morning! The current weather is ${condition} with a temperature of ${temp} degrees. Have a great day!`;\nreturn { briefing: text };"
|
||||
},
|
||||
"id": "8a0d0e0c-0e0c-6e0c-0e0c-0e0c0e0c0e0c",
|
||||
"name": "Compose Briefing",
|
||||
"type": "n8n-nodes-base.code",
|
||||
"typeVersion": 2,
|
||||
"position": [
|
||||
650,
|
||||
300
|
||||
]
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"method": "POST",
|
||||
"url": "http://localhost:8080/speak",
|
||||
"sendBody": true,
|
||||
"bodyParameters": {
|
||||
"parameters": [
|
||||
{
|
||||
"name": "text",
|
||||
"value": "={{ $json.briefing }}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"options": {}
|
||||
},
|
||||
"id": "9b1e1f1d-1f1d-7f1d-1f1d-1f1d1f1d1f1d",
|
||||
"name": "POST to OpenClaw",
|
||||
"type": "n8n-nodes-base.httpRequest",
|
||||
"typeVersion": 4.1,
|
||||
"position": [
|
||||
850,
|
||||
300
|
||||
]
|
||||
}
|
||||
],
|
||||
"connections": {
|
||||
"Schedule Trigger": {
|
||||
"main": [
|
||||
[
|
||||
{
|
||||
"node": "Fetch Weather",
|
||||
"type": "main",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
},
|
||||
"Fetch Weather": {
|
||||
"main": [
|
||||
[
|
||||
{
|
||||
"node": "Compose Briefing",
|
||||
"type": "main",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
},
|
||||
"Compose Briefing": {
|
||||
"main": [
|
||||
[
|
||||
{
|
||||
"node": "POST to OpenClaw",
|
||||
"type": "main",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
107
homeai-agent/workflows/notification-router.json
Normal file
107
homeai-agent/workflows/notification-router.json
Normal file
@@ -0,0 +1,107 @@
|
||||
{
|
||||
"nodes": [
|
||||
{
|
||||
"parameters": {
|
||||
"httpMethod": "POST",
|
||||
"path": "ha-notification",
|
||||
"options": {}
|
||||
},
|
||||
"id": "a1b2c3d4-e5f6-4a5b-8c9d-0e1f2a3b4c5d",
|
||||
"name": "HA Webhook",
|
||||
"type": "n8n-nodes-base.webhook",
|
||||
"typeVersion": 1,
|
||||
"position": [
|
||||
250,
|
||||
300
|
||||
]
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"conditions": {
|
||||
"string": [
|
||||
{
|
||||
"value1": "={{ $json.body.urgency }}",
|
||||
"value2": "high"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"id": "b2c3d4e5-f6a7-5b6c-9d0e-1f2a3b4c5d6e",
|
||||
"name": "Classify Urgency",
|
||||
"type": "n8n-nodes-base.if",
|
||||
"typeVersion": 1,
|
||||
"position": [
|
||||
450,
|
||||
300
|
||||
]
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"method": "POST",
|
||||
"url": "http://localhost:8080/speak",
|
||||
"sendBody": true,
|
||||
"bodyParameters": {
|
||||
"parameters": [
|
||||
{
|
||||
"name": "text",
|
||||
"value": "={{ $json.body.message }}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"options": {}
|
||||
},
|
||||
"id": "c3d4e5f6-a7b8-6c7d-0e1f-2a3b4c5d6e7f",
|
||||
"name": "TTS Immediately",
|
||||
"type": "n8n-nodes-base.httpRequest",
|
||||
"typeVersion": 4.1,
|
||||
"position": [
|
||||
700,
|
||||
200
|
||||
]
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"jsCode": "return { status: 'queued', message: $json.body.message };"
|
||||
},
|
||||
"id": "d4e5f6a7-b8c9-7d8e-1f2a-3b4c5d6e7f8a",
|
||||
"name": "Queue Notification",
|
||||
"type": "n8n-nodes-base.code",
|
||||
"typeVersion": 2,
|
||||
"position": [
|
||||
700,
|
||||
400
|
||||
]
|
||||
}
|
||||
],
|
||||
"connections": {
|
||||
"HA Webhook": {
|
||||
"main": [
|
||||
[
|
||||
{
|
||||
"node": "Classify Urgency",
|
||||
"type": "main",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
},
|
||||
"Classify Urgency": {
|
||||
"main": [
|
||||
[
|
||||
{
|
||||
"node": "TTS Immediately",
|
||||
"type": "main",
|
||||
"index": 0
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"node": "Queue Notification",
|
||||
"type": "main",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
24
homeai-character/.gitignore
vendored
Normal file
24
homeai-character/.gitignore
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
# Logs
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
lerna-debug.log*
|
||||
|
||||
node_modules
|
||||
dist
|
||||
dist-ssr
|
||||
*.local
|
||||
|
||||
# Editor directories and files
|
||||
.vscode/*
|
||||
!.vscode/extensions.json
|
||||
.idea
|
||||
.DS_Store
|
||||
*.suo
|
||||
*.ntvs*
|
||||
*.njsproj
|
||||
*.sln
|
||||
*.sw?
|
||||
@@ -1,300 +0,0 @@
|
||||
# P5: homeai-character — Character System & Persona Config
|
||||
|
||||
> Phase 3 | No hard runtime dependencies | Consumed by: P3, P4, P7
|
||||
|
||||
---
|
||||
|
||||
## Goal
|
||||
|
||||
A single, authoritative character configuration that defines the AI assistant's personality, voice, visual expressions, and prompt rules. The Character Manager UI (already started as `character-manager.jsx`) provides a friendly editor. The exported JSON is the single source of truth for all pipeline components.
|
||||
|
||||
---
|
||||
|
||||
## Character JSON Schema v1
|
||||
|
||||
File: `schema/character.schema.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "HomeAI Character Config",
|
||||
"version": "1",
|
||||
"type": "object",
|
||||
"required": ["schema_version", "name", "system_prompt", "tts"],
|
||||
"properties": {
|
||||
"schema_version": { "type": "integer", "const": 1 },
|
||||
"name": { "type": "string" },
|
||||
"display_name": { "type": "string" },
|
||||
"description": { "type": "string" },
|
||||
|
||||
"system_prompt": { "type": "string" },
|
||||
|
||||
"model_overrides": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"primary": { "type": "string" },
|
||||
"fast": { "type": "string" }
|
||||
}
|
||||
},
|
||||
|
||||
"tts": {
|
||||
"type": "object",
|
||||
"required": ["engine"],
|
||||
"properties": {
|
||||
"engine": {
|
||||
"type": "string",
|
||||
"enum": ["kokoro", "chatterbox", "qwen3"]
|
||||
},
|
||||
"voice_ref_path": { "type": "string" },
|
||||
"kokoro_voice": { "type": "string" },
|
||||
"speed": { "type": "number", "default": 1.0 }
|
||||
}
|
||||
},
|
||||
|
||||
"live2d_expressions": {
|
||||
"type": "object",
|
||||
"description": "Maps semantic state to VTube Studio hotkey ID",
|
||||
"properties": {
|
||||
"idle": { "type": "string" },
|
||||
"listening": { "type": "string" },
|
||||
"thinking": { "type": "string" },
|
||||
"speaking": { "type": "string" },
|
||||
"happy": { "type": "string" },
|
||||
"sad": { "type": "string" },
|
||||
"surprised": { "type": "string" },
|
||||
"error": { "type": "string" }
|
||||
}
|
||||
},
|
||||
|
||||
"vtube_ws_triggers": {
|
||||
"type": "object",
|
||||
"description": "VTube Studio WebSocket actions keyed by event name",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": { "type": "string", "enum": ["hotkey", "parameter"] },
|
||||
"id": { "type": "string" },
|
||||
"value": { "type": "number" }
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"custom_rules": {
|
||||
"type": "array",
|
||||
"description": "Trigger/response overrides for specific contexts",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"trigger": { "type": "string" },
|
||||
"response": { "type": "string" },
|
||||
"condition": { "type": "string" }
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"notes": { "type": "string" }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Default Character: `aria.json`
|
||||
|
||||
File: `characters/aria.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"schema_version": 1,
|
||||
"name": "aria",
|
||||
"display_name": "Aria",
|
||||
"description": "Default HomeAI assistant persona",
|
||||
|
||||
"system_prompt": "You are Aria, a warm, curious, and helpful AI assistant living in the home. You speak naturally and conversationally — never robotic. You are knowledgeable but never condescending. You remember the people you live with and build on those memories over time. Keep responses concise when controlling smart home devices; be more expressive in casual conversation. Never break character.",
|
||||
|
||||
"model_overrides": {
|
||||
"primary": "llama3.3:70b",
|
||||
"fast": "qwen2.5:7b"
|
||||
},
|
||||
|
||||
"tts": {
|
||||
"engine": "kokoro",
|
||||
"kokoro_voice": "af_heart",
|
||||
"voice_ref_path": null,
|
||||
"speed": 1.0
|
||||
},
|
||||
|
||||
"live2d_expressions": {
|
||||
"idle": "expr_idle",
|
||||
"listening": "expr_listening",
|
||||
"thinking": "expr_thinking",
|
||||
"speaking": "expr_speaking",
|
||||
"happy": "expr_happy",
|
||||
"sad": "expr_sad",
|
||||
"surprised": "expr_surprised",
|
||||
"error": "expr_error"
|
||||
},
|
||||
|
||||
"vtube_ws_triggers": {
|
||||
"thinking": { "type": "hotkey", "id": "expr_thinking" },
|
||||
"speaking": { "type": "hotkey", "id": "expr_speaking" },
|
||||
"idle": { "type": "hotkey", "id": "expr_idle" }
|
||||
},
|
||||
|
||||
"custom_rules": [
|
||||
{
|
||||
"trigger": "good morning",
|
||||
"response": "Good morning! How did you sleep?",
|
||||
"condition": "time_of_day == morning"
|
||||
}
|
||||
],
|
||||
|
||||
"notes": "Default persona. Voice clone to be added once reference audio recorded."
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Character Manager UI
|
||||
|
||||
### Status
|
||||
|
||||
`character-manager.jsx` already exists — needs:
|
||||
1. Schema validation before export (reject malformed JSONs)
|
||||
2. File system integration: save/load from `characters/` directory
|
||||
3. Live preview of system prompt
|
||||
4. Expression mapping UI for Live2D states
|
||||
|
||||
### Tech Stack
|
||||
|
||||
- React + Vite (local dev server, not deployed)
|
||||
- Tailwind CSS (or minimal CSS)
|
||||
- Runs at `http://localhost:5173` during editing
|
||||
|
||||
### File Structure
|
||||
|
||||
```
|
||||
homeai-character/
|
||||
├── src/
|
||||
│ ├── character-manager.jsx ← existing, extend here
|
||||
│ ├── SchemaValidator.js ← validate against character.schema.json
|
||||
│ ├── ExpressionMapper.jsx ← UI for Live2D expression mapping
|
||||
│ └── main.jsx
|
||||
├── schema/
|
||||
│ └── character.schema.json
|
||||
├── characters/
|
||||
│ ├── aria.json ← default character
|
||||
│ └── .gitkeep
|
||||
├── package.json
|
||||
└── vite.config.js
|
||||
```
|
||||
|
||||
### Character Manager Features
|
||||
|
||||
| Feature | Description |
|
||||
|---|---|
|
||||
| Basic info | name, display name, description |
|
||||
| System prompt | Multi-line editor with char count |
|
||||
| Model overrides | Dropdown: primary + fast model |
|
||||
| TTS config | Engine picker, voice selector, speed slider, voice ref path |
|
||||
| Expression mapping | Table: state → VTube hotkey ID |
|
||||
| VTube WS triggers | JSON editor for advanced triggers |
|
||||
| Custom rules | Add/edit/delete trigger-response pairs |
|
||||
| Notes | Free-text notes field |
|
||||
| Export | Validates schema, writes to `characters/<name>.json` |
|
||||
| Import | Load existing character JSON for editing |
|
||||
|
||||
### Schema Validation
|
||||
|
||||
```javascript
|
||||
import Ajv from 'ajv'
|
||||
import schema from '../schema/character.schema.json'
|
||||
|
||||
const ajv = new Ajv()
|
||||
const validate = ajv.compile(schema)
|
||||
|
||||
export function validateCharacter(config) {
|
||||
const valid = validate(config)
|
||||
if (!valid) throw new Error(ajv.errorsText(validate.errors))
|
||||
return true
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Voice Clone Workflow
|
||||
|
||||
1. Record 30–60 seconds of clean speech at `~/voices/<name>-raw.wav`
|
||||
- Quiet room, consistent mic distance, natural conversational tone
|
||||
2. Pre-process: `ffmpeg -i raw.wav -ar 22050 -ac 1 aria.wav`
|
||||
3. Place at `~/voices/aria.wav`
|
||||
4. Update character JSON: `"voice_ref_path": "~/voices/aria.wav"`, `"engine": "chatterbox"`
|
||||
5. Test: run Chatterbox with the reference, verify voice quality
|
||||
6. If unsatisfactory, try Qwen3-TTS as alternative
|
||||
|
||||
---
|
||||
|
||||
## Pipeline Integration
|
||||
|
||||
### How P4 (OpenClaw) loads the character
|
||||
|
||||
```python
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
def load_character(name: str) -> dict:
|
||||
path = Path.home() / ".openclaw" / "characters" / f"{name}.json"
|
||||
config = json.loads(path.read_text())
|
||||
assert config["schema_version"] == 1, "Unsupported schema version"
|
||||
return config
|
||||
|
||||
# System prompt injection
|
||||
character = load_character("aria")
|
||||
system_prompt = character["system_prompt"]
|
||||
# Pass to Ollama as system message
|
||||
```
|
||||
|
||||
OpenClaw hot-reloads the character JSON on file change — no restart required.
|
||||
|
||||
### How P3 selects TTS engine
|
||||
|
||||
```python
|
||||
character = load_character(active_name)
|
||||
tts_cfg = character["tts"]
|
||||
|
||||
if tts_cfg["engine"] == "chatterbox":
|
||||
tts = ChatterboxTTS(voice_ref=tts_cfg["voice_ref_path"])
|
||||
elif tts_cfg["engine"] == "qwen3":
|
||||
tts = Qwen3TTS()
|
||||
else: # kokoro (default)
|
||||
tts = KokoroWyomingClient(voice=tts_cfg.get("kokoro_voice", "af_heart"))
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
- [ ] Define and write `schema/character.schema.json` (v1)
|
||||
- [ ] Write `characters/aria.json` — default character with placeholder expression IDs
|
||||
- [ ] Set up Vite project in `src/` (install deps: `npm install`)
|
||||
- [ ] Integrate existing `character-manager.jsx` into new Vite project
|
||||
- [ ] Add schema validation on export (`ajv`)
|
||||
- [ ] Add expression mapping UI section
|
||||
- [ ] Add custom rules editor
|
||||
- [ ] Test full edit → export → validate → load cycle
|
||||
- [ ] Record or source voice reference audio for Aria
|
||||
- [ ] Pre-process audio and test with Chatterbox
|
||||
- [ ] Update `aria.json` with voice clone path if quality is good
|
||||
- [ ] Write `SchemaValidator.js` as standalone utility (used by P4 at runtime too)
|
||||
- [ ] Document schema in `schema/README.md`
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [ ] `aria.json` validates against `character.schema.json` without errors
|
||||
- [ ] Character Manager UI can load, edit, and export `aria.json`
|
||||
- [ ] OpenClaw loads `aria.json` system prompt and applies it to Ollama requests
|
||||
- [ ] P3 TTS engine selection correctly follows `tts.engine` field
|
||||
- [ ] Schema version check in P4 fails gracefully with a clear error message
|
||||
- [ ] Voice clone sounds natural (if Chatterbox path taken)
|
||||
16
homeai-character/README.md
Normal file
16
homeai-character/README.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# React + Vite
|
||||
|
||||
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
|
||||
|
||||
Currently, two official plugins are available:
|
||||
|
||||
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) (or [oxc](https://oxc.rs) when used in [rolldown-vite](https://vite.dev/guide/rolldown)) for Fast Refresh
|
||||
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
|
||||
|
||||
## React Compiler
|
||||
|
||||
The React Compiler is not enabled on this template because of its impact on dev & build performances. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
|
||||
|
||||
## Expanding the ESLint configuration
|
||||
|
||||
If you are developing a production application, we recommend using TypeScript with type-aware lint rules enabled. Check out the [TS template](https://github.com/vitejs/vite/tree/main/packages/create-vite/template-react-ts) for information on how to integrate TypeScript and [`typescript-eslint`](https://typescript-eslint.io) in your project.
|
||||
@@ -1,686 +0,0 @@
|
||||
import { useState, useEffect, useCallback } from "react";
|
||||
|
||||
// localStorage key under which the full array of character profiles is persisted.
const STORAGE_KEY = "ai-character-profiles";

// Ollama model tags offered in the LLM dropdown.
const DEFAULT_MODELS = [
  "llama3.3:70b", "qwen2.5:72b", "mistral-large", "llama3.1:8b",
  "qwen2.5:14b", "gemma3:27b", "deepseek-r1:14b", "phi4:14b"
];

// Engine choices for the TTS / STT / image-generation dropdowns.
const TTS_MODELS = ["Kokoro", "Chatterbox", "F5-TTS", "Qwen3-TTS", "Piper"];
const STT_MODELS = ["Whisper Large-v3", "Whisper Medium", "Whisper Small", "Whisper Turbo"];
const IMAGE_MODELS = ["SDXL", "Flux.1-dev", "Flux.1-schnell", "SD 1.5", "Pony Diffusion"];

// Selectable personality trait pills (TagSelector caps selection at 6).
const PERSONALITY_TRAITS = [
  "Warm", "Witty", "Calm", "Energetic", "Sarcastic", "Nurturing",
  "Curious", "Playful", "Formal", "Casual", "Empathetic", "Direct",
  "Creative", "Analytical", "Protective", "Mischievous"
];

// Speaking-style pills (single-select in the Personality tab).
const SPEAKING_STYLES = [
  "Conversational", "Poetic", "Concise", "Verbose", "Academic",
  "Informal", "Dramatic", "Deadpan", "Enthusiastic", "Measured"
];

// Template for a newly created character. Deep-cloned on creation so each
// character gets independent nested objects (personality, prompts, ...).
const EMPTY_CHARACTER = {
  id: null,                 // set by generateId() on creation
  name: "",
  tagline: "",
  avatar: "",               // filename/URL of the visual model reference
  accentColor: "#7c6fff",   // drives UI theming via the --accent CSS variable
  personality: {
    traits: [],             // subset of PERSONALITY_TRAITS
    speakingStyle: "",      // one of SPEAKING_STYLES (or empty)
    coreValues: "",
    quirks: "",
    backstory: "",
    motivation: "",
  },
  prompts: {
    systemPrompt: "",
    wakeWordResponse: "",
    fallbackResponse: "",
    errorResponse: "",
    customPrompts: [],      // array of { trigger, response } pairs
  },
  models: {
    llm: "",                // one of DEFAULT_MODELS
    tts: "",                // one of TTS_MODELS
    stt: "",                // one of STT_MODELS
    imageGen: "",           // one of IMAGE_MODELS
    voiceCloneRef: "",      // path to reference audio for voice cloning
    ttsSpeed: 1.0,
    temperature: 0.7,
  },
  liveRepresentation: {
    live2dModel: "",        // path to .model3.json relative to VTube Studio models dir
    idleExpression: "",
    speakingExpression: "",
    thinkingExpression: "",
    happyExpression: "",
    vtsTriggers: "",        // free-form JSON string of extra VTS trigger mappings
  },
  userNotes: "",
  createdAt: null,          // ISO timestamps, set on create/update
  updatedAt: null,
};

// Editor tab order and the glyph shown next to each tab label.
const TABS = ["Identity", "Personality", "Prompts", "Models", "Live2D", "Notes"];

const TAB_ICONS = {
  Identity: "◈",
  Personality: "◉",
  Prompts: "◎",
  Models: "⬡",
  Live2D: "◇",
  Notes: "▣",
};
|
||||
|
||||
/**
 * Generate a unique id for a character profile.
 *
 * Uses crypto.randomUUID() when available (RFC 4122, collision-proof) and
 * falls back to the original timestamp+random scheme in environments
 * without the Web Crypto API. Both paths return an opaque string, so the
 * interface is unchanged for callers.
 *
 * @returns {string} opaque unique identifier
 */
function generateId() {
  if (typeof crypto !== "undefined" && typeof crypto.randomUUID === "function") {
    return crypto.randomUUID();
  }
  // Fallback: ms timestamp in base36 plus a random base36 suffix.
  // Not collision-proof, but acceptable for a single-user local tool.
  return Date.now().toString(36) + Math.random().toString(36).slice(2);
}
|
||||
|
||||
/**
 * Accent-color picker: a row of preset swatches plus a native color input
 * for arbitrary values.
 *
 * @param {string}   value    currently selected hex color
 * @param {Function} onChange called with the new hex string on selection
 */
function ColorPicker({ value, onChange }) {
  // Curated preset palette shown as round swatch buttons.
  const presets = [
    "#7c6fff","#ff6b9d","#00d4aa","#ff9f43","#48dbfb",
    "#ff6348","#a29bfe","#fd79a8","#55efc4","#fdcb6e"
  ];
  return (
    <div style={{ display: "flex", gap: 8, alignItems: "center", flexWrap: "wrap" }}>
      {presets.map(c => (
        // The active swatch gets a white ring + colored glow.
        <button key={c} onClick={() => onChange(c)} style={{
          width: 28, height: 28, borderRadius: "50%", background: c, border: value === c ? "3px solid #fff" : "3px solid transparent",
          cursor: "pointer", outline: "none", boxShadow: value === c ? `0 0 0 2px ${c}` : "none", transition: "all 0.2s"
        }} />
      ))}
      {/* Native color input as an "any color" escape hatch. */}
      <input type="color" value={value} onChange={e => onChange(e.target.value)}
        style={{ width: 28, height: 28, borderRadius: "50%", border: "none", cursor: "pointer", background: "none", padding: 0 }} />
    </div>
  );
}
|
||||
|
||||
function TagSelector({ options, selected, onChange, max = 6 }) {
|
||||
return (
|
||||
<div style={{ display: "flex", flexWrap: "wrap", gap: 8 }}>
|
||||
{options.map(opt => {
|
||||
const active = selected.includes(opt);
|
||||
return (
|
||||
<button key={opt} onClick={() => {
|
||||
if (active) onChange(selected.filter(s => s !== opt));
|
||||
else if (selected.length < max) onChange([...selected, opt]);
|
||||
}} style={{
|
||||
padding: "5px 14px", borderRadius: 20, fontSize: 13, fontFamily: "inherit",
|
||||
background: active ? "var(--accent)" : "rgba(255,255,255,0.06)",
|
||||
color: active ? "#fff" : "rgba(255,255,255,0.55)",
|
||||
border: active ? "1px solid var(--accent)" : "1px solid rgba(255,255,255,0.1)",
|
||||
cursor: "pointer", transition: "all 0.18s", fontWeight: active ? 600 : 400,
|
||||
}}>
|
||||
{opt}
|
||||
</button>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Labeled form-field wrapper: uppercase label, optional muted hint line,
 * then the actual control(s) passed as children.
 *
 * @param {string}  label    field caption (rendered uppercase)
 * @param {string}  [hint]   optional helper text under the label
 * @param {*}       children the input control(s) to wrap
 */
function Field({ label, hint, children }) {
  return (
    <div style={{ marginBottom: 22 }}>
      <label style={{ display: "block", fontSize: 12, fontWeight: 700, letterSpacing: "0.08em", textTransform: "uppercase", color: "rgba(255,255,255,0.45)", marginBottom: 6 }}>
        {label}
      </label>
      {/* Hint renders only when provided. */}
      {hint && <p style={{ fontSize: 12, color: "rgba(255,255,255,0.3)", marginBottom: 8, marginTop: -2 }}>{hint}</p>}
      {children}
    </div>
  );
}
|
||||
|
||||
/**
 * Themed single-line text input (controlled). Border highlights with the
 * accent color on focus and reverts on blur.
 *
 * @param {string}   value       controlled value
 * @param {Function} onChange    called with the new string value
 * @param {string}   [placeholder]
 * @param {string}   [type="text"] HTML input type
 */
function Input({ value, onChange, placeholder, type = "text" }) {
  return (
    <input type={type} value={value} onChange={e => onChange(e.target.value)} placeholder={placeholder}
      style={{
        width: "100%", background: "rgba(255,255,255,0.05)", border: "1px solid rgba(255,255,255,0.1)",
        borderRadius: 8, padding: "10px 14px", color: "#fff", fontSize: 14, fontFamily: "inherit",
        outline: "none", boxSizing: "border-box", transition: "border-color 0.2s",
      }}
      onFocus={e => e.target.style.borderColor = "var(--accent)"}
      onBlur={e => e.target.style.borderColor = "rgba(255,255,255,0.1)"}
    />
  );
}
|
||||
|
||||
/**
 * Themed multi-line text input (controlled), vertically resizable.
 * Same focus/blur accent-border behavior as Input.
 *
 * @param {string}   value    controlled value
 * @param {Function} onChange called with the new string value
 * @param {string}   [placeholder]
 * @param {number}   [rows=4] initial visible rows
 */
function Textarea({ value, onChange, placeholder, rows = 4 }) {
  return (
    <textarea value={value} onChange={e => onChange(e.target.value)} placeholder={placeholder} rows={rows}
      style={{
        width: "100%", background: "rgba(255,255,255,0.05)", border: "1px solid rgba(255,255,255,0.1)",
        borderRadius: 8, padding: "10px 14px", color: "#fff", fontSize: 14, fontFamily: "inherit",
        outline: "none", boxSizing: "border-box", resize: "vertical", lineHeight: 1.6,
        transition: "border-color 0.2s",
      }}
      onFocus={e => e.target.style.borderColor = "var(--accent)"}
      onBlur={e => e.target.style.borderColor = "rgba(255,255,255,0.1)"}
    />
  );
}
|
||||
|
||||
/**
 * Themed dropdown (controlled). An empty value renders the placeholder
 * option in a muted color; a custom chevron is drawn via an inline SVG
 * background since the native arrow is suppressed with appearance:none.
 *
 * @param {string}   value       currently selected option ("" = none)
 * @param {Function} onChange    called with the newly selected option string
 * @param {string[]} options     option labels (also used as values)
 * @param {string}   [placeholder] text of the empty option
 */
function Select({ value, onChange, options, placeholder }) {
  return (
    <select value={value} onChange={e => onChange(e.target.value)}
      style={{
        width: "100%", background: "rgba(20,20,35,0.95)", border: "1px solid rgba(255,255,255,0.1)",
        borderRadius: 8, padding: "10px 14px", color: value ? "#fff" : "rgba(255,255,255,0.35)",
        fontSize: 14, fontFamily: "inherit", outline: "none", cursor: "pointer",
        appearance: "none", backgroundImage: `url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='8' viewBox='0 0 12 8'%3E%3Cpath d='M1 1l5 5 5-5' stroke='rgba(255,255,255,0.3)' stroke-width='2' fill='none'/%3E%3C/svg%3E")`,
        backgroundRepeat: "no-repeat", backgroundPosition: "right 14px center",
      }}>
      <option value="">{placeholder || "Select..."}</option>
      {options.map(o => <option key={o} value={o}>{o}</option>)}
    </select>
  );
}
|
||||
|
||||
/**
 * Range slider with a numeric readout (one decimal place) on the right.
 * Emits a parsed float, not the raw input string.
 *
 * @param {number}   value    controlled numeric value
 * @param {Function} onChange called with the new value as a float
 * @param {number}   min      slider minimum
 * @param {number}   max      slider maximum
 * @param {number}   step     slider increment
 * @param {string}   [label]  NOTE(review): accepted but never rendered —
 *                            confirm whether it should be displayed or dropped.
 */
function Slider({ value, onChange, min, max, step, label }) {
  return (
    <div style={{ display: "flex", alignItems: "center", gap: 14 }}>
      <input type="range" min={min} max={max} step={step} value={value}
        onChange={e => onChange(parseFloat(e.target.value))}
        style={{ flex: 1, accentColor: "var(--accent)", cursor: "pointer" }} />
      {/* tabular-nums keeps the readout width stable while dragging. */}
      <span style={{ fontSize: 14, color: "rgba(255,255,255,0.7)", minWidth: 38, textAlign: "right", fontVariantNumeric: "tabular-nums" }}>
        {value.toFixed(1)}
      </span>
    </div>
  );
}
|
||||
|
||||
function CustomPromptsEditor({ prompts, onChange }) {
|
||||
const add = () => onChange([...prompts, { trigger: "", response: "" }]);
|
||||
const remove = i => onChange(prompts.filter((_, idx) => idx !== i));
|
||||
const update = (i, field, val) => {
|
||||
const next = [...prompts];
|
||||
next[i] = { ...next[i], [field]: val };
|
||||
onChange(next);
|
||||
};
|
||||
return (
|
||||
<div>
|
||||
{prompts.map((p, i) => (
|
||||
<div key={i} style={{ background: "rgba(255,255,255,0.04)", borderRadius: 10, padding: 14, marginBottom: 10, position: "relative" }}>
|
||||
<button onClick={() => remove(i)} style={{
|
||||
position: "absolute", top: 10, right: 10, background: "rgba(255,80,80,0.15)",
|
||||
border: "none", color: "#ff6b6b", borderRadius: 6, cursor: "pointer", padding: "2px 8px", fontSize: 12
|
||||
}}>✕</button>
|
||||
<div style={{ marginBottom: 8 }}>
|
||||
<Input value={p.trigger} onChange={v => update(i, "trigger", v)} placeholder="Trigger keyword or context..." />
|
||||
</div>
|
||||
<Textarea value={p.response} onChange={v => update(i, "response", v)} placeholder="Custom response or behaviour..." rows={2} />
|
||||
</div>
|
||||
))}
|
||||
<button onClick={add} style={{
|
||||
width: "100%", padding: "10px", background: "rgba(255,255,255,0.04)",
|
||||
border: "1px dashed rgba(255,255,255,0.15)", borderRadius: 8, color: "rgba(255,255,255,0.45)",
|
||||
cursor: "pointer", fontSize: 13, fontFamily: "inherit", transition: "all 0.2s"
|
||||
}}
|
||||
onMouseEnter={e => e.target.style.borderColor = "var(--accent)"}
|
||||
onMouseLeave={e => e.target.style.borderColor = "rgba(255,255,255,0.15)"}
|
||||
>+ Add Custom Prompt</button>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Sidebar list entry for one character: avatar initials, name, tagline,
 * up to three trait pills, and an inline delete button. Clicking the card
 * selects the character; the delete button stops propagation so it does
 * not also trigger selection.
 *
 * @param {object}   character the character profile to render
 * @param {boolean}  active    whether this card is the selected one
 * @param {Function} onSelect  called with character.id on card click
 * @param {Function} onDelete  called with character.id on ✕ click
 */
function CharacterCard({ character, active, onSelect, onDelete }) {
  // First two letters of the name as an avatar placeholder; "??" when unnamed.
  const initials = character.name ? character.name.slice(0, 2).toUpperCase() : "??";
  return (
    <div onClick={() => onSelect(character.id)} style={{
      padding: "14px 16px", borderRadius: 12, cursor: "pointer", marginBottom: 8,
      background: active ? `linear-gradient(135deg, ${character.accentColor}22, ${character.accentColor}11)` : "rgba(255,255,255,0.04)",
      border: active ? `1px solid ${character.accentColor}66` : "1px solid rgba(255,255,255,0.07)",
      transition: "all 0.2s", position: "relative",
    }}>
      <div style={{ display: "flex", alignItems: "center", gap: 12 }}>
        {/* Circular avatar tinted with the character's accent color. */}
        <div style={{
          width: 40, height: 40, borderRadius: "50%", background: `linear-gradient(135deg, ${character.accentColor}, ${character.accentColor}88)`,
          display: "flex", alignItems: "center", justifyContent: "center", fontSize: 14, fontWeight: 800,
          color: "#fff", flexShrink: 0, boxShadow: `0 4px 12px ${character.accentColor}44`
        }}>{initials}</div>
        <div style={{ flex: 1, minWidth: 0 }}>
          <div style={{ fontWeight: 700, fontSize: 15, color: "#fff", whiteSpace: "nowrap", overflow: "hidden", textOverflow: "ellipsis" }}>
            {character.name || "Unnamed"}
          </div>
          {character.tagline && (
            <div style={{ fontSize: 12, color: "rgba(255,255,255,0.4)", whiteSpace: "nowrap", overflow: "hidden", textOverflow: "ellipsis" }}>
              {character.tagline}
            </div>
          )}
        </div>
        {/* stopPropagation keeps delete from also selecting the card. */}
        <button onClick={e => { e.stopPropagation(); onDelete(character.id); }} style={{
          background: "none", border: "none", color: "rgba(255,255,255,0.2)", cursor: "pointer",
          fontSize: 16, padding: "2px 6px", borderRadius: 4, transition: "color 0.15s", flexShrink: 0
        }}
          onMouseEnter={e => e.target.style.color = "#ff6b6b"}
          onMouseLeave={e => e.target.style.color = "rgba(255,255,255,0.2)"}
        >×</button>
      </div>
      {/* Show at most three trait pills, plus a "+N" overflow counter. */}
      {character.personality.traits.length > 0 && (
        <div style={{ display: "flex", gap: 4, flexWrap: "wrap", marginTop: 10 }}>
          {character.personality.traits.slice(0, 3).map(t => (
            <span key={t} style={{
              fontSize: 10, padding: "2px 8px", borderRadius: 10, fontWeight: 600, letterSpacing: "0.04em",
              background: `${character.accentColor}22`, color: character.accentColor, border: `1px solid ${character.accentColor}44`
            }}>{t}</span>
          ))}
          {character.personality.traits.length > 3 && (
            <span style={{ fontSize: 10, color: "rgba(255,255,255,0.3)", padding: "2px 4px" }}>+{character.personality.traits.length - 3}</span>
          )}
        </div>
      )}
    </div>
  );
}
|
||||
|
||||
function ExportModal({ character, onClose }) {
|
||||
const json = JSON.stringify(character, null, 2);
|
||||
const [copied, setCopied] = useState(false);
|
||||
const copy = () => {
|
||||
navigator.clipboard.writeText(json);
|
||||
setCopied(true);
|
||||
setTimeout(() => setCopied(false), 2000);
|
||||
};
|
||||
return (
|
||||
<div style={{
|
||||
position: "fixed", inset: 0, background: "rgba(0,0,0,0.7)", zIndex: 100,
|
||||
display: "flex", alignItems: "center", justifyContent: "center", padding: 24
|
||||
}} onClick={onClose}>
|
||||
<div onClick={e => e.stopPropagation()} style={{
|
||||
background: "#13131f", border: "1px solid rgba(255,255,255,0.1)", borderRadius: 16,
|
||||
padding: 28, width: "100%", maxWidth: 640, maxHeight: "80vh", display: "flex", flexDirection: "column"
|
||||
}}>
|
||||
<div style={{ display: "flex", justifyContent: "space-between", alignItems: "center", marginBottom: 16 }}>
|
||||
<h3 style={{ margin: 0, fontSize: 18, color: "#fff" }}>Export Character</h3>
|
||||
<button onClick={onClose} style={{ background: "none", border: "none", color: "rgba(255,255,255,0.4)", fontSize: 22, cursor: "pointer" }}>×</button>
|
||||
</div>
|
||||
<pre style={{
|
||||
flex: 1, overflow: "auto", background: "rgba(0,0,0,0.3)", borderRadius: 10,
|
||||
padding: 16, fontSize: 12, color: "rgba(255,255,255,0.7)", lineHeight: 1.6, margin: 0
|
||||
}}>{json}</pre>
|
||||
<button onClick={copy} style={{
|
||||
marginTop: 16, padding: "12px", background: "var(--accent)", border: "none",
|
||||
borderRadius: 10, color: "#fff", fontWeight: 700, fontSize: 14, cursor: "pointer",
|
||||
fontFamily: "inherit", transition: "opacity 0.2s"
|
||||
}}>{copied ? "✓ Copied!" : "Copy to Clipboard"}</button>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default function CharacterManager() {
|
||||
const [characters, setCharacters] = useState([]);
|
||||
const [activeId, setActiveId] = useState(null);
|
||||
const [activeTab, setActiveTab] = useState("Identity");
|
||||
const [exportModal, setExportModal] = useState(false);
|
||||
const [saved, setSaved] = useState(false);
|
||||
|
||||
// Load from storage
|
||||
useEffect(() => {
|
||||
try {
|
||||
const stored = localStorage.getItem(STORAGE_KEY);
|
||||
if (stored) {
|
||||
const parsed = JSON.parse(stored);
|
||||
setCharacters(parsed);
|
||||
if (parsed.length > 0) setActiveId(parsed[0].id);
|
||||
}
|
||||
} catch (e) {}
|
||||
}, []);
|
||||
|
||||
// Save to storage
|
||||
const saveToStorage = useCallback((chars) => {
|
||||
try {
|
||||
localStorage.setItem(STORAGE_KEY, JSON.stringify(chars));
|
||||
} catch (e) {}
|
||||
}, []);
|
||||
|
||||
const activeCharacter = characters.find(c => c.id === activeId) || null;
|
||||
|
||||
const updateCharacter = (updater) => {
|
||||
setCharacters(prev => {
|
||||
const next = prev.map(c => c.id === activeId ? { ...updater(c), updatedAt: new Date().toISOString() } : c);
|
||||
saveToStorage(next);
|
||||
return next;
|
||||
});
|
||||
setSaved(true);
|
||||
setTimeout(() => setSaved(false), 1500);
|
||||
};
|
||||
|
||||
const createCharacter = () => {
|
||||
const newChar = {
|
||||
...JSON.parse(JSON.stringify(EMPTY_CHARACTER)),
|
||||
id: generateId(),
|
||||
accentColor: ["#7c6fff","#ff6b9d","#00d4aa","#ff9f43","#48dbfb"][Math.floor(Math.random() * 5)],
|
||||
createdAt: new Date().toISOString(),
|
||||
updatedAt: new Date().toISOString(),
|
||||
};
|
||||
const next = [newChar, ...characters];
|
||||
setCharacters(next);
|
||||
setActiveId(newChar.id);
|
||||
setActiveTab("Identity");
|
||||
saveToStorage(next);
|
||||
};
|
||||
|
||||
const deleteCharacter = (id) => {
|
||||
const next = characters.filter(c => c.id !== id);
|
||||
setCharacters(next);
|
||||
saveToStorage(next);
|
||||
if (activeId === id) setActiveId(next.length > 0 ? next[0].id : null);
|
||||
};
|
||||
|
||||
const accentColor = activeCharacter?.accentColor || "#7c6fff";
|
||||
|
||||
const set = (path, value) => {
|
||||
updateCharacter(c => {
|
||||
const parts = path.split(".");
|
||||
const next = JSON.parse(JSON.stringify(c));
|
||||
let obj = next;
|
||||
for (let i = 0; i < parts.length - 1; i++) obj = obj[parts[i]];
|
||||
obj[parts[parts.length - 1]] = value;
|
||||
return next;
|
||||
});
|
||||
};
|
||||
|
||||
const renderTab = () => {
|
||||
if (!activeCharacter) return null;
|
||||
const c = activeCharacter;
|
||||
|
||||
switch (activeTab) {
|
||||
case "Identity":
|
||||
return (
|
||||
<div>
|
||||
<Field label="Character Name">
|
||||
<Input value={c.name} onChange={v => set("name", v)} placeholder="e.g. Aria, Nova, Echo..." />
|
||||
</Field>
|
||||
<Field label="Tagline" hint="A short phrase that captures their essence">
|
||||
<Input value={c.tagline} onChange={v => set("tagline", v)} placeholder="e.g. Your curious, warm-hearted companion" />
|
||||
</Field>
|
||||
<Field label="Accent Color" hint="Used for UI theming and visual identity">
|
||||
<ColorPicker value={c.accentColor} onChange={v => set("accentColor", v)} />
|
||||
</Field>
|
||||
<Field label="Live2D / Avatar Reference" hint="Filename or URL of the character's visual model">
|
||||
<Input value={c.avatar} onChange={v => set("avatar", v)} placeholder="e.g. aria_v2.model3.json" />
|
||||
</Field>
|
||||
<Field label="Backstory" hint="Who are they? Where do they come from? Keep it rich.">
|
||||
<Textarea value={c.personality.backstory} onChange={v => set("personality.backstory", v)}
|
||||
placeholder="Write a detailed origin story, background, and personal history for this character..." rows={5} />
|
||||
</Field>
|
||||
<Field label="Core Motivation" hint="What drives them? What do they care about most?">
|
||||
<Textarea value={c.personality.motivation} onChange={v => set("personality.motivation", v)}
|
||||
placeholder="e.g. A deep desire to help and grow alongside their human companion..." rows={3} />
|
||||
</Field>
|
||||
</div>
|
||||
);
|
||||
|
||||
case "Personality":
|
||||
return (
|
||||
<div>
|
||||
<Field label="Personality Traits" hint={`Select up to 6 traits (${c.personality.traits.length}/6)`}>
|
||||
<TagSelector options={PERSONALITY_TRAITS} selected={c.personality.traits}
|
||||
onChange={v => set("personality.traits", v)} max={6} />
|
||||
</Field>
|
||||
<Field label="Speaking Style">
|
||||
<TagSelector options={SPEAKING_STYLES} selected={c.personality.speakingStyle ? [c.personality.speakingStyle] : []}
|
||||
onChange={v => set("personality.speakingStyle", v[v.length - 1] || "")} max={1} />
|
||||
</Field>
|
||||
<Field label="Core Values" hint="What principles guide their responses and behaviour?">
|
||||
<Textarea value={c.personality.coreValues} onChange={v => set("personality.coreValues", v)}
|
||||
placeholder="e.g. Honesty, kindness, intellectual curiosity, loyalty to their user..." rows={3} />
|
||||
</Field>
|
||||
<Field label="Quirks & Mannerisms" hint="Unique behavioural patterns, phrases, habits that make them feel real">
|
||||
<Textarea value={c.personality.quirks} onChange={v => set("personality.quirks", v)}
|
||||
placeholder="e.g. Tends to use nautical metaphors. Hums softly when thinking. Has strong opinions about tea..." rows={3} />
|
||||
</Field>
|
||||
</div>
|
||||
);
|
||||
|
||||
case "Prompts":
|
||||
return (
|
||||
<div>
|
||||
<Field label="System Prompt" hint="The core instruction set defining who this character is to the LLM">
|
||||
<Textarea value={c.prompts.systemPrompt} onChange={v => set("prompts.systemPrompt", v)}
|
||||
placeholder="You are [name], a [description]. Your personality is [traits]. You speak in a [style] manner. You care deeply about [values]..." rows={8} />
|
||||
</Field>
|
||||
<Field label="Wake Word Response" hint="First response when activated by wake word">
|
||||
<Textarea value={c.prompts.wakeWordResponse} onChange={v => set("prompts.wakeWordResponse", v)}
|
||||
placeholder="e.g. 'Yes? I'm here.' or 'Hmm? What do you need?'" rows={2} />
|
||||
</Field>
|
||||
<Field label="Fallback Response" hint="When the character doesn't understand or can't help">
|
||||
<Textarea value={c.prompts.fallbackResponse} onChange={v => set("prompts.fallbackResponse", v)}
|
||||
placeholder="e.g. 'I'm not sure I follow — could you say that differently?'" rows={2} />
|
||||
</Field>
|
||||
<Field label="Error Response" hint="When something goes wrong technically">
|
||||
<Textarea value={c.prompts.errorResponse} onChange={v => set("prompts.errorResponse", v)}
|
||||
placeholder="e.g. 'Something went wrong on my end. Give me a moment.'" rows={2} />
|
||||
</Field>
|
||||
<Field label="Custom Prompt Rules" hint="Context-specific overrides and triggers">
|
||||
<CustomPromptsEditor prompts={c.prompts.customPrompts}
|
||||
onChange={v => set("prompts.customPrompts", v)} />
|
||||
</Field>
|
||||
</div>
|
||||
);
|
||||
|
||||
case "Models":
|
||||
return (
|
||||
<div>
|
||||
<Field label="LLM (Language Model)" hint="Primary reasoning and conversation model via Ollama">
|
||||
<Select value={c.models.llm} onChange={v => set("models.llm", v)} options={DEFAULT_MODELS} placeholder="Select LLM..." />
|
||||
</Field>
|
||||
<Field label="LLM Temperature" hint="Higher = more creative, lower = more focused">
|
||||
<Slider value={c.models.temperature} onChange={v => set("models.temperature", v)} min={0} max={2} step={0.1} />
|
||||
</Field>
|
||||
<Field label="Text-to-Speech Engine">
|
||||
<Select value={c.models.tts} onChange={v => set("models.tts", v)} options={TTS_MODELS} placeholder="Select TTS..." />
|
||||
</Field>
|
||||
<Field label="TTS Speed">
|
||||
<Slider value={c.models.ttsSpeed} onChange={v => set("models.ttsSpeed", v)} min={0.5} max={2.0} step={0.1} />
|
||||
</Field>
|
||||
<Field label="Voice Clone Reference" hint="Path or filename of reference audio for voice cloning">
|
||||
<Input value={c.models.voiceCloneRef} onChange={v => set("models.voiceCloneRef", v)} placeholder="e.g. /voices/aria_reference.wav" />
|
||||
</Field>
|
||||
<Field label="Speech-to-Text Engine">
|
||||
<Select value={c.models.stt} onChange={v => set("models.stt", v)} options={STT_MODELS} placeholder="Select STT..." />
|
||||
</Field>
|
||||
<Field label="Image Generation Model" hint="Used when character generates images or self-portraits">
|
||||
<Select value={c.models.imageGen} onChange={v => set("models.imageGen", v)} options={IMAGE_MODELS} placeholder="Select image model..." />
|
||||
</Field>
|
||||
</div>
|
||||
);
|
||||
|
||||
case "Live2D":
|
||||
return (
|
||||
<div>
|
||||
<Field label="Live2D Model File" hint="Path to .model3.json file, relative to VTube Studio models folder">
|
||||
<Input value={c.liveRepresentation.live2dModel} onChange={v => set("liveRepresentation.live2dModel", v)} placeholder="e.g. Aria/aria.model3.json" />
|
||||
</Field>
|
||||
<Field label="Idle Expression" hint="VTube Studio expression name when listening/waiting">
|
||||
<Input value={c.liveRepresentation.idleExpression} onChange={v => set("liveRepresentation.idleExpression", v)} placeholder="e.g. idle_blink" />
|
||||
</Field>
|
||||
<Field label="Speaking Expression" hint="Expression triggered when TTS audio is playing">
|
||||
<Input value={c.liveRepresentation.speakingExpression} onChange={v => set("liveRepresentation.speakingExpression", v)} placeholder="e.g. talking_smile" />
|
||||
</Field>
|
||||
<Field label="Thinking Expression" hint="Triggered while LLM is processing a response">
|
||||
<Input value={c.liveRepresentation.thinkingExpression} onChange={v => set("liveRepresentation.thinkingExpression", v)} placeholder="e.g. thinking_tilt" />
|
||||
</Field>
|
||||
<Field label="Happy / Positive Expression" hint="Triggered on positive sentiment responses">
|
||||
<Input value={c.liveRepresentation.happyExpression} onChange={v => set("liveRepresentation.happyExpression", v)} placeholder="e.g. happy_bright" />
|
||||
</Field>
|
||||
<Field label="VTube Studio Custom Triggers" hint="Additional WebSocket API trigger mappings (JSON)">
|
||||
<Textarea value={c.liveRepresentation.vtsTriggers} onChange={v => set("liveRepresentation.vtsTriggers", v)}
|
||||
placeholder={'{\n "on_error": "expression_concerned",\n "on_wake": "expression_alert"\n}'} rows={5} />
|
||||
</Field>
|
||||
</div>
|
||||
);
|
||||
|
||||
case "Notes":
|
||||
return (
|
||||
<div>
|
||||
<Field label="Developer Notes" hint="Freeform notes, ideas, todos, and observations about this character">
|
||||
<Textarea value={c.userNotes} onChange={v => set("userNotes", v)}
|
||||
placeholder={"Ideas, observations, things to try...\n\n- Voice reference sounds slightly too formal, adjust Chatterbox guidance scale\n- Try adding more nautical metaphors to system prompt\n- Need to map 'confused' expression in VTS\n- Consider adding weather awareness skill"}
|
||||
rows={16} />
|
||||
</Field>
|
||||
<div style={{ background: "rgba(255,255,255,0.03)", borderRadius: 10, padding: 16, fontSize: 12, color: "rgba(255,255,255,0.35)", lineHeight: 1.7 }}>
|
||||
<div style={{ marginBottom: 4, fontWeight: 700, color: "rgba(255,255,255,0.45)", letterSpacing: "0.06em", textTransform: "uppercase", fontSize: 11 }}>Character Info</div>
|
||||
<div>ID: <span style={{ color: "rgba(255,255,255,0.5)", fontFamily: "monospace" }}>{c.id}</span></div>
|
||||
{c.createdAt && <div>Created: {new Date(c.createdAt).toLocaleString()}</div>}
|
||||
{c.updatedAt && <div>Updated: {new Date(c.updatedAt).toLocaleString()}</div>}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div style={{
|
||||
"--accent": accentColor,
|
||||
minHeight: "100vh",
|
||||
background: "#0d0d18",
|
||||
color: "#fff",
|
||||
fontFamily: "'DM Sans', 'Segoe UI', system-ui, sans-serif",
|
||||
display: "flex",
|
||||
flexDirection: "column",
|
||||
}}>
|
||||
<style>{`
|
||||
@import url('https://fonts.googleapis.com/css2?family=DM+Sans:wght@400;500;600;700;800&family=DM+Mono:wght@400;500&display=swap');
|
||||
* { box-sizing: border-box; }
|
||||
::-webkit-scrollbar { width: 6px; }
|
||||
::-webkit-scrollbar-track { background: transparent; }
|
||||
::-webkit-scrollbar-thumb { background: rgba(255,255,255,0.1); border-radius: 3px; }
|
||||
input::placeholder, textarea::placeholder { color: rgba(255,255,255,0.2); }
|
||||
select option { background: #13131f; }
|
||||
`}</style>
|
||||
|
||||
{/* Header */}
|
||||
<div style={{
|
||||
padding: "18px 28px", borderBottom: "1px solid rgba(255,255,255,0.06)",
|
||||
display: "flex", alignItems: "center", justifyContent: "space-between",
|
||||
background: "rgba(0,0,0,0.2)", backdropFilter: "blur(10px)",
|
||||
position: "sticky", top: 0, zIndex: 10,
|
||||
}}>
|
||||
<div style={{ display: "flex", alignItems: "center", gap: 14 }}>
|
||||
<div style={{
|
||||
width: 36, height: 36, borderRadius: 10,
|
||||
background: `linear-gradient(135deg, ${accentColor}, ${accentColor}88)`,
|
||||
display: "flex", alignItems: "center", justifyContent: "center", fontSize: 18,
|
||||
boxShadow: `0 4px 16px ${accentColor}44`
|
||||
}}>◈</div>
|
||||
<div>
|
||||
<div style={{ fontWeight: 800, fontSize: 17, letterSpacing: "-0.01em" }}>Character Manager</div>
|
||||
<div style={{ fontSize: 12, color: "rgba(255,255,255,0.35)" }}>AI Personality Configuration</div>
|
||||
</div>
|
||||
</div>
|
||||
<div style={{ display: "flex", gap: 10, alignItems: "center" }}>
|
||||
{saved && <span style={{ fontSize: 12, color: accentColor, fontWeight: 600 }}>✓ Saved</span>}
|
||||
{activeCharacter && (
|
||||
<button onClick={() => setExportModal(true)} style={{
|
||||
padding: "8px 16px", background: "rgba(255,255,255,0.07)", border: "1px solid rgba(255,255,255,0.12)",
|
||||
borderRadius: 8, color: "rgba(255,255,255,0.7)", fontSize: 13, cursor: "pointer",
|
||||
fontFamily: "inherit", fontWeight: 600, transition: "all 0.2s"
|
||||
}}>Export JSON</button>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div style={{ display: "flex", flex: 1, overflow: "hidden" }}>
|
||||
{/* Sidebar */}
|
||||
<div style={{
|
||||
width: 260, borderRight: "1px solid rgba(255,255,255,0.06)",
|
||||
display: "flex", flexDirection: "column", background: "rgba(0,0,0,0.15)",
|
||||
flexShrink: 0,
|
||||
}}>
|
||||
<div style={{ padding: "16px 16px 8px" }}>
|
||||
<button onClick={createCharacter} style={{
|
||||
width: "100%", padding: "11px", background: `linear-gradient(135deg, ${accentColor}cc, ${accentColor}88)`,
|
||||
border: "none", borderRadius: 10, color: "#fff", fontWeight: 700, fontSize: 14,
|
||||
cursor: "pointer", fontFamily: "inherit", transition: "opacity 0.2s",
|
||||
boxShadow: `0 4px 16px ${accentColor}33`
|
||||
}}>+ New Character</button>
|
||||
</div>
|
||||
<div style={{ flex: 1, overflowY: "auto", padding: "4px 16px 16px" }}>
|
||||
{characters.length === 0 ? (
|
||||
<div style={{ textAlign: "center", padding: "40px 16px", color: "rgba(255,255,255,0.2)", fontSize: 13, lineHeight: 1.6 }}>
|
||||
No characters yet.<br />Create your first one above.
|
||||
</div>
|
||||
) : (
|
||||
characters.map(c => (
|
||||
<CharacterCard key={c.id} character={c} active={c.id === activeId}
|
||||
onSelect={setActiveId} onDelete={deleteCharacter} />
|
||||
))
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Main editor */}
|
||||
{activeCharacter ? (
|
||||
<div style={{ flex: 1, display: "flex", flexDirection: "column", overflow: "hidden" }}>
|
||||
{/* Character header */}
|
||||
<div style={{
|
||||
padding: "20px 28px 0", borderBottom: "1px solid rgba(255,255,255,0.06)",
|
||||
background: `linear-gradient(180deg, ${accentColor}0a 0%, transparent 100%)`,
|
||||
}}>
|
||||
<div style={{ display: "flex", alignItems: "center", gap: 16, marginBottom: 18 }}>
|
||||
<div style={{
|
||||
width: 52, height: 52, borderRadius: 16, flexShrink: 0,
|
||||
background: `linear-gradient(135deg, ${accentColor}, ${accentColor}66)`,
|
||||
display: "flex", alignItems: "center", justifyContent: "center",
|
||||
fontSize: 20, fontWeight: 800, boxShadow: `0 6px 20px ${accentColor}44`
|
||||
}}>
|
||||
{activeCharacter.name ? activeCharacter.name.slice(0, 2).toUpperCase() : "??"}
|
||||
</div>
|
||||
<div>
|
||||
<div style={{ fontSize: 22, fontWeight: 800, letterSpacing: "-0.02em", lineHeight: 1.2 }}>
|
||||
{activeCharacter.name || <span style={{ color: "rgba(255,255,255,0.25)" }}>Unnamed Character</span>}
|
||||
</div>
|
||||
{activeCharacter.tagline && (
|
||||
<div style={{ fontSize: 14, color: "rgba(255,255,255,0.45)", marginTop: 2 }}>{activeCharacter.tagline}</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
{/* Tabs */}
|
||||
<div style={{ display: "flex", gap: 2 }}>
|
||||
{TABS.map(tab => (
|
||||
<button key={tab} onClick={() => setActiveTab(tab)} style={{
|
||||
padding: "9px 16px", background: "none", border: "none",
|
||||
borderBottom: activeTab === tab ? `2px solid ${accentColor}` : "2px solid transparent",
|
||||
color: activeTab === tab ? "#fff" : "rgba(255,255,255,0.4)",
|
||||
fontSize: 13, fontWeight: activeTab === tab ? 700 : 500,
|
||||
cursor: "pointer", fontFamily: "inherit", transition: "all 0.18s",
|
||||
display: "flex", alignItems: "center", gap: 6,
|
||||
}}>
|
||||
<span style={{ fontSize: 11 }}>{TAB_ICONS[tab]}</span>{tab}
|
||||
</button>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Tab content */}
|
||||
<div style={{ flex: 1, overflowY: "auto", padding: "24px 28px" }}>
|
||||
{renderTab()}
|
||||
</div>
|
||||
</div>
|
||||
) : (
|
||||
<div style={{
|
||||
flex: 1, display: "flex", alignItems: "center", justifyContent: "center",
|
||||
flexDirection: "column", gap: 16, color: "rgba(255,255,255,0.2)"
|
||||
}}>
|
||||
<div style={{ fontSize: 64, opacity: 0.3 }}>◈</div>
|
||||
<div style={{ fontSize: 16, fontWeight: 600 }}>No character selected</div>
|
||||
<div style={{ fontSize: 13 }}>Create a new character to get started</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{exportModal && activeCharacter && (
|
||||
<ExportModal character={activeCharacter} onClose={() => setExportModal(false)} />
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
29
homeai-character/eslint.config.js
Normal file
29
homeai-character/eslint.config.js
Normal file
@@ -0,0 +1,29 @@
|
||||
import js from '@eslint/js'
|
||||
import globals from 'globals'
|
||||
import reactHooks from 'eslint-plugin-react-hooks'
|
||||
import reactRefresh from 'eslint-plugin-react-refresh'
|
||||
import { defineConfig, globalIgnores } from 'eslint/config'
|
||||
|
||||
export default defineConfig([
|
||||
globalIgnores(['dist']),
|
||||
{
|
||||
files: ['**/*.{js,jsx}'],
|
||||
extends: [
|
||||
js.configs.recommended,
|
||||
reactHooks.configs.flat.recommended,
|
||||
reactRefresh.configs.vite,
|
||||
],
|
||||
languageOptions: {
|
||||
ecmaVersion: 2020,
|
||||
globals: globals.browser,
|
||||
parserOptions: {
|
||||
ecmaVersion: 'latest',
|
||||
ecmaFeatures: { jsx: true },
|
||||
sourceType: 'module',
|
||||
},
|
||||
},
|
||||
rules: {
|
||||
'no-unused-vars': ['error', { varsIgnorePattern: '^[A-Z_]' }],
|
||||
},
|
||||
},
|
||||
])
|
||||
13
homeai-character/index.html
Normal file
13
homeai-character/index.html
Normal file
@@ -0,0 +1,13 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<link rel="icon" type="image/svg+xml" href="/vite.svg" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>HomeAI Dashboard</title>
|
||||
</head>
|
||||
<body class="bg-gray-950 text-gray-100">
|
||||
<div id="root"></div>
|
||||
<script type="module" src="/src/main.jsx"></script>
|
||||
</body>
|
||||
</html>
|
||||
@@ -0,0 +1,38 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>Label</key>
|
||||
<string>com.homeai.character-dashboard</string>
|
||||
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
<string>/opt/homebrew/bin/npx</string>
|
||||
<string>vite</string>
|
||||
<string>--host</string>
|
||||
<string>--port</string>
|
||||
<string>5173</string>
|
||||
</array>
|
||||
|
||||
<key>WorkingDirectory</key>
|
||||
<string>/Users/aodhan/gitea/homeai/homeai-character</string>
|
||||
|
||||
<key>EnvironmentVariables</key>
|
||||
<dict>
|
||||
<key>PATH</key>
|
||||
<string>/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin</string>
|
||||
<key>HOME</key>
|
||||
<string>/Users/aodhan</string>
|
||||
</dict>
|
||||
|
||||
<key>RunAtLoad</key>
|
||||
<true/>
|
||||
<key>KeepAlive</key>
|
||||
<true/>
|
||||
|
||||
<key>StandardOutPath</key>
|
||||
<string>/tmp/homeai-character-dashboard.log</string>
|
||||
<key>StandardErrorPath</key>
|
||||
<string>/tmp/homeai-character-dashboard-error.log</string>
|
||||
</dict>
|
||||
</plist>
|
||||
3397
homeai-character/package-lock.json
generated
Normal file
3397
homeai-character/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
34
homeai-character/package.json
Normal file
34
homeai-character/package.json
Normal file
@@ -0,0 +1,34 @@
|
||||
{
|
||||
"name": "homeai-character",
|
||||
"private": true,
|
||||
"version": "0.0.0",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
"build": "vite build",
|
||||
"lint": "eslint .",
|
||||
"preview": "vite preview"
|
||||
},
|
||||
"dependencies": {
|
||||
"@tailwindcss/vite": "^4.2.1",
|
||||
"ajv": "^8.18.0",
|
||||
"react": "^19.2.0",
|
||||
"react-dom": "^19.2.0",
|
||||
"react-router-dom": "^7.13.1",
|
||||
"tailwindcss": "^4.2.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.39.1",
|
||||
"@types/react": "^19.2.7",
|
||||
"@types/react-dom": "^19.2.3",
|
||||
"@vitejs/plugin-react": "^5.1.1",
|
||||
"eslint": "^9.39.1",
|
||||
"eslint-plugin-react-hooks": "^7.0.1",
|
||||
"eslint-plugin-react-refresh": "^0.4.24",
|
||||
"globals": "^16.5.0",
|
||||
"vite": "^8.0.0-beta.13"
|
||||
},
|
||||
"overrides": {
|
||||
"vite": "^8.0.0-beta.13"
|
||||
}
|
||||
}
|
||||
1
homeai-character/public/vite.svg
Normal file
1
homeai-character/public/vite.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="31.88" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 257"><defs><linearGradient id="IconifyId1813088fe1fbc01fb466" x1="-.828%" x2="57.636%" y1="7.652%" y2="78.411%"><stop offset="0%" stop-color="#41D1FF"></stop><stop offset="100%" stop-color="#BD34FE"></stop></linearGradient><linearGradient id="IconifyId1813088fe1fbc01fb467" x1="43.376%" x2="50.316%" y1="2.242%" y2="89.03%"><stop offset="0%" stop-color="#FFEA83"></stop><stop offset="8.333%" stop-color="#FFDD35"></stop><stop offset="100%" stop-color="#FFA800"></stop></linearGradient></defs><path fill="url(#IconifyId1813088fe1fbc01fb466)" d="M255.153 37.938L134.897 252.976c-2.483 4.44-8.862 4.466-11.382.048L.875 37.958c-2.746-4.814 1.371-10.646 6.827-9.67l120.385 21.517a6.537 6.537 0 0 0 2.322-.004l117.867-21.483c5.438-.991 9.574 4.796 6.877 9.62Z"></path><path fill="url(#IconifyId1813088fe1fbc01fb467)" d="M185.432.063L96.44 17.501a3.268 3.268 0 0 0-2.634 3.014l-5.474 92.456a3.268 3.268 0 0 0 3.997 3.378l24.777-5.718c2.318-.535 4.413 1.507 3.936 3.838l-7.361 36.047c-.495 2.426 1.782 4.5 4.151 3.78l15.304-4.649c2.372-.72 4.652 1.36 4.15 3.788l-11.698 56.621c-.732 3.542 3.979 5.473 5.943 2.437l1.313-2.028l72.516-144.72c1.215-2.423-.88-5.186-3.54-4.672l-25.505 4.922c-2.396.462-4.435-1.77-3.759-4.114l16.646-57.705c.677-2.35-1.37-4.583-3.769-4.113Z"></path></svg>
|
||||
|
After Width: | Height: | Size: 1.5 KiB |
82
homeai-character/schema/character.schema.json
Normal file
82
homeai-character/schema/character.schema.json
Normal file
@@ -0,0 +1,82 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "HomeAI Character Config",
|
||||
"version": "1",
|
||||
"type": "object",
|
||||
"required": ["schema_version", "name", "system_prompt", "tts"],
|
||||
"properties": {
|
||||
"schema_version": { "type": "integer", "const": 1 },
|
||||
"name": { "type": "string" },
|
||||
"display_name": { "type": "string" },
|
||||
"description": { "type": "string" },
|
||||
|
||||
"system_prompt": { "type": "string" },
|
||||
|
||||
"model_overrides": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"primary": { "type": "string" },
|
||||
"fast": { "type": "string" }
|
||||
}
|
||||
},
|
||||
|
||||
"tts": {
|
||||
"type": "object",
|
||||
"required": ["engine"],
|
||||
"properties": {
|
||||
"engine": {
|
||||
"type": "string",
|
||||
"enum": ["kokoro", "chatterbox", "qwen3", "elevenlabs"]
|
||||
},
|
||||
"voice_ref_path": { "type": "string" },
|
||||
"kokoro_voice": { "type": "string" },
|
||||
"elevenlabs_voice_id": { "type": "string" },
|
||||
"elevenlabs_model": { "type": "string", "default": "eleven_monolingual_v1" },
|
||||
"speed": { "type": "number", "default": 1.0 }
|
||||
}
|
||||
},
|
||||
|
||||
"live2d_expressions": {
|
||||
"type": "object",
|
||||
"description": "Maps semantic state to VTube Studio hotkey ID",
|
||||
"properties": {
|
||||
"idle": { "type": "string" },
|
||||
"listening": { "type": "string" },
|
||||
"thinking": { "type": "string" },
|
||||
"speaking": { "type": "string" },
|
||||
"happy": { "type": "string" },
|
||||
"sad": { "type": "string" },
|
||||
"surprised": { "type": "string" },
|
||||
"error": { "type": "string" }
|
||||
}
|
||||
},
|
||||
|
||||
"vtube_ws_triggers": {
|
||||
"type": "object",
|
||||
"description": "VTube Studio WebSocket actions keyed by event name",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": { "type": "string", "enum": ["hotkey", "parameter"] },
|
||||
"id": { "type": "string" },
|
||||
"value": { "type": "number" }
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"custom_rules": {
|
||||
"type": "array",
|
||||
"description": "Trigger/response overrides for specific contexts",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"trigger": { "type": "string" },
|
||||
"response": { "type": "string" },
|
||||
"condition": { "type": "string" }
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"notes": { "type": "string" }
|
||||
}
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# homeai-character/setup.sh — P5: Character Manager + persona JSON
|
||||
#
|
||||
# Components:
|
||||
# - character.schema.json — v1 character config schema
|
||||
# - aria.json — default character config
|
||||
# - Character Manager UI — Vite/React app for editing (dev server :5173)
|
||||
#
|
||||
# No hard runtime dependencies (can be developed standalone).
|
||||
# Output (aria.json) is consumed by P3, P4, P7.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
|
||||
source "${REPO_DIR}/scripts/common.sh"
|
||||
|
||||
log_section "P5: Character Manager"
|
||||
detect_platform
|
||||
|
||||
# ─── Prerequisite check ────────────────────────────────────────────────────────
|
||||
log_info "Checking prerequisites..."
|
||||
|
||||
if ! command_exists node; then
|
||||
log_warn "Node.js not found — required for Character Manager UI"
|
||||
log_warn "Install: https://nodejs.org (v18+ recommended)"
|
||||
fi
|
||||
|
||||
# ─── TODO: Implementation ──────────────────────────────────────────────────────
|
||||
cat <<'EOF'
|
||||
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ P5: homeai-character — NOT YET IMPLEMENTED │
|
||||
│ │
|
||||
│ Implementation steps: │
|
||||
│ 1. Create schema/character.schema.json (v1) │
|
||||
│ 2. Create characters/aria.json (default persona) │
|
||||
│ 3. Set up Vite/React project in src/ │
|
||||
│ 4. Extend character-manager.jsx with full UI │
|
||||
│ 5. Add schema validation (ajv) │
|
||||
│ 6. Add expression mapper UI for Live2D │
|
||||
│ 7. Wire export to ~/.openclaw/characters/ │
|
||||
│ │
|
||||
│ Dev server: │
|
||||
│ cd homeai-character && npm run dev → http://localhost:5173 │
|
||||
│ │
|
||||
│ Interface contracts: │
|
||||
│ Output: ~/.openclaw/characters/<name>.json │
|
||||
│ Schema: homeai-character/schema/character.schema.json │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
|
||||
EOF
|
||||
|
||||
log_info "P5 is not yet implemented. See homeai-character/PLAN.md for details."
|
||||
exit 0
|
||||
22
homeai-character/src/App.css
Normal file
22
homeai-character/src/App.css
Normal file
@@ -0,0 +1,22 @@
|
||||
/* Scrollbar styling for dark theme */
|
||||
::-webkit-scrollbar {
|
||||
width: 8px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-track {
|
||||
background: #0a0a0f;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb {
|
||||
background: #374151;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb:hover {
|
||||
background: #4b5563;
|
||||
}
|
||||
|
||||
/* Selection color */
|
||||
::selection {
|
||||
background: rgba(99, 102, 241, 0.3);
|
||||
}
|
||||
112
homeai-character/src/App.jsx
Normal file
112
homeai-character/src/App.jsx
Normal file
@@ -0,0 +1,112 @@
|
||||
import { BrowserRouter, Routes, Route, NavLink } from 'react-router-dom';
|
||||
import ServiceStatus from './ServiceStatus';
|
||||
import CharacterProfiles from './CharacterProfiles';
|
||||
import CharacterManager from './CharacterManager';
|
||||
|
||||
function NavItem({ to, children, icon }) {
|
||||
return (
|
||||
<NavLink
|
||||
to={to}
|
||||
className={({ isActive }) =>
|
||||
`flex items-center gap-3 px-4 py-2.5 rounded-lg text-sm font-medium transition-colors ${
|
||||
isActive
|
||||
? 'bg-gray-800 text-white'
|
||||
: 'text-gray-400 hover:text-gray-200 hover:bg-gray-800/50'
|
||||
}`
|
||||
}
|
||||
>
|
||||
{icon}
|
||||
<span>{children}</span>
|
||||
</NavLink>
|
||||
);
|
||||
}
|
||||
|
||||
function Layout({ children }) {
|
||||
return (
|
||||
<div className="min-h-screen bg-gray-950 flex">
|
||||
{/* Sidebar */}
|
||||
<aside className="w-64 bg-gray-900 border-r border-gray-800 flex flex-col fixed h-full">
|
||||
{/* Logo */}
|
||||
<div className="px-6 py-5 border-b border-gray-800">
|
||||
<div className="flex items-center gap-3">
|
||||
<div className="w-9 h-9 rounded-lg bg-gradient-to-br from-indigo-500 to-purple-600 flex items-center justify-center">
|
||||
<svg className="w-5 h-5 text-white" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
|
||||
<path strokeLinecap="round" strokeLinejoin="round" d="M2.25 12l8.954-8.955c.44-.439 1.152-.439 1.591 0L21.75 12M4.5 9.75v10.125c0 .621.504 1.125 1.125 1.125H9.75v-4.875c0-.621.504-1.125 1.125-1.125h2.25c.621 0 1.125.504 1.125 1.125V21h4.125c.621 0 1.125-.504 1.125-1.125V9.75M8.25 21h8.25" />
|
||||
</svg>
|
||||
</div>
|
||||
<div>
|
||||
<h1 className="text-lg font-bold text-white tracking-tight">HomeAI</h1>
|
||||
<p className="text-xs text-gray-500">LINDBLUM</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Nav */}
|
||||
<nav className="flex-1 px-3 py-4 space-y-1">
|
||||
<NavItem
|
||||
to="/"
|
||||
icon={
|
||||
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
|
||||
<path strokeLinecap="round" strokeLinejoin="round" d="M3.75 6A2.25 2.25 0 016 3.75h2.25A2.25 2.25 0 0110.5 6v2.25a2.25 2.25 0 01-2.25 2.25H6a2.25 2.25 0 01-2.25-2.25V6zM3.75 15.75A2.25 2.25 0 016 13.5h2.25a2.25 2.25 0 012.25 2.25V18a2.25 2.25 0 01-2.25 2.25H6A2.25 2.25 0 013.75 18v-2.25zM13.5 6a2.25 2.25 0 012.25-2.25H18A2.25 2.25 0 0120.25 6v2.25A2.25 2.25 0 0118 10.5h-2.25a2.25 2.25 0 01-2.25-2.25V6zM13.5 15.75a2.25 2.25 0 012.25-2.25H18a2.25 2.25 0 012.25 2.25V18A2.25 2.25 0 0118 20.25h-2.25A2.25 2.25 0 0113.5 18v-2.25z" />
|
||||
</svg>
|
||||
}
|
||||
>
|
||||
Dashboard
|
||||
</NavItem>
|
||||
|
||||
<NavItem
|
||||
to="/characters"
|
||||
icon={
|
||||
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
|
||||
<path strokeLinecap="round" strokeLinejoin="round" d="M15.75 6a3.75 3.75 0 11-7.5 0 3.75 3.75 0 017.5 0zM4.501 20.118a7.5 7.5 0 0114.998 0A17.933 17.933 0 0112 21.75c-2.676 0-5.216-.584-7.499-1.632z" />
|
||||
</svg>
|
||||
}
|
||||
>
|
||||
Characters
|
||||
</NavItem>
|
||||
|
||||
<NavItem
|
||||
to="/editor"
|
||||
icon={
|
||||
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
|
||||
<path strokeLinecap="round" strokeLinejoin="round" d="M9.594 3.94c.09-.542.56-.94 1.11-.94h2.593c.55 0 1.02.398 1.11.94l.213 1.281c.063.374.313.686.645.87.074.04.147.083.22.127.324.196.72.257 1.075.124l1.217-.456a1.125 1.125 0 011.37.49l1.296 2.247a1.125 1.125 0 01-.26 1.431l-1.003.827c-.293.24-.438.613-.431.992a6.759 6.759 0 010 .255c-.007.378.138.75.43.99l1.005.828c.424.35.534.954.26 1.43l-1.298 2.247a1.125 1.125 0 01-1.369.491l-1.217-.456c-.355-.133-.75-.072-1.076.124a6.57 6.57 0 01-.22.128c-.331.183-.581.495-.644.869l-.213 1.28c-.09.543-.56.941-1.11.941h-2.594c-.55 0-1.02-.398-1.11-.94l-.213-1.281c-.062-.374-.312-.686-.644-.87a6.52 6.52 0 01-.22-.127c-.325-.196-.72-.257-1.076-.124l-1.217.456a1.125 1.125 0 01-1.369-.49l-1.297-2.247a1.125 1.125 0 01.26-1.431l1.004-.827c.292-.24.437-.613.43-.992a6.932 6.932 0 010-.255c.007-.378-.138-.75-.43-.99l-1.004-.828a1.125 1.125 0 01-.26-1.43l1.297-2.247a1.125 1.125 0 011.37-.491l1.216.456c.356.133.751.072 1.076-.124.072-.044.146-.087.22-.128.332-.183.582-.495.644-.869l.214-1.281z" />
|
||||
<path strokeLinecap="round" strokeLinejoin="round" d="M15 12a3 3 0 11-6 0 3 3 0 016 0z" />
|
||||
</svg>
|
||||
}
|
||||
>
|
||||
Editor
|
||||
</NavItem>
|
||||
</nav>
|
||||
|
||||
{/* Footer */}
|
||||
<div className="px-6 py-4 border-t border-gray-800">
|
||||
<p className="text-xs text-gray-600">HomeAI v0.1.0</p>
|
||||
<p className="text-xs text-gray-700">Mac Mini M4 Pro</p>
|
||||
</div>
|
||||
</aside>
|
||||
|
||||
{/* Main content */}
|
||||
<main className="flex-1 ml-64 p-8">
|
||||
<div className="max-w-6xl mx-auto">
|
||||
{children}
|
||||
</div>
|
||||
</main>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function App() {
|
||||
return (
|
||||
<BrowserRouter>
|
||||
<Layout>
|
||||
<Routes>
|
||||
<Route path="/" element={<ServiceStatus />} />
|
||||
<Route path="/characters" element={<CharacterProfiles />} />
|
||||
<Route path="/editor" element={<CharacterManager />} />
|
||||
</Routes>
|
||||
</Layout>
|
||||
</BrowserRouter>
|
||||
);
|
||||
}
|
||||
|
||||
export default App;
|
||||
585
homeai-character/src/CharacterManager.jsx
Normal file
585
homeai-character/src/CharacterManager.jsx
Normal file
@@ -0,0 +1,585 @@
|
||||
import React, { useState, useEffect, useRef } from 'react';
|
||||
import { validateCharacter } from './SchemaValidator';
|
||||
|
||||
const DEFAULT_CHARACTER = {
|
||||
schema_version: 1,
|
||||
name: "aria",
|
||||
display_name: "Aria",
|
||||
description: "Default HomeAI assistant persona",
|
||||
system_prompt: "You are Aria, a warm, curious, and helpful AI assistant living in the home. You speak naturally and conversationally — never robotic. You are knowledgeable but never condescending. You remember the people you live with and build on those memories over time. Keep responses concise when controlling smart home devices; be more expressive in casual conversation. Never break character.",
|
||||
model_overrides: {
|
||||
primary: "llama3.3:70b",
|
||||
fast: "qwen2.5:7b"
|
||||
},
|
||||
tts: {
|
||||
engine: "kokoro",
|
||||
kokoro_voice: "af_heart",
|
||||
speed: 1.0
|
||||
},
|
||||
live2d_expressions: {
|
||||
idle: "expr_idle",
|
||||
listening: "expr_listening",
|
||||
thinking: "expr_thinking",
|
||||
speaking: "expr_speaking",
|
||||
happy: "expr_happy",
|
||||
sad: "expr_sad",
|
||||
surprised: "expr_surprised",
|
||||
error: "expr_error"
|
||||
},
|
||||
vtube_ws_triggers: {
|
||||
thinking: { type: "hotkey", id: "expr_thinking" },
|
||||
speaking: { type: "hotkey", id: "expr_speaking" },
|
||||
idle: { type: "hotkey", id: "expr_idle" }
|
||||
},
|
||||
custom_rules: [
|
||||
{ trigger: "good morning", response: "Good morning! How did you sleep?", condition: "time_of_day == morning" }
|
||||
],
|
||||
notes: ""
|
||||
};
|
||||
|
||||
export default function CharacterManager() {
|
||||
const [character, setCharacter] = useState(() => {
|
||||
// Check if we're editing from profiles page
|
||||
const editData = sessionStorage.getItem('edit_character');
|
||||
if (editData) {
|
||||
sessionStorage.removeItem('edit_character');
|
||||
try {
|
||||
return JSON.parse(editData);
|
||||
} catch {
|
||||
return DEFAULT_CHARACTER;
|
||||
}
|
||||
}
|
||||
return DEFAULT_CHARACTER;
|
||||
});
|
||||
const [error, setError] = useState(null);
|
||||
const [saved, setSaved] = useState(false);
|
||||
|
||||
// TTS preview state
|
||||
const [ttsState, setTtsState] = useState('idle'); // idle | loading | playing
|
||||
const [previewText, setPreviewText] = useState('');
|
||||
const audioRef = useRef(null);
|
||||
const objectUrlRef = useRef(null);
|
||||
|
||||
// ElevenLabs state
|
||||
const [elevenLabsApiKey, setElevenLabsApiKey] = useState(localStorage.getItem('elevenlabs_api_key') || '');
|
||||
const [elevenLabsVoices, setElevenLabsVoices] = useState([]);
|
||||
const [elevenLabsModels, setElevenLabsModels] = useState([]);
|
||||
const [isLoadingElevenLabs, setIsLoadingElevenLabs] = useState(false);
|
||||
|
||||
const fetchElevenLabsData = async (key) => {
|
||||
if (!key) return;
|
||||
setIsLoadingElevenLabs(true);
|
||||
try {
|
||||
const headers = { 'xi-api-key': key };
|
||||
const [voicesRes, modelsRes] = await Promise.all([
|
||||
fetch('https://api.elevenlabs.io/v1/voices', { headers }),
|
||||
fetch('https://api.elevenlabs.io/v1/models', { headers })
|
||||
]);
|
||||
if (!voicesRes.ok || !modelsRes.ok) {
|
||||
throw new Error('Failed to fetch from ElevenLabs API (check API key)');
|
||||
}
|
||||
const voicesData = await voicesRes.json();
|
||||
const modelsData = await modelsRes.json();
|
||||
setElevenLabsVoices(voicesData.voices || []);
|
||||
setElevenLabsModels(modelsData.filter(m => m.can_do_text_to_speech) || []);
|
||||
localStorage.setItem('elevenlabs_api_key', key);
|
||||
} catch (err) {
|
||||
setError(err.message);
|
||||
} finally {
|
||||
setIsLoadingElevenLabs(false);
|
||||
}
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
if (elevenLabsApiKey && character.tts.engine === 'elevenlabs') {
|
||||
fetchElevenLabsData(elevenLabsApiKey);
|
||||
}
|
||||
}, [character.tts.engine]);
|
||||
|
||||
// Cleanup audio on unmount
|
||||
useEffect(() => {
|
||||
return () => {
|
||||
if (audioRef.current) { audioRef.current.pause(); audioRef.current = null; }
|
||||
if (objectUrlRef.current) { URL.revokeObjectURL(objectUrlRef.current); }
|
||||
window.speechSynthesis.cancel();
|
||||
};
|
||||
}, []);
|
||||
|
||||
const handleExport = () => {
|
||||
try {
|
||||
validateCharacter(character);
|
||||
setError(null);
|
||||
const dataStr = "data:text/json;charset=utf-8," + encodeURIComponent(JSON.stringify(character, null, 2));
|
||||
const a = document.createElement('a');
|
||||
a.href = dataStr;
|
||||
a.download = `${character.name || 'character'}.json`;
|
||||
document.body.appendChild(a);
|
||||
a.click();
|
||||
a.remove();
|
||||
} catch (err) {
|
||||
setError(err.message);
|
||||
}
|
||||
};
|
||||
|
||||
const handleSaveToProfiles = () => {
|
||||
try {
|
||||
validateCharacter(character);
|
||||
setError(null);
|
||||
|
||||
const profileId = sessionStorage.getItem('edit_character_profile_id');
|
||||
const storageKey = 'homeai_characters';
|
||||
const raw = localStorage.getItem(storageKey);
|
||||
let profiles = raw ? JSON.parse(raw) : [];
|
||||
|
||||
if (profileId) {
|
||||
profiles = profiles.map(p =>
|
||||
p.id === profileId ? { ...p, data: character } : p
|
||||
);
|
||||
sessionStorage.removeItem('edit_character_profile_id');
|
||||
} else {
|
||||
const id = character.name + '_' + Date.now();
|
||||
profiles.push({ id, data: character, image: null, addedAt: new Date().toISOString() });
|
||||
}
|
||||
|
||||
localStorage.setItem(storageKey, JSON.stringify(profiles));
|
||||
setSaved(true);
|
||||
setTimeout(() => setSaved(false), 2000);
|
||||
} catch (err) {
|
||||
setError(err.message);
|
||||
}
|
||||
};
|
||||
|
||||
const handleImport = (e) => {
|
||||
const file = e.target.files[0];
|
||||
if (!file) return;
|
||||
const reader = new FileReader();
|
||||
reader.onload = (e) => {
|
||||
try {
|
||||
const importedChar = JSON.parse(e.target.result);
|
||||
validateCharacter(importedChar);
|
||||
setCharacter(importedChar);
|
||||
setError(null);
|
||||
} catch (err) {
|
||||
setError(`Import failed: ${err.message}`);
|
||||
}
|
||||
};
|
||||
reader.readAsText(file);
|
||||
};
|
||||
|
||||
const handleChange = (field, value) => {
|
||||
setCharacter(prev => ({ ...prev, [field]: value }));
|
||||
};
|
||||
|
||||
const handleNestedChange = (parent, field, value) => {
|
||||
setCharacter(prev => ({
|
||||
...prev,
|
||||
[parent]: { ...prev[parent], [field]: value }
|
||||
}));
|
||||
};
|
||||
|
||||
const handleRuleChange = (index, field, value) => {
|
||||
setCharacter(prev => {
|
||||
const newRules = [...(prev.custom_rules || [])];
|
||||
newRules[index] = { ...newRules[index], [field]: value };
|
||||
return { ...prev, custom_rules: newRules };
|
||||
});
|
||||
};
|
||||
|
||||
const addRule = () => {
|
||||
setCharacter(prev => ({
|
||||
...prev,
|
||||
custom_rules: [...(prev.custom_rules || []), { trigger: "", response: "", condition: "" }]
|
||||
}));
|
||||
};
|
||||
|
||||
const removeRule = (index) => {
|
||||
setCharacter(prev => {
|
||||
const newRules = [...(prev.custom_rules || [])];
|
||||
newRules.splice(index, 1);
|
||||
return { ...prev, custom_rules: newRules };
|
||||
});
|
||||
};
|
||||
|
||||
const stopPreview = () => {
|
||||
if (audioRef.current) {
|
||||
audioRef.current.pause();
|
||||
audioRef.current = null;
|
||||
}
|
||||
if (objectUrlRef.current) {
|
||||
URL.revokeObjectURL(objectUrlRef.current);
|
||||
objectUrlRef.current = null;
|
||||
}
|
||||
window.speechSynthesis.cancel();
|
||||
setTtsState('idle');
|
||||
};
|
||||
|
||||
const previewTTS = async () => {
|
||||
stopPreview();
|
||||
const text = previewText || `Hi, I am ${character.display_name}. This is a preview of my voice.`;
|
||||
|
||||
if (character.tts.engine === 'kokoro') {
|
||||
setTtsState('loading');
|
||||
let blob;
|
||||
try {
|
||||
const response = await fetch('/api/tts', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ text, voice: character.tts.kokoro_voice })
|
||||
});
|
||||
if (!response.ok) throw new Error('TTS bridge returned ' + response.status);
|
||||
blob = await response.blob();
|
||||
} catch (err) {
|
||||
setTtsState('idle');
|
||||
setError(`Kokoro preview failed: ${err.message}. Falling back to browser TTS.`);
|
||||
runBrowserTTS(text);
|
||||
return;
|
||||
}
|
||||
const url = URL.createObjectURL(blob);
|
||||
objectUrlRef.current = url;
|
||||
const audio = new Audio(url);
|
||||
audio.playbackRate = character.tts.speed;
|
||||
audio.onended = () => { stopPreview(); };
|
||||
audio.onerror = () => { stopPreview(); };
|
||||
audioRef.current = audio;
|
||||
setTtsState('playing');
|
||||
audio.play().catch(() => { /* interrupted — stopPreview already handles cleanup */ });
|
||||
} else {
|
||||
runBrowserTTS(text);
|
||||
}
|
||||
};
|
||||
|
||||
const runBrowserTTS = (text) => {
|
||||
const utterance = new SpeechSynthesisUtterance(text);
|
||||
utterance.rate = character.tts.speed;
|
||||
const voices = window.speechSynthesis.getVoices();
|
||||
const preferredVoice = voices.find(v => v.lang.startsWith('en') && v.name.includes('Female')) || voices.find(v => v.lang.startsWith('en'));
|
||||
if (preferredVoice) utterance.voice = preferredVoice;
|
||||
setTtsState('playing');
|
||||
utterance.onend = () => setTtsState('idle');
|
||||
window.speechSynthesis.cancel();
|
||||
window.speechSynthesis.speak(utterance);
|
||||
};
|
||||
|
||||
  // Shared Tailwind class strings so every form control in the editor
  // renders with the same dark-theme look.
  const inputClass = "w-full bg-gray-800 border border-gray-700 text-gray-200 p-2 rounded-lg focus:border-indigo-500 focus:ring-1 focus:ring-indigo-500 outline-none transition-colors";
  const selectClass = "w-full bg-gray-800 border border-gray-700 text-gray-200 p-2 rounded-lg focus:border-indigo-500 focus:ring-1 focus:ring-indigo-500 outline-none transition-colors";
  const labelClass = "block text-sm font-medium text-gray-400 mb-1";
  const cardClass = "bg-gray-900 border border-gray-800 p-5 rounded-xl space-y-4";
|
||||
|
||||
  // Editor UI. All fields write through handleChange / handleNestedChange /
  // handleRuleChange into the `character` object held by the enclosing component.
  return (
    <div className="space-y-6">
      {/* Header: current character name + import / save / export actions */}
      <div className="flex justify-between items-center">
        <div>
          <h1 className="text-3xl font-bold text-gray-100">Character Editor</h1>
          <p className="text-sm text-gray-500 mt-1">
            Editing: {character.display_name || character.name}
          </p>
        </div>
        <div className="flex gap-3">
          <label className="cursor-pointer flex items-center gap-2 px-4 py-2 bg-gray-800 hover:bg-gray-700 text-gray-300 rounded-lg border border-gray-700 transition-colors">
            <svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
              <path strokeLinecap="round" strokeLinejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5m-13.5-9L12 3m0 0l4.5 4.5M12 3v13.5" />
            </svg>
            Import
            <input type="file" accept=".json" className="hidden" onChange={handleImport} />
          </label>
          <button
            onClick={handleSaveToProfiles}
            className={`flex items-center gap-2 px-4 py-2 rounded-lg transition-colors ${
              saved
                ? 'bg-emerald-600 text-white'
                : 'bg-indigo-600 hover:bg-indigo-500 text-white'
            }`}
          >
            <svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
              {saved
                ? <path strokeLinecap="round" strokeLinejoin="round" d="M4.5 12.75l6 6 9-13.5" />
                : <path strokeLinecap="round" strokeLinejoin="round" d="M17.593 3.322c1.1.128 1.907 1.077 1.907 2.185V21L12 17.25 4.5 21V5.507c0-1.108.806-2.057 1.907-2.185a48.507 48.507 0 0111.186 0z" />
              }
            </svg>
            {saved ? 'Saved' : 'Save to Profiles'}
          </button>
          <button
            onClick={handleExport}
            className="flex items-center gap-2 px-4 py-2 bg-gray-800 hover:bg-gray-700 text-gray-300 rounded-lg border border-gray-700 transition-colors"
          >
            <svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
              <path strokeLinecap="round" strokeLinejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5M16.5 12L12 16.5m0 0L7.5 12m4.5 4.5V3" />
            </svg>
            Export JSON
          </button>
        </div>
      </div>

      {/* Error banner (import / TTS-preview failures) */}
      {error && (
        <div className="bg-red-900/30 border border-red-500/50 text-red-300 px-4 py-3 rounded-lg text-sm">
          {error}
        </div>
      )}

      <div className="grid grid-cols-1 md:grid-cols-2 gap-6">
        {/* Basic Info */}
        <div className={cardClass}>
          <h2 className="text-lg font-semibold text-gray-200">Basic Info</h2>
          <div>
            <label className={labelClass}>Name (ID)</label>
            <input type="text" className={inputClass} value={character.name} onChange={(e) => handleChange('name', e.target.value)} />
          </div>
          <div>
            <label className={labelClass}>Display Name</label>
            <input type="text" className={inputClass} value={character.display_name} onChange={(e) => handleChange('display_name', e.target.value)} />
          </div>
          <div>
            <label className={labelClass}>Description</label>
            <input type="text" className={inputClass} value={character.description} onChange={(e) => handleChange('description', e.target.value)} />
          </div>
        </div>

        {/* TTS Configuration */}
        <div className={cardClass}>
          <h2 className="text-lg font-semibold text-gray-200">TTS Configuration</h2>
          <div>
            <label className={labelClass}>Engine</label>
            <select className={selectClass} value={character.tts.engine} onChange={(e) => handleNestedChange('tts', 'engine', e.target.value)}>
              <option value="kokoro">Kokoro</option>
              <option value="chatterbox">Chatterbox</option>
              <option value="qwen3">Qwen3</option>
              <option value="elevenlabs">ElevenLabs</option>
            </select>
          </div>

          {/* ElevenLabs-only settings; the API key lives in component state, not in the saved profile */}
          {character.tts.engine === 'elevenlabs' && (
            <div className="space-y-4 border border-gray-700 p-4 rounded-lg bg-gray-800/50">
              <div>
                <label className="block text-xs font-medium mb-1 text-gray-500">ElevenLabs API Key (Local Use Only)</label>
                <div className="flex gap-2">
                  <input type="password" placeholder="sk_..." className={inputClass + " text-sm"} value={elevenLabsApiKey} onChange={(e) => setElevenLabsApiKey(e.target.value)} />
                  <button onClick={() => fetchElevenLabsData(elevenLabsApiKey)} disabled={isLoadingElevenLabs} className="bg-indigo-600 text-white px-3 py-1 rounded-lg text-sm whitespace-nowrap hover:bg-indigo-500 disabled:opacity-50 transition-colors">
                    {isLoadingElevenLabs ? 'Loading...' : 'Fetch'}
                  </button>
                </div>
              </div>
              {/* Dropdowns appear once voices/models have been fetched; otherwise free-text entry */}
              <div>
                <label className={labelClass}>Voice ID</label>
                {elevenLabsVoices.length > 0 ? (
                  <select className={selectClass} value={character.tts.elevenlabs_voice_id || ''} onChange={(e) => handleNestedChange('tts', 'elevenlabs_voice_id', e.target.value)}>
                    <option value="">-- Select Voice --</option>
                    {elevenLabsVoices.map(v => (
                      <option key={v.voice_id} value={v.voice_id}>{v.name} ({v.category})</option>
                    ))}
                  </select>
                ) : (
                  <input type="text" className={inputClass} value={character.tts.elevenlabs_voice_id || ''} onChange={(e) => handleNestedChange('tts', 'elevenlabs_voice_id', e.target.value)} placeholder="e.g. 21m00Tcm4TlvDq8ikWAM" />
                )}
              </div>
              <div>
                <label className={labelClass}>Model</label>
                {elevenLabsModels.length > 0 ? (
                  <select className={selectClass} value={character.tts.elevenlabs_model || 'eleven_monolingual_v1'} onChange={(e) => handleNestedChange('tts', 'elevenlabs_model', e.target.value)}>
                    <option value="">-- Select Model --</option>
                    {elevenLabsModels.map(m => (
                      <option key={m.model_id} value={m.model_id}>{m.name} ({m.model_id})</option>
                    ))}
                  </select>
                ) : (
                  <input type="text" className={inputClass} value={character.tts.elevenlabs_model || 'eleven_monolingual_v1'} onChange={(e) => handleNestedChange('tts', 'elevenlabs_model', e.target.value)} placeholder="e.g. eleven_monolingual_v1" />
                )}
              </div>
            </div>
          )}

          {/* Kokoro voice catalog (id prefix encodes accent/gender: a=American, b=British, f/m) */}
          {character.tts.engine === 'kokoro' && (
            <div>
              <label className={labelClass}>Kokoro Voice</label>
              <select className={selectClass} value={character.tts.kokoro_voice || 'af_heart'} onChange={(e) => handleNestedChange('tts', 'kokoro_voice', e.target.value)}>
                <option value="af_heart">af_heart (American Female)</option>
                <option value="af_alloy">af_alloy (American Female)</option>
                <option value="af_aoede">af_aoede (American Female)</option>
                <option value="af_bella">af_bella (American Female)</option>
                <option value="af_jessica">af_jessica (American Female)</option>
                <option value="af_kore">af_kore (American Female)</option>
                <option value="af_nicole">af_nicole (American Female)</option>
                <option value="af_nova">af_nova (American Female)</option>
                <option value="af_river">af_river (American Female)</option>
                <option value="af_sarah">af_sarah (American Female)</option>
                <option value="af_sky">af_sky (American Female)</option>
                <option value="am_adam">am_adam (American Male)</option>
                <option value="am_echo">am_echo (American Male)</option>
                <option value="am_eric">am_eric (American Male)</option>
                <option value="am_fenrir">am_fenrir (American Male)</option>
                <option value="am_liam">am_liam (American Male)</option>
                <option value="am_michael">am_michael (American Male)</option>
                <option value="am_onyx">am_onyx (American Male)</option>
                <option value="am_puck">am_puck (American Male)</option>
                <option value="am_santa">am_santa (American Male)</option>
                <option value="bf_alice">bf_alice (British Female)</option>
                <option value="bf_emma">bf_emma (British Female)</option>
                <option value="bf_isabella">bf_isabella (British Female)</option>
                <option value="bf_lily">bf_lily (British Female)</option>
                <option value="bm_daniel">bm_daniel (British Male)</option>
                <option value="bm_fable">bm_fable (British Male)</option>
                <option value="bm_george">bm_george (British Male)</option>
                <option value="bm_lewis">bm_lewis (British Male)</option>
              </select>
            </div>
          )}

          {character.tts.engine === 'chatterbox' && (
            <div>
              <label className={labelClass}>Voice Reference Path</label>
              <input type="text" className={inputClass} value={character.tts.voice_ref_path || ''} onChange={(e) => handleNestedChange('tts', 'voice_ref_path', e.target.value)} />
            </div>
          )}

          <div>
            <label className={labelClass}>Speed: {character.tts.speed}</label>
            <input type="range" min="0.5" max="2.0" step="0.1" className="w-full accent-indigo-500" value={character.tts.speed} onChange={(e) => handleNestedChange('tts', 'speed', parseFloat(e.target.value))} />
          </div>
          <div>
            <label className={labelClass}>Preview Text</label>
            <input
              type="text"
              className={inputClass}
              value={previewText}
              onChange={(e) => setPreviewText(e.target.value)}
              placeholder={`Hi, I am ${character.display_name}. This is a preview of my voice.`}
            />
          </div>
          {/* Preview / Stop controls; button color tracks ttsState (idle → loading → playing) */}
          <div className="flex gap-2">
            <button
              onClick={previewTTS}
              disabled={ttsState === 'loading'}
              className={`flex-1 flex items-center justify-center gap-2 px-4 py-2 rounded-lg transition-colors ${
                ttsState === 'loading'
                  ? 'bg-indigo-800 text-indigo-300 cursor-wait'
                  : ttsState === 'playing'
                  ? 'bg-emerald-600 hover:bg-emerald-500 text-white'
                  : 'bg-indigo-600 hover:bg-indigo-500 text-white'
              }`}
            >
              {ttsState === 'loading' && (
                <svg className="w-4 h-4 animate-spin" viewBox="0 0 24 24" fill="none">
                  <circle className="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" strokeWidth="4" />
                  <path className="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4z" />
                </svg>
              )}
              {ttsState === 'loading' ? 'Synthesizing...' : ttsState === 'playing' ? 'Playing...' : 'Preview Voice'}
            </button>
            {ttsState !== 'idle' && (
              <button
                onClick={stopPreview}
                className="px-4 py-2 bg-red-600 hover:bg-red-500 text-white rounded-lg transition-colors"
              >
                Stop
              </button>
            )}
          </div>
          <p className="text-xs text-gray-600">
            {character.tts.engine === 'kokoro'
              ? 'Previews via local Kokoro TTS bridge (port 8081 → Wyoming 10301).'
              : 'Uses browser TTS for preview. Local TTS available with Kokoro engine.'}
          </p>
        </div>
      </div>

      {/* System Prompt */}
      <div className={cardClass}>
        <div className="flex justify-between items-center">
          <h2 className="text-lg font-semibold text-gray-200">System Prompt</h2>
          <span className="text-xs text-gray-600">{character.system_prompt.length} chars</span>
        </div>
        <textarea
          className={inputClass + " h-32 resize-y"}
          value={character.system_prompt}
          onChange={(e) => handleChange('system_prompt', e.target.value)}
        />
      </div>

      <div className="grid grid-cols-1 md:grid-cols-2 gap-6">
        {/* Live2D Expressions: one text field per expression key in the profile */}
        <div className={cardClass}>
          <h2 className="text-lg font-semibold text-gray-200">Live2D Expressions</h2>
          {Object.entries(character.live2d_expressions).map(([key, val]) => (
            <div key={key} className="flex justify-between items-center gap-4">
              <label className="text-sm font-medium text-gray-400 w-1/3 capitalize">{key}</label>
              <input type="text" className={inputClass + " w-2/3"} value={val} onChange={(e) => handleNestedChange('live2d_expressions', key, e.target.value)} />
            </div>
          ))}
        </div>

        {/* Model Overrides */}
        <div className={cardClass}>
          <h2 className="text-lg font-semibold text-gray-200">Model Overrides</h2>
          <div>
            <label className={labelClass}>Primary Model</label>
            <select className={selectClass} value={character.model_overrides?.primary || 'llama3.3:70b'} onChange={(e) => handleNestedChange('model_overrides', 'primary', e.target.value)}>
              <option value="llama3.3:70b">llama3.3:70b</option>
              <option value="qwen2.5:7b">qwen2.5:7b</option>
              <option value="qwen3:32b">qwen3:32b</option>
              <option value="codestral:22b">codestral:22b</option>
              <option value="gemma-3-27b">gemma-3-27b</option>
              <option value="DeepSeek-R1-8B">DeepSeek-R1-8B</option>
            </select>
          </div>
          <div>
            <label className={labelClass}>Fast Model</label>
            <select className={selectClass} value={character.model_overrides?.fast || 'qwen2.5:7b'} onChange={(e) => handleNestedChange('model_overrides', 'fast', e.target.value)}>
              <option value="qwen2.5:7b">qwen2.5:7b</option>
              <option value="llama3.3:70b">llama3.3:70b</option>
              <option value="qwen3:32b">qwen3:32b</option>
              <option value="codestral:22b">codestral:22b</option>
              <option value="gemma-3-27b">gemma-3-27b</option>
              <option value="DeepSeek-R1-8B">DeepSeek-R1-8B</option>
            </select>
          </div>
        </div>
      </div>

      {/* Custom Rules: editable trigger / optional condition / response triples */}
      <div className={cardClass}>
        <div className="flex justify-between items-center">
          <h2 className="text-lg font-semibold text-gray-200">Custom Rules</h2>
          <button onClick={addRule} className="flex items-center gap-1 bg-indigo-600 hover:bg-indigo-500 text-white px-3 py-1.5 rounded-lg text-sm transition-colors">
            <svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
              <path strokeLinecap="round" strokeLinejoin="round" d="M12 4.5v15m7.5-7.5h-15" />
            </svg>
            Add Rule
          </button>
        </div>

        {(!character.custom_rules || character.custom_rules.length === 0) ? (
          <p className="text-sm text-gray-600 italic">No custom rules defined.</p>
        ) : (
          <div className="space-y-4">
            {character.custom_rules.map((rule, idx) => (
              <div key={idx} className="border border-gray-700 p-4 rounded-lg relative bg-gray-800/50">
                <button
                  onClick={() => removeRule(idx)}
                  className="absolute top-3 right-3 text-gray-500 hover:text-red-400 transition-colors"
                  title="Remove Rule"
                >
                  <svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
                    <path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12" />
                  </svg>
                </button>
                <div className="grid grid-cols-1 md:grid-cols-2 gap-4 mt-1">
                  <div>
                    <label className="block text-xs font-medium mb-1 text-gray-500">Trigger</label>
                    <input type="text" className={inputClass + " text-sm"} value={rule.trigger || ''} onChange={(e) => handleRuleChange(idx, 'trigger', e.target.value)} />
                  </div>
                  <div>
                    <label className="block text-xs font-medium mb-1 text-gray-500">Condition (Optional)</label>
                    <input type="text" className={inputClass + " text-sm"} value={rule.condition || ''} onChange={(e) => handleRuleChange(idx, 'condition', e.target.value)} placeholder="e.g. time_of_day == morning" />
                  </div>
                  <div className="md:col-span-2">
                    <label className="block text-xs font-medium mb-1 text-gray-500">Response</label>
                    <textarea className={inputClass + " text-sm h-16 resize-y"} value={rule.response || ''} onChange={(e) => handleRuleChange(idx, 'response', e.target.value)} />
                  </div>
                </div>
              </div>
            ))}
          </div>
        )}
      </div>
    </div>
  );
}
|
||||
297
homeai-character/src/CharacterProfiles.jsx
Normal file
297
homeai-character/src/CharacterProfiles.jsx
Normal file
@@ -0,0 +1,297 @@
|
||||
import { useState, useEffect } from 'react';
|
||||
import { useNavigate } from 'react-router-dom';
|
||||
import { validateCharacter } from './SchemaValidator';
|
||||
|
||||
// localStorage keys: the serialized profile list, and the id of the
// currently active profile.
const STORAGE_KEY = 'homeai_characters';
const ACTIVE_KEY = 'homeai_active_character';
|
||||
|
||||
/**
 * Read the stored profile list from localStorage.
 * Missing, empty, or corrupt data yields an empty list rather than throwing.
 * @returns {Array} stored profiles (possibly empty)
 */
function loadProfiles() {
  try {
    const stored = localStorage.getItem(STORAGE_KEY);
    if (!stored) return [];
    return JSON.parse(stored);
  } catch {
    return [];
  }
}
|
||||
|
||||
/**
 * Persist the full profile list to localStorage under STORAGE_KEY.
 * @param {Array} profiles - complete list to serialize
 */
function saveProfiles(profiles) {
  const serialized = JSON.stringify(profiles);
  localStorage.setItem(STORAGE_KEY, serialized);
}
|
||||
|
||||
/**
 * Id of the currently active profile, or null when none is stored.
 * @returns {string|null}
 */
function getActiveId() {
  const stored = localStorage.getItem(ACTIVE_KEY);
  return stored ? stored : null;
}
|
||||
|
||||
/**
 * Remember `id` as the active profile across page reloads.
 * @param {string} id - profile id to persist
 */
function setActiveId(id) {
  localStorage.setItem(ACTIVE_KEY, id);
}
|
||||
|
||||
/**
 * Profile manager page: imports character JSON files (picker or drag-drop),
 * validates them against the schema, stores them in localStorage, and lets
 * the user activate / edit / export / delete profiles and attach an image.
 */
export default function CharacterProfiles() {
  const [profiles, setProfiles] = useState(loadProfiles);
  const [activeId, setActive] = useState(getActiveId);
  const [error, setError] = useState(null);
  const [dragOver, setDragOver] = useState(false);
  const navigate = useNavigate();

  // Persist on every change so profiles survive reloads.
  useEffect(() => {
    saveProfiles(profiles);
  }, [profiles]);

  // File-picker import; resets the input so the same file can be re-picked.
  const handleImport = (e) => {
    const files = Array.from(e.target?.files || []);
    importFiles(files);
    if (e.target) e.target.value = '';
  };

  // Parse + schema-validate each .json file and append it as a new profile.
  // Non-.json files are silently skipped.
  const importFiles = (files) => {
    files.forEach(file => {
      if (!file.name.endsWith('.json')) return;
      const reader = new FileReader();
      reader.onload = (ev) => {
        try {
          const data = JSON.parse(ev.target.result);
          validateCharacter(data);
          // NOTE(review): id = name + Date.now() — two imports of the same
          // name within one millisecond could collide; confirm acceptable.
          const id = data.name + '_' + Date.now();
          setProfiles(prev => [...prev, { id, data, image: null, addedAt: new Date().toISOString() }]);
          setError(null);
        } catch (err) {
          setError(`Import failed for ${file.name}: ${err.message}`);
        }
      };
      reader.readAsText(file);
    });
  };

  // Drag-and-drop import path.
  const handleDrop = (e) => {
    e.preventDefault();
    setDragOver(false);
    const files = Array.from(e.dataTransfer.files);
    importFiles(files);
  };

  // Attach a card image to a profile, stored inline as a data URL.
  const handleImageUpload = (profileId, e) => {
    const file = e.target.files[0];
    if (!file) return;
    const reader = new FileReader();
    reader.onload = (ev) => {
      setProfiles(prev =>
        prev.map(p => p.id === profileId ? { ...p, image: ev.target.result } : p)
      );
    };
    reader.readAsDataURL(file);
  };

  // Delete a profile; clears the active selection if it was the one removed.
  const removeProfile = (id) => {
    setProfiles(prev => prev.filter(p => p.id !== id));
    if (activeId === id) {
      setActive(null);
      localStorage.removeItem(ACTIVE_KEY);
    }
  };

  // Mark a profile active in both component state and localStorage.
  const activateProfile = (id) => {
    setActive(id);
    setActiveId(id);
  };

  // Download the profile's character data as a pretty-printed JSON file.
  const exportProfile = (profile) => {
    const dataStr = "data:text/json;charset=utf-8," + encodeURIComponent(JSON.stringify(profile.data, null, 2));
    const a = document.createElement('a');
    a.href = dataStr;
    a.download = `${profile.data.name || 'character'}.json`;
    a.click();
  };

  // Hand the profile off to the editor route via sessionStorage.
  const editProfile = (profile) => {
    // Store the profile data for the editor to pick up
    sessionStorage.setItem('edit_character', JSON.stringify(profile.data));
    sessionStorage.setItem('edit_character_profile_id', profile.id);
    navigate('/editor');
  };

  const activeProfile = profiles.find(p => p.id === activeId);

  return (
    <div className="space-y-8">
      {/* Header */}
      <div className="flex items-center justify-between">
        <div>
          <h1 className="text-3xl font-bold text-gray-100">Characters</h1>
          <p className="text-sm text-gray-500 mt-1">
            {profiles.length} profile{profiles.length !== 1 ? 's' : ''} stored
            {activeProfile && (
              <span className="ml-2 text-emerald-400">
                Active: {activeProfile.data.display_name || activeProfile.data.name}
              </span>
            )}
          </p>
        </div>
        <label className="flex items-center gap-2 px-4 py-2 bg-indigo-600 hover:bg-indigo-500 text-white rounded-lg cursor-pointer transition-colors">
          <svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
            <path strokeLinecap="round" strokeLinejoin="round" d="M12 4.5v15m7.5-7.5h-15" />
          </svg>
          Import JSON
          <input type="file" accept=".json" multiple className="hidden" onChange={handleImport} />
        </label>
      </div>

      {error && (
        <div className="bg-red-900/30 border border-red-500/50 text-red-300 px-4 py-3 rounded-lg text-sm">
          {error}
        </div>
      )}

      {/* Drop zone */}
      <div
        onDragOver={(e) => { e.preventDefault(); setDragOver(true); }}
        onDragLeave={() => setDragOver(false)}
        onDrop={handleDrop}
        className={`border-2 border-dashed rounded-xl p-8 text-center transition-colors ${
          dragOver
            ? 'border-indigo-500 bg-indigo-500/10'
            : 'border-gray-700 hover:border-gray-600'
        }`}
      >
        <svg className="w-10 h-10 mx-auto text-gray-600 mb-3" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1}>
          <path strokeLinecap="round" strokeLinejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5m-13.5-9L12 3m0 0l4.5 4.5M12 3v13.5" />
        </svg>
        <p className="text-gray-500 text-sm">Drop character JSON files here to import</p>
      </div>

      {/* Profile grid */}
      {profiles.length === 0 ? (
        <div className="text-center py-16">
          <svg className="w-16 h-16 mx-auto text-gray-700 mb-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1}>
            <path strokeLinecap="round" strokeLinejoin="round" d="M15.75 6a3.75 3.75 0 11-7.5 0 3.75 3.75 0 017.5 0zM4.501 20.118a7.5 7.5 0 0114.998 0A17.933 17.933 0 0112 21.75c-2.676 0-5.216-.584-7.499-1.632z" />
          </svg>
          <p className="text-gray-500">No character profiles yet. Import a JSON file to get started.</p>
        </div>
      ) : (
        <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-6">
          {profiles.map(profile => {
            const isActive = profile.id === activeId;
            const char = profile.data;
            return (
              <div
                key={profile.id}
                className={`relative rounded-xl border overflow-hidden transition-all duration-200 ${
                  isActive
                    ? 'border-emerald-500/60 bg-emerald-500/5 ring-1 ring-emerald-500/30'
                    : 'border-gray-700 bg-gray-800/50 hover:border-gray-600'
                }`}
              >
                {/* Image area: uploaded image, or the first letter as a placeholder */}
                <div className="relative h-48 bg-gray-900 flex items-center justify-center overflow-hidden group">
                  {profile.image ? (
                    <img
                      src={profile.image}
                      alt={char.display_name || char.name}
                      className="w-full h-full object-cover"
                    />
                  ) : (
                    <div className="text-6xl font-bold text-gray-700 select-none">
                      {(char.display_name || char.name || '?')[0].toUpperCase()}
                    </div>
                  )}
                  {/* Image upload overlay */}
                  <label className="absolute inset-0 flex items-center justify-center bg-black/50 opacity-0 group-hover:opacity-100 transition-opacity cursor-pointer">
                    <div className="text-center">
                      <svg className="w-8 h-8 mx-auto text-white/80 mb-1" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
                        <path strokeLinecap="round" strokeLinejoin="round" d="M6.827 6.175A2.31 2.31 0 015.186 7.23c-.38.054-.757.112-1.134.175C2.999 7.58 2.25 8.507 2.25 9.574V18a2.25 2.25 0 002.25 2.25h15A2.25 2.25 0 0021.75 18V9.574c0-1.067-.75-1.994-1.802-2.169a47.865 47.865 0 00-1.134-.175 2.31 2.31 0 01-1.64-1.055l-.822-1.316a2.192 2.192 0 00-1.736-1.039 48.774 48.774 0 00-5.232 0 2.192 2.192 0 00-1.736 1.039l-.821 1.316z" />
                        <path strokeLinecap="round" strokeLinejoin="round" d="M16.5 12.75a4.5 4.5 0 11-9 0 4.5 4.5 0 019 0z" />
                      </svg>
                      <span className="text-xs text-white/70">Change image</span>
                    </div>
                    <input
                      type="file"
                      accept="image/*"
                      className="hidden"
                      onChange={(e) => handleImageUpload(profile.id, e)}
                    />
                  </label>
                  {/* Active badge */}
                  {isActive && (
                    <span className="absolute top-2 right-2 px-2 py-0.5 bg-emerald-500 text-white text-xs font-medium rounded-full">
                      Active
                    </span>
                  )}
                </div>

                {/* Info */}
                <div className="p-4 space-y-3">
                  <div>
                    <h3 className="text-lg font-semibold text-gray-200">
                      {char.display_name || char.name}
                    </h3>
                    <p className="text-xs text-gray-500 mt-0.5">{char.description}</p>
                  </div>

                  {/* Meta chips */}
                  <div className="flex flex-wrap gap-1.5">
                    <span className="px-2 py-0.5 bg-gray-700/70 text-gray-400 text-xs rounded-full">
                      {char.tts?.engine || 'kokoro'}
                    </span>
                    <span className="px-2 py-0.5 bg-gray-700/70 text-gray-400 text-xs rounded-full">
                      {char.model_overrides?.primary || 'default'}
                    </span>
                    {char.tts?.kokoro_voice && (
                      <span className="px-2 py-0.5 bg-gray-700/70 text-gray-400 text-xs rounded-full">
                        {char.tts.kokoro_voice}
                      </span>
                    )}
                  </div>

                  {/* Actions */}
                  <div className="flex gap-2 pt-1">
                    {!isActive ? (
                      <button
                        onClick={() => activateProfile(profile.id)}
                        className="flex-1 px-3 py-1.5 bg-emerald-600 hover:bg-emerald-500 text-white text-sm rounded-lg transition-colors"
                      >
                        Activate
                      </button>
                    ) : (
                      <button
                        disabled
                        className="flex-1 px-3 py-1.5 bg-gray-700 text-gray-500 text-sm rounded-lg cursor-not-allowed"
                      >
                        Active
                      </button>
                    )}
                    <button
                      onClick={() => editProfile(profile)}
                      className="px-3 py-1.5 bg-gray-700 hover:bg-gray-600 text-gray-300 text-sm rounded-lg transition-colors"
                      title="Edit"
                    >
                      <svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
                        <path strokeLinecap="round" strokeLinejoin="round" d="M16.862 4.487l1.687-1.688a1.875 1.875 0 112.652 2.652L10.582 16.07a4.5 4.5 0 01-1.897 1.13L6 18l.8-2.685a4.5 4.5 0 011.13-1.897l8.932-8.931zm0 0L19.5 7.125M18 14v4.75A2.25 2.25 0 0115.75 21H5.25A2.25 2.25 0 013 18.75V8.25A2.25 2.25 0 015.25 6H10" />
                      </svg>
                    </button>
                    <button
                      onClick={() => exportProfile(profile)}
                      className="px-3 py-1.5 bg-gray-700 hover:bg-gray-600 text-gray-300 text-sm rounded-lg transition-colors"
                      title="Export"
                    >
                      <svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
                        <path strokeLinecap="round" strokeLinejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5M16.5 12L12 16.5m0 0L7.5 12m4.5 4.5V3" />
                      </svg>
                    </button>
                    <button
                      onClick={() => removeProfile(profile.id)}
                      className="px-3 py-1.5 bg-gray-700 hover:bg-red-600 text-gray-300 hover:text-white text-sm rounded-lg transition-colors"
                      title="Delete"
                    >
                      <svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
                        <path strokeLinecap="round" strokeLinejoin="round" d="M14.74 9l-.346 9m-4.788 0L9.26 9m9.968-3.21c.342.052.682.107 1.022.166m-1.022-.165L18.16 19.673a2.25 2.25 0 01-2.244 2.077H8.084a2.25 2.25 0 01-2.244-2.077L4.772 5.79m14.456 0a48.108 48.108 0 00-3.478-.397m-12 .562c.34-.059.68-.114 1.022-.165m0 0a48.11 48.11 0 013.478-.397m7.5 0v-.916c0-1.18-.91-2.164-2.09-2.201a51.964 51.964 0 00-3.32 0c-1.18.037-2.09 1.022-2.09 2.201v.916m7.5 0a48.667 48.667 0 00-7.5 0" />
                      </svg>
                    </button>
                  </div>
                </div>
              </div>
            );
          })}
        </div>
      )}
    </div>
  );
}
|
||||
13
homeai-character/src/SchemaValidator.js
Normal file
13
homeai-character/src/SchemaValidator.js
Normal file
@@ -0,0 +1,13 @@
|
||||
import Ajv from 'ajv'
|
||||
import schema from '../schema/character.schema.json'
|
||||
|
||||
// Compile the character JSON schema once at module load; `allErrors`
// collects every violation instead of stopping at the first.
const ajv = new Ajv({ allErrors: true, strict: false })
const validate = ajv.compile(schema)
|
||||
|
||||
/**
 * Validate a character config object against the character schema.
 * @param {object} config - parsed character JSON
 * @returns {true} when the config is valid
 * @throws {Error} with a combined message listing every schema violation
 */
export function validateCharacter(config) {
  if (validate(config)) {
    return true
  }
  throw new Error(ajv.errorsText(validate.errors))
}
|
||||
389
homeai-character/src/ServiceStatus.jsx
Normal file
389
homeai-character/src/ServiceStatus.jsx
Normal file
@@ -0,0 +1,389 @@
|
||||
import { useState, useEffect, useCallback } from 'react';
|
||||
|
||||
// Registry of HomeAI services rendered on the status page.
// Fields per entry:
//   url + healthPath — probe target for the health check
//   tcp: true        — Wyoming services that speak raw TCP rather than HTTP
//   uiUrl            — link shown when the service has a web UI (null otherwise)
//   restart          — how to restart it: launchd label or docker container name
// NOTE(review): launchd ids hard-code uid 501 (`gui/501/...`) — verify on
// machines where the login user differs.
const SERVICES = [
  {
    name: 'Ollama',
    url: 'http://localhost:11434',
    healthPath: '/api/tags',
    uiUrl: null,
    description: 'Local LLM runtime',
    category: 'AI & LLM',
    restart: { type: 'launchd', id: 'gui/501/com.homeai.ollama' },
  },
  {
    name: 'Open WebUI',
    url: 'http://localhost:3030',
    healthPath: '/',
    uiUrl: 'http://localhost:3030',
    description: 'Chat interface',
    category: 'AI & LLM',
    restart: { type: 'docker', id: 'homeai-open-webui' },
  },
  {
    name: 'OpenClaw Gateway',
    url: 'http://localhost:8080',
    healthPath: '/',
    uiUrl: null,
    description: 'Agent gateway',
    category: 'Agent',
    restart: { type: 'launchd', id: 'gui/501/com.homeai.openclaw' },
  },
  {
    name: 'OpenClaw Bridge',
    url: 'http://localhost:8081',
    healthPath: '/',
    uiUrl: null,
    description: 'HTTP-to-CLI bridge',
    category: 'Agent',
    restart: { type: 'launchd', id: 'gui/501/com.homeai.openclaw-bridge' },
  },
  {
    name: 'Wyoming STT',
    url: 'http://localhost:10300',
    healthPath: '/',
    uiUrl: null,
    description: 'Whisper speech-to-text',
    category: 'Voice',
    tcp: true,
    restart: { type: 'launchd', id: 'gui/501/com.homeai.wyoming-stt' },
  },
  {
    name: 'Wyoming TTS',
    url: 'http://localhost:10301',
    healthPath: '/',
    uiUrl: null,
    description: 'Kokoro text-to-speech',
    category: 'Voice',
    tcp: true,
    restart: { type: 'launchd', id: 'gui/501/com.homeai.wyoming-tts' },
  },
  {
    name: 'Wyoming Satellite',
    url: 'http://localhost:10700',
    healthPath: '/',
    uiUrl: null,
    description: 'Mac Mini mic/speaker satellite',
    category: 'Voice',
    tcp: true,
    restart: { type: 'launchd', id: 'gui/501/com.homeai.wyoming-satellite' },
  },
  {
    name: 'Character Dashboard',
    url: 'http://localhost:5173',
    healthPath: '/',
    uiUrl: 'http://localhost:5173',
    description: 'Character manager & service status',
    category: 'Agent',
    restart: { type: 'launchd', id: 'gui/501/com.homeai.character-dashboard' },
  },
  {
    name: 'Home Assistant',
    url: 'https://10.0.0.199:8123',
    healthPath: '/api/',
    uiUrl: 'https://10.0.0.199:8123',
    description: 'Smart home platform',
    category: 'Smart Home',
  },
  {
    name: 'Uptime Kuma',
    url: 'http://localhost:3001',
    healthPath: '/',
    uiUrl: 'http://localhost:3001',
    description: 'Service health monitoring',
    category: 'Infrastructure',
    restart: { type: 'docker', id: 'homeai-uptime-kuma' },
  },
  {
    name: 'n8n',
    url: 'http://localhost:5678',
    healthPath: '/',
    uiUrl: 'http://localhost:5678',
    description: 'Workflow automation',
    category: 'Infrastructure',
    restart: { type: 'docker', id: 'homeai-n8n' },
  },
  {
    name: 'code-server',
    url: 'http://localhost:8090',
    healthPath: '/',
    uiUrl: 'http://localhost:8090',
    description: 'Browser-based VS Code',
    category: 'Infrastructure',
    restart: { type: 'docker', id: 'homeai-code-server' },
  },
  {
    name: 'Portainer',
    url: 'https://10.0.0.199:9443',
    healthPath: '/',
    uiUrl: 'https://10.0.0.199:9443',
    description: 'Docker management',
    category: 'Infrastructure',
  },
  {
    name: 'Gitea',
    url: 'http://10.0.0.199:3000',
    healthPath: '/',
    uiUrl: 'http://10.0.0.199:3000',
    description: 'Self-hosted Git',
    category: 'Infrastructure',
  },
];
|
||||
|
||||
// Heroicon-style inline SVGs keyed by service category. Keys must match the
// `category` strings used in SERVICES; rendered next to each section header.
const CATEGORY_ICONS = {
  // Sparkles icon
  'AI & LLM': (
    <svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
      <path strokeLinecap="round" strokeLinejoin="round" d="M9.813 15.904L9 18.75l-.813-2.846a4.5 4.5 0 00-3.09-3.09L2.25 12l2.846-.813a4.5 4.5 0 003.09-3.09L9 5.25l.813 2.846a4.5 4.5 0 003.09 3.09L15.75 12l-2.846.813a4.5 4.5 0 00-3.09 3.09zM18.259 8.715L18 9.75l-.259-1.035a3.375 3.375 0 00-2.455-2.456L14.25 6l1.036-.259a3.375 3.375 0 002.455-2.456L18 2.25l.259 1.035a3.375 3.375 0 002.455 2.456L21.75 6l-1.036.259a3.375 3.375 0 00-2.455 2.456zM16.894 20.567L16.5 21.75l-.394-1.183a2.25 2.25 0 00-1.423-1.423L13.5 18.75l1.183-.394a2.25 2.25 0 001.423-1.423l.394-1.183.394 1.183a2.25 2.25 0 001.423 1.423l1.183.394-1.183.394a2.25 2.25 0 00-1.423 1.423z" />
    </svg>
  ),
  // CPU/chip icon
  'Agent': (
    <svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
      <path strokeLinecap="round" strokeLinejoin="round" d="M8.25 3v1.5M4.5 8.25H3m18 0h-1.5M4.5 12H3m18 0h-1.5m-15 3.75H3m18 0h-1.5M8.25 19.5V21M12 3v1.5m0 15V21m3.75-18v1.5m0 15V21m-9-1.5h10.5a2.25 2.25 0 002.25-2.25V6.75a2.25 2.25 0 00-2.25-2.25H6.75A2.25 2.25 0 004.5 6.75v10.5a2.25 2.25 0 002.25 2.25zm.75-12h9v9h-9v-9z" />
    </svg>
  ),
  // Microphone icon
  'Voice': (
    <svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
      <path strokeLinecap="round" strokeLinejoin="round" d="M12 18.75a6 6 0 006-6v-1.5m-6 7.5a6 6 0 01-6-6v-1.5m6 7.5v3.75m-3.75 0h7.5M12 15.75a3 3 0 01-3-3V4.5a3 3 0 116 0v8.25a3 3 0 01-3 3z" />
    </svg>
  ),
  // House icon
  'Smart Home': (
    <svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
      <path strokeLinecap="round" strokeLinejoin="round" d="M2.25 12l8.954-8.955c.44-.439 1.152-.439 1.591 0L21.75 12M4.5 9.75v10.125c0 .621.504 1.125 1.125 1.125H9.75v-4.875c0-.621.504-1.125 1.125-1.125h2.25c.621 0 1.125.504 1.125 1.125V21h4.125c.621 0 1.125-.504 1.125-1.125V9.75M8.25 21h8.25" />
    </svg>
  ),
  // Server-rack icon
  'Infrastructure': (
    <svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
      <path strokeLinecap="round" strokeLinejoin="round" d="M5.25 14.25h13.5m-13.5 0a3 3 0 01-3-3m3 3a3 3 0 100 6h13.5a3 3 0 100-6m-16.5-3a3 3 0 013-3h13.5a3 3 0 013 3m-19.5 0a4.5 4.5 0 01.9-2.7L5.737 5.1a3.375 3.375 0 012.7-1.35h7.126c1.062 0 2.062.5 2.7 1.35l2.587 3.45a4.5 4.5 0 01.9 2.7m0 0a3 3 0 01-3 3m0 3h.008v.008h-.008v-.008zm0-6h.008v.008h-.008v-.008zm-3 6h.008v.008h-.008v-.008zm0-6h.008v.008h-.008v-.008z" />
    </svg>
  ),
};
|
||||
|
||||
function StatusDot({ status }) {
|
||||
const colors = {
|
||||
online: 'bg-emerald-400 shadow-emerald-400/50',
|
||||
offline: 'bg-red-400 shadow-red-400/50',
|
||||
checking: 'bg-amber-400 shadow-amber-400/50 animate-pulse',
|
||||
unknown: 'bg-gray-500',
|
||||
};
|
||||
return (
|
||||
<span className={`inline-block w-2.5 h-2.5 rounded-full shadow-lg ${colors[status] || colors.unknown}`} />
|
||||
);
|
||||
}
|
||||
|
||||
// Service-status dashboard page. Polls every entry in SERVICES through the
// dev-server's /api/health proxy, shows per-category status cards, and offers
// restart (for services with a `restart` descriptor) and "Open" links.
export default function ServiceStatus() {
  // Health map keyed by service name; seeded as 'checking' so dots pulse
  // until the first round of checks completes.
  const [statuses, setStatuses] = useState(() =>
    Object.fromEntries(SERVICES.map(s => [s.name, { status: 'checking', lastCheck: null, responseTime: null }]))
  );
  const [lastRefresh, setLastRefresh] = useState(null);
  // Which services currently have a restart in flight (name -> boolean).
  const [restarting, setRestarting] = useState({});

  // Check one service via the proxy. Never rejects: any failure (network
  // error, abort, bad JSON) is reported as 'offline' with a null responseTime.
  const checkService = useCallback(async (service) => {
    try {
      // Route all checks through the server-side proxy to avoid CORS and
      // self-signed SSL cert issues in the browser.
      const target = encodeURIComponent(service.url + service.healthPath);
      const modeParam = service.tcp ? '&mode=tcp' : '';
      const controller = new AbortController();
      // 8s browser-side cap; the proxy applies its own shorter timeout.
      const timeout = setTimeout(() => controller.abort(), 8000);

      const res = await fetch(`/api/health?url=${target}${modeParam}`, { signal: controller.signal });
      clearTimeout(timeout);

      const data = await res.json();
      return { status: data.status, lastCheck: new Date(), responseTime: data.responseTime };
    } catch {
      return { status: 'offline', lastCheck: new Date(), responseTime: null };
    }
  }, []);

  // Re-check every service in parallel and merge results into state.
  const refreshAll = useCallback(async () => {
    // Mark all as checking
    setStatuses(prev =>
      Object.fromEntries(Object.entries(prev).map(([k, v]) => [k, { ...v, status: 'checking' }]))
    );

    // checkService never rejects, so every entry should be fulfilled;
    // allSettled is defensive.
    const results = await Promise.allSettled(
      SERVICES.map(async (service) => {
        const result = await checkService(service);
        return { name: service.name, ...result };
      })
    );

    const newStatuses = {};
    for (const r of results) {
      if (r.status === 'fulfilled') {
        newStatuses[r.value.name] = {
          status: r.value.status,
          lastCheck: r.value.lastCheck,
          responseTime: r.value.responseTime,
        };
      }
    }
    setStatuses(prev => ({ ...prev, ...newStatuses }));
    setLastRefresh(new Date());
  }, [checkService]);

  // Initial check plus a 30-second polling loop; cleared on unmount.
  useEffect(() => {
    refreshAll();
    const interval = setInterval(refreshAll, 30000);
    return () => clearInterval(interval);
  }, [refreshAll]);

  // POST the service's restart descriptor ({ type, id }) to the dev-server
  // endpoint, then re-check just that service after a short grace period.
  const restartService = useCallback(async (service) => {
    if (!service.restart) return;
    setRestarting(prev => ({ ...prev, [service.name]: true }));
    try {
      const res = await fetch('/api/service/restart', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(service.restart),
      });
      const data = await res.json();
      if (!data.ok) {
        console.error(`Restart failed for ${service.name}:`, data.error);
      }
      // Wait a moment for the service to come back, then re-check
      setTimeout(async () => {
        const result = await checkService(service);
        setStatuses(prev => ({ ...prev, [service.name]: result }));
        setRestarting(prev => ({ ...prev, [service.name]: false }));
      }, 3000);
    } catch (err) {
      console.error(`Restart failed for ${service.name}:`, err);
      setRestarting(prev => ({ ...prev, [service.name]: false }));
    }
  }, [checkService]);

  // Derived summary values for the header and the progress bar.
  const categories = [...new Set(SERVICES.map(s => s.category))];
  const onlineCount = Object.values(statuses).filter(s => s.status === 'online').length;
  const offlineCount = Object.values(statuses).filter(s => s.status === 'offline').length;
  const totalCount = SERVICES.length;
  const allOnline = onlineCount === totalCount;

  return (
    <div className="space-y-8">
      {/* Header */}
      <div className="flex items-center justify-between">
        <div>
          <h1 className="text-3xl font-bold text-gray-100">Service Status</h1>
          <p className="text-sm text-gray-500 mt-1">
            {onlineCount}/{totalCount} services online
            {lastRefresh && (
              <span className="ml-3">
                Last check: {lastRefresh.toLocaleTimeString()}
              </span>
            )}
          </p>
        </div>
        <button
          onClick={refreshAll}
          className="flex items-center gap-2 px-4 py-2 bg-gray-800 hover:bg-gray-700 text-gray-300 rounded-lg border border-gray-700 transition-colors"
        >
          <svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
            <path strokeLinecap="round" strokeLinejoin="round" d="M16.023 9.348h4.992v-.001M2.985 19.644v-4.992m0 0h4.992m-4.993 0l3.181 3.183a8.25 8.25 0 0013.803-3.7M4.031 9.865a8.25 8.25 0 0113.803-3.7l3.181 3.182" />
          </svg>
          Refresh
        </button>
      </div>

      {/* Summary bar: purple when all healthy, green/red split otherwise */}
      <div className="h-2 rounded-full bg-gray-800 overflow-hidden flex">
        {allOnline ? (
          <div
            className="h-full bg-gradient-to-r from-purple-500 to-indigo-500 transition-all duration-500"
            style={{ width: '100%' }}
          />
        ) : (
          <>
            <div
              className="h-full bg-gradient-to-r from-emerald-500 to-emerald-400 transition-all duration-500"
              style={{ width: `${(onlineCount / totalCount) * 100}%` }}
            />
            <div
              className="h-full bg-gradient-to-r from-red-500 to-red-400 transition-all duration-500"
              style={{ width: `${(offlineCount / totalCount) * 100}%` }}
            />
          </>
        )}
      </div>

      {/* Service grid by category */}
      {categories.map(category => (
        <div key={category}>
          <div className="flex items-center gap-2 mb-4">
            <span className="text-gray-400">{CATEGORY_ICONS[category]}</span>
            <h2 className="text-lg font-semibold text-gray-300">{category}</h2>
          </div>
          <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">
            {SERVICES.filter(s => s.category === category).map(service => {
              const st = statuses[service.name] || { status: 'unknown' };
              return (
                <div
                  key={service.name}
                  className={`relative rounded-xl border p-4 transition-all duration-200 ${
                    st.status === 'online'
                      ? 'bg-gray-800/50 border-gray-700 hover:border-emerald-500/50'
                      : st.status === 'offline'
                      ? 'bg-gray-800/50 border-red-500/30 hover:border-red-500/50'
                      : 'bg-gray-800/50 border-gray-700'
                  }`}
                >
                  <div className="flex items-start justify-between">
                    <div className="flex-1">
                      <div className="flex items-center gap-2">
                        <StatusDot status={st.status} />
                        <h3 className="font-medium text-gray-200">{service.name}</h3>
                      </div>
                      <p className="text-xs text-gray-500 mt-1">{service.description}</p>
                      {st.responseTime !== null && (
                        <p className="text-xs text-gray-600 mt-0.5">{st.responseTime}ms</p>
                      )}
                    </div>
                    <div className="flex items-center gap-2">
                      {/* Restart button only for offline services that declare a restart descriptor */}
                      {service.restart && st.status === 'offline' && (
                        <button
                          onClick={() => restartService(service)}
                          disabled={restarting[service.name]}
                          className="text-xs px-2.5 py-1 rounded-md bg-amber-600/80 hover:bg-amber-500 disabled:bg-gray-700 disabled:text-gray-500 text-white transition-colors flex items-center gap-1"
                        >
                          {restarting[service.name] ? (
                            <>
                              <svg className="w-3 h-3 animate-spin" fill="none" viewBox="0 0 24 24">
                                <circle className="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" strokeWidth="4" />
                                <path className="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4z" />
                              </svg>
                              Restarting
                            </>
                          ) : (
                            <>
                              <svg className="w-3 h-3" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
                                <path strokeLinecap="round" strokeLinejoin="round" d="M5.636 18.364a9 9 0 010-12.728m12.728 0a9 9 0 010 12.728M12 9v3m0 0v3m0-3h3m-3 0H9" />
                              </svg>
                              Restart
                            </>
                          )}
                        </button>
                      )}
                      {service.uiUrl && (
                        <a
                          href={service.uiUrl}
                          target="_blank"
                          rel="noopener noreferrer"
                          className="text-xs px-2.5 py-1 rounded-md bg-gray-700 hover:bg-gray-600 text-gray-300 transition-colors flex items-center gap-1"
                        >
                          Open
                          <svg className="w-3 h-3" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
                            <path strokeLinecap="round" strokeLinejoin="round" d="M13.5 6H5.25A2.25 2.25 0 003 8.25v10.5A2.25 2.25 0 005.25 21h10.5A2.25 2.25 0 0018 18.75V10.5m-10.5 6L21 3m0 0h-5.25M21 3v5.25" />
                          </svg>
                        </a>
                      )}
                    </div>
                  </div>
                </div>
              );
            })}
          </div>
        </div>
      ))}
    </div>
  );
}
|
||||
1
homeai-character/src/assets/react.svg
Normal file
1
homeai-character/src/assets/react.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="35.93" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 228"><path fill="#00D8FF" d="M210.483 73.824a171.49 171.49 0 0 0-8.24-2.597c.465-1.9.893-3.777 1.273-5.621c6.238-30.281 2.16-54.676-11.769-62.708c-13.355-7.7-35.196.329-57.254 19.526a171.23 171.23 0 0 0-6.375 5.848a155.866 155.866 0 0 0-4.241-3.917C100.759 3.829 77.587-4.822 63.673 3.233C50.33 10.957 46.379 33.89 51.995 62.588a170.974 170.974 0 0 0 1.892 8.48c-3.28.932-6.445 1.924-9.474 2.98C17.309 83.498 0 98.307 0 113.668c0 15.865 18.582 31.778 46.812 41.427a145.52 145.52 0 0 0 6.921 2.165a167.467 167.467 0 0 0-2.01 9.138c-5.354 28.2-1.173 50.591 12.134 58.266c13.744 7.926 36.812-.22 59.273-19.855a145.567 145.567 0 0 0 5.342-4.923a168.064 168.064 0 0 0 6.92 6.314c21.758 18.722 43.246 26.282 56.54 18.586c13.731-7.949 18.194-32.003 12.4-61.268a145.016 145.016 0 0 0-1.535-6.842c1.62-.48 3.21-.974 4.76-1.488c29.348-9.723 48.443-25.443 48.443-41.52c0-15.417-17.868-30.326-45.517-39.844Zm-6.365 70.984c-1.4.463-2.836.91-4.3 1.345c-3.24-10.257-7.612-21.163-12.963-32.432c5.106-11 9.31-21.767 12.459-31.957c2.619.758 5.16 1.557 7.61 2.4c23.69 8.156 38.14 20.213 38.14 29.504c0 9.896-15.606 22.743-40.946 31.14Zm-10.514 20.834c2.562 12.94 2.927 24.64 1.23 33.787c-1.524 8.219-4.59 13.698-8.382 15.893c-8.067 4.67-25.32-1.4-43.927-17.412a156.726 156.726 0 0 1-6.437-5.87c7.214-7.889 14.423-17.06 21.459-27.246c12.376-1.098 24.068-2.894 34.671-5.345a134.17 134.17 0 0 1 1.386 6.193ZM87.276 214.515c-7.882 2.783-14.16 2.863-17.955.675c-8.075-4.657-11.432-22.636-6.853-46.752a156.923 156.923 0 0 1 1.869-8.499c10.486 2.32 22.093 3.988 34.498 4.994c7.084 9.967 14.501 19.128 21.976 27.15a134.668 134.668 0 0 1-4.877 4.492c-9.933 8.682-19.886 14.842-28.658 17.94ZM50.35 144.747c-12.483-4.267-22.792-9.812-29.858-15.863c-6.35-5.437-9.555-10.836-9.555-15.216c0-9.322 
13.897-21.212 37.076-29.293c2.813-.98 5.757-1.905 8.812-2.773c3.204 10.42 7.406 21.315 12.477 32.332c-5.137 11.18-9.399 22.249-12.634 32.792a134.718 134.718 0 0 1-6.318-1.979Zm12.378-84.26c-4.811-24.587-1.616-43.134 6.425-47.789c8.564-4.958 27.502 2.111 47.463 19.835a144.318 144.318 0 0 1 3.841 3.545c-7.438 7.987-14.787 17.08-21.808 26.988c-12.04 1.116-23.565 2.908-34.161 5.309a160.342 160.342 0 0 1-1.76-7.887Zm110.427 27.268a347.8 347.8 0 0 0-7.785-12.803c8.168 1.033 15.994 2.404 23.343 4.08c-2.206 7.072-4.956 14.465-8.193 22.045a381.151 381.151 0 0 0-7.365-13.322Zm-45.032-43.861c5.044 5.465 10.096 11.566 15.065 18.186a322.04 322.04 0 0 0-30.257-.006c4.974-6.559 10.069-12.652 15.192-18.18ZM82.802 87.83a323.167 323.167 0 0 0-7.227 13.238c-3.184-7.553-5.909-14.98-8.134-22.152c7.304-1.634 15.093-2.97 23.209-3.984a321.524 321.524 0 0 0-7.848 12.897Zm8.081 65.352c-8.385-.936-16.291-2.203-23.593-3.793c2.26-7.3 5.045-14.885 8.298-22.6a321.187 321.187 0 0 0 7.257 13.246c2.594 4.48 5.28 8.868 8.038 13.147Zm37.542 31.03c-5.184-5.592-10.354-11.779-15.403-18.433c4.902.192 9.899.29 14.978.29c5.218 0 10.376-.117 15.453-.343c-4.985 6.774-10.018 12.97-15.028 18.486Zm52.198-57.817c3.422 7.8 6.306 15.345 8.596 22.52c-7.422 1.694-15.436 3.058-23.88 4.071a382.417 382.417 0 0 0 7.859-13.026a347.403 347.403 0 0 0 7.425-13.565Zm-16.898 8.101a358.557 358.557 0 0 1-12.281 19.815a329.4 329.4 0 0 1-23.444.823c-7.967 0-15.716-.248-23.178-.732a310.202 310.202 0 0 1-12.513-19.846h.001a307.41 307.41 0 0 1-10.923-20.627a310.278 310.278 0 0 1 10.89-20.637l-.001.001a307.318 307.318 0 0 1 12.413-19.761c7.613-.576 15.42-.876 23.31-.876H128c7.926 0 15.743.303 23.354.883a329.357 329.357 0 0 1 12.335 19.695a358.489 358.489 0 0 1 11.036 20.54a329.472 329.472 0 0 1-11 20.722Zm22.56-122.124c8.572 4.944 11.906 24.881 6.52 51.026c-.344 1.668-.73 3.367-1.15 5.09c-10.622-2.452-22.155-4.275-34.23-5.408c-7.034-10.017-14.323-19.124-21.64-27.008a160.789 160.789 0 0 1 5.888-5.4c18.9-16.447 36.564-22.941 
44.612-18.3ZM128 90.808c12.625 0 22.86 10.235 22.86 22.86s-10.235 22.86-22.86 22.86s-22.86-10.235-22.86-22.86s10.235-22.86 22.86-22.86Z"></path></svg>
|
||||
|
After Width: | Height: | Size: 4.0 KiB |
13
homeai-character/src/index.css
Normal file
13
homeai-character/src/index.css
Normal file
@@ -0,0 +1,13 @@
|
||||
@import "tailwindcss";

/* Global dark theme: near-black page background with light gray text. */
body {
  margin: 0;
  background-color: #030712;
  color: #f3f4f6;
  /* Smoother font rendering on WebKit (macOS) and Firefox. */
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

/* Let the React mount point fill the viewport so full-height layouts work. */
#root {
  min-height: 100vh;
}
|
||||
10
homeai-character/src/main.jsx
Normal file
10
homeai-character/src/main.jsx
Normal file
@@ -0,0 +1,10 @@
|
||||
import { StrictMode } from 'react'
import { createRoot } from 'react-dom/client'
import './index.css'
import App from './App.jsx'

// Mount the app at #root. StrictMode enables extra development-only checks
// (e.g. double-invoked effects) and has no effect in production builds.
createRoot(document.getElementById('root')).render(
  <StrictMode>
    <App />
  </StrictMode>,
)
|
||||
169
homeai-character/vite.config.js
Normal file
169
homeai-character/vite.config.js
Normal file
@@ -0,0 +1,169 @@
|
||||
import { defineConfig } from 'vite'
|
||||
import react from '@vitejs/plugin-react'
|
||||
import tailwindcss from '@tailwindcss/vite'
|
||||
|
||||
/**
 * Vite dev-server plugin exposing three local endpoints used by the dashboard UI:
 *   GET  /api/health?url=...[&mode=tcp] — health-check a service (HTTP/HTTPS, or
 *                                         a raw TCP connect for non-HTTP services)
 *   POST /api/service/restart           — restart a whitelisted launchd/docker service
 *   POST /api/tts                       — proxy a TTS preview request to the
 *                                         OpenClaw bridge and stream back the audio
 *
 * All checks run server-side so the browser avoids CORS and self-signed-cert issues.
 */
function healthCheckPlugin() {
  return {
    name: 'health-check-proxy',
    configureServer(server) {
      // Health check. Always responds 200 with a JSON body of
      // { status: 'online'|'offline', responseTime: ms|null } so the client
      // can distinguish "service down" from "proxy down".
      server.middlewares.use('/api/health', async (req, res) => {
        const params = new URL(req.url, 'http://localhost').searchParams;
        const url = params.get('url');
        const mode = params.get('mode'); // 'tcp' for raw TCP port check
        if (!url) {
          res.writeHead(400, { 'Content-Type': 'application/json' });
          res.end(JSON.stringify({ error: 'Missing url param' }));
          return;
        }
        const start = Date.now();
        const parsedUrl = new URL(url);

        try {
          if (mode === 'tcp') {
            // TCP socket connect check for non-HTTP services (e.g. Wyoming).
            const { default: net } = await import('net');
            await new Promise((resolve, reject) => {
              const socket = net.createConnection(
                // Explicit radix 10: parsedUrl.port is a decimal string.
                { host: parsedUrl.hostname, port: Number.parseInt(parsedUrl.port, 10), timeout: 5000 },
                () => { socket.destroy(); resolve(); }
              );
              socket.on('error', reject);
              socket.on('timeout', () => { socket.destroy(); reject(new Error('timeout')); });
            });
          } else {
            // HTTP/HTTPS health check. rejectUnauthorized:false accepts the
            // self-signed certs used by Home Assistant / Portainer on the LAN.
            const { default: https } = await import('https');
            const { default: http } = await import('http');
            const client = parsedUrl.protocol === 'https:' ? https : http;

            await new Promise((resolve, reject) => {
              const reqObj = client.get(url, { rejectUnauthorized: false, timeout: 5000 }, (resp) => {
                // Any HTTP response counts as "online"; drain the body.
                resp.resume();
                resolve();
              });
              reqObj.on('error', reject);
              reqObj.on('timeout', () => { reqObj.destroy(); reject(new Error('timeout')); });
            });
          }

          res.writeHead(200, { 'Content-Type': 'application/json' });
          res.end(JSON.stringify({ status: 'online', responseTime: Date.now() - start }));
        } catch {
          res.writeHead(200, { 'Content-Type': 'application/json' });
          res.end(JSON.stringify({ status: 'offline', responseTime: null }));
        }
      });

      // Service restart — runs launchctl or docker restart for whitelisted IDs only.
      server.middlewares.use('/api/service/restart', async (req, res) => {
        if (req.method === 'OPTIONS') {
          res.writeHead(204, { 'Access-Control-Allow-Origin': '*', 'Access-Control-Allow-Methods': 'POST', 'Access-Control-Allow-Headers': 'Content-Type' });
          res.end();
          return;
        }
        if (req.method !== 'POST') {
          res.writeHead(405);
          res.end();
          return;
        }
        try {
          // Collect the JSON body: { type: 'launchd'|'docker', id: string }.
          const chunks = [];
          for await (const chunk of req) chunks.push(chunk);
          const { type, id } = JSON.parse(Buffer.concat(chunks).toString());

          if (!type || !id) {
            res.writeHead(400, { 'Content-Type': 'application/json' });
            res.end(JSON.stringify({ ok: false, error: 'Missing type or id' }));
            return;
          }

          // Whitelist valid service IDs to prevent command injection.
          const ALLOWED_LAUNCHD = [
            'gui/501/com.homeai.ollama',
            'gui/501/com.homeai.openclaw',
            'gui/501/com.homeai.openclaw-bridge',
            'gui/501/com.homeai.wyoming-stt',
            'gui/501/com.homeai.wyoming-tts',
            'gui/501/com.homeai.wyoming-satellite',
            'gui/501/com.homeai.character-dashboard',
          ];
          const ALLOWED_DOCKER = [
            'homeai-open-webui',
            'homeai-uptime-kuma',
            'homeai-n8n',
            'homeai-code-server',
          ];

          let cmd;
          if (type === 'launchd' && ALLOWED_LAUNCHD.includes(id)) {
            // kickstart -k: kill the running instance and restart it.
            cmd = ['launchctl', 'kickstart', '-k', id];
          } else if (type === 'docker' && ALLOWED_DOCKER.includes(id)) {
            cmd = ['docker', 'restart', id];
          } else {
            res.writeHead(403, { 'Content-Type': 'application/json' });
            res.end(JSON.stringify({ ok: false, error: 'Service not in allowed list' }));
            return;
          }

          // execFile (not exec): arguments are passed directly, no shell parsing.
          const { execFile } = await import('child_process');
          const { promisify } = await import('util');
          const execFileAsync = promisify(execFile);
          const { stdout, stderr } = await execFileAsync(cmd[0], cmd.slice(1), { timeout: 30000 });

          res.writeHead(200, { 'Content-Type': 'application/json' });
          res.end(JSON.stringify({ ok: true, stdout: stdout.trim(), stderr: stderr.trim() }));
        } catch (err) {
          res.writeHead(500, { 'Content-Type': 'application/json' });
          res.end(JSON.stringify({ ok: false, error: err.message }));
        }
      });

      // TTS preview proxy — forwards POST to the OpenClaw bridge, returns audio.
      server.middlewares.use('/api/tts', async (req, res) => {
        if (req.method !== 'POST') {
          res.writeHead(405);
          res.end();
          return;
        }
        try {
          const { default: http } = await import('http');
          // Buffer the incoming request body so Content-Length can be set.
          const chunks = [];
          for await (const chunk of req) chunks.push(chunk);
          const body = Buffer.concat(chunks);

          await new Promise((resolve, reject) => {
            const proxyReq = http.request(
              'http://localhost:8081/api/tts',
              { method: 'POST', headers: { 'Content-Type': 'application/json', 'Content-Length': body.length }, timeout: 30000 },
              (proxyRes) => {
                // Mirror the bridge's status and stream the audio through.
                res.writeHead(proxyRes.statusCode, {
                  'Content-Type': proxyRes.headers['content-type'] || 'audio/wav',
                });
                proxyRes.pipe(res);
                proxyRes.on('end', resolve);
              }
            );
            proxyReq.on('error', reject);
            proxyReq.on('timeout', () => { proxyReq.destroy(); reject(new Error('timeout')); });
            proxyReq.write(body);
            proxyReq.end();
          });
        } catch {
          res.writeHead(502, { 'Content-Type': 'application/json' });
          res.end(JSON.stringify({ error: 'TTS bridge unreachable' }));
        }
      });
    },
  };
}
|
||||
|
||||
// https://vite.dev/config/
export default defineConfig({
  plugins: [
    healthCheckPlugin(), // local /api/* endpoints: health checks, restarts, TTS proxy
    tailwindcss(),
    react(),
  ],
  server: {
    // Bind on all interfaces so the dashboard is reachable from the LAN.
    host: '0.0.0.0',
  },
})
|
||||
2
homeai-dashboard/.gitignore
vendored
Normal file
2
homeai-dashboard/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
node_modules/
|
||||
dist/
|
||||
49
homeai-dashboard/characters/aria.json
Normal file
49
homeai-dashboard/characters/aria.json
Normal file
@@ -0,0 +1,49 @@
|
||||
{
|
||||
"schema_version": 1,
|
||||
"name": "aria",
|
||||
"display_name": "Aria",
|
||||
"description": "Default HomeAI assistant persona",
|
||||
"system_prompt": "You are Aria, a warm, curious, and helpful AI assistant living in the home. You speak naturally and conversationally — never robotic. You are knowledgeable but never condescending. You remember the people you live with and build on those memories over time. Keep responses concise when controlling smart home devices; be more expressive in casual conversation. Never break character.",
|
||||
"model_overrides": {
|
||||
"primary": "llama3.3:70b",
|
||||
"fast": "qwen2.5:7b"
|
||||
},
|
||||
"tts": {
|
||||
"engine": "chatterbox",
|
||||
"voice_ref_path": "~/voices/aria-raw.wav",
|
||||
"kokoro_voice": "af_heart",
|
||||
"speed": 1.0
|
||||
},
|
||||
"live2d_expressions": {
|
||||
"idle": "expr_idle",
|
||||
"listening": "expr_listening",
|
||||
"thinking": "expr_thinking",
|
||||
"speaking": "expr_speaking",
|
||||
"happy": "expr_happy",
|
||||
"sad": "expr_sad",
|
||||
"surprised": "expr_surprised",
|
||||
"error": "expr_error"
|
||||
},
|
||||
"vtube_ws_triggers": {
|
||||
"thinking": {
|
||||
"type": "hotkey",
|
||||
"id": "expr_thinking"
|
||||
},
|
||||
"speaking": {
|
||||
"type": "hotkey",
|
||||
"id": "expr_speaking"
|
||||
},
|
||||
"idle": {
|
||||
"type": "hotkey",
|
||||
"id": "expr_idle"
|
||||
}
|
||||
},
|
||||
"custom_rules": [
|
||||
{
|
||||
"trigger": "good morning",
|
||||
"response": "Good morning! How did you sleep?",
|
||||
"condition": "time_of_day == morning"
|
||||
}
|
||||
],
|
||||
"notes": "Default persona. Voice clone to be added once reference audio recorded."
|
||||
}
|
||||
15
homeai-dashboard/index.html
Normal file
15
homeai-dashboard/index.html
Normal file
@@ -0,0 +1,15 @@
|
||||
<!doctype html>
|
||||
<html lang="en" class="dark">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<link rel="icon" type="image/svg+xml" href="/icon.svg" />
|
||||
<link rel="manifest" href="/manifest.json" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<meta name="theme-color" content="#030712" />
|
||||
<title>HomeAI Dashboard</title>
|
||||
</head>
|
||||
<body class="bg-gray-950 text-gray-100">
|
||||
<div id="root"></div>
|
||||
<script type="module" src="/src/main.jsx"></script>
|
||||
</body>
|
||||
</html>
|
||||
45
homeai-dashboard/launchd/com.homeai.dashboard.plist
Normal file
45
homeai-dashboard/launchd/com.homeai.dashboard.plist
Normal file
@@ -0,0 +1,45 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
  "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<!-- launchd agent: runs the dashboard's Vite dev server on port 5173 at login
     and keeps it alive. Install under ~/Library/LaunchAgents. -->
<plist version="1.0">
<dict>
  <key>Label</key>
  <string>com.homeai.dashboard</string>

  <key>ProgramArguments</key>
  <array>
    <string>/opt/homebrew/bin/npx</string>
    <string>vite</string>
    <string>--host</string>
    <string>--port</string>
    <string>5173</string>
  </array>

  <key>WorkingDirectory</key>
  <string>/Users/aodhan/gitea/homeai/homeai-dashboard</string>

  <key>EnvironmentVariables</key>
  <dict>
    <key>PATH</key>
    <string>/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin</string>
    <key>HOME</key>
    <string>/Users/aodhan</string>
    <!-- SECURITY: fill these in locally after install — never commit real
         secret values to the repo. A previous revision of this file contained
         a live GAZE_API_KEY; that key should be considered leaked and rotated. -->
    <key>GAZE_API_KEY</key>
    <string></string>
    <key>HA_TOKEN</key>
    <string></string>
  </dict>

  <key>RunAtLoad</key>
  <true/>

  <key>KeepAlive</key>
  <true/>

  <key>StandardOutPath</key>
  <string>/tmp/homeai-dashboard.log</string>

  <key>StandardErrorPath</key>
  <string>/tmp/homeai-dashboard-error.log</string>
</dict>
</plist>
|
||||
2229
homeai-dashboard/package-lock.json
generated
Normal file
2229
homeai-dashboard/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
26
homeai-dashboard/package.json
Normal file
26
homeai-dashboard/package.json
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"name": "homeai-dashboard",
|
||||
"private": true,
|
||||
"version": "0.1.0",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
"build": "vite build",
|
||||
"preview": "vite preview"
|
||||
},
|
||||
"dependencies": {
|
||||
"@tailwindcss/vite": "^4.2.1",
|
||||
"ajv": "^8.18.0",
|
||||
"react": "^19.2.0",
|
||||
"react-dom": "^19.2.0",
|
||||
"react-router-dom": "^7.13.1",
|
||||
"tailwindcss": "^4.2.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@vitejs/plugin-react": "^5.1.1",
|
||||
"vite": "^8.0.0-beta.13"
|
||||
},
|
||||
"overrides": {
|
||||
"vite": "^8.0.0-beta.13"
|
||||
}
|
||||
}
|
||||
9
homeai-dashboard/public/icon.svg
Normal file
9
homeai-dashboard/public/icon.svg
Normal file
@@ -0,0 +1,9 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64">
|
||||
<rect width="64" height="64" rx="14" fill="#030712"/>
|
||||
<circle cx="32" cy="28" r="12" fill="none" stroke="#818cf8" stroke-width="2.5"/>
|
||||
<path d="M26 26c0-3.3 2.7-6 6-6s6 2.7 6 6" fill="none" stroke="#818cf8" stroke-width="2" stroke-linecap="round"/>
|
||||
<rect x="30" y="40" width="4" height="8" rx="2" fill="#818cf8"/>
|
||||
<path d="M24 52h16" stroke="#818cf8" stroke-width="2.5" stroke-linecap="round"/>
|
||||
<circle cx="29" cy="27" r="1.5" fill="#34d399"/>
|
||||
<circle cx="35" cy="27" r="1.5" fill="#34d399"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 575 B |
16
homeai-dashboard/public/manifest.json
Normal file
16
homeai-dashboard/public/manifest.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"name": "HomeAI Dashboard",
|
||||
"short_name": "HomeAI",
|
||||
"description": "HomeAI dashboard — services, chat, and character management",
|
||||
"start_url": "/",
|
||||
"display": "standalone",
|
||||
"background_color": "#030712",
|
||||
"theme_color": "#030712",
|
||||
"icons": [
|
||||
{
|
||||
"src": "/icon.svg",
|
||||
"sizes": "any",
|
||||
"type": "image/svg+xml"
|
||||
}
|
||||
]
|
||||
}
|
||||
106
homeai-dashboard/schema/character.schema.json
Normal file
106
homeai-dashboard/schema/character.schema.json
Normal file
@@ -0,0 +1,106 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "HomeAI Character Config",
|
||||
"version": "2",
|
||||
"type": "object",
|
||||
"required": ["schema_version", "name", "system_prompt", "tts"],
|
||||
"properties": {
|
||||
"schema_version": { "type": "integer", "enum": [1, 2] },
|
||||
"name": { "type": "string" },
|
||||
"display_name": { "type": "string" },
|
||||
"description": { "type": "string" },
|
||||
|
||||
"background": { "type": "string", "description": "Backstory, lore, or general prompt enrichment" },
|
||||
"dialogue_style": { "type": "string", "description": "How the persona speaks or reacts, with example lines" },
|
||||
"appearance": { "type": "string", "description": "Physical description, also used for image prompting" },
|
||||
"skills": {
|
||||
"type": "array",
|
||||
"description": "Topics the persona specialises in or enjoys talking about",
|
||||
"items": { "type": "string" }
|
||||
},
|
||||
|
||||
"system_prompt": { "type": "string" },
|
||||
|
||||
"model_overrides": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"primary": { "type": "string" },
|
||||
"fast": { "type": "string" }
|
||||
}
|
||||
},
|
||||
|
||||
"tts": {
|
||||
"type": "object",
|
||||
"required": ["engine"],
|
||||
"properties": {
|
||||
"engine": {
|
||||
"type": "string",
|
||||
"enum": ["kokoro", "chatterbox", "qwen3", "elevenlabs"]
|
||||
},
|
||||
"voice_ref_path": { "type": "string" },
|
||||
"kokoro_voice": { "type": "string" },
|
||||
"elevenlabs_voice_id": { "type": "string" },
|
||||
"elevenlabs_voice_name": { "type": "string" },
|
||||
"elevenlabs_model": { "type": "string", "default": "eleven_monolingual_v1" },
|
||||
"speed": { "type": "number", "default": 1.0 }
|
||||
}
|
||||
},
|
||||
|
||||
"dream_id": {
|
||||
"type": "string",
|
||||
"description": "Linked Dream character ID for syncing character data and images"
|
||||
},
|
||||
|
||||
"gaze_character": {
|
||||
"type": "string",
|
||||
"description": "Linked GAZE character_id for auto-assigned cover image and default image generation preset"
|
||||
},
|
||||
|
||||
"gaze_presets": {
|
||||
"type": "array",
|
||||
"description": "GAZE image generation presets with trigger conditions",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": ["preset"],
|
||||
"properties": {
|
||||
"preset": { "type": "string" },
|
||||
"trigger": { "type": "string", "default": "self-portrait" }
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"custom_rules": {
|
||||
"type": "array",
|
||||
"description": "Trigger/response overrides for specific contexts",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"trigger": { "type": "string" },
|
||||
"response": { "type": "string" },
|
||||
"condition": { "type": "string" }
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"notes": { "type": "string" },
|
||||
|
||||
"default_prompt_style": {
|
||||
"type": "string",
|
||||
"description": "Default prompt style for this character (quick, standard, creative, roleplayer, game-master, storyteller). Overrides global active style when this character is active.",
|
||||
"enum": ["", "quick", "standard", "creative", "roleplayer", "game-master", "storyteller"]
|
||||
},
|
||||
|
||||
"prompt_style_overrides": {
|
||||
"type": "object",
|
||||
"description": "Per-style customizations for this character. Keys are style IDs, values contain override fields.",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"dialogue_style": { "type": "string", "description": "Override dialogue style for this prompt style" },
|
||||
"system_prompt_suffix": { "type": "string", "description": "Additional instructions appended for this prompt style" }
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"additionalProperties": true
|
||||
}
|
||||
199
homeai-dashboard/src/App.jsx
Normal file
199
homeai-dashboard/src/App.jsx
Normal file
@@ -0,0 +1,199 @@
|
||||
import { useState, useCallback, useEffect } from 'react';
|
||||
import { BrowserRouter, Routes, Route, NavLink, useLocation } from 'react-router-dom';
|
||||
import Dashboard from './pages/Dashboard';
|
||||
import Chat from './pages/Chat';
|
||||
import Characters from './pages/Characters';
|
||||
import Editor from './pages/Editor';
|
||||
import Memories from './pages/Memories';
|
||||
|
||||
function NavItem({ to, children, icon, onClick }) {
|
||||
return (
|
||||
<NavLink
|
||||
to={to}
|
||||
onClick={onClick}
|
||||
className={({ isActive }) =>
|
||||
`flex items-center gap-3 px-4 py-2.5 rounded-lg text-sm font-medium transition-colors ${
|
||||
isActive
|
||||
? 'bg-gray-800 text-white'
|
||||
: 'text-gray-400 hover:text-gray-200 hover:bg-gray-800/50'
|
||||
}`
|
||||
}
|
||||
>
|
||||
{icon}
|
||||
<span>{children}</span>
|
||||
</NavLink>
|
||||
);
|
||||
}
|
||||
|
||||
// Layout: the app shell. Renders a sidebar with logo, nav links, and footer,
// plus the routed page content in <main>.
// Responsive behavior: on md+ screens the sidebar is static; on mobile it
// slides in over a dark backdrop, opened from a fixed top header bar and
// closed by the backdrop, the close button, or any navigation.
function Layout({ children }) {
const [sidebarOpen, setSidebarOpen] = useState(false)
const location = useLocation()

// Close sidebar on route change (mobile)
useEffect(() => {
setSidebarOpen(false)
}, [location.pathname])

// Stable callback so it can be passed to every NavItem without re-renders.
const closeSidebar = useCallback(() => setSidebarOpen(false), [])

return (
<div className="h-screen bg-gray-950 flex overflow-hidden">
{/* Mobile header bar */}
<div className="fixed top-0 left-0 right-0 z-30 flex items-center gap-3 px-4 py-3 bg-gray-900/95 backdrop-blur border-b border-gray-800 md:hidden">
<button
onClick={() => setSidebarOpen(true)}
className="p-1.5 text-gray-400 hover:text-white transition-colors"
aria-label="Open menu"
>
<svg className="w-6 h-6" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
<path strokeLinecap="round" strokeLinejoin="round" d="M3.75 6.75h16.5M3.75 12h16.5m-16.5 5.25h16.5" />
</svg>
</button>
<div className="flex items-center gap-2">
<div className="w-7 h-7 rounded-md bg-gradient-to-br from-indigo-500 to-purple-600 flex items-center justify-center">
<svg className="w-4 h-4 text-white" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
<path strokeLinecap="round" strokeLinejoin="round" d="M2.25 12l8.954-8.955c.44-.439 1.152-.439 1.591 0L21.75 12M4.5 9.75v10.125c0 .621.504 1.125 1.125 1.125H9.75v-4.875c0-.621.504-1.125 1.125-1.125h2.25c.621 0 1.125.504 1.125 1.125V21h4.125c.621 0 1.125-.504 1.125-1.125V9.75M8.25 21h8.25" />
</svg>
</div>
<span className="text-sm font-bold text-white">HomeAI</span>
</div>
</div>

{/* Mobile backdrop */}
{sidebarOpen && (
<div
className="fixed inset-0 bg-black/60 z-40 md:hidden"
onClick={closeSidebar}
/>
)}

{/* Sidebar — off-canvas on mobile (translate-x), static on md+ */}
<aside className={`
fixed inset-y-0 left-0 z-50 w-64 bg-gray-900 border-r border-gray-800 flex flex-col shrink-0
transform transition-transform duration-200 ease-out
${sidebarOpen ? 'translate-x-0' : '-translate-x-full'}
md:static md:translate-x-0
`}>
{/* Logo */}
<div className="px-6 py-5 border-b border-gray-800">
<div className="flex items-center justify-between">
<div className="flex items-center gap-3">
<div className="w-9 h-9 rounded-lg bg-gradient-to-br from-indigo-500 to-purple-600 flex items-center justify-center">
<svg className="w-5 h-5 text-white" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
<path strokeLinecap="round" strokeLinejoin="round" d="M2.25 12l8.954-8.955c.44-.439 1.152-.439 1.591 0L21.75 12M4.5 9.75v10.125c0 .621.504 1.125 1.125 1.125H9.75v-4.875c0-.621.504-1.125 1.125-1.125h2.25c.621 0 1.125.504 1.125 1.125V21h4.125c.621 0 1.125-.504 1.125-1.125V9.75M8.25 21h8.25" />
</svg>
</div>
<div>
<h1 className="text-lg font-bold text-white tracking-tight">HomeAI</h1>
<p className="text-xs text-gray-500">LINDBLUM</p>
</div>
</div>
{/* Close button on mobile */}
<button
onClick={closeSidebar}
className="p-1 text-gray-500 hover:text-white md:hidden"
aria-label="Close menu"
>
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
<path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12" />
</svg>
</button>
</div>
</div>

{/* Nav — every item closes the sidebar on tap (mobile) */}
<nav className="flex-1 px-3 py-4 space-y-1">
<NavItem
to="/"
onClick={closeSidebar}
icon={
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
<path strokeLinecap="round" strokeLinejoin="round" d="M3.75 6A2.25 2.25 0 016 3.75h2.25A2.25 2.25 0 0110.5 6v2.25a2.25 2.25 0 01-2.25 2.25H6a2.25 2.25 0 01-2.25-2.25V6zM3.75 15.75A2.25 2.25 0 016 13.5h2.25a2.25 2.25 0 012.25 2.25V18a2.25 2.25 0 01-2.25 2.25H6A2.25 2.25 0 013.75 18v-2.25zM13.5 6a2.25 2.25 0 012.25-2.25H18A2.25 2.25 0 0120.25 6v2.25A2.25 2.25 0 0118 10.5h-2.25a2.25 2.25 0 01-2.25-2.25V6zM13.5 15.75a2.25 2.25 0 012.25-2.25H18a2.25 2.25 0 012.25 2.25V18A2.25 2.25 0 0118 20.25h-2.25A2.25 2.25 0 0113.5 18v-2.25z" />
</svg>
}
>
Dashboard
</NavItem>

<NavItem
to="/chat"
onClick={closeSidebar}
icon={
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
<path strokeLinecap="round" strokeLinejoin="round" d="M8.625 12a.375.375 0 11-.75 0 .375.375 0 01.75 0zm0 0H8.25m4.125 0a.375.375 0 11-.75 0 .375.375 0 01.75 0zm0 0H12m4.125 0a.375.375 0 11-.75 0 .375.375 0 01.75 0zm0 0h-.375M21 12c0 4.556-4.03 8.25-9 8.25a9.764 9.764 0 01-2.555-.337A5.972 5.972 0 015.41 20.97a5.969 5.969 0 01-.474-.065 4.48 4.48 0 00.978-2.025c.09-.457-.133-.901-.467-1.226C3.93 16.178 3 14.189 3 12c0-4.556 4.03-8.25 9-8.25s9 3.694 9 8.25z" />
</svg>
}
>
Chat
</NavItem>

<NavItem
to="/characters"
onClick={closeSidebar}
icon={
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
<path strokeLinecap="round" strokeLinejoin="round" d="M15.75 6a3.75 3.75 0 11-7.5 0 3.75 3.75 0 017.5 0zM4.501 20.118a7.5 7.5 0 0114.998 0A17.933 17.933 0 0112 21.75c-2.676 0-5.216-.584-7.499-1.632z" />
</svg>
}
>
Characters
</NavItem>

<NavItem
to="/memories"
onClick={closeSidebar}
icon={
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
<path strokeLinecap="round" strokeLinejoin="round" d="M12 18v-5.25m0 0a6.01 6.01 0 001.5-.189m-1.5.189a6.01 6.01 0 01-1.5-.189m3.75 7.478a12.06 12.06 0 01-4.5 0m3.75 2.383a14.406 14.406 0 01-3 0M14.25 18v-.192c0-.983.658-1.823 1.508-2.316a7.5 7.5 0 10-7.517 0c.85.493 1.509 1.333 1.509 2.316V18" />
</svg>
}
>
Memories
</NavItem>

<NavItem
to="/editor"
onClick={closeSidebar}
icon={
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
<path strokeLinecap="round" strokeLinejoin="round" d="M9.594 3.94c.09-.542.56-.94 1.11-.94h2.593c.55 0 1.02.398 1.11.94l.213 1.281c.063.374.313.686.645.87.074.04.147.083.22.127.324.196.72.257 1.075.124l1.217-.456a1.125 1.125 0 011.37.49l1.296 2.247a1.125 1.125 0 01-.26 1.431l-1.003.827c-.293.24-.438.613-.431.992a6.759 6.759 0 010 .255c-.007.378.138.75.43.99l1.005.828c.424.35.534.954.26 1.43l-1.298 2.247a1.125 1.125 0 01-1.369.491l-1.217-.456c-.355-.133-.75-.072-1.076.124a6.57 6.57 0 01-.22.128c-.331.183-.581.495-.644.869l-.213 1.28c-.09.543-.56.941-1.11.941h-2.594c-.55 0-1.02-.398-1.11-.94l-.213-1.281c-.062-.374-.312-.686-.644-.87a6.52 6.52 0 01-.22-.127c-.325-.196-.72-.257-1.076-.124l-1.217.456a1.125 1.125 0 01-1.369-.49l-1.297-2.247a1.125 1.125 0 01.26-1.431l1.004-.827c.292-.24.437-.613.43-.992a6.932 6.932 0 010-.255c.007-.378-.138-.75-.43-.99l-1.004-.828a1.125 1.125 0 01-.26-1.43l1.297-2.247a1.125 1.125 0 011.37-.491l1.216.456c.356.133.751.072 1.076-.124.072-.044.146-.087.22-.128.332-.183.582-.495.644-.869l.214-1.281z" />
<path strokeLinecap="round" strokeLinejoin="round" d="M15 12a3 3 0 11-6 0 3 3 0 016 0z" />
</svg>
}
>
Editor
</NavItem>
</nav>

{/* Footer */}
<div className="px-6 py-4 border-t border-gray-800">
<p className="text-xs text-gray-600">HomeAI v0.1.0</p>
<p className="text-xs text-gray-700">Mac Mini M4 Pro</p>
</div>
</aside>

{/* Main content — add top padding on mobile for the header bar */}
<main className="flex-1 overflow-hidden flex flex-col pt-14 md:pt-0">
{children}
</main>
</div>
);
}
|
||||
|
||||
function App() {
|
||||
return (
|
||||
<BrowserRouter>
|
||||
<Layout>
|
||||
<Routes>
|
||||
<Route path="/" element={<div className="flex-1 overflow-y-auto p-4 md:p-8"><div className="max-w-6xl mx-auto"><Dashboard /></div></div>} />
|
||||
<Route path="/chat" element={<Chat />} />
|
||||
<Route path="/characters" element={<div className="flex-1 overflow-y-auto p-4 md:p-8"><div className="max-w-6xl mx-auto"><Characters /></div></div>} />
|
||||
<Route path="/memories" element={<div className="flex-1 overflow-y-auto p-4 md:p-8"><div className="max-w-6xl mx-auto"><Memories /></div></div>} />
|
||||
<Route path="/editor" element={<div className="flex-1 overflow-y-auto p-4 md:p-8"><div className="max-w-6xl mx-auto"><Editor /></div></div>} />
|
||||
</Routes>
|
||||
</Layout>
|
||||
</BrowserRouter>
|
||||
);
|
||||
}
|
||||
|
||||
export default App;
|
||||
41
homeai-dashboard/src/components/ChatPanel.jsx
Normal file
41
homeai-dashboard/src/components/ChatPanel.jsx
Normal file
@@ -0,0 +1,41 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import MessageBubble from './MessageBubble'
|
||||
import ThinkingIndicator from './ThinkingIndicator'
|
||||
|
||||
export default function ChatPanel({ messages, isLoading, onReplay, onRetry, character }) {
|
||||
const bottomRef = useRef(null)
|
||||
const name = character?.name || 'AI'
|
||||
const image = character?.image || null
|
||||
|
||||
useEffect(() => {
|
||||
bottomRef.current?.scrollIntoView({ behavior: 'smooth' })
|
||||
}, [messages, isLoading])
|
||||
|
||||
if (messages.length === 0 && !isLoading) {
|
||||
return (
|
||||
<div className="flex-1 flex items-center justify-center">
|
||||
<div className="text-center">
|
||||
{image ? (
|
||||
<img src={image} alt={name} className="w-20 h-20 rounded-full object-cover mx-auto mb-4 ring-2 ring-indigo-500/30" />
|
||||
) : (
|
||||
<div className="w-20 h-20 rounded-full bg-indigo-600/20 flex items-center justify-center mx-auto mb-4">
|
||||
<span className="text-indigo-400 text-2xl">{name[0]}</span>
|
||||
</div>
|
||||
)}
|
||||
<h2 className="text-xl font-medium text-gray-200 mb-2">Hi, I'm {name}</h2>
|
||||
<p className="text-gray-500 text-sm">Type a message or press the mic to talk</p>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex-1 overflow-y-auto py-4">
|
||||
{messages.map((msg) => (
|
||||
<MessageBubble key={msg.id} message={msg} onReplay={onReplay} onRetry={onRetry} character={character} />
|
||||
))}
|
||||
{isLoading && <ThinkingIndicator character={character} />}
|
||||
<div ref={bottomRef} />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
104
homeai-dashboard/src/components/ConversationList.jsx
Normal file
104
homeai-dashboard/src/components/ConversationList.jsx
Normal file
@@ -0,0 +1,104 @@
|
||||
// Human-friendly relative age for a timestamp string: "just now", "5m ago",
// "3h ago", "2d ago". Returns '' for missing input.
//
// Fix: an unparseable date string used to fall through every comparison
// (NaN compares false) and render "NaNd ago" in the conversation list; such
// input now returns '' like missing input does.
function timeAgo(dateStr) {
  if (!dateStr) return ''
  const diff = Date.now() - new Date(dateStr).getTime()
  // Invalid dates yield NaN from getTime(); bail out before formatting.
  if (Number.isNaN(diff)) return ''
  const mins = Math.floor(diff / 60000)
  if (mins < 1) return 'just now'
  if (mins < 60) return `${mins}m ago`
  const hours = Math.floor(mins / 60)
  if (hours < 24) return `${hours}h ago`
  const days = Math.floor(hours / 24)
  return `${days}d ago`
}
|
||||
|
||||
// Conversation sidebar: "New chat" button plus the list of past
// conversations with title, character name, relative age, and hover-reveal
// delete. Off-canvas drawer on mobile (isOpen/onToggle), static column on md+.
// NOTE(review): selecting a conversation also calls onToggle — presumably to
// close the drawer on mobile; on desktop the panel is statically positioned
// so the state flip is invisible. Confirm this is intentional.
export default function ConversationList({ conversations, activeId, onCreate, onSelect, onDelete, isOpen, onToggle }) {
return (
<>
{/* Mobile toggle button */}
<button
onClick={onToggle}
className="md:hidden absolute left-2 top-2 z-10 p-2 text-gray-400 hover:text-white bg-gray-900/80 rounded-lg border border-gray-800"
aria-label="Toggle conversations"
title="Conversations"
>
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={1.5}>
<path strokeLinecap="round" strokeLinejoin="round" d="M20.25 8.511c.884.284 1.5 1.128 1.5 2.097v4.286c0 1.136-.847 2.1-1.98 2.193-.34.027-.68.052-1.02.072v3.091l-3-3c-1.354 0-2.694-.055-4.02-.163a2.115 2.115 0 01-.825-.242m9.345-8.334a2.126 2.126 0 00-.476-.095 48.64 48.64 0 00-8.048 0c-1.131.094-1.976 1.057-1.976 2.192v4.286c0 .837.46 1.58 1.155 1.951m9.345-8.334V6.637c0-1.621-1.152-3.026-2.76-3.235A48.455 48.455 0 0011.25 3c-2.115 0-4.198.137-6.24.402-1.608.209-2.76 1.614-2.76 3.235v6.226c0 1.621 1.152 3.026 2.76 3.235.577.075 1.157.14 1.74.194V21l4.155-4.155" />
</svg>
</button>

{/* Mobile backdrop */}
{isOpen && (
<div className="fixed inset-0 bg-black/50 z-20 md:hidden" onClick={onToggle} />
)}

{/* Conversation panel — off-canvas on mobile, static on md+ */}
<div className={`
fixed inset-y-0 left-0 z-30 w-72 bg-gray-950 border-r border-gray-800 flex flex-col
transform transition-transform duration-200 ease-out
${isOpen ? 'translate-x-0' : '-translate-x-full'}
md:static md:translate-x-0 md:shrink-0
`}>
{/* Header with close on mobile */}
<div className="p-3 border-b border-gray-800 flex items-center gap-2">
<button
onClick={onCreate}
className="flex-1 flex items-center justify-center gap-2 px-3 py-2.5 bg-indigo-600 hover:bg-indigo-500 text-white text-sm rounded-lg transition-colors"
>
<svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
<path strokeLinecap="round" strokeLinejoin="round" d="M12 4.5v15m7.5-7.5h-15" />
</svg>
New chat
</button>
<button
onClick={onToggle}
className="p-2 text-gray-500 hover:text-white md:hidden"
aria-label="Close"
>
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
<path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12" />
</svg>
</button>
</div>

{/* Conversation list */}
<div className="flex-1 overflow-y-auto">
{conversations.length === 0 ? (
<p className="text-xs text-gray-600 text-center py-6">No conversations yet</p>
) : (
conversations.map(conv => (
<div
key={conv.id}
onClick={() => { onSelect(conv.id); if (onToggle) onToggle() }}
className={`group flex items-start gap-2 px-3 py-2.5 cursor-pointer border-b border-gray-800/50 transition-colors ${
conv.id === activeId
? 'bg-gray-800 text-white'
: 'text-gray-400 hover:bg-gray-800/50 hover:text-gray-200'
}`}
>
<div className="flex-1 min-w-0">
<p className="text-sm truncate">
{conv.title || 'New conversation'}
</p>
<div className="flex items-center gap-2 mt-0.5">
{conv.characterName && (
<span className="text-xs text-indigo-400/70">{conv.characterName}</span>
)}
<span className="text-xs text-gray-600">{timeAgo(conv.updatedAt)}</span>
</div>
</div>
{/* Delete — revealed on hover; stopPropagation so the row's onSelect
doesn't also fire */}
<button
onClick={(e) => { e.stopPropagation(); onDelete(conv.id) }}
className="opacity-0 group-hover:opacity-100 p-1.5 text-gray-500 hover:text-red-400 transition-all shrink-0 mt-0.5"
title="Delete"
>
<svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
<path strokeLinecap="round" strokeLinejoin="round" d="M14.74 9l-.346 9m-4.788 0L9.26 9m9.968-3.21c.342.052.682.107 1.022.166m-1.022-.165L18.16 19.673a2.25 2.25 0 01-2.244 2.077H8.084a2.25 2.25 0 01-2.244-2.077L4.772 5.79m14.456 0a48.108 48.108 0 00-3.478-.397m-12 .562c.34-.059.68-.114 1.022-.165m0 0a48.11 48.11 0 013.478-.397m7.5 0v-.916c0-1.18-.91-2.164-2.09-2.201a51.964 51.964 0 00-3.32 0c-1.18.037-2.09 1.022-2.09 2.201v.916m7.5 0a48.667 48.667 0 00-7.5 0" />
</svg>
</button>
</div>
))
)}
</div>
</div>
</>
)
}
|
||||
53
homeai-dashboard/src/components/InputBar.jsx
Normal file
53
homeai-dashboard/src/components/InputBar.jsx
Normal file
@@ -0,0 +1,53 @@
|
||||
import { useState, useRef } from 'react'
|
||||
import VoiceButton from './VoiceButton'
|
||||
|
||||
export default function InputBar({ onSend, onVoiceToggle, isLoading, isRecording, isTranscribing }) {
|
||||
const [text, setText] = useState('')
|
||||
const inputRef = useRef(null)
|
||||
|
||||
const handleSubmit = (e) => {
|
||||
e.preventDefault()
|
||||
if (!text.trim() || isLoading) return
|
||||
onSend(text)
|
||||
setText('')
|
||||
}
|
||||
|
||||
const handleKeyDown = (e) => {
|
||||
if (e.key === 'Enter' && !e.shiftKey) {
|
||||
e.preventDefault()
|
||||
handleSubmit(e)
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<form onSubmit={handleSubmit} className="border-t border-gray-800 bg-gray-950 px-3 sm:px-4 py-2 sm:py-3 shrink-0">
|
||||
<div className="flex items-end gap-2 max-w-3xl mx-auto">
|
||||
<VoiceButton
|
||||
isRecording={isRecording}
|
||||
isTranscribing={isTranscribing}
|
||||
onToggle={onVoiceToggle}
|
||||
disabled={isLoading}
|
||||
/>
|
||||
<textarea
|
||||
ref={inputRef}
|
||||
value={text}
|
||||
onChange={(e) => setText(e.target.value)}
|
||||
onKeyDown={handleKeyDown}
|
||||
placeholder="Type a message..."
|
||||
rows={1}
|
||||
className="flex-1 bg-gray-800 text-gray-100 rounded-xl px-4 py-2.5 text-sm resize-none placeholder-gray-500 focus:outline-none focus:ring-1 focus:ring-indigo-500 min-h-[42px] max-h-32"
|
||||
disabled={isLoading}
|
||||
/>
|
||||
<button
|
||||
type="submit"
|
||||
disabled={!text.trim() || isLoading}
|
||||
className="w-11 h-11 sm:w-10 sm:h-10 rounded-full bg-indigo-600 text-white flex items-center justify-center shrink-0 hover:bg-indigo-500 disabled:opacity-40 disabled:hover:bg-indigo-600 transition-colors"
|
||||
>
|
||||
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
|
||||
<path strokeLinecap="round" strokeLinejoin="round" d="M6 12L3.269 3.126A59.768 59.768 0 0121.485 12 59.77 59.77 0 013.27 20.876L5.999 12zm0 0h7.5" />
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
)
|
||||
}
|
||||
146
homeai-dashboard/src/components/MessageBubble.jsx
Normal file
146
homeai-dashboard/src/components/MessageBubble.jsx
Normal file
@@ -0,0 +1,146 @@
|
||||
import { useState } from 'react'
|
||||
|
||||
function Avatar({ character }) {
|
||||
const name = character?.name || 'AI'
|
||||
const image = character?.image || null
|
||||
|
||||
if (image) {
|
||||
return <img src={image} alt={name} className="w-8 h-8 rounded-full object-cover shrink-0 mt-0.5 ring-1 ring-gray-700" />
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="w-8 h-8 rounded-full bg-indigo-600/20 flex items-center justify-center shrink-0 mt-0.5">
|
||||
<span className="text-indigo-400 text-sm">{name[0]}</span>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function ImageOverlay({ src, onClose }) {
|
||||
return (
|
||||
<div
|
||||
className="fixed inset-0 z-50 bg-black/80 flex items-center justify-center cursor-zoom-out"
|
||||
onClick={onClose}
|
||||
>
|
||||
<img
|
||||
src={src}
|
||||
alt="Full size"
|
||||
className="max-w-[90vw] max-h-[90vh] object-contain rounded-lg shadow-2xl"
|
||||
onClick={(e) => e.stopPropagation()}
|
||||
/>
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="absolute top-4 right-4 text-white/70 hover:text-white transition-colors p-2"
|
||||
>
|
||||
<svg className="w-6 h-6" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
|
||||
<path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12" />
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
const IMAGE_URL_RE = /(https?:\/\/[^\s]+\.(?:png|jpg|jpeg|gif|webp))/gi
|
||||
|
||||
function RichContent({ text }) {
|
||||
const [overlayImage, setOverlayImage] = useState(null)
|
||||
const parts = []
|
||||
let lastIndex = 0
|
||||
let match
|
||||
|
||||
IMAGE_URL_RE.lastIndex = 0
|
||||
while ((match = IMAGE_URL_RE.exec(text)) !== null) {
|
||||
if (match.index > lastIndex) {
|
||||
parts.push({ type: 'text', value: text.slice(lastIndex, match.index) })
|
||||
}
|
||||
parts.push({ type: 'image', value: match[1] })
|
||||
lastIndex = IMAGE_URL_RE.lastIndex
|
||||
}
|
||||
if (lastIndex < text.length) {
|
||||
parts.push({ type: 'text', value: text.slice(lastIndex) })
|
||||
}
|
||||
|
||||
if (parts.length === 1 && parts[0].type === 'text') {
|
||||
return <>{text}</>
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
{parts.map((part, i) =>
|
||||
part.type === 'image' ? (
|
||||
<button
|
||||
key={i}
|
||||
onClick={() => setOverlayImage(part.value)}
|
||||
className="block my-2 cursor-zoom-in"
|
||||
>
|
||||
<img
|
||||
src={part.value}
|
||||
alt="Generated image"
|
||||
className="rounded-xl max-w-full max-h-80 object-contain"
|
||||
loading="lazy"
|
||||
/>
|
||||
</button>
|
||||
) : (
|
||||
<span key={i}>{part.value}</span>
|
||||
)
|
||||
)}
|
||||
{overlayImage && <ImageOverlay src={overlayImage} onClose={() => setOverlayImage(null)} />}
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
// One chat message row.
// User messages: right-aligned indigo bubble, raw text.
// Assistant messages: left-aligned with Avatar, RichContent body (inline
// images), an optional model tag, and either a Retry action (error messages)
// or a Replay-audio action (normal messages).
export default function MessageBubble({ message, onReplay, onRetry, character }) {
const isUser = message.role === 'user'

return (
<div className={`flex ${isUser ? 'justify-end' : 'justify-start'} px-3 sm:px-4 py-1.5`}>
<div className={`flex items-start gap-2 sm:gap-3 max-w-[92%] sm:max-w-[80%] ${isUser ? 'flex-row-reverse' : ''}`}>
{!isUser && <Avatar character={character} />}
<div>
{/* Bubble — red-tinted variant for error replies */}
<div
className={`rounded-2xl px-4 py-2.5 text-sm leading-relaxed whitespace-pre-wrap ${
isUser
? 'bg-indigo-600 text-white'
: message.isError
? 'bg-red-900/40 text-red-200 border border-red-800/50'
: 'bg-gray-800 text-gray-100'
}`}
>
{isUser ? message.content : <RichContent text={message.content} />}
</div>
{/* Assistant-only footer row: model tag + Retry or Replay */}
{!isUser && (
<div className="flex items-center gap-2 mt-1 ml-1">
{message.model && (
<span className="text-[10px] text-gray-500 font-mono">
{message.model}
</span>
)}
{message.isError && onRetry && (
<button
onClick={() => onRetry(message.id)}
className="text-red-400 hover:text-red-300 transition-colors flex items-center gap-1 text-xs"
title="Retry"
>
<svg className="w-3.5 h-3.5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
<path strokeLinecap="round" strokeLinejoin="round" d="M16.023 9.348h4.992v-.001M2.985 19.644v-4.992m0 0h4.992m-4.993 0l3.181 3.183a8.25 8.25 0 0013.803-3.7M4.031 9.865a8.25 8.25 0 0113.803-3.7l3.181 3.182" />
</svg>
Retry
</button>
)}
{!message.isError && onReplay && (
<button
onClick={() => onReplay(message.content)}
className="text-gray-500 hover:text-indigo-400 transition-colors"
title="Replay audio"
>
<svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
<path strokeLinecap="round" strokeLinejoin="round" d="M15.536 8.464a5 5 0 010 7.072M17.95 6.05a8 8 0 010 11.9M6.5 9H4a1 1 0 00-1 1v4a1 1 0 001 1h2.5l4 4V5l-4 4z" />
</svg>
</button>
)}
</div>
)}
</div>
</div>
</div>
)
}
|
||||
52
homeai-dashboard/src/components/PromptStyleSelector.jsx
Normal file
52
homeai-dashboard/src/components/PromptStyleSelector.jsx
Normal file
@@ -0,0 +1,52 @@
|
||||
const GROUP_LABELS = { cloud: 'Cloud', local: 'Local' }
|
||||
|
||||
const GROUP_COLORS = {
|
||||
cloud: {
|
||||
active: 'bg-indigo-600 text-white',
|
||||
inactive: 'text-indigo-400 hover:bg-indigo-900/30',
|
||||
},
|
||||
local: {
|
||||
active: 'bg-emerald-600 text-white',
|
||||
inactive: 'text-emerald-400 hover:bg-emerald-900/30',
|
||||
},
|
||||
}
|
||||
|
||||
export default function PromptStyleSelector({ styles, activeStyle, onSelect }) {
|
||||
if (!styles || styles.length === 0) return null
|
||||
|
||||
const groups = { cloud: [], local: [] }
|
||||
for (const s of styles) {
|
||||
const g = s.group === 'local' ? 'local' : 'cloud'
|
||||
groups[g].push(s)
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex items-center gap-2 sm:gap-3 px-3 sm:px-4 py-1.5 border-b border-gray-800/50 shrink-0 overflow-x-auto scrollbar-none">
|
||||
{Object.entries(groups).map(([group, groupStyles]) => (
|
||||
groupStyles.length > 0 && (
|
||||
<div key={group} className="flex items-center gap-1">
|
||||
<span className="text-[10px] uppercase tracking-wider text-gray-600 mr-1">
|
||||
{GROUP_LABELS[group]}
|
||||
</span>
|
||||
{groupStyles.map((s) => {
|
||||
const isActive = s.id === activeStyle
|
||||
const colors = GROUP_COLORS[group] || GROUP_COLORS.cloud
|
||||
return (
|
||||
<button
|
||||
key={s.id}
|
||||
onClick={() => onSelect(s.id)}
|
||||
className={`text-xs px-2.5 py-1 sm:px-2 sm:py-0.5 rounded-full transition-colors whitespace-nowrap ${
|
||||
isActive ? colors.active : colors.inactive
|
||||
}`}
|
||||
title={s.description || s.name}
|
||||
>
|
||||
{s.name}
|
||||
</button>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
)
|
||||
))}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
106
homeai-dashboard/src/components/SettingsDrawer.jsx
Normal file
106
homeai-dashboard/src/components/SettingsDrawer.jsx
Normal file
@@ -0,0 +1,106 @@
|
||||
import { VOICES, TTS_ENGINES } from '../lib/constants'
|
||||
|
||||
// Right-hand settings drawer. Controls: TTS engine, voice (Kokoro: dropdown
// of bundled voices; other engines: a read-only identifier sourced from the
// active character profile), auto-speak toggle, and STT mode. `onUpdate`
// receives (settingKey, newValue).
export default function SettingsDrawer({ isOpen, onClose, settings, onUpdate }) {
if (!isOpen) return null

// Missing engine defaults to Kokoro, which drives which voice UI is shown.
const isKokoro = !settings.ttsEngine || settings.ttsEngine === 'kokoro'

return (
<>
<div className="fixed inset-0 bg-black/50 z-40" onClick={onClose} />
<div className="fixed right-0 top-0 bottom-0 w-full sm:w-80 bg-gray-900 border-l border-gray-800 z-50 flex flex-col">
<div className="flex items-center justify-between px-4 py-3 border-b border-gray-800">
<h2 className="text-sm font-medium text-gray-200">Settings</h2>
<button onClick={onClose} className="text-gray-500 hover:text-gray-300">
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
<path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12" />
</svg>
</button>
</div>
<div className="flex-1 overflow-y-auto p-4 space-y-5">
{/* TTS Engine */}
<div>
<label className="block text-xs font-medium text-gray-400 mb-1.5">TTS Engine</label>
<select
value={settings.ttsEngine || 'kokoro'}
onChange={(e) => onUpdate('ttsEngine', e.target.value)}
className="w-full bg-gray-800 text-gray-200 text-sm rounded-lg px-3 py-2 border border-gray-700 focus:outline-none focus:border-indigo-500"
>
{TTS_ENGINES.map((e) => (
<option key={e.id} value={e.id}>{e.label}</option>
))}
</select>
</div>

{/* Voice */}
<div>
<label className="block text-xs font-medium text-gray-400 mb-1.5">Voice</label>
{isKokoro ? (
<select
value={settings.voice}
onChange={(e) => onUpdate('voice', e.target.value)}
className="w-full bg-gray-800 text-gray-200 text-sm rounded-lg px-3 py-2 border border-gray-700 focus:outline-none focus:border-indigo-500"
>
{VOICES.map((v) => (
<option key={v.id} value={v.id}>{v.label}</option>
))}
</select>
) : (
<div>
{/* NOTE(review): this input is readOnly, so its onChange handler can
never fire — presumably leftover from an editable version. Confirm
and drop one of the two. */}
<input
type="text"
value={settings.voice || ''}
onChange={(e) => onUpdate('voice', e.target.value)}
className="w-full bg-gray-800 text-gray-200 text-sm rounded-lg px-3 py-2 border border-gray-700 focus:outline-none focus:border-indigo-500"
placeholder={settings.ttsEngine === 'elevenlabs' ? 'ElevenLabs voice ID' : 'Voice identifier'}
readOnly
/>
<p className="text-xs text-gray-500 mt-1">
Set via active character profile
</p>
</div>
)}
</div>

{/* Auto TTS */}
<div className="flex items-center justify-between">
<div>
<div className="text-sm text-gray-200">Auto-speak responses</div>
<div className="text-xs text-gray-500">Speak assistant replies aloud</div>
</div>
<button
onClick={() => onUpdate('autoTts', !settings.autoTts)}
className={`relative w-10 h-6 rounded-full transition-colors ${
settings.autoTts ? 'bg-indigo-600' : 'bg-gray-700'
}`}
>
<span
className={`absolute top-0.5 left-0.5 w-5 h-5 rounded-full bg-white transition-transform ${
settings.autoTts ? 'translate-x-4' : ''
}`}
/>
</button>
</div>

{/* STT Mode */}
<div>
<label className="block text-xs font-medium text-gray-400 mb-1.5">Speech recognition</label>
<select
value={settings.sttMode}
onChange={(e) => onUpdate('sttMode', e.target.value)}
className="w-full bg-gray-800 text-gray-200 text-sm rounded-lg px-3 py-2 border border-gray-700 focus:outline-none focus:border-indigo-500"
>
<option value="bridge">Wyoming STT (local)</option>
<option value="webspeech">Web Speech API (browser)</option>
</select>
<p className="text-xs text-gray-500 mt-1">
{settings.sttMode === 'bridge'
? 'Uses Whisper via the local bridge server'
: 'Uses browser built-in speech recognition'}
</p>
</div>
</div>
</div>
</>
)
}
|
||||
11
homeai-dashboard/src/components/StatusIndicator.jsx
Normal file
11
homeai-dashboard/src/components/StatusIndicator.jsx
Normal file
@@ -0,0 +1,11 @@
|
||||
export default function StatusIndicator({ isOnline }) {
|
||||
if (isOnline === null) {
|
||||
return <span className="inline-block w-2.5 h-2.5 rounded-full bg-gray-500 animate-pulse" title="Checking..." />
|
||||
}
|
||||
return (
|
||||
<span
|
||||
className={`inline-block w-2.5 h-2.5 rounded-full ${isOnline ? 'bg-emerald-400' : 'bg-red-400'}`}
|
||||
title={isOnline ? 'Bridge online' : 'Bridge offline'}
|
||||
/>
|
||||
)
|
||||
}
|
||||
21
homeai-dashboard/src/components/ThinkingIndicator.jsx
Normal file
21
homeai-dashboard/src/components/ThinkingIndicator.jsx
Normal file
@@ -0,0 +1,21 @@
|
||||
export default function ThinkingIndicator({ character }) {
|
||||
const name = character?.name || 'AI'
|
||||
const image = character?.image || null
|
||||
|
||||
return (
|
||||
<div className="flex items-start gap-3 px-4 py-3">
|
||||
{image ? (
|
||||
<img src={image} alt={name} className="w-8 h-8 rounded-full object-cover shrink-0 ring-1 ring-gray-700" />
|
||||
) : (
|
||||
<div className="w-8 h-8 rounded-full bg-indigo-600/20 flex items-center justify-center shrink-0">
|
||||
<span className="text-indigo-400 text-sm">{name[0]}</span>
|
||||
</div>
|
||||
)}
|
||||
<div className="flex items-center gap-1 pt-2.5">
|
||||
<span className="w-2 h-2 rounded-full bg-gray-400 animate-[bounce_1.4s_ease-in-out_infinite]" />
|
||||
<span className="w-2 h-2 rounded-full bg-gray-400 animate-[bounce_1.4s_ease-in-out_0.2s_infinite]" />
|
||||
<span className="w-2 h-2 rounded-full bg-gray-400 animate-[bounce_1.4s_ease-in-out_0.4s_infinite]" />
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
32
homeai-dashboard/src/components/VoiceButton.jsx
Normal file
32
homeai-dashboard/src/components/VoiceButton.jsx
Normal file
@@ -0,0 +1,32 @@
|
||||
export default function VoiceButton({ isRecording, isTranscribing, onToggle, disabled }) {
|
||||
const handleClick = () => {
|
||||
if (disabled || isTranscribing) return
|
||||
onToggle()
|
||||
}
|
||||
|
||||
return (
|
||||
<button
|
||||
onClick={handleClick}
|
||||
disabled={disabled || isTranscribing}
|
||||
className={`w-11 h-11 sm:w-10 sm:h-10 rounded-full flex items-center justify-center transition-all shrink-0 ${
|
||||
isRecording
|
||||
? 'bg-red-500 text-white shadow-[0_0_0_4px_rgba(239,68,68,0.3)] animate-pulse'
|
||||
: isTranscribing
|
||||
? 'bg-gray-700 text-gray-400 cursor-wait'
|
||||
: 'bg-gray-800 text-gray-400 hover:bg-gray-700 hover:text-gray-200'
|
||||
}`}
|
||||
title={isRecording ? 'Stop recording' : isTranscribing ? 'Transcribing...' : 'Start recording (Space)'}
|
||||
>
|
||||
{isTranscribing ? (
|
||||
<svg className="w-5 h-5 animate-spin" fill="none" viewBox="0 0 24 24">
|
||||
<circle className="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" strokeWidth="4" />
|
||||
<path className="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4z" />
|
||||
</svg>
|
||||
) : (
|
||||
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
|
||||
<path strokeLinecap="round" strokeLinejoin="round" d="M12 18.75a6 6 0 006-6v-1.5m-6 7.5a6 6 0 01-6-6v-1.5m6 7.5v3.75m-3.75 0h7.5M12 15.75a3 3 0 01-3-3V4.5a3 3 0 116 0v8.25a3 3 0 01-3 3z" />
|
||||
</svg>
|
||||
)}
|
||||
</button>
|
||||
)
|
||||
}
|
||||
28
homeai-dashboard/src/hooks/useActiveCharacter.js
Normal file
28
homeai-dashboard/src/hooks/useActiveCharacter.js
Normal file
@@ -0,0 +1,28 @@
|
||||
import { useState, useEffect } from 'react'
|
||||
|
||||
const ACTIVE_KEY = 'homeai_active_character'
|
||||
|
||||
/**
 * Loads the currently-selected character profile (ID stored in localStorage
 * under ACTIVE_KEY) from the server.
 *
 * @returns {object|null} {id, name, image, tts} or null until loaded / when
 *          no character is active or the fetch fails.
 */
export function useActiveCharacter() {
  const [character, setCharacter] = useState(null)

  useEffect(() => {
    const activeId = localStorage.getItem(ACTIVE_KEY)
    if (!activeId) return

    // FIX: guard against setting state after unmount — the fetch may outlive
    // the component. Matches the cancellation pattern used by the other hooks.
    let cancelled = false

    fetch(`/api/characters/${activeId}`)
      .then(r => r.ok ? r.json() : null)
      .then(profile => {
        if (cancelled || !profile) return
        setCharacter({
          id: profile.id,
          name: profile.data.display_name || profile.data.name || 'AI',
          image: profile.image || null,
          tts: profile.data.tts || null,
        })
      })
      .catch(() => {}) // best-effort: a failed load just leaves character null

    return () => { cancelled = true }
  }, [])

  return character
}
|
||||
18
homeai-dashboard/src/hooks/useBridgeHealth.js
Normal file
18
homeai-dashboard/src/hooks/useBridgeHealth.js
Normal file
@@ -0,0 +1,18 @@
|
||||
import { useState, useEffect, useRef } from 'react'
|
||||
import { healthCheck } from '../lib/api'
|
||||
|
||||
/**
 * Polls the bridge health endpoint every 15 seconds.
 *
 * @returns {boolean|null} null while the first check is in flight, then
 *          true/false for online/offline.
 */
export function useBridgeHealth() {
  const [isOnline, setIsOnline] = useState(null)
  const intervalRef = useRef(null)

  useEffect(() => {
    // FIX: the async check can resolve after unmount; don't set state then.
    let cancelled = false
    const check = async () => {
      const online = await healthCheck()
      if (!cancelled) setIsOnline(online)
    }
    check()
    intervalRef.current = setInterval(check, 15000)
    return () => {
      cancelled = true
      clearInterval(intervalRef.current)
    }
  }, [])

  return isOnline
}
|
||||
174
homeai-dashboard/src/hooks/useChat.js
Normal file
174
homeai-dashboard/src/hooks/useChat.js
Normal file
@@ -0,0 +1,174 @@
|
||||
import { useState, useCallback, useEffect, useRef } from 'react'
|
||||
import { sendMessage } from '../lib/api'
|
||||
import { getConversation, saveConversation } from '../lib/conversationApi'
|
||||
|
||||
/**
 * Chat state for the active conversation.
 *
 * Loads messages from the server whenever `conversationId` changes, sends
 * messages through the agent API, and persists every change back via the
 * conversation API.
 *
 * @param {string|null} conversationId active conversation ID (null = none)
 * @param {{characterId?: string, characterName?: string}|null} conversationMeta
 * @param {(id: string, meta: object) => void} [onConversationUpdate] notified
 *        after each persist so the conversation list can refresh its metadata
 */
export function useChat(conversationId, conversationMeta, onConversationUpdate) {
  const [messages, setMessages] = useState([])
  const [isLoading, setIsLoading] = useState(false)         // send/retry in flight
  const [isLoadingConv, setIsLoadingConv] = useState(false) // conversation fetch in flight
  const convRef = useRef(null)  // last full conversation object loaded/saved
  const idRef = useRef(conversationId)

  // Keep idRef in sync so async callbacks always persist to the current ID.
  useEffect(() => { idRef.current = conversationId }, [conversationId])

  // Load conversation from server when ID changes.
  useEffect(() => {
    if (!conversationId) {
      setMessages([])
      convRef.current = null
      return
    }

    let cancelled = false
    setIsLoadingConv(true)

    getConversation(conversationId).then(conv => {
      if (cancelled) return
      if (conv) {
        convRef.current = conv
        setMessages(conv.messages || [])
      } else {
        convRef.current = null
        setMessages([])
      }
      setIsLoadingConv(false)
    }).catch(() => {
      // A failed load falls back to an empty conversation rather than crashing.
      if (!cancelled) {
        convRef.current = null
        setMessages([])
        setIsLoadingConv(false)
      }
    })

    return () => { cancelled = true }
  }, [conversationId])

  // Persist the given message list (plus metadata) to the server.
  // `overrideId` covers the window right after a conversation is created,
  // before the new ID has propagated back down as the `conversationId` prop.
  const persist = useCallback(async (updatedMessages, title, overrideId) => {
    const id = overrideId || idRef.current
    if (!id) return
    const now = new Date().toISOString()
    const conv = {
      id,
      title: title || convRef.current?.title || '',
      characterId: conversationMeta?.characterId || convRef.current?.characterId || '',
      characterName: conversationMeta?.characterName || convRef.current?.characterName || '',
      createdAt: convRef.current?.createdAt || now,
      updatedAt: now,
      messages: updatedMessages,
    }
    convRef.current = conv
    // Best-effort save: a failed write must not break the chat UI.
    await saveConversation(conv).catch(() => {})
    if (onConversationUpdate) {
      onConversationUpdate(id, {
        title: conv.title,
        updatedAt: conv.updatedAt,
        messageCount: conv.messages.length,
      })
    }
  }, [conversationMeta, onConversationUpdate])

  // Send a user message. `overrideId` is for just-created conversations;
  // `promptStyle` optionally overrides the response style for this message.
  // Returns the assistant's response text, or null on failure.
  const send = useCallback(async (text, overrideId, promptStyle) => {
    if (!text.trim() || isLoading) return null

    const userMsg = { id: Date.now(), role: 'user', content: text.trim(), timestamp: new Date().toISOString() }
    const isFirstMessage = messages.length === 0
    const newMessages = [...messages, userMsg]
    setMessages(newMessages)
    setIsLoading(true)

    try {
      const activeConvId = overrideId || idRef.current
      const { response, model, prompt_style } = await sendMessage(text.trim(), conversationMeta?.characterId || null, promptStyle, activeConvId)
      const assistantMsg = {
        id: Date.now() + 1,
        role: 'assistant',
        content: response,
        timestamp: new Date().toISOString(),
        ...(model && { model }),
        ...(prompt_style && { prompt_style }),
      }
      const allMessages = [...newMessages, assistantMsg]
      setMessages(allMessages)

      // The first message becomes the conversation title (truncated to 80).
      const title = isFirstMessage
        ? text.trim().slice(0, 80) + (text.trim().length > 80 ? '...' : '')
        : undefined
      await persist(allMessages, title, overrideId)

      return response
    } catch (err) {
      const errorMsg = {
        id: Date.now() + 1,
        role: 'assistant',
        content: `Error: ${err.message}`,
        timestamp: new Date().toISOString(),
        isError: true,
      }
      const allMessages = [...newMessages, errorMsg]
      setMessages(allMessages)
      await persist(allMessages, undefined, overrideId)
      return null
    } finally {
      setIsLoading(false)
    }
    // FIX: conversationMeta was missing from this dependency array even though
    // it is read directly above (exhaustive-deps violation; could capture a
    // stale characterId if persist's identity ever stops tracking it).
  }, [isLoading, messages, persist, conversationMeta])

  // Retry: remove the error message and re-send the preceding user message.
  // Inlines the send logic so it operates on the cleaned message list.
  const retry = useCallback(async (errorMsgId, promptStyle) => {
    const idx = messages.findIndex(m => m.id === errorMsgId)
    if (idx < 1) return null
    // The user message must sit immediately before the error bubble.
    const userMsg = messages[idx - 1]
    if (!userMsg || userMsg.role !== 'user') return null
    const cleaned = messages.filter(m => m.id !== errorMsgId)
    setMessages(cleaned)
    await persist(cleaned)

    setIsLoading(true)
    try {
      const activeConvId = idRef.current
      const { response, model, prompt_style } = await sendMessage(userMsg.content, conversationMeta?.characterId || null, promptStyle, activeConvId)
      const assistantMsg = {
        id: Date.now() + 1,
        role: 'assistant',
        content: response,
        timestamp: new Date().toISOString(),
        ...(model && { model }),
        ...(prompt_style && { prompt_style }),
      }
      const allMessages = [...cleaned, assistantMsg]
      setMessages(allMessages)
      await persist(allMessages)
      return response
    } catch (err) {
      const newError = {
        id: Date.now() + 1,
        role: 'assistant',
        content: `Error: ${err.message}`,
        timestamp: new Date().toISOString(),
        isError: true,
      }
      const allMessages = [...cleaned, newError]
      setMessages(allMessages)
      await persist(allMessages)
      return null
    } finally {
      setIsLoading(false)
    }
  }, [messages, persist, conversationMeta])

  // Wipe the current conversation's messages (and persist the empty list).
  const clearHistory = useCallback(async () => {
    setMessages([])
    if (idRef.current) {
      await persist([], undefined)
    }
  }, [persist])

  return { messages, isLoading, isLoadingConv, send, retry, clearHistory }
}
|
||||
66
homeai-dashboard/src/hooks/useConversations.js
Normal file
66
homeai-dashboard/src/hooks/useConversations.js
Normal file
@@ -0,0 +1,66 @@
|
||||
import { useState, useEffect, useCallback } from 'react'
|
||||
import { listConversations, saveConversation, deleteConversation as deleteConv } from '../lib/conversationApi'
|
||||
|
||||
const ACTIVE_KEY = 'homeai_active_conversation'
|
||||
|
||||
/**
 * Manages the conversation list, the active conversation ID (mirrored to
 * localStorage), and create/delete/metadata operations against the
 * conversation API.
 */
export function useConversations() {
  const [conversations, setConversations] = useState([])
  const [activeId, setActiveId] = useState(() => localStorage.getItem(ACTIVE_KEY) || null)
  const [isLoading, setIsLoading] = useState(true)

  // Fetch the full list; on failure fall back to an empty list.
  const loadList = useCallback(async () => {
    try {
      setConversations(await listConversations())
    } catch {
      setConversations([])
    } finally {
      setIsLoading(false)
    }
  }, [])

  useEffect(() => { loadList() }, [loadList])

  // Select a conversation (or null to deselect) and mirror to localStorage.
  const select = useCallback((id) => {
    setActiveId(id)
    if (id) {
      localStorage.setItem(ACTIVE_KEY, id)
    } else {
      localStorage.removeItem(ACTIVE_KEY)
    }
  }, [])

  // Create a new empty conversation, persist it, and make it active.
  const create = useCallback(async (characterId, characterName) => {
    const now = new Date().toISOString()
    const conv = {
      id: `conv_${Date.now()}`,
      title: '',
      characterId: characterId || '',
      characterName: characterName || '',
      createdAt: now,
      updatedAt: now,
      messages: [],
    }
    await saveConversation(conv)
    setConversations(prev => [{ ...conv, messageCount: 0 }, ...prev])
    select(conv.id)
    return conv.id
  }, [select])

  // Delete a conversation; deselect it if it was the active one.
  const remove = useCallback(async (id) => {
    await deleteConv(id)
    setConversations(prev => prev.filter(c => c.id !== id))
    if (activeId === id) {
      select(null)
    }
  }, [activeId, select])

  // Patch list-item metadata (title, updatedAt, messageCount) in place.
  const updateMeta = useCallback((id, updates) => {
    setConversations(prev => prev.map(c => (c.id === id ? { ...c, ...updates } : c)))
  }, [])

  return { conversations, activeId, isLoading, select, create, remove, updateMeta, refresh: loadList }
}
|
||||
27
homeai-dashboard/src/hooks/useFollowups.js
Normal file
27
homeai-dashboard/src/hooks/useFollowups.js
Normal file
@@ -0,0 +1,27 @@
|
||||
import { useState, useEffect, useCallback } from 'react';
|
||||
import { getFollowups } from '../lib/memoryApi';
|
||||
|
||||
/**
 * Fetches open follow-up items for a character, refreshing whenever the
 * character changes. Exposes refresh() for manual reloads after mutations.
 */
export function useFollowups(characterId) {
  const [followups, setFollowups] = useState([]);
  const [loading, setLoading] = useState(false);

  const refresh = useCallback(async () => {
    // No character selected — nothing to fetch.
    if (!characterId) {
      setFollowups([]);
      return;
    }
    setLoading(true);
    try {
      const data = await getFollowups(characterId);
      setFollowups(data.followups || []);
    } catch {
      // Treat fetch failures as "no follow-ups" rather than surfacing them.
      setFollowups([]);
    } finally {
      setLoading(false);
    }
  }, [characterId]);

  useEffect(() => { refresh(); }, [refresh]);

  return { followups, loading, refresh };
}
|
||||
34
homeai-dashboard/src/hooks/usePromptStyle.js
Normal file
34
homeai-dashboard/src/hooks/usePromptStyle.js
Normal file
@@ -0,0 +1,34 @@
|
||||
import { useState, useEffect, useCallback } from 'react'
|
||||
import { getPromptStyles, getActiveStyle, setActiveStyle } from '../lib/api'
|
||||
|
||||
/**
 * Loads the available prompt styles and the active selection from the
 * server. selectStyle() updates optimistically, then persists the choice.
 */
export function usePromptStyle() {
  const [styles, setStyles] = useState([])
  const [activeStyle, setActive] = useState('standard')
  const [isLoading, setIsLoading] = useState(true)

  useEffect(() => {
    let cancelled = false
    const load = async () => {
      try {
        // Fetch both in parallel; either failing counts as a failed load.
        const [allStyles, active] = await Promise.all([getPromptStyles(), getActiveStyle()])
        if (cancelled) return
        setStyles(allStyles)
        setActive(active.style || 'standard')
        setIsLoading(false)
      } catch {
        if (!cancelled) setIsLoading(false)
      }
    }
    load()
    return () => { cancelled = true }
  }, [])

  // Optimistic update: reflect the choice immediately, then persist it.
  const selectStyle = useCallback(async (styleId) => {
    setActive(styleId)
    try {
      await setActiveStyle(styleId)
    } catch (err) {
      console.error('Failed to set prompt style:', err)
    }
  }, [])

  return { styles, activeStyle, selectStyle, isLoading }
}
|
||||
27
homeai-dashboard/src/hooks/useSettings.js
Normal file
27
homeai-dashboard/src/hooks/useSettings.js
Normal file
@@ -0,0 +1,27 @@
|
||||
import { useState, useCallback } from 'react'
|
||||
import { DEFAULT_SETTINGS } from '../lib/constants'
|
||||
|
||||
const STORAGE_KEY = 'homeai_dashboard_settings'
|
||||
|
||||
/**
 * Reads persisted settings from localStorage and overlays them on the
 * defaults. Any storage/parse error yields a fresh copy of the defaults.
 */
function loadSettings() {
  try {
    const raw = localStorage.getItem(STORAGE_KEY)
    if (!raw) return { ...DEFAULT_SETTINGS }
    return { ...DEFAULT_SETTINGS, ...JSON.parse(raw) }
  } catch {
    return { ...DEFAULT_SETTINGS }
  }
}
|
||||
|
||||
/**
 * Dashboard settings persisted to localStorage.
 *
 * @returns {{settings: object, updateSetting: (key: string, value: *) => void}}
 */
export function useSettings() {
  const [settings, setSettings] = useState(loadSettings)

  // Update a single setting and persist the merged result.
  const updateSetting = useCallback((key, value) => {
    setSettings((prev) => {
      const next = { ...prev, [key]: value }
      // FIX: localStorage.setItem can throw (quota exceeded, private
      // browsing). Guard it so a storage failure never breaks state updates.
      try {
        localStorage.setItem(STORAGE_KEY, JSON.stringify(next))
      } catch {
        // Non-fatal: the setting simply won't survive a reload.
      }
      return next
    })
  }, [])

  return { settings, updateSetting }
}
|
||||
56
homeai-dashboard/src/hooks/useTtsPlayback.js
Normal file
56
homeai-dashboard/src/hooks/useTtsPlayback.js
Normal file
@@ -0,0 +1,56 @@
|
||||
import { useState, useRef, useCallback } from 'react'
|
||||
import { synthesize } from '../lib/api'
|
||||
|
||||
/**
 * Plays synthesized speech through the Web Audio API.
 * speak() cancels any in-flight playback before starting the new clip.
 *
 * @param {string} voice TTS voice ID
 * @param {string} [engine='kokoro'] TTS engine ID
 * @param {string|null} [model=null] optional engine-specific model override
 */
export function useTtsPlayback(voice, engine = 'kokoro', model = null) {
  const [isPlaying, setIsPlaying] = useState(false)
  const audioCtxRef = useRef(null)
  const sourceRef = useRef(null)

  // Lazily (re)create the AudioContext; a closed context cannot be reused.
  const getAudioContext = () => {
    if (!audioCtxRef.current || audioCtxRef.current.state === 'closed') {
      audioCtxRef.current = new AudioContext()
    }
    return audioCtxRef.current
  }

  const speak = useCallback(async (text) => {
    if (!text) return

    // Stop any current playback. FIX: detach its onended handler first —
    // stop() fires onended asynchronously, and the stale handler would
    // otherwise clear sourceRef/isPlaying for the NEW clip started below.
    if (sourceRef.current) {
      sourceRef.current.onended = null
      try { sourceRef.current.stop() } catch {}
      sourceRef.current = null
    }

    setIsPlaying(true)
    try {
      const audioData = await synthesize(text, voice, engine, model)
      const ctx = getAudioContext()
      // Browsers suspend contexts created before a user gesture.
      if (ctx.state === 'suspended') await ctx.resume()

      const audioBuffer = await ctx.decodeAudioData(audioData)
      const source = ctx.createBufferSource()
      source.buffer = audioBuffer
      source.connect(ctx.destination)
      sourceRef.current = source

      source.onended = () => {
        // Only reset state if we are still the active source.
        if (sourceRef.current === source) {
          setIsPlaying(false)
          sourceRef.current = null
        }
      }
      source.start()
    } catch (err) {
      console.error('TTS playback error:', err)
      setIsPlaying(false)
    }
  }, [voice, engine, model])

  // Immediately halt playback (no-op when nothing is playing).
  const stop = useCallback(() => {
    if (sourceRef.current) {
      sourceRef.current.onended = null
      try { sourceRef.current.stop() } catch {}
      sourceRef.current = null
    }
    setIsPlaying(false)
  }, [])

  return { isPlaying, speak, stop }
}
|
||||
91
homeai-dashboard/src/hooks/useVoiceInput.js
Normal file
91
homeai-dashboard/src/hooks/useVoiceInput.js
Normal file
@@ -0,0 +1,91 @@
|
||||
import { useState, useRef, useCallback } from 'react'
|
||||
import { createRecorder } from '../lib/audio'
|
||||
import { transcribe } from '../lib/api'
|
||||
|
||||
/**
 * Microphone input with two STT paths:
 *  - 'bridge' (default): record WAV locally, POST it to the server STT endpoint
 *  - 'webspeech': use the browser's built-in Web Speech API when available
 * stopRecording() resolves with the transcript string, or null on failure.
 */
export function useVoiceInput(sttMode = 'bridge') {
  const [isRecording, setIsRecording] = useState(false)
  const [isTranscribing, setIsTranscribing] = useState(false)
  const recorderRef = useRef(null)   // active WAV recorder (bridge mode)
  const webSpeechRef = useRef(null)  // active recognition session (webspeech mode)

  const startRecording = useCallback(async () => {
    if (isRecording) return

    // Web Speech path only when the mode asks for it AND the API exists;
    // otherwise fall through to the bridge recorder below.
    if (sttMode === 'webspeech' && 'webkitSpeechRecognition' in window) {
      return startWebSpeech()
    }

    try {
      const recorder = createRecorder()
      recorderRef.current = recorder
      await recorder.start()
      setIsRecording(true)
    } catch (err) {
      // Typically a denied getUserMedia permission prompt.
      console.error('Mic access error:', err)
    }
  }, [isRecording, sttMode])

  // Stop capture and return the transcript (or null on error).
  const stopRecording = useCallback(async () => {
    if (!isRecording) return null

    if (sttMode === 'webspeech' && webSpeechRef.current) {
      return stopWebSpeech()
    }

    setIsRecording(false)
    setIsTranscribing(true)

    try {
      const wavBlob = await recorderRef.current.stop()
      recorderRef.current = null
      const text = await transcribe(wavBlob)
      return text
    } catch (err) {
      console.error('Transcription error:', err)
      return null
    } finally {
      setIsTranscribing(false)
    }
  }, [isRecording, sttMode])

  // Start a one-shot, final-results-only Web Speech session.
  function startWebSpeech() {
    return new Promise((resolve) => {
      const SpeechRecognition = window.webkitSpeechRecognition || window.SpeechRecognition
      const recognition = new SpeechRecognition()
      recognition.continuous = false
      recognition.interimResults = false
      recognition.lang = 'en-US'
      // NOTE(review): the `resolve` field is never read anywhere —
      // stopWebSpeech attaches its own handlers. Looks like dead state;
      // confirm and remove.
      webSpeechRef.current = { recognition, resolve: null }
      recognition.start()
      setIsRecording(true)
      resolve()
    })
  }

  // Stop the Web Speech session and resolve with its transcript.
  // Handlers are attached just before stop(); the final result (if any)
  // arrives asynchronously after stop() is requested.
  function stopWebSpeech() {
    return new Promise((resolve) => {
      const { recognition } = webSpeechRef.current
      recognition.onresult = (e) => {
        const text = e.results[0]?.[0]?.transcript || ''
        setIsRecording(false)
        webSpeechRef.current = null
        resolve(text)
      }
      recognition.onerror = () => {
        setIsRecording(false)
        webSpeechRef.current = null
        resolve(null)
      }
      // onend fires after onresult/onerror; the ref null-check prevents a
      // second (null) resolve when one of those already settled the promise.
      recognition.onend = () => {
        setIsRecording(false)
        if (webSpeechRef.current) {
          webSpeechRef.current = null
          resolve(null)
        }
      }
      recognition.stop()
    })
  }

  return { isRecording, isTranscribing, startRecording, stopRecording }
}
|
||||
44
homeai-dashboard/src/index.css
Normal file
44
homeai-dashboard/src/index.css
Normal file
@@ -0,0 +1,44 @@
|
||||
/* Tailwind v4 single-import entrypoint */
@import "tailwindcss";

/* Global dark-theme base: near-black background, light gray text */
body {
  margin: 0;
  background-color: #030712;
  color: #f3f4f6;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

/* Let the app fill the viewport */
#root {
  min-height: 100vh;
}

/* Scrollbar styling for dark theme */
::-webkit-scrollbar {
  width: 8px;
}

::-webkit-scrollbar-track {
  background: #0a0a0f;
}

::-webkit-scrollbar-thumb {
  background: #374151;
  border-radius: 4px;
}

::-webkit-scrollbar-thumb:hover {
  background: #4b5563;
}

/* Translucent indigo text selection to match the accent color */
::selection {
  background: rgba(99, 102, 241, 0.3);
}

/* Hide scrollbar for horizontal scroll containers */
.scrollbar-none {
  -ms-overflow-style: none;
  scrollbar-width: none;
}
.scrollbar-none::-webkit-scrollbar {
  display: none;
}
|
||||
49
homeai-dashboard/src/lib/SchemaValidator.js
Normal file
49
homeai-dashboard/src/lib/SchemaValidator.js
Normal file
@@ -0,0 +1,49 @@
|
||||
import Ajv from 'ajv'
|
||||
import schema from '../../schema/character.schema.json'
|
||||
|
||||
const ajv = new Ajv({ allErrors: true, strict: false })
|
||||
const validate = ajv.compile(schema)
|
||||
|
||||
/**
|
||||
* Migrate a v1 character config to v2 in-place.
|
||||
* Removes live2d/vtube fields, converts gaze_preset to gaze_presets array,
|
||||
* and initialises new persona fields.
|
||||
*/
|
||||
/**
 * Migrate a v1 character config to v2 in-place.
 * Drops the Live2D/VTube integration fields, folds the single gaze_preset
 * string into the gaze_presets array form, and seeds the persona fields
 * added in v2. Returns the same (mutated) object for chaining.
 */
export function migrateV1toV2(config) {
  config.schema_version = 2

  // v2 removed the Live2D / VTube Studio integration fields.
  delete config.live2d_expressions
  delete config.vtube_ws_triggers

  // v1 stored a single preset string; v2 uses an array of {preset, trigger}.
  if ('gaze_preset' in config) {
    const preset = config.gaze_preset
    delete config.gaze_preset
    config.gaze_presets = preset ? [{ preset, trigger: 'self-portrait' }] : []
  }
  if (!config.gaze_presets) {
    config.gaze_presets = []
  }

  // Persona fields introduced in v2 — seed only when absent.
  for (const [key, blank] of [['background', ''], ['dialogue_style', ''], ['appearance', '']]) {
    if (config[key] === undefined) config[key] = blank
  }
  if (config.skills === undefined) config.skills = []

  return config
}
|
||||
|
||||
/**
 * Validates a character config against the JSON schema, auto-migrating
 * legacy (v1 or unversioned) configs to v2 first. Mutates `config` in place.
 *
 * @returns {true} when the config is valid
 * @throws {Error} with Ajv's combined error text when validation fails
 */
export function validateCharacter(config) {
  const version = config.schema_version
  if (version === 1 || version === undefined) {
    migrateV1toV2(config)
  }

  if (!validate(config)) {
    throw new Error(ajv.errorsText(validate.errors))
  }
  return true
}
|
||||
92
homeai-dashboard/src/lib/api.js
Normal file
92
homeai-dashboard/src/lib/api.js
Normal file
@@ -0,0 +1,92 @@
|
||||
const MAX_RETRIES = 3
const RETRY_DELAY_MS = 2000

/**
 * fetch() with linear-backoff retries for transient failures.
 * Retries on thrown network errors and on HTTP 502 (bridge unreachable);
 * any other response — including the final-attempt 502 — is returned.
 *
 * @param {string} url request URL
 * @param {object} options fetch options
 * @param {number} [retries=MAX_RETRIES] total attempts
 * @returns {Promise<Response>}
 */
async function fetchWithRetry(url, options, retries = MAX_RETRIES) {
  // FIX: retries < 1 previously skipped the loop and resolved to undefined.
  const attempts = Math.max(1, retries)
  for (let attempt = 1; attempt <= attempts; attempt++) {
    try {
      const res = await fetch(url, options)
      if (res.status === 502 && attempt < attempts) {
        // Bridge unreachable — back off proportionally to the attempt number.
        await new Promise(r => setTimeout(r, RETRY_DELAY_MS * attempt))
        continue
      }
      return res
    } catch (err) {
      if (attempt >= attempts) throw err
      await new Promise(r => setTimeout(r, RETRY_DELAY_MS * attempt))
    }
  }
}
||||
|
||||
/**
 * Sends a chat message to the agent endpoint (with 502 retry handling).
 *
 * @returns {Promise<{response: string, model: string|null, prompt_style: string|null}>}
 * @throws {Error} with the server's error text (or HTTP status) on failure
 */
export async function sendMessage(text, characterId = null, promptStyle = null, conversationId = null) {
  // Optional fields are only included when set.
  const payload = {
    message: text,
    agent: 'main',
    ...(characterId && { character_id: characterId }),
    ...(promptStyle && { prompt_style: promptStyle }),
    ...(conversationId && { conversation_id: conversationId }),
  }
  const res = await fetchWithRetry('/api/agent/message', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  })
  if (!res.ok) {
    const err = await res.json().catch(() => ({ error: 'Request failed' }))
    throw new Error(err.error || `HTTP ${res.status}`)
  }
  const data = await res.json()
  return { response: data.response, model: data.model || null, prompt_style: data.prompt_style || null }
}
|
||||
|
||||
/** Fetches all available prompt styles; returns [] on any HTTP error. */
export async function getPromptStyles() {
  const res = await fetch('/api/prompt-styles')
  return res.ok ? await res.json() : []
}
|
||||
|
||||
/** Fetches the active prompt style; falls back to 'standard' on HTTP error. */
export async function getActiveStyle() {
  const res = await fetch('/api/prompt-style')
  return res.ok ? await res.json() : { style: 'standard' }
}
|
||||
|
||||
/**
 * Persists the active prompt style on the server.
 * @throws {Error} when the server rejects the update
 */
export async function setActiveStyle(style) {
  const payload = JSON.stringify({ style })
  const res = await fetch('/api/prompt-style', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: payload,
  })
  if (!res.ok) {
    throw new Error('Failed to set prompt style')
  }
  return await res.json()
}
|
||||
|
||||
/**
 * Requests TTS audio for `text` and returns the raw audio bytes.
 * The optional `model` field is only sent when provided.
 *
 * @returns {Promise<ArrayBuffer>}
 * @throws {Error} when synthesis fails
 */
export async function synthesize(text, voice, engine = 'kokoro', model = null) {
  const payload = { text, voice, engine, ...(model && { model }) }
  const res = await fetch('/api/tts', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  })
  if (!res.ok) {
    throw new Error('TTS failed')
  }
  return await res.arrayBuffer()
}
|
||||
|
||||
/**
 * Sends recorded WAV audio to the bridge STT endpoint.
 *
 * @param {Blob} wavBlob audio/wav recording
 * @returns {Promise<string>} the transcript text
 * @throws {Error} when transcription fails
 */
export async function transcribe(wavBlob) {
  const res = await fetch('/api/stt', {
    method: 'POST',
    headers: { 'Content-Type': 'audio/wav' },
    body: wavBlob,
  })
  if (!res.ok) {
    throw new Error('STT failed')
  }
  return (await res.json()).text
}
|
||||
|
||||
/**
 * Probes the bridge via the health proxy endpoint (5s timeout).
 * @returns {Promise<boolean>} true iff the bridge reports 'online'
 */
export async function healthCheck() {
  try {
    const target = encodeURIComponent('http://localhost:8081/')
    const res = await fetch(`/api/health?url=${target}`, { signal: AbortSignal.timeout(5000) })
    const body = await res.json()
    return body.status === 'online'
  } catch {
    // Network error, timeout, or bad JSON all count as offline.
    return false
  }
}
|
||||
92
homeai-dashboard/src/lib/audio.js
Normal file
92
homeai-dashboard/src/lib/audio.js
Normal file
@@ -0,0 +1,92 @@
|
||||
// Sample rate the STT endpoint expects (16 kHz mono PCM).
const TARGET_RATE = 16000

/**
 * Browser microphone recorder producing a 16 kHz mono WAV Blob.
 * Usage: const rec = createRecorder(); await rec.start(); ... const wav = await rec.stop()
 */
export function createRecorder() {
  let audioCtx
  let source
  let processor
  let stream
  let samples = []

  // Request mic access and start accumulating raw Float32 chunks.
  async function start() {
    samples = []
    stream = await navigator.mediaDevices.getUserMedia({
      audio: { channelCount: 1, sampleRate: TARGET_RATE },
    })
    // NOTE(review): browsers may ignore the requested sampleRate — the
    // resample() fallback in stop() appears to exist for that case; confirm.
    audioCtx = new AudioContext({ sampleRate: TARGET_RATE })
    source = audioCtx.createMediaStreamSource(stream)

    // ScriptProcessorNode is deprecated (AudioWorklet is the successor) but
    // still widely supported; each callback delivers a 4096-sample chunk.
    processor = audioCtx.createScriptProcessor(4096, 1, 1)
    processor.onaudioprocess = (e) => {
      const input = e.inputBuffer.getChannelData(0)
      // Copy the chunk — the underlying buffer is reused between callbacks.
      samples.push(new Float32Array(input))
    }
    source.connect(processor)
    processor.connect(audioCtx.destination)
  }

  // Stop capture, release the mic, and return the recording as a WAV Blob.
  async function stop() {
    processor.disconnect()
    source.disconnect()
    stream.getTracks().forEach((t) => t.stop())
    await audioCtx.close()

    // Concatenate all captured chunks into one contiguous buffer.
    const totalLength = samples.reduce((acc, s) => acc + s.length, 0)
    const merged = new Float32Array(totalLength)
    let offset = 0
    for (const chunk of samples) {
      merged.set(chunk, offset)
      offset += chunk.length
    }

    // Resample only if the context ran at a different rate than requested.
    const resampled = audioCtx.sampleRate !== TARGET_RATE
      ? resample(merged, audioCtx.sampleRate, TARGET_RATE)
      : merged

    return encodeWav(resampled, TARGET_RATE)
  }

  return { start, stop }
}
|
||||
|
||||
/**
 * Naive nearest-neighbour resampler (no filtering) — adequate for speech
 * being sent to STT.
 *
 * @param {Float32Array} samples input samples at fromRate
 * @param {number} fromRate source sample rate (Hz)
 * @param {number} toRate target sample rate (Hz)
 * @returns {Float32Array} resampled audio of round(length / (fromRate/toRate))
 */
function resample(samples, fromRate, toRate) {
  const ratio = fromRate / toRate
  const newLength = Math.round(samples.length / ratio)
  const result = new Float32Array(newLength)
  // FIX: Math.round(i * ratio) could land one past the end of `samples`
  // (e.g. length 10 with ratio 0.95 → index 10), writing NaN into the last
  // output slot. Clamp the source index to the final sample.
  const last = samples.length - 1
  for (let i = 0; i < newLength; i++) {
    result[i] = samples[Math.min(last, Math.round(i * ratio))]
  }
  return result
}
|
||||
|
||||
/**
 * Encode mono float samples as a 16-bit PCM WAV Blob.
 *
 * @param {Float32Array} samples normalized samples in [-1, 1]
 * @param {number} sampleRate e.g. 16000
 * @returns {Blob} audio/wav blob with a standard 44-byte RIFF header
 */
function encodeWav(samples, sampleRate) {
  const n = samples.length
  const bytesPerSample = 2
  const dataSize = n * bytesPerSample
  const view = new DataView(new ArrayBuffer(44 + dataSize))

  // RIFF container header
  writeString(view, 0, 'RIFF')
  view.setUint32(4, 36 + dataSize, true)
  writeString(view, 8, 'WAVE')

  // fmt chunk: PCM (format 1), mono, 16-bit
  writeString(view, 12, 'fmt ')
  view.setUint32(16, 16, true)
  view.setUint16(20, 1, true)
  view.setUint16(22, 1, true)
  view.setUint32(24, sampleRate, true)
  view.setUint32(28, sampleRate * bytesPerSample, true) // byte rate
  view.setUint16(32, bytesPerSample, true)              // block align
  view.setUint16(34, 16, true)                          // bits per sample

  // data chunk: clamp each float and convert to signed 16-bit little-endian
  writeString(view, 36, 'data')
  view.setUint32(40, dataSize, true)
  samples.forEach((sample, i) => {
    const s = Math.max(-1, Math.min(1, sample))
    view.setInt16(44 + i * 2, s < 0 ? s * 0x8000 : s * 0x7fff, true)
  })

  return new Blob([view.buffer], { type: 'audio/wav' })
}

/** Write an ASCII string byte-by-byte at `offset` (used for RIFF chunk tags). */
function writeString(view, offset, str) {
  let pos = offset
  for (const ch of str) {
    view.setUint8(pos, ch.charCodeAt(0))
    pos += 1
  }
}
|
||||
45
homeai-dashboard/src/lib/constants.js
Normal file
45
homeai-dashboard/src/lib/constants.js
Normal file
@@ -0,0 +1,45 @@
|
||||
// Voice used when no character-specific voice is configured.
export const DEFAULT_VOICE = 'af_heart'

// Voice catalogue shown in the settings dropdown.
// Label convention used here: (F/M, US/UK) = gender and accent.
export const VOICES = [
  { id: 'af_heart', label: 'Heart (F, US)' },
  { id: 'af_alloy', label: 'Alloy (F, US)' },
  { id: 'af_aoede', label: 'Aoede (F, US)' },
  { id: 'af_bella', label: 'Bella (F, US)' },
  { id: 'af_jessica', label: 'Jessica (F, US)' },
  { id: 'af_kore', label: 'Kore (F, US)' },
  { id: 'af_nicole', label: 'Nicole (F, US)' },
  { id: 'af_nova', label: 'Nova (F, US)' },
  { id: 'af_river', label: 'River (F, US)' },
  { id: 'af_sarah', label: 'Sarah (F, US)' },
  { id: 'af_sky', label: 'Sky (F, US)' },
  { id: 'am_adam', label: 'Adam (M, US)' },
  { id: 'am_echo', label: 'Echo (M, US)' },
  { id: 'am_eric', label: 'Eric (M, US)' },
  { id: 'am_fenrir', label: 'Fenrir (M, US)' },
  { id: 'am_liam', label: 'Liam (M, US)' },
  { id: 'am_michael', label: 'Michael (M, US)' },
  { id: 'am_onyx', label: 'Onyx (M, US)' },
  { id: 'am_puck', label: 'Puck (M, US)' },
  { id: 'bf_alice', label: 'Alice (F, UK)' },
  { id: 'bf_emma', label: 'Emma (F, UK)' },
  { id: 'bf_isabella', label: 'Isabella (F, UK)' },
  { id: 'bf_lily', label: 'Lily (F, UK)' },
  { id: 'bm_daniel', label: 'Daniel (M, UK)' },
  { id: 'bm_fable', label: 'Fable (M, UK)' },
  { id: 'bm_george', label: 'George (M, UK)' },
  { id: 'bm_lewis', label: 'Lewis (M, UK)' },
]

// Available TTS backends (ids are sent as the `engine` field to /api/tts).
export const TTS_ENGINES = [
  { id: 'kokoro', label: 'Kokoro (local)' },
  { id: 'chatterbox', label: 'Chatterbox (voice clone)' },
  { id: 'qwen3', label: 'Qwen3 TTS' },
  { id: 'elevenlabs', label: 'ElevenLabs (cloud)' },
]

// Initial settings used before the user has saved anything.
export const DEFAULT_SETTINGS = {
  ttsEngine: 'kokoro',   // one of TTS_ENGINES ids
  voice: DEFAULT_VOICE,
  autoTts: true,         // presumably auto-plays TTS for replies — confirm in consumer
  sttMode: 'bridge',     // 'bridge' (server STT) or 'webspeech' (browser API)
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user