Compare commits

..

2 Commits

Author SHA1 Message Date
Aodhan Collins
858d7be33c Merge branch 'setup': self-deploying setup scripts for all sub-projects
Mac Mini M4 Pro is now the primary development machine.
Scripts handle both macOS (launchd, Metal) and Linux (systemd, CUDA/ROCm).

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-04 21:11:13 +00:00
Aodhan Collins
7978eaea14 Add self-deploying setup scripts for all sub-projects (P1-P8)
- Root setup.sh orchestrator with per-phase dispatch (./setup.sh p1..p8 | all | status)
- Makefile convenience targets (make infra, make llm, make status, etc.)
- scripts/common.sh: shared bash library for OS detection, Docker helpers,
  service management (launchd/systemd), package install, env management
- .env.example + .gitignore: shared config template and secret exclusions

P1 (homeai-infra): full implementation
- docker-compose.yml: Uptime Kuma, code-server, n8n
- Note: Home Assistant, Portainer, Gitea are pre-existing instances
- setup.sh: Docker install, homeai network, container health checks

P2 (homeai-llm): full implementation
- Ollama native install with CUDA/ROCm/Metal auto-detection
- launchd plist (macOS) + systemd service (Linux) for auto-start
- scripts/pull-models.sh: idempotent model puller from manifest
- scripts/benchmark.sh: tokens/sec measurement per model
- Open WebUI on port 3030 (avoids Gitea :3000 conflict)

P3-P8: working stubs with prerequisite checks and TODO sections

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-04 21:10:53 +00:00
23 changed files with 2525 additions and 0 deletions

46
.env.example Normal file
View File

@@ -0,0 +1,46 @@
# HomeAI — Shared Configuration Template
# Copy to .env and fill in your values.
# .env is gitignored — never commit it.
# ─── Data & Paths ──────────────────────────────────────────────────────────────
DATA_DIR=${HOME}/homeai-data
REPO_DIR=${HOME}/Projects/HomeAI
# ─── Network ───────────────────────────────────────────────────────────────────
# Set to your machine's local IP (not 127.0.0.1)
HOST_IP=192.168.1.100
# ─── P1: Infrastructure ────────────────────────────────────────────────────────
# Pre-existing instances — set these to your actual URLs
HA_URL=http://localhost:8123
HA_TOKEN= # Generated in Home Assistant UI → Profile → Security
PORTAINER_URL=https://localhost:9443
GITEA_URL=http://localhost:3000
# Managed by homeai-infra docker-compose
UPTIME_KUMA_URL=http://localhost:3001
CODE_SERVER_URL=http://localhost:8090
CODE_SERVER_PASS= # Set in homeai-infra/docker/.env
N8N_URL=http://localhost:5678
N8N_USER=admin
N8N_PASS= # Set in homeai-infra/docker/.env
# ─── P2: LLM ───────────────────────────────────────────────────────────────────
OLLAMA_URL=http://localhost:11434
OLLAMA_API_URL=http://localhost:11434/v1
OPEN_WEBUI_URL=http://localhost:3030
OLLAMA_PRIMARY_MODEL=llama3.3:70b
OLLAMA_FAST_MODEL=qwen2.5:7b
# ─── P3: Voice ─────────────────────────────────────────────────────────────────
WYOMING_STT_URL=tcp://localhost:10300
WYOMING_TTS_URL=tcp://localhost:10301
# ─── P4: Agent ─────────────────────────────────────────────────────────────────
OPENCLAW_URL=http://localhost:8080
# ─── P7: Visual ────────────────────────────────────────────────────────────────
VTUBE_WS_URL=ws://localhost:8001
# ─── P8: Images ────────────────────────────────────────────────────────────────
COMFYUI_URL=http://localhost:8188

47
.gitignore vendored Normal file
View File

@@ -0,0 +1,47 @@
# Secrets — never commit
.env
.env.secrets
.env.services
**/secrets.yaml
**/secrets.yml
# Docker volumes / data
homeai-data/
**/data/
# Python
__pycache__/
*.pyc
*.pyo
.venv/
venv/
*.egg-info/
# Node
node_modules/
dist/
.cache/
# macOS
.DS_Store
*.localized
# Editor
.vscode/
*.swp
*.swo
# Ollama model cache (large binaries)
*.gguf
*.safetensors
*.bin
*.ckpt
*.pt
# ESPHome secrets
homeai-esp32/esphome/secrets.yaml
# Generated
homeai-llm/benchmark-results.md
homeai-character/characters/*.json
!homeai-character/characters/.gitkeep

86
Makefile Normal file
View File

@@ -0,0 +1,86 @@
# HomeAI Makefile — convenience wrapper around setup.sh
# Run `make help` to see all targets.
#
# NOTE: every recipe line below is indented with a hard TAB — make rejects
# spaces there. (The tabs were lost in a paste of this file; restored here.)
SHELL := /bin/bash
REPO_DIR := $(shell pwd)

.PHONY: help all status infra llm voice agent character esp32 visual images \
	up down restart logs ps clean

help:
	@echo ""
	@echo " HomeAI — Make targets"
	@echo ""
	@echo " Setup:"
	@echo " make all Run full setup (all phases)"
	@echo " make infra P1: Docker infra stack"
	@echo " make llm P2: Ollama + Open WebUI"
	@echo " make voice P3: STT / TTS / Wyoming"
	@echo " make agent P4: OpenClaw + skills"
	@echo " make character P5: Character Manager"
	@echo " make esp32 P6: ESPHome firmware"
	@echo " make visual P7: VTube Studio bridge"
	@echo " make images P8: ComfyUI"
	@echo ""
	@echo " Operations:"
	@echo " make status Show service health"
	@echo " make up Start all Docker services"
	@echo " make down Stop all Docker services"
	@echo " make restart Restart all Docker services"
	@echo " make logs Tail logs (all services)"
	@echo " make ps Show running containers"
	@echo " make clean Remove stopped containers"
	@echo ""

# ─── Setup phases — each target delegates to the setup.sh orchestrator ────────
all:
	bash setup.sh all
status:
	bash setup.sh status
infra:
	bash setup.sh p1
llm:
	bash setup.sh p2
voice:
	bash setup.sh p3
agent:
	bash setup.sh p4
character:
	bash setup.sh p5
esp32:
	bash setup.sh p6
visual:
	bash setup.sh p7
images:
	bash setup.sh p8

# ─── Docker operations ─────────────────────────────────────────────────────────
# Each recipe line runs in its own shell, so `cd` never leaks between lines.
up:
	@cd homeai-infra && docker compose -f docker/docker-compose.yml up -d
	@cd homeai-llm && docker compose -f docker/docker-compose.yml up -d
down:
	@cd homeai-infra && docker compose -f docker/docker-compose.yml down || true
	@cd homeai-llm && docker compose -f docker/docker-compose.yml down || true
restart:
	@cd homeai-infra && docker compose -f docker/docker-compose.yml restart
	@cd homeai-llm && docker compose -f docker/docker-compose.yml restart
# NOTE: `logs -f` blocks, so only the infra stack is tailed here — a second
# compose project cannot be chained after it within the same recipe.
logs:
	@cd homeai-infra && docker compose -f docker/docker-compose.yml logs -f --tail=50
# Falls back to listing everything when no container name matches "homeai".
ps:
	@docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep homeai || \
	docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
# WARNING: prunes ALL stopped containers on this host, not just homeai ones.
clean:
	@docker container prune -f

65
homeai-agent/setup.sh Normal file
View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
# homeai-agent/setup.sh — P4: OpenClaw agent + skills + mem0
#
# Components:
# - OpenClaw — AI agent runtime (port 8080)
# - skills/ — home_assistant, memory, weather, timer, music stubs
# - mem0 — long-term memory (Chroma backend)
# - n8n workflows — morning briefing, notification router, memory backup
#
# Prerequisites:
# - P1 (homeai-infra) — Home Assistant running, HA_TOKEN set
# - P2 (homeai-llm) — Ollama running with llama3.3:70b + nomic-embed-text
# - P3 (homeai-voice) — Wyoming TTS running (for voice output)
# - P5 (homeai-character) — aria.json character config exists
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Shared helpers: log_*, detect_platform, load_env_services, etc.
# shellcheck source=../scripts/common.sh
source "${REPO_DIR}/scripts/common.sh"

log_section "P4: Agent (OpenClaw + skills + mem0)"
detect_platform

# ─── Prerequisite check ────────────────────────────────────────────────────────
log_info "Checking prerequisites..."
# Entries are "url|label". BUGFIX: the previous code split on ':' with
# "${service%%:*}", which truncates at the FIRST colon of the URL and left
# url="http" — the check could never reach the real endpoint. URLs contain
# colons, so use '|' as the delimiter instead.
for service in "http://localhost:11434|Ollama(P2)" "http://localhost:8123|HomeAssistant(P1)"; do
  url="${service%%|*}"
  name="${service##*|}"
  # --max-time keeps a hung (but listening) service from stalling setup.
  if ! curl -sf --max-time 5 "$url" -o /dev/null 2>/dev/null; then
    log_warn "$name not reachable at $url"
  fi
done

load_env_services
# HA_TOKEN may legitimately be absent on first run — warn, don't abort.
if [[ -z "${HA_TOKEN:-}" ]]; then
  log_warn "HA_TOKEN not set in ~/.env.services — needed for home_assistant skill"
fi

# ─── TODO: Implementation ──────────────────────────────────────────────────────
# Quoted heredoc delimiter: nothing inside is expanded; printed verbatim.
cat <<'EOF'
┌─────────────────────────────────────────────────────────────────┐
│ P4: homeai-agent — NOT YET IMPLEMENTED │
│ │
│ OPEN QUESTION: Which OpenClaw version/fork to use? │
│ Decide before implementing. See homeai-agent/PLAN.md. │
│ │
│ Implementation steps: │
│ 1. Install OpenClaw (pip install or git clone) │
│ 2. Create ~/.openclaw/config.yaml from config/config.yaml.example │
│ 3. Create skills: home_assistant, memory, weather, timer, music│
│ 4. Install mem0 + Chroma backend │
│ 5. Create systemd/launchd service for OpenClaw (port 8080) │
│ 6. Import n8n workflows from workflows/ │
│ 7. Smoke test: POST /chat "turn on living room lights" │
│ │
│ Interface contracts: │
│ OPENCLAW_URL=http://localhost:8080 │
└─────────────────────────────────────────────────────────────────┘
EOF

log_info "P4 is not yet implemented. See homeai-agent/PLAN.md for details."
exit 0

View File

@@ -0,0 +1,686 @@
import { useState, useEffect, useCallback } from "react";
// localStorage key under which the whole profile array is persisted.
const STORAGE_KEY = "ai-character-profiles";
// Ollama model tags offered in the "Models" tab LLM dropdown.
const DEFAULT_MODELS = [
"llama3.3:70b", "qwen2.5:72b", "mistral-large", "llama3.1:8b",
"qwen2.5:14b", "gemma3:27b", "deepseek-r1:14b", "phi4:14b"
];
// Engine choices for speech synthesis, speech recognition, and image generation.
const TTS_MODELS = ["Kokoro", "Chatterbox", "F5-TTS", "Qwen3-TTS", "Piper"];
const STT_MODELS = ["Whisper Large-v3", "Whisper Medium", "Whisper Small", "Whisper Turbo"];
const IMAGE_MODELS = ["SDXL", "Flux.1-dev", "Flux.1-schnell", "SD 1.5", "Pony Diffusion"];
// Pill options for the Personality tab (TagSelector caps the pick count).
const PERSONALITY_TRAITS = [
"Warm", "Witty", "Calm", "Energetic", "Sarcastic", "Nurturing",
"Curious", "Playful", "Formal", "Casual", "Empathetic", "Direct",
"Creative", "Analytical", "Protective", "Mischievous"
];
const SPEAKING_STYLES = [
"Conversational", "Poetic", "Concise", "Verbose", "Academic",
"Informal", "Dramatic", "Deadpan", "Enthusiastic", "Measured"
];
// Template for a freshly created character. Deep-cloned before use
// (via JSON round-trip in createCharacter) so the template is never mutated.
const EMPTY_CHARACTER = {
id: null,
name: "",
tagline: "",
avatar: "",
accentColor: "#7c6fff",
// Free-text personality fields edited on the Identity/Personality tabs.
personality: {
traits: [],
speakingStyle: "",
coreValues: "",
quirks: "",
backstory: "",
motivation: "",
},
// LLM-facing prompt text; customPrompts is a list of { trigger, response }.
prompts: {
systemPrompt: "",
wakeWordResponse: "",
fallbackResponse: "",
errorResponse: "",
customPrompts: [],
},
// Model selections plus numeric tuning knobs (sliders).
models: {
llm: "",
tts: "",
stt: "",
imageGen: "",
voiceCloneRef: "",
ttsSpeed: 1.0,
temperature: 0.7,
},
// VTube Studio / Live2D expression mappings (names, not files).
liveRepresentation: {
live2dModel: "",
idleExpression: "",
speakingExpression: "",
thinkingExpression: "",
happyExpression: "",
vtsTriggers: "",
},
userNotes: "",
createdAt: null,
updatedAt: null,
};
// Tab order in the editor pane, and the glyph shown beside each tab label.
const TABS = ["Identity", "Personality", "Prompts", "Models", "Live2D", "Notes"];
const TAB_ICONS = {
Identity: "◈",
Personality: "◉",
Prompts: "◎",
Models: "⬡",
Live2D: "◇",
Notes: "▣",
};
// Build a practically-unique id: the current millisecond timestamp in
// base-36, followed by a base-36 random fraction with its "0." stripped.
// Good enough for a local, single-user profile store.
function generateId() {
  const timePart = Date.now().toString(36);
  const randomPart = Math.random().toString(36).slice(2);
  return timePart + randomPart;
}
// ColorPicker — row of preset accent-colour swatches plus a free-form
// <input type="color"> fallback. `value` is the current hex string;
// `onChange` receives the newly picked hex.
function ColorPicker({ value, onChange }) {
// Curated presets; the selected swatch gets a white ring + coloured glow.
const presets = [
"#7c6fff","#ff6b9d","#00d4aa","#ff9f43","#48dbfb",
"#ff6348","#a29bfe","#fd79a8","#55efc4","#fdcb6e"
];
return (
<div style={{ display: "flex", gap: 8, alignItems: "center", flexWrap: "wrap" }}>
{presets.map(c => (
<button key={c} onClick={() => onChange(c)} style={{
width: 28, height: 28, borderRadius: "50%", background: c, border: value === c ? "3px solid #fff" : "3px solid transparent",
cursor: "pointer", outline: "none", boxShadow: value === c ? `0 0 0 2px ${c}` : "none", transition: "all 0.2s"
}} />
))}
{/* Native colour picker for anything not in the preset palette. */}
<input type="color" value={value} onChange={e => onChange(e.target.value)}
style={{ width: 28, height: 28, borderRadius: "50%", border: "none", cursor: "pointer", background: "none", padding: 0 }} />
</div>
);
}
function TagSelector({ options, selected, onChange, max = 6 }) {
return (
<div style={{ display: "flex", flexWrap: "wrap", gap: 8 }}>
{options.map(opt => {
const active = selected.includes(opt);
return (
<button key={opt} onClick={() => {
if (active) onChange(selected.filter(s => s !== opt));
else if (selected.length < max) onChange([...selected, opt]);
}} style={{
padding: "5px 14px", borderRadius: 20, fontSize: 13, fontFamily: "inherit",
background: active ? "var(--accent)" : "rgba(255,255,255,0.06)",
color: active ? "#fff" : "rgba(255,255,255,0.55)",
border: active ? "1px solid var(--accent)" : "1px solid rgba(255,255,255,0.1)",
cursor: "pointer", transition: "all 0.18s", fontWeight: active ? 600 : 400,
}}>
{opt}
</button>
);
})}
</div>
);
}
function Field({ label, hint, children }) {
return (
<div style={{ marginBottom: 22 }}>
<label style={{ display: "block", fontSize: 12, fontWeight: 700, letterSpacing: "0.08em", textTransform: "uppercase", color: "rgba(255,255,255,0.45)", marginBottom: 6 }}>
{label}
</label>
{hint && <p style={{ fontSize: 12, color: "rgba(255,255,255,0.3)", marginBottom: 8, marginTop: -2 }}>{hint}</p>}
{children}
</div>
);
}
function Input({ value, onChange, placeholder, type = "text" }) {
return (
<input type={type} value={value} onChange={e => onChange(e.target.value)} placeholder={placeholder}
style={{
width: "100%", background: "rgba(255,255,255,0.05)", border: "1px solid rgba(255,255,255,0.1)",
borderRadius: 8, padding: "10px 14px", color: "#fff", fontSize: 14, fontFamily: "inherit",
outline: "none", boxSizing: "border-box", transition: "border-color 0.2s",
}}
onFocus={e => e.target.style.borderColor = "var(--accent)"}
onBlur={e => e.target.style.borderColor = "rgba(255,255,255,0.1)"}
/>
);
}
function Textarea({ value, onChange, placeholder, rows = 4 }) {
return (
<textarea value={value} onChange={e => onChange(e.target.value)} placeholder={placeholder} rows={rows}
style={{
width: "100%", background: "rgba(255,255,255,0.05)", border: "1px solid rgba(255,255,255,0.1)",
borderRadius: 8, padding: "10px 14px", color: "#fff", fontSize: 14, fontFamily: "inherit",
outline: "none", boxSizing: "border-box", resize: "vertical", lineHeight: 1.6,
transition: "border-color 0.2s",
}}
onFocus={e => e.target.style.borderColor = "var(--accent)"}
onBlur={e => e.target.style.borderColor = "rgba(255,255,255,0.1)"}
/>
);
}
// Select — themed native <select>. Shows a dimmed placeholder colour until a
// value is chosen, and replaces the platform arrow with an inline SVG chevron
// (data-URI background) so the control matches the dark theme.
function Select({ value, onChange, options, placeholder }) {
return (
<select value={value} onChange={e => onChange(e.target.value)}
style={{
width: "100%", background: "rgba(20,20,35,0.95)", border: "1px solid rgba(255,255,255,0.1)",
borderRadius: 8, padding: "10px 14px", color: value ? "#fff" : "rgba(255,255,255,0.35)",
fontSize: 14, fontFamily: "inherit", outline: "none", cursor: "pointer",
appearance: "none", backgroundImage: `url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='8' viewBox='0 0 12 8'%3E%3Cpath d='M1 1l5 5 5-5' stroke='rgba(255,255,255,0.3)' stroke-width='2' fill='none'/%3E%3C/svg%3E")`,
backgroundRepeat: "no-repeat", backgroundPosition: "right 14px center",
}}>
{/* Empty-string option doubles as the placeholder row. */}
<option value="">{placeholder || "Select..."}</option>
{options.map(o => <option key={o} value={o}>{o}</option>)}
</select>
);
}
function Slider({ value, onChange, min, max, step, label }) {
return (
<div style={{ display: "flex", alignItems: "center", gap: 14 }}>
<input type="range" min={min} max={max} step={step} value={value}
onChange={e => onChange(parseFloat(e.target.value))}
style={{ flex: 1, accentColor: "var(--accent)", cursor: "pointer" }} />
<span style={{ fontSize: 14, color: "rgba(255,255,255,0.7)", minWidth: 38, textAlign: "right", fontVariantNumeric: "tabular-nums" }}>
{value.toFixed(1)}
</span>
</div>
);
}
function CustomPromptsEditor({ prompts, onChange }) {
const add = () => onChange([...prompts, { trigger: "", response: "" }]);
const remove = i => onChange(prompts.filter((_, idx) => idx !== i));
const update = (i, field, val) => {
const next = [...prompts];
next[i] = { ...next[i], [field]: val };
onChange(next);
};
return (
<div>
{prompts.map((p, i) => (
<div key={i} style={{ background: "rgba(255,255,255,0.04)", borderRadius: 10, padding: 14, marginBottom: 10, position: "relative" }}>
<button onClick={() => remove(i)} style={{
position: "absolute", top: 10, right: 10, background: "rgba(255,80,80,0.15)",
border: "none", color: "#ff6b6b", borderRadius: 6, cursor: "pointer", padding: "2px 8px", fontSize: 12
}}></button>
<div style={{ marginBottom: 8 }}>
<Input value={p.trigger} onChange={v => update(i, "trigger", v)} placeholder="Trigger keyword or context..." />
</div>
<Textarea value={p.response} onChange={v => update(i, "response", v)} placeholder="Custom response or behaviour..." rows={2} />
</div>
))}
<button onClick={add} style={{
width: "100%", padding: "10px", background: "rgba(255,255,255,0.04)",
border: "1px dashed rgba(255,255,255,0.15)", borderRadius: 8, color: "rgba(255,255,255,0.45)",
cursor: "pointer", fontSize: 13, fontFamily: "inherit", transition: "all 0.2s"
}}
onMouseEnter={e => e.target.style.borderColor = "var(--accent)"}
onMouseLeave={e => e.target.style.borderColor = "rgba(255,255,255,0.15)"}
>+ Add Custom Prompt</button>
</div>
);
}
// CharacterCard — sidebar list entry for one character. Shows an initials
// avatar tinted with the character's accent colour, name + tagline, a delete
// button, and up to three personality-trait chips (plus a "+N" overflow count).
// Clicking the card selects it; the delete button stops propagation so it
// doesn't also select.
function CharacterCard({ character, active, onSelect, onDelete }) {
// First two letters of the name, or "??" for unnamed characters.
const initials = character.name ? character.name.slice(0, 2).toUpperCase() : "??";
return (
<div onClick={() => onSelect(character.id)} style={{
padding: "14px 16px", borderRadius: 12, cursor: "pointer", marginBottom: 8,
background: active ? `linear-gradient(135deg, ${character.accentColor}22, ${character.accentColor}11)` : "rgba(255,255,255,0.04)",
border: active ? `1px solid ${character.accentColor}66` : "1px solid rgba(255,255,255,0.07)",
transition: "all 0.2s", position: "relative",
}}>
<div style={{ display: "flex", alignItems: "center", gap: 12 }}>
<div style={{
width: 40, height: 40, borderRadius: "50%", background: `linear-gradient(135deg, ${character.accentColor}, ${character.accentColor}88)`,
display: "flex", alignItems: "center", justifyContent: "center", fontSize: 14, fontWeight: 800,
color: "#fff", flexShrink: 0, boxShadow: `0 4px 12px ${character.accentColor}44`
}}>{initials}</div>
<div style={{ flex: 1, minWidth: 0 }}>
<div style={{ fontWeight: 700, fontSize: 15, color: "#fff", whiteSpace: "nowrap", overflow: "hidden", textOverflow: "ellipsis" }}>
{character.name || "Unnamed"}
</div>
{character.tagline && (
<div style={{ fontSize: 12, color: "rgba(255,255,255,0.4)", whiteSpace: "nowrap", overflow: "hidden", textOverflow: "ellipsis" }}>
{character.tagline}
</div>
)}
</div>
{/* Delete — stopPropagation keeps the click from also selecting the card. */}
<button onClick={e => { e.stopPropagation(); onDelete(character.id); }} style={{
background: "none", border: "none", color: "rgba(255,255,255,0.2)", cursor: "pointer",
fontSize: 16, padding: "2px 6px", borderRadius: 4, transition: "color 0.15s", flexShrink: 0
}}
onMouseEnter={e => e.target.style.color = "#ff6b6b"}
onMouseLeave={e => e.target.style.color = "rgba(255,255,255,0.2)"}
>×</button>
</div>
{/* Trait chips: first three, then a "+N" counter for the rest. */}
{character.personality.traits.length > 0 && (
<div style={{ display: "flex", gap: 4, flexWrap: "wrap", marginTop: 10 }}>
{character.personality.traits.slice(0, 3).map(t => (
<span key={t} style={{
fontSize: 10, padding: "2px 8px", borderRadius: 10, fontWeight: 600, letterSpacing: "0.04em",
background: `${character.accentColor}22`, color: character.accentColor, border: `1px solid ${character.accentColor}44`
}}>{t}</span>
))}
{character.personality.traits.length > 3 && (
<span style={{ fontSize: 10, color: "rgba(255,255,255,0.3)", padding: "2px 4px" }}>+{character.personality.traits.length - 3}</span>
)}
</div>
)}
</div>
);
}
function ExportModal({ character, onClose }) {
const json = JSON.stringify(character, null, 2);
const [copied, setCopied] = useState(false);
const copy = () => {
navigator.clipboard.writeText(json);
setCopied(true);
setTimeout(() => setCopied(false), 2000);
};
return (
<div style={{
position: "fixed", inset: 0, background: "rgba(0,0,0,0.7)", zIndex: 100,
display: "flex", alignItems: "center", justifyContent: "center", padding: 24
}} onClick={onClose}>
<div onClick={e => e.stopPropagation()} style={{
background: "#13131f", border: "1px solid rgba(255,255,255,0.1)", borderRadius: 16,
padding: 28, width: "100%", maxWidth: 640, maxHeight: "80vh", display: "flex", flexDirection: "column"
}}>
<div style={{ display: "flex", justifyContent: "space-between", alignItems: "center", marginBottom: 16 }}>
<h3 style={{ margin: 0, fontSize: 18, color: "#fff" }}>Export Character</h3>
<button onClick={onClose} style={{ background: "none", border: "none", color: "rgba(255,255,255,0.4)", fontSize: 22, cursor: "pointer" }}>×</button>
</div>
<pre style={{
flex: 1, overflow: "auto", background: "rgba(0,0,0,0.3)", borderRadius: 10,
padding: 16, fontSize: 12, color: "rgba(255,255,255,0.7)", lineHeight: 1.6, margin: 0
}}>{json}</pre>
<button onClick={copy} style={{
marginTop: 16, padding: "12px", background: "var(--accent)", border: "none",
borderRadius: 10, color: "#fff", fontWeight: 700, fontSize: 14, cursor: "pointer",
fontFamily: "inherit", transition: "opacity 0.2s"
}}>{copied ? "✓ Copied!" : "Copy to Clipboard"}</button>
</div>
</div>
);
}
export default function CharacterManager() {
const [characters, setCharacters] = useState([]);
const [activeId, setActiveId] = useState(null);
const [activeTab, setActiveTab] = useState("Identity");
const [exportModal, setExportModal] = useState(false);
const [saved, setSaved] = useState(false);
// Load from storage
useEffect(() => {
try {
const stored = localStorage.getItem(STORAGE_KEY);
if (stored) {
const parsed = JSON.parse(stored);
setCharacters(parsed);
if (parsed.length > 0) setActiveId(parsed[0].id);
}
} catch (e) {}
}, []);
// Save to storage
const saveToStorage = useCallback((chars) => {
try {
localStorage.setItem(STORAGE_KEY, JSON.stringify(chars));
} catch (e) {}
}, []);
const activeCharacter = characters.find(c => c.id === activeId) || null;
const updateCharacter = (updater) => {
setCharacters(prev => {
const next = prev.map(c => c.id === activeId ? { ...updater(c), updatedAt: new Date().toISOString() } : c);
saveToStorage(next);
return next;
});
setSaved(true);
setTimeout(() => setSaved(false), 1500);
};
const createCharacter = () => {
const newChar = {
...JSON.parse(JSON.stringify(EMPTY_CHARACTER)),
id: generateId(),
accentColor: ["#7c6fff","#ff6b9d","#00d4aa","#ff9f43","#48dbfb"][Math.floor(Math.random() * 5)],
createdAt: new Date().toISOString(),
updatedAt: new Date().toISOString(),
};
const next = [newChar, ...characters];
setCharacters(next);
setActiveId(newChar.id);
setActiveTab("Identity");
saveToStorage(next);
};
const deleteCharacter = (id) => {
const next = characters.filter(c => c.id !== id);
setCharacters(next);
saveToStorage(next);
if (activeId === id) setActiveId(next.length > 0 ? next[0].id : null);
};
const accentColor = activeCharacter?.accentColor || "#7c6fff";
const set = (path, value) => {
updateCharacter(c => {
const parts = path.split(".");
const next = JSON.parse(JSON.stringify(c));
let obj = next;
for (let i = 0; i < parts.length - 1; i++) obj = obj[parts[i]];
obj[parts[parts.length - 1]] = value;
return next;
});
};
const renderTab = () => {
if (!activeCharacter) return null;
const c = activeCharacter;
switch (activeTab) {
case "Identity":
return (
<div>
<Field label="Character Name">
<Input value={c.name} onChange={v => set("name", v)} placeholder="e.g. Aria, Nova, Echo..." />
</Field>
<Field label="Tagline" hint="A short phrase that captures their essence">
<Input value={c.tagline} onChange={v => set("tagline", v)} placeholder="e.g. Your curious, warm-hearted companion" />
</Field>
<Field label="Accent Color" hint="Used for UI theming and visual identity">
<ColorPicker value={c.accentColor} onChange={v => set("accentColor", v)} />
</Field>
<Field label="Live2D / Avatar Reference" hint="Filename or URL of the character's visual model">
<Input value={c.avatar} onChange={v => set("avatar", v)} placeholder="e.g. aria_v2.model3.json" />
</Field>
<Field label="Backstory" hint="Who are they? Where do they come from? Keep it rich.">
<Textarea value={c.personality.backstory} onChange={v => set("personality.backstory", v)}
placeholder="Write a detailed origin story, background, and personal history for this character..." rows={5} />
</Field>
<Field label="Core Motivation" hint="What drives them? What do they care about most?">
<Textarea value={c.personality.motivation} onChange={v => set("personality.motivation", v)}
placeholder="e.g. A deep desire to help and grow alongside their human companion..." rows={3} />
</Field>
</div>
);
case "Personality":
return (
<div>
<Field label="Personality Traits" hint={`Select up to 6 traits (${c.personality.traits.length}/6)`}>
<TagSelector options={PERSONALITY_TRAITS} selected={c.personality.traits}
onChange={v => set("personality.traits", v)} max={6} />
</Field>
<Field label="Speaking Style">
<TagSelector options={SPEAKING_STYLES} selected={c.personality.speakingStyle ? [c.personality.speakingStyle] : []}
onChange={v => set("personality.speakingStyle", v[v.length - 1] || "")} max={1} />
</Field>
<Field label="Core Values" hint="What principles guide their responses and behaviour?">
<Textarea value={c.personality.coreValues} onChange={v => set("personality.coreValues", v)}
placeholder="e.g. Honesty, kindness, intellectual curiosity, loyalty to their user..." rows={3} />
</Field>
<Field label="Quirks & Mannerisms" hint="Unique behavioural patterns, phrases, habits that make them feel real">
<Textarea value={c.personality.quirks} onChange={v => set("personality.quirks", v)}
placeholder="e.g. Tends to use nautical metaphors. Hums softly when thinking. Has strong opinions about tea..." rows={3} />
</Field>
</div>
);
case "Prompts":
return (
<div>
<Field label="System Prompt" hint="The core instruction set defining who this character is to the LLM">
<Textarea value={c.prompts.systemPrompt} onChange={v => set("prompts.systemPrompt", v)}
placeholder="You are [name], a [description]. Your personality is [traits]. You speak in a [style] manner. You care deeply about [values]..." rows={8} />
</Field>
<Field label="Wake Word Response" hint="First response when activated by wake word">
<Textarea value={c.prompts.wakeWordResponse} onChange={v => set("prompts.wakeWordResponse", v)}
placeholder="e.g. 'Yes? I'm here.' or 'Hmm? What do you need?'" rows={2} />
</Field>
<Field label="Fallback Response" hint="When the character doesn't understand or can't help">
<Textarea value={c.prompts.fallbackResponse} onChange={v => set("prompts.fallbackResponse", v)}
placeholder="e.g. 'I'm not sure I follow — could you say that differently?'" rows={2} />
</Field>
<Field label="Error Response" hint="When something goes wrong technically">
<Textarea value={c.prompts.errorResponse} onChange={v => set("prompts.errorResponse", v)}
placeholder="e.g. 'Something went wrong on my end. Give me a moment.'" rows={2} />
</Field>
<Field label="Custom Prompt Rules" hint="Context-specific overrides and triggers">
<CustomPromptsEditor prompts={c.prompts.customPrompts}
onChange={v => set("prompts.customPrompts", v)} />
</Field>
</div>
);
case "Models":
return (
<div>
<Field label="LLM (Language Model)" hint="Primary reasoning and conversation model via Ollama">
<Select value={c.models.llm} onChange={v => set("models.llm", v)} options={DEFAULT_MODELS} placeholder="Select LLM..." />
</Field>
<Field label="LLM Temperature" hint="Higher = more creative, lower = more focused">
<Slider value={c.models.temperature} onChange={v => set("models.temperature", v)} min={0} max={2} step={0.1} />
</Field>
<Field label="Text-to-Speech Engine">
<Select value={c.models.tts} onChange={v => set("models.tts", v)} options={TTS_MODELS} placeholder="Select TTS..." />
</Field>
<Field label="TTS Speed">
<Slider value={c.models.ttsSpeed} onChange={v => set("models.ttsSpeed", v)} min={0.5} max={2.0} step={0.1} />
</Field>
<Field label="Voice Clone Reference" hint="Path or filename of reference audio for voice cloning">
<Input value={c.models.voiceCloneRef} onChange={v => set("models.voiceCloneRef", v)} placeholder="e.g. /voices/aria_reference.wav" />
</Field>
<Field label="Speech-to-Text Engine">
<Select value={c.models.stt} onChange={v => set("models.stt", v)} options={STT_MODELS} placeholder="Select STT..." />
</Field>
<Field label="Image Generation Model" hint="Used when character generates images or self-portraits">
<Select value={c.models.imageGen} onChange={v => set("models.imageGen", v)} options={IMAGE_MODELS} placeholder="Select image model..." />
</Field>
</div>
);
case "Live2D":
return (
<div>
<Field label="Live2D Model File" hint="Path to .model3.json file, relative to VTube Studio models folder">
<Input value={c.liveRepresentation.live2dModel} onChange={v => set("liveRepresentation.live2dModel", v)} placeholder="e.g. Aria/aria.model3.json" />
</Field>
<Field label="Idle Expression" hint="VTube Studio expression name when listening/waiting">
<Input value={c.liveRepresentation.idleExpression} onChange={v => set("liveRepresentation.idleExpression", v)} placeholder="e.g. idle_blink" />
</Field>
<Field label="Speaking Expression" hint="Expression triggered when TTS audio is playing">
<Input value={c.liveRepresentation.speakingExpression} onChange={v => set("liveRepresentation.speakingExpression", v)} placeholder="e.g. talking_smile" />
</Field>
<Field label="Thinking Expression" hint="Triggered while LLM is processing a response">
<Input value={c.liveRepresentation.thinkingExpression} onChange={v => set("liveRepresentation.thinkingExpression", v)} placeholder="e.g. thinking_tilt" />
</Field>
<Field label="Happy / Positive Expression" hint="Triggered on positive sentiment responses">
<Input value={c.liveRepresentation.happyExpression} onChange={v => set("liveRepresentation.happyExpression", v)} placeholder="e.g. happy_bright" />
</Field>
<Field label="VTube Studio Custom Triggers" hint="Additional WebSocket API trigger mappings (JSON)">
<Textarea value={c.liveRepresentation.vtsTriggers} onChange={v => set("liveRepresentation.vtsTriggers", v)}
placeholder={'{\n "on_error": "expression_concerned",\n "on_wake": "expression_alert"\n}'} rows={5} />
</Field>
</div>
);
case "Notes":
return (
<div>
<Field label="Developer Notes" hint="Freeform notes, ideas, todos, and observations about this character">
<Textarea value={c.userNotes} onChange={v => set("userNotes", v)}
placeholder={"Ideas, observations, things to try...\n\n- Voice reference sounds slightly too formal, adjust Chatterbox guidance scale\n- Try adding more nautical metaphors to system prompt\n- Need to map 'confused' expression in VTS\n- Consider adding weather awareness skill"}
rows={16} />
</Field>
<div style={{ background: "rgba(255,255,255,0.03)", borderRadius: 10, padding: 16, fontSize: 12, color: "rgba(255,255,255,0.35)", lineHeight: 1.7 }}>
<div style={{ marginBottom: 4, fontWeight: 700, color: "rgba(255,255,255,0.45)", letterSpacing: "0.06em", textTransform: "uppercase", fontSize: 11 }}>Character Info</div>
<div>ID: <span style={{ color: "rgba(255,255,255,0.5)", fontFamily: "monospace" }}>{c.id}</span></div>
{c.createdAt && <div>Created: {new Date(c.createdAt).toLocaleString()}</div>}
{c.updatedAt && <div>Updated: {new Date(c.updatedAt).toLocaleString()}</div>}
</div>
</div>
);
default:
return null;
}
};
return (
<div style={{
"--accent": accentColor,
minHeight: "100vh",
background: "#0d0d18",
color: "#fff",
fontFamily: "'DM Sans', 'Segoe UI', system-ui, sans-serif",
display: "flex",
flexDirection: "column",
}}>
<style>{`
@import url('https://fonts.googleapis.com/css2?family=DM+Sans:wght@400;500;600;700;800&family=DM+Mono:wght@400;500&display=swap');
* { box-sizing: border-box; }
::-webkit-scrollbar { width: 6px; }
::-webkit-scrollbar-track { background: transparent; }
::-webkit-scrollbar-thumb { background: rgba(255,255,255,0.1); border-radius: 3px; }
input::placeholder, textarea::placeholder { color: rgba(255,255,255,0.2); }
select option { background: #13131f; }
`}</style>
{/* Header */}
<div style={{
padding: "18px 28px", borderBottom: "1px solid rgba(255,255,255,0.06)",
display: "flex", alignItems: "center", justifyContent: "space-between",
background: "rgba(0,0,0,0.2)", backdropFilter: "blur(10px)",
position: "sticky", top: 0, zIndex: 10,
}}>
<div style={{ display: "flex", alignItems: "center", gap: 14 }}>
<div style={{
width: 36, height: 36, borderRadius: 10,
background: `linear-gradient(135deg, ${accentColor}, ${accentColor}88)`,
display: "flex", alignItems: "center", justifyContent: "center", fontSize: 18,
boxShadow: `0 4px 16px ${accentColor}44`
}}></div>
<div>
<div style={{ fontWeight: 800, fontSize: 17, letterSpacing: "-0.01em" }}>Character Manager</div>
<div style={{ fontSize: 12, color: "rgba(255,255,255,0.35)" }}>AI Personality Configuration</div>
</div>
</div>
<div style={{ display: "flex", gap: 10, alignItems: "center" }}>
{saved && <span style={{ fontSize: 12, color: accentColor, fontWeight: 600 }}> Saved</span>}
{activeCharacter && (
<button onClick={() => setExportModal(true)} style={{
padding: "8px 16px", background: "rgba(255,255,255,0.07)", border: "1px solid rgba(255,255,255,0.12)",
borderRadius: 8, color: "rgba(255,255,255,0.7)", fontSize: 13, cursor: "pointer",
fontFamily: "inherit", fontWeight: 600, transition: "all 0.2s"
}}>Export JSON</button>
)}
</div>
</div>
<div style={{ display: "flex", flex: 1, overflow: "hidden" }}>
{/* Sidebar */}
<div style={{
width: 260, borderRight: "1px solid rgba(255,255,255,0.06)",
display: "flex", flexDirection: "column", background: "rgba(0,0,0,0.15)",
flexShrink: 0,
}}>
<div style={{ padding: "16px 16px 8px" }}>
<button onClick={createCharacter} style={{
width: "100%", padding: "11px", background: `linear-gradient(135deg, ${accentColor}cc, ${accentColor}88)`,
border: "none", borderRadius: 10, color: "#fff", fontWeight: 700, fontSize: 14,
cursor: "pointer", fontFamily: "inherit", transition: "opacity 0.2s",
boxShadow: `0 4px 16px ${accentColor}33`
}}>+ New Character</button>
</div>
<div style={{ flex: 1, overflowY: "auto", padding: "4px 16px 16px" }}>
{characters.length === 0 ? (
<div style={{ textAlign: "center", padding: "40px 16px", color: "rgba(255,255,255,0.2)", fontSize: 13, lineHeight: 1.6 }}>
No characters yet.<br />Create your first one above.
</div>
) : (
characters.map(c => (
<CharacterCard key={c.id} character={c} active={c.id === activeId}
onSelect={setActiveId} onDelete={deleteCharacter} />
))
)}
</div>
</div>
{/* Main editor */}
{activeCharacter ? (
<div style={{ flex: 1, display: "flex", flexDirection: "column", overflow: "hidden" }}>
{/* Character header */}
<div style={{
padding: "20px 28px 0", borderBottom: "1px solid rgba(255,255,255,0.06)",
background: `linear-gradient(180deg, ${accentColor}0a 0%, transparent 100%)`,
}}>
<div style={{ display: "flex", alignItems: "center", gap: 16, marginBottom: 18 }}>
<div style={{
width: 52, height: 52, borderRadius: 16, flexShrink: 0,
background: `linear-gradient(135deg, ${accentColor}, ${accentColor}66)`,
display: "flex", alignItems: "center", justifyContent: "center",
fontSize: 20, fontWeight: 800, boxShadow: `0 6px 20px ${accentColor}44`
}}>
{activeCharacter.name ? activeCharacter.name.slice(0, 2).toUpperCase() : "??"}
</div>
<div>
<div style={{ fontSize: 22, fontWeight: 800, letterSpacing: "-0.02em", lineHeight: 1.2 }}>
{activeCharacter.name || <span style={{ color: "rgba(255,255,255,0.25)" }}>Unnamed Character</span>}
</div>
{activeCharacter.tagline && (
<div style={{ fontSize: 14, color: "rgba(255,255,255,0.45)", marginTop: 2 }}>{activeCharacter.tagline}</div>
)}
</div>
</div>
{/* Tabs */}
<div style={{ display: "flex", gap: 2 }}>
{TABS.map(tab => (
<button key={tab} onClick={() => setActiveTab(tab)} style={{
padding: "9px 16px", background: "none", border: "none",
borderBottom: activeTab === tab ? `2px solid ${accentColor}` : "2px solid transparent",
color: activeTab === tab ? "#fff" : "rgba(255,255,255,0.4)",
fontSize: 13, fontWeight: activeTab === tab ? 700 : 500,
cursor: "pointer", fontFamily: "inherit", transition: "all 0.18s",
display: "flex", alignItems: "center", gap: 6,
}}>
<span style={{ fontSize: 11 }}>{TAB_ICONS[tab]}</span>{tab}
</button>
))}
</div>
</div>
{/* Tab content */}
<div style={{ flex: 1, overflowY: "auto", padding: "24px 28px" }}>
{renderTab()}
</div>
</div>
) : (
<div style={{
flex: 1, display: "flex", alignItems: "center", justifyContent: "center",
flexDirection: "column", gap: 16, color: "rgba(255,255,255,0.2)"
}}>
<div style={{ fontSize: 64, opacity: 0.3 }}></div>
<div style={{ fontSize: 16, fontWeight: 600 }}>No character selected</div>
<div style={{ fontSize: 13 }}>Create a new character to get started</div>
</div>
)}
</div>
{exportModal && activeCharacter && (
<ExportModal character={activeCharacter} onClose={() => setExportModal(false)} />
)}
</div>
);
}

55
homeai-character/setup.sh Normal file
View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
# homeai-character/setup.sh — P5: Character Manager + persona JSON
#
# Components:
# - character.schema.json — v1 character config schema
# - aria.json — default character config
# - Character Manager UI — Vite/React app for editing (dev server :5173)
#
# No hard runtime dependencies (can be developed standalone).
# Output (aria.json) is consumed by P3, P4, P7.
set -euo pipefail
# Resolve this script's directory and the repo root so the shared helper
# library can be sourced regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Provides log_section/log_info/log_warn, detect_platform, command_exists, etc.
source "${REPO_DIR}/scripts/common.sh"
log_section "P5: Character Manager"
detect_platform
# ─── Prerequisite check ────────────────────────────────────────────────────────
log_info "Checking prerequisites..."
# Node is only needed for the UI dev server, so a missing install is a
# warning rather than a fatal error.
if ! command_exists node; then
log_warn "Node.js not found — required for Character Manager UI"
log_warn "Install: https://nodejs.org (v18+ recommended)"
fi
# ─── TODO: Implementation ──────────────────────────────────────────────────────
# Quoted 'EOF' delimiter: the banner below is printed literally, no expansion.
cat <<'EOF'
┌─────────────────────────────────────────────────────────────────┐
│ P5: homeai-character — NOT YET IMPLEMENTED │
│ │
│ Implementation steps: │
│ 1. Create schema/character.schema.json (v1) │
│ 2. Create characters/aria.json (default persona) │
│ 3. Set up Vite/React project in src/ │
│ 4. Extend character-manager.jsx with full UI │
│ 5. Add schema validation (ajv) │
│ 6. Add expression mapper UI for Live2D │
│ 7. Wire export to ~/.openclaw/characters/ │
│ │
│ Dev server: │
│ cd homeai-character && npm run dev → http://localhost:5173 │
│ │
│ Interface contracts: │
│ Output: ~/.openclaw/characters/<name>.json │
│ Schema: homeai-character/schema/character.schema.json │
└─────────────────────────────────────────────────────────────────┘
EOF
log_info "P5 is not yet implemented. See homeai-character/PLAN.md for details."
exit 0

76
homeai-esp32/setup.sh Normal file
View File

@@ -0,0 +1,76 @@
#!/usr/bin/env bash
# homeai-esp32/setup.sh — P6: ESPHome firmware for ESP32-S3-BOX-3
#
# Components:
# - ESPHome — firmware build + flash tool
# - base.yaml — shared device config
# - voice.yaml — Wyoming Satellite + microWakeWord
# - display.yaml — LVGL animated face
# - Per-room configs — s3-box-living-room.yaml, etc.
#
# Prerequisites:
# - P1 (homeai-infra) — Home Assistant running
# - P3 (homeai-voice) — Wyoming STT/TTS running (ports 10300/10301)
# - Python 3.10+
# - USB-C cable for first flash (subsequent updates via OTA)
# - On Linux: ensure user is in the dialout group for USB access
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Provides log_*, detect_platform, command_exists (sets OS_TYPE).
source "${REPO_DIR}/scripts/common.sh"
log_section "P6: ESP32 Firmware (ESPHome)"
detect_platform
# ─── Prerequisite check ────────────────────────────────────────────────────────
log_info "Checking prerequisites..."
if ! command_exists python3; then
  log_warn "python3 not found — required for ESPHome"
fi
if ! command_exists esphome; then
  log_info "ESPHome not installed. To install: pip install esphome"
fi
if [[ "$OS_TYPE" == "linux" ]]; then
  # `id -nG` prints only group names; `grep -qw` requires a whole-word match.
  # (The previous `groups "$USER" | grep -q dialout` was an unanchored
  # substring match, and `groups user` prefixes "user :" on some systems.)
  if ! id -nG "$USER" | grep -qw dialout; then
    log_warn "User '$USER' not in 'dialout' group — USB flashing may fail."
    log_warn "Fix: sudo usermod -aG dialout $USER (then log out and back in)"
  fi
fi
# Check P1 dependency — Home Assistant answers on :8123.
# (This probes HA, which P1 provides; the old comment mislabeled it as P3.)
if ! curl -sf http://localhost:8123 -o /dev/null 2>/dev/null; then
  log_warn "Home Assistant (P1) not reachable — ESP32 units won't auto-discover"
fi
# ─── TODO: Implementation ──────────────────────────────────────────────────────
cat <<'EOF'
┌─────────────────────────────────────────────────────────────────┐
│ P6: homeai-esp32 — NOT YET IMPLEMENTED │
│ │
│ Implementation steps: │
│ 1. pip install esphome │
│ 2. Create esphome/secrets.yaml (gitignored) │
│ 3. Create esphome/base.yaml (WiFi, API, OTA) │
│ 4. Create esphome/voice.yaml (Wyoming Satellite, wakeword) │
│ 5. Create esphome/display.yaml (LVGL face, 5 states) │
│ 6. Create esphome/animations.yaml (face state scripts) │
│ 7. Create per-room configs (s3-box-living-room.yaml, etc.) │
│ 8. First flash via USB: esphome run esphome/<room>.yaml │
│ 9. Subsequent OTA: esphome upload esphome/<room>.yaml │
│ 10. Add to Home Assistant → assign Wyoming voice pipeline │
│ │
│ Quick flash (once esphome/ is ready): │
│ esphome run esphome/s3-box-living-room.yaml │
│ esphome logs esphome/s3-box-living-room.yaml │
└─────────────────────────────────────────────────────────────────┘
EOF
log_info "P6 is not yet implemented. See homeai-esp32/PLAN.md for details."
exit 0

65
homeai-images/setup.sh Normal file
View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
# homeai-images/setup.sh — P8: ComfyUI + SDXL/Flux + character LoRA
#
# Components:
# - ComfyUI — image generation UI + API (port 8188)
# - SDXL, Flux.1 — base checkpoints
# - ControlNet — pose/depth guidance
# - aria LoRA — character-consistent fine-tune
# - comfyui.py skill — OpenClaw integration
# - workflows/ — saved workflows (quick, portrait, scene, upscale)
#
# Prerequisites:
# - P4 (homeai-agent) — OpenClaw running (for skill integration)
# - Python 3.10+
# - macOS: Metal GPU (MPS) — runs natively
# - Linux: CUDA GPU recommended (NVIDIA); CPU is very slow for image gen
#
# ComfyUI runs NATIVELY (not Docker) for GPU acceleration.
set -euo pipefail
# Resolve script/repo paths so common.sh can be sourced from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Provides log_*, detect_platform, detect_gpu (sets GPU_TYPE / GPU_INFO).
source "${REPO_DIR}/scripts/common.sh"
log_section "P8: Image Generation (ComfyUI)"
detect_platform
detect_gpu
# GPU_INFO may be empty; ${GPU_INFO:+…} appends the detail only when set.
log_info "GPU: ${GPU_TYPE}${GPU_INFO:+ — ${GPU_INFO}}"
# CPU fallback is allowed but warned about — image generation is impractical
# without a GPU, so the user should fix drivers before implementing P8.
if [[ "$GPU_TYPE" == "none" ]]; then
log_warn "No GPU detected. ComfyUI will use CPU — image generation will be very slow."
log_warn "On Linux: install CUDA drivers if you have an NVIDIA card."
fi
# ─── TODO: Implementation ──────────────────────────────────────────────────────
# Quoted 'EOF' delimiter: banner is printed literally, no expansion.
cat <<'EOF'
┌─────────────────────────────────────────────────────────────────┐
│ P8: homeai-images — NOT YET IMPLEMENTED │
│ │
│ Implementation steps: │
│ 1. Clone ComfyUI: git clone https://github.com/comfyanonymous/ComfyUI ~/ComfyUI │
│ 2. Create venv + install PyTorch (MPS for macOS, CUDA for Linux) │
│ 3. scripts/download-models.sh — SDXL, Flux.1, VAE, ControlNet │
│ 4. Create launchd/systemd service (port 8188) │
│ 5. Create workflows/quick.json, portrait.json, scene.json │
│ 6. Implement skills/comfyui.py OpenClaw integration │
│ 7. (Later) Train aria LoRA with kohya_ss │
│ │
│ Model sizes (download bandwidth required): │
│ SDXL base ~6.5 GB │
│ Flux.1-dev ~24 GB │
│ Flux.1-schnell ~24 GB │
│ ControlNet ~1.5 GB each │
│ │
│ Interface contracts: │
│ COMFYUI_URL=http://localhost:8188 │
└─────────────────────────────────────────────────────────────────┘
EOF
log_info "P8 is not yet implemented. See homeai-images/PLAN.md for details."
exit 0

View File

@@ -0,0 +1,12 @@
# homeai-infra Docker secrets
# Copy to .env — never commit .env
DATA_DIR=${HOME}/homeai-data
# ─── code-server ───────────────────────────────────────────────────────────────
CODE_SERVER_PASSWORD=changeme123
# ─── n8n ───────────────────────────────────────────────────────────────────────
N8N_BASIC_AUTH_USER=admin
N8N_BASIC_AUTH_PASSWORD=changeme123
N8N_ENCRYPTION_KEY=changeme_random_32_char_string

View File

@@ -0,0 +1,91 @@
---
# homeai-infra/docker/docker-compose.yml
# P1 — Infrastructure services
#
# Provides: Uptime Kuma, code-server, n8n
#
# NOTE: Home Assistant, Portainer, and Gitea are pre-existing instances
# and are NOT managed here. Point .env.services at their existing URLs.
#
# Prerequisites:
# - Docker installed and running
# - `homeai` Docker network exists (created by setup.sh)
# - .env file present (copy from .env.example)
#
# Usage:
# docker compose -f docker/docker-compose.yml up -d
# docker compose -f docker/docker-compose.yml down
name: homeai-infra
# Linux compatibility: host.docker.internal:host-gateway resolves host IP
# On macOS this is already defined; on Linux it maps to the Docker bridge gateway.
x-host-gateway: &host-gateway
extra_hosts:
- "host.docker.internal:host-gateway"
services:
# ─── Uptime Kuma ─────────────────────────────────────────────────────────────
uptime-kuma:
container_name: homeai-uptime-kuma
image: louislam/uptime-kuma:latest
restart: unless-stopped
ports:
- "3001:3001"
volumes:
- ${DATA_DIR:-~/homeai-data}/uptime-kuma:/app/data
networks:
- homeai
labels:
- homeai.service=uptime-kuma
- homeai.url=http://localhost:3001
# ─── code-server (browser VS Code) ───────────────────────────────────────────
code-server:
container_name: homeai-code-server
image: codercom/code-server:latest
restart: unless-stopped
ports:
- "8090:8080" # Note: exposed on 8090 to avoid conflict with OpenClaw (8080)
volumes:
- ${DATA_DIR:-~/homeai-data}/code-server:/home/coder/.config
- ${HOME}:/home/coder/host:rw # Mount home dir for file access
environment:
- PASSWORD=${CODE_SERVER_PASSWORD:-changeme123}
<<: *host-gateway
networks:
- homeai
labels:
- homeai.service=code-server
- homeai.url=http://localhost:8090
# ─── n8n (workflow automation) ───────────────────────────────────────────────
n8n:
container_name: homeai-n8n
image: n8nio/n8n:latest
restart: unless-stopped
ports:
- "5678:5678"
volumes:
- ${DATA_DIR:-~/homeai-data}/n8n:/home/node/.n8n
environment:
- N8N_BASIC_AUTH_ACTIVE=true
- N8N_BASIC_AUTH_USER=${N8N_BASIC_AUTH_USER:-admin}
- N8N_BASIC_AUTH_PASSWORD=${N8N_BASIC_AUTH_PASSWORD:-changeme123}
- N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY:-changeme}
- N8N_HOST=0.0.0.0
- N8N_PORT=5678
- N8N_PROTOCOL=http
- WEBHOOK_URL=http://localhost:5678/
<<: *host-gateway
networks:
- homeai
labels:
- homeai.service=n8n
- homeai.url=http://localhost:5678
networks:
homeai:
external: true
name: homeai

135
homeai-infra/setup.sh Normal file
View File

@@ -0,0 +1,135 @@
#!/usr/bin/env bash
# homeai-infra/setup.sh — P1: Infrastructure stack
#
# Installs Docker (if needed), creates the homeai network,
# and starts new infrastructure services.
#
# Services started:
# Uptime Kuma :3001
# code-server :8090
# n8n :5678
#
# NOTE: Home Assistant, Portainer, and Gitea are pre-existing instances.
# Set their URLs in ~/.env.services manually.
set -euo pipefail
# Resolve script/repo paths so common.sh can be sourced from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=../scripts/common.sh
source "${REPO_DIR}/scripts/common.sh"
# Compose stack for the managed services, plus its env file (bootstrapped
# from .env.example by preflight on first run).
COMPOSE_FILE="${SCRIPT_DIR}/docker/docker-compose.yml"
ENV_FILE="${SCRIPT_DIR}/docker/.env"
ENV_EXAMPLE="${SCRIPT_DIR}/docker/.env.example"
# ─── Pre-flight ────────────────────────────────────────────────────────────────
# Preflight: bootstrap docker/.env, refuse-by-default on placeholder
# secrets, and create the per-service data directories.
preflight() {
  log_section "P1 Preflight"
  detect_platform
  # Bootstrap .env from the template on first run.
  if [[ ! -f "$ENV_FILE" ]]; then
    if [[ -f "$ENV_EXAMPLE" ]]; then
      cp "$ENV_EXAMPLE" "$ENV_FILE"
      log_warn "Created ${ENV_FILE} from .env.example"
      log_warn "Edit it now with real secrets, then re-run this script."
      echo ""
      echo " Secrets to set in ${ENV_FILE}:"
      echo " CODE_SERVER_PASSWORD — your password"
      echo " N8N_BASIC_AUTH_PASSWORD — your password"
      echo " N8N_ENCRYPTION_KEY — 32-char random string (run: openssl rand -hex 16)"
      echo ""
    else
      die "No .env or .env.example found at ${SCRIPT_DIR}/docker/"
    fi
  fi
  # Fix: check for placeholder secrets on EVERY run, not only when the file
  # was just created — previously a re-run with default "changeme" secrets
  # proceeded silently.
  if grep -q 'changeme' "$ENV_FILE"; then
    if ! confirm "Secrets are still at defaults. Continue anyway? (not safe for production)"; then
      log_info "Aborting. Fill in ${ENV_FILE} and re-run."
      exit 0
    fi
  fi
  # Create per-service data directories under DATA_DIR (default ~/homeai-data).
  local data_dir
  load_env "$ENV_FILE"
  data_dir="${DATA_DIR:-${HOME}/homeai-data}"
  log_step "Data directory: ${data_dir}"
  mkdir -p \
    "${data_dir}/uptime-kuma" \
    "${data_dir}/code-server" \
    "${data_dir}/n8n"
  log_success "Data directories ready."
}
# ─── Docker ────────────────────────────────────────────────────────────────────
setup_docker() {
log_section "Docker"
# All helpers come from scripts/common.sh: install Docker if missing,
# ensure the daemon is up, and create the shared `homeai` bridge network
# that every sub-project's compose stack attaches to (declared external
# in the compose files).
install_docker
ensure_docker_running
ensure_docker_network homeai
}
# ─── Services ──────────────────────────────────────────────────────────────────
# Pull current images first so `up -d` doesn't race a slow download, then
# start the stack detached.
start_services() {
  log_section "Starting infra services"
  local -a compose_args=(-f "$COMPOSE_FILE" --env-file "$ENV_FILE")
  log_step "Pulling latest images..."
  docker_compose "${compose_args[@]}" pull
  log_step "Starting containers..."
  docker_compose "${compose_args[@]}" up -d
  log_success "Containers started."
}
# ─── Health checks ─────────────────────────────────────────────────────────────
# Block until each managed service answers over HTTP (url|label|timeout).
# n8n gets a longer budget — it is the slowest to come up.
health_check() {
  log_section "Health checks"
  local spec url label timeout
  for spec in \
    "http://localhost:3001|Uptime Kuma|60" \
    "http://localhost:5678|n8n|90" \
    "http://localhost:8090|code-server|60"; do
    IFS='|' read -r url label timeout <<<"$spec"
    wait_for_http "$url" "$label" "$timeout"
  done
}
# ─── Register services ─────────────────────────────────────────────────────────
register_services() {
log_section "Writing service URLs"
# write_env_service (common.sh) records KEY=VALUE in ~/.env.services, the
# cross-project service registry read by later phases — presumably an
# upsert; confirm against scripts/common.sh.
write_env_service "UPTIME_KUMA_URL" "http://localhost:3001"
write_env_service "CODE_SERVER_URL" "http://localhost:8090"
write_env_service "N8N_URL" "http://localhost:5678"
# HA / Portainer / Gitea are pre-existing and unmanaged: their URLs (and
# the HA token) must be added by hand.
log_warn "Set these in ~/.env.services manually (pre-existing instances):"
log_warn " HA_URL, HA_TOKEN, PORTAINER_URL, GITEA_URL"
log_success "Service URLs written to ~/.env.services"
}
# ─── Summary ───────────────────────────────────────────────────────────────────
print_infra_summary() {
# Final banner: managed service URLs via print_summary (common.sh, takes
# alternating label/value pairs), then manual follow-ups for the
# pre-existing, unmanaged services.
print_summary "P1 Infrastructure — Ready" \
"Uptime Kuma" "http://localhost:3001" \
"code-server" "http://localhost:8090" \
"n8n" "http://localhost:5678"
echo " Pre-existing services (not managed here):"
echo " Home Assistant, Portainer, Gitea"
echo ""
echo " Next steps:"
echo " 1. Add HA_URL, HA_TOKEN, PORTAINER_URL, GITEA_URL to ~/.env.services"
echo " 2. Add Uptime Kuma monitors for all HomeAI services"
echo " 3. Run: ./setup.sh p2 (Ollama + LLM)"
echo ""
}
# ─── Main ──────────────────────────────────────────────────────────────────────
main() {
# Phases in dependency order: env/dirs → Docker → containers → health →
# service registry → summary.
preflight
setup_docker
start_services
health_check
register_services
print_infra_summary
}
main "$@"

View File

@@ -0,0 +1,7 @@
# homeai-llm Docker secrets
# Copy to .env — never commit .env
DATA_DIR=${HOME}/homeai-data
# Open WebUI
WEBUI_SECRET_KEY=changeme_random_32_char_string_here

View File

@@ -0,0 +1,45 @@
---
# homeai-llm/docker/docker-compose.yml
# P2 — Open WebUI
#
# Ollama runs NATIVELY (not in Docker) for GPU acceleration.
# This compose file only starts the Open WebUI frontend.
#
# Prerequisites:
# - Ollama installed and running on the host at port 11434
# - `homeai` Docker network exists (created by P1 setup)
#
# Usage:
# docker compose -f docker/docker-compose.yml up -d
name: homeai-llm
services:
# ─── Open WebUI ──────────────────────────────────────────────────────────────
open-webui:
container_name: homeai-open-webui
image: ghcr.io/open-webui/open-webui:main
restart: unless-stopped
ports:
- "3030:8080" # Exposed on 3030 to avoid conflict with Gitea (3000)
volumes:
- ${DATA_DIR:-~/homeai-data}/open-webui:/app/backend/data
environment:
# Connect to Ollama on the host
- OLLAMA_BASE_URL=http://host.docker.internal:11434
- WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY:-changeme_random_32_char}
- ENABLE_SIGNUP=true
- DEFAULT_MODELS=llama3.3:70b
extra_hosts:
- "host.docker.internal:host-gateway" # Linux compat
networks:
- homeai
labels:
- homeai.service=open-webui
- homeai.url=http://localhost:3030
networks:
homeai:
external: true
name: homeai

View File

@@ -0,0 +1,37 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.homeai.ollama</string>
<key>ProgramArguments</key>
<array>
<!-- Resolve ollama via PATH instead of a hard-coded path: Homebrew on
     Apple Silicon installs to /opt/homebrew/bin, while Intel/manual
     installs use /usr/local/bin. The previous hard-coded
     /usr/local/bin/ollama fails on the M-series Mac Mini this repo
     targets. -->
<string>/bin/sh</string>
<string>-c</string>
<string>exec ollama serve</string>
</array>
<key>EnvironmentVariables</key>
<dict>
<!-- launchd does not inherit a login shell's PATH; list both Homebrew
     prefixes so the /bin/sh wrapper above can find ollama. -->
<key>PATH</key>
<string>/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin</string>
<key>OLLAMA_HOST</key>
<string>0.0.0.0:11434</string>
<!-- Metal GPU is used automatically on Apple Silicon; no env var needed -->
</dict>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
<key>StandardOutPath</key>
<string>/tmp/homeai-ollama.log</string>
<key>StandardErrorPath</key>
<string>/tmp/homeai-ollama-error.log</string>
<key>ThrottleInterval</key>
<integer>5</integer>
</dict>
</plist>

View File

@@ -0,0 +1,21 @@
# Ollama model manifest
# One model per line. Lines starting with # are ignored.
# Format: <model>:<tag> or just <model> for latest
#
# Pull all models: bash scripts/pull-models.sh
# Pull specific: ollama pull <model>
# ─── Primary (main conversation) ───────────────────────────────────────────────
llama3.3:70b
# ─── Alternative primary ───────────────────────────────────────────────────────
qwen2.5:72b
# ─── Fast / low-latency (voice pipeline, quick tasks) ─────────────────────────
qwen2.5:7b
# ─── Code generation ───────────────────────────────────────────────────────────
qwen2.5-coder:32b
# ─── Embeddings (mem0 memory store) ────────────────────────────────────────────
nomic-embed-text

View File

@@ -0,0 +1,88 @@
#!/usr/bin/env bash
# scripts/benchmark.sh — Benchmark Ollama model inference speed
#
# Measures tokens/sec for each installed model.
# Results written to benchmark-results.md
#
# Usage:
#   bash scripts/benchmark.sh               # all installed models
#   bash scripts/benchmark.sh qwen2.5:7b    # benchmark one model
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
source "${REPO_DIR}/scripts/common.sh"
RESULTS_FILE="${SCRIPT_DIR}/../benchmark-results.md"
PROMPT="Tell me a short story about a robot who loves cooking. Keep it to exactly 200 words."

if ! command_exists ollama; then
  die "Ollama not found."
fi
if ! curl -sf http://localhost:11434 -o /dev/null; then
  die "Ollama is not running."
fi

# Millisecond wall clock. The previous `date +%s%3N` is GNU-only: BSD/macOS
# date has no %N, so it printed a literal trailing "3N" and corrupted the
# elapsed-time math on the Mac. bash 5+ exposes EPOCHREALTIME (microsecond
# precision); fall back to whole seconds elsewhere.
now_ms() {
  if [[ -n "${EPOCHREALTIME:-}" ]]; then
    local us=${EPOCHREALTIME/./}
    echo $(( us / 1000 ))
  else
    echo $(( $(date +%s) * 1000 ))
  fi
}

# Benchmark one model against $PROMPT.
#   stderr: human-readable progress line (console only)
#   stdout: one markdown table row (captured by `tee -a` into $RESULTS_FILE)
# Previously BOTH lines went to stdout, so the pretty-printed line was also
# appended into the markdown table and corrupted it.
# NOTE(review): assumes log_step/log_error in common.sh write to stderr —
# confirm, otherwise their output also lands in the results file.
benchmark_model() {
  local model="$1"
  log_step "Benchmarking $model..."
  local start end elapsed response
  start=$(now_ms)
  response=$(ollama run "$model" "$PROMPT" 2>&1) || {
    log_error "Model $model failed to run."
    echo "| $model | ERROR | — |"
    return
  }
  end=$(now_ms)
  elapsed=$(( end - start ))
  (( elapsed > 0 )) || elapsed=1   # guard bc divide-by-zero on sub-ms clocks
  local word_count
  word_count=$(echo "$response" | wc -w)
  local tokens_est=$(( word_count * 4 / 3 ))   # rough estimate: 1 token ≈ 0.75 words
  local elapsed_sec tps
  elapsed_sec=$(echo "scale=1; $elapsed / 1000" | bc)
  tps=$(echo "scale=1; $tokens_est / ($elapsed / 1000)" | bc 2>/dev/null || echo "?")
  printf " %-30s %6s tok/s (%ss)\n" "$model" "$tps" "$elapsed_sec" >&2
  echo "| \`$model\` | ${tps} tok/s | ${elapsed_sec}s |"
}

log_section "Ollama Benchmark"
log_info "Prompt: '$PROMPT'"
echo ""
if [[ -n "${1:-}" ]]; then
  models=("$@")
else
  # All installed models; `tail -n +2` skips the `ollama list` header row.
  mapfile -t models < <(ollama list 2>/dev/null | tail -n +2 | awk '{print $1}')
fi
if [[ ${#models[@]} -eq 0 ]]; then
  die "No models installed. Run: bash scripts/pull-models.sh"
fi
{
  echo "# Ollama Benchmark Results"
  echo "> Generated: $(date)"
  echo ""
  echo "| Model | Speed | Time for ~200 tok |"
  echo "|---|---|---|"
} > "$RESULTS_FILE"
for model in "${models[@]}"; do
  benchmark_model "$model" | tee -a "$RESULTS_FILE"
done
echo "" >> "$RESULTS_FILE"
log_success "Results written to $RESULTS_FILE"
echo ""
cat "$RESULTS_FILE"

View File

@@ -0,0 +1,86 @@
#!/usr/bin/env bash
# scripts/pull-models.sh — Pull all Ollama models from the manifest
#
# Usage:
#   bash scripts/pull-models.sh                   # pull all models
#   bash scripts/pull-models.sh nomic-embed-text  # pull specific model(s)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
source "${REPO_DIR}/scripts/common.sh"
MANIFEST="${SCRIPT_DIR}/../ollama-models.txt"

if ! command_exists ollama; then
  die "Ollama not found. Run: bash homeai-llm/setup.sh first."
fi
if ! curl -sf http://localhost:11434 -o /dev/null; then
  die "Ollama is not running. Start it first."
fi

# is_installed <model> — exact-match presence test against `ollama list`.
# A bare name is normalized to name:latest (how Ollama lists untagged pulls).
# Replaces the previous prefix grep, which could cross-match related models
# (e.g. "qwen2.5" matching a "qwen2.5-coder" line).
is_installed() {
  local want="$1"
  [[ "$want" == *:* ]] || want="${want}:latest"
  ollama list 2>/dev/null | awk 'NR > 1 {print $1}' | grep -Fxq -- "$want"
}

# If specific models are given as args, just pull those and exit.
if [[ $# -gt 0 ]]; then
  for model in "$@"; do
    log_info "Pulling $model..."
    ollama pull "$model"
    log_success "Pulled $model"
  done
  exit 0
fi

# Pull all models from manifest
log_section "Pulling Ollama models"
total=0; pulled=0; skipped=0; failed=0
while IFS= read -r line || [[ -n "$line" ]]; do
  # Skip comments and blank lines
  [[ "$line" =~ ^[[:space:]]*# ]] && continue
  [[ -z "${line// }" ]] && continue
  model="${line%% *}"   # strip any trailing comment
  total=$((total + 1))
  if is_installed "$model"; then
    log_info "Already present: $model — skipping"
    skipped=$((skipped + 1))
    continue
  fi
  log_step "Pulling $model..."
  if ollama pull "$model"; then
    log_success "Pulled $model"
    pulled=$((pulled + 1))
  else
    log_error "Failed to pull $model"
    failed=$((failed + 1))
  fi
done < "$MANIFEST"
echo ""
log_info "Pull complete: ${pulled} pulled, ${skipped} already present, ${failed} failed (of ${total} total)"
if [[ $failed -gt 0 ]]; then
  log_warn "Some models failed to pull. Check your internet connection and retry."
  exit 1
fi
echo ""
log_info "Installed models:"
ollama list

227
homeai-llm/setup.sh Normal file
View File

@@ -0,0 +1,227 @@
#!/usr/bin/env bash
# homeai-llm/setup.sh — P2: Ollama + Open WebUI
#
# Installs Ollama natively (for GPU access), sets up auto-start,
# pulls models from the manifest, and starts Open WebUI in Docker.
#
# GPU support:
# Linux — CUDA (NVIDIA) or ROCm (AMD) or CPU fallback
# macOS — Metal (automatic for Apple Silicon)
set -euo pipefail
# Resolve script/repo paths so common.sh can be sourced from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=../scripts/common.sh
source "${REPO_DIR}/scripts/common.sh"
# Open WebUI compose stack + env file (bootstrapped in preflight), and the
# model manifest consumed by scripts/pull-models.sh.
COMPOSE_FILE="${SCRIPT_DIR}/docker/docker-compose.yml"
ENV_FILE="${SCRIPT_DIR}/docker/.env"
ENV_EXAMPLE="${SCRIPT_DIR}/docker/.env.example"
MANIFEST="${SCRIPT_DIR}/ollama-models.txt"
# ─── Pre-flight ────────────────────────────────────────────────────────────────
# Preflight: verify P1's shared Docker network, bootstrap the Open WebUI
# .env, and create its data directory.
preflight() {
  log_section "P2 Preflight"
  detect_platform
  detect_gpu
  # Check P1 dependency (the shared `homeai` Docker network must exist).
  # (Dropped the redundant `2>&1` that duplicated `&>/dev/null`.)
  if ! docker network inspect homeai &>/dev/null; then
    log_warn "Docker network 'homeai' not found. Has P1 been run?"
    log_warn "Run: ./setup.sh p1 first, or: docker network create homeai"
    if ! confirm "Create 'homeai' network now and continue?"; then
      die "Aborted. Run ./setup.sh p1 first."
    fi
    docker network create homeai
  fi
  # Bootstrap .env for Open WebUI on first run.
  if [[ ! -f "$ENV_FILE" && -f "$ENV_EXAMPLE" ]]; then
    cp "$ENV_EXAMPLE" "$ENV_FILE"
    log_warn "Created ${ENV_FILE} from .env.example"
    log_warn "Set WEBUI_SECRET_KEY in ${ENV_FILE} (run: openssl rand -hex 16)"
  fi
  # Create data dir. load_env failure is deliberately non-fatal: the .env
  # may legitimately be absent if .env.example was missing too.
  load_env "$ENV_FILE" 2>/dev/null || true
  local data_dir="${DATA_DIR:-${HOME}/homeai-data}"
  mkdir -p "${data_dir}/open-webui"
}
# ─── Ollama Installation ───────────────────────────────────────────────────────
# Ensure the Ollama binary is installed natively (Docker would lose GPU
# access on macOS). Idempotent: returns early when already present.
install_ollama() {
  log_section "Ollama"
  if command_exists ollama; then
    log_success "Ollama already installed: $(ollama --version 2>/dev/null || echo 'version unknown')"
    return
  fi
  log_info "Installing Ollama..."
  case "$OS_TYPE" in
    macos)
      # Prefer Homebrew when available; otherwise fall back to the
      # official installer script.
      if command_exists brew; then
        brew install ollama
      else
        log_info "Downloading Ollama for macOS..."
        curl -fsSL https://ollama.com/install.sh | sh
      fi
      ;;
    *)
      # Linux — official install script handles CUDA/ROCm detection
      log_info "Downloading and running Ollama installer..."
      curl -fsSL https://ollama.com/install.sh | sh
      ;;
  esac
  command_exists ollama || die "Ollama installation failed. Check the output above."
  log_success "Ollama installed: $(ollama --version 2>/dev/null || echo 'ok')"
}
# ─── Ollama Service ────────────────────────────────────────────────────────────
# Register Ollama as an auto-start service (systemd on Linux, launchd on
# macOS) and wait for it to answer on :11434, with a 30s budget and a
# manual `ollama serve` fallback.
setup_ollama_service() {
  log_section "Ollama service"
  # Nothing to do if it already answers.
  if curl -sf http://localhost:11434 -o /dev/null 2>/dev/null; then
    log_success "Ollama is already running."
    return
  fi
  install_service \
    "homeai-ollama" \
    "${SCRIPT_DIR}/systemd/homeai-ollama.service" \
    "${SCRIPT_DIR}/launchd/com.homeai.ollama.plist"
  log_step "Waiting for Ollama to start..."
  local waited=0
  until curl -sf http://localhost:11434 -o /dev/null 2>/dev/null; do
    sleep 2
    waited=$((waited + 2))
    if [[ $waited -ge 30 ]]; then
      log_warn "Ollama did not start within 30s. Trying to start manually..."
      ollama serve &>/dev/null &
      sleep 5
      break
    fi
  done
  if curl -sf http://localhost:11434 -o /dev/null 2>/dev/null; then
    log_success "Ollama is running."
  else
    die "Ollama failed to start. Check: ollama serve"
  fi
}
# ─── GPU Verification ──────────────────────────────────────────────────────────
# Report which GPU backend Ollama will use (GPU_TYPE/GPU_INFO come from
# detect_gpu in scripts/common.sh) and, for CUDA, run a quick smoke test.
# Removed: an unused `models_response` curl fetch (assigned, never read).
verify_gpu() {
  log_section "GPU verification"
  case "$GPU_TYPE" in
    metal)
      log_success "Apple Silicon Metal GPU — inference will be fast."
      ;;
    cuda)
      log_info "NVIDIA CUDA GPU detected: ${GPU_INFO:-unknown}"
      # Only smoke-test when the model is already local: `ollama run`
      # implicitly pulls missing models, and this function executes BEFORE
      # pull_models in main() — previously the "verification" could
      # trigger a surprise multi-GB download.
      if ! ollama list 2>/dev/null | grep -q '^qwen2.5:7b'; then
        log_info "qwen2.5:7b not pulled yet — skipping CUDA inference check."
      elif ollama run qwen2.5:7b "Say OK" &>/dev/null; then
        log_success "CUDA inference verified."
      else
        log_warn "Could not verify CUDA inference. Ollama may fall back to CPU."
      fi
      ;;
    rocm)
      log_info "AMD ROCm GPU detected: ${GPU_INFO:-unknown}"
      log_warn "ROCm support depends on your GPU and driver version."
      ;;
    none)
      log_warn "No GPU detected — Ollama will use CPU."
      log_warn "70B parameter models will be very slow on CPU. Consider qwen2.5:7b for testing."
      ;;
  esac
}
# ─── Pull Models ───────────────────────────────────────────────────────────────
# Pull the models listed in the manifest. On CPU-only hosts, restrict to
# the small models so setup doesn't stall on 70B downloads.
pull_models() {
  log_section "Pulling models"
  if [[ ! -f "$MANIFEST" ]]; then
    log_warn "No model manifest at $MANIFEST — skipping model pull."
    return
  fi
  if [[ "$GPU_TYPE" != "none" ]]; then
    bash "${SCRIPT_DIR}/scripts/pull-models.sh"
    return
  fi
  log_warn "CPU-only mode: skipping 70B models (too slow). Pulling small models only."
  log_warn "Edit $MANIFEST to select which models to pull, then run:"
  log_warn " bash ${SCRIPT_DIR}/scripts/pull-models.sh"
  log_warn "Pulling only: qwen2.5:7b and nomic-embed-text"
  ollama pull qwen2.5:7b
  ollama pull nomic-embed-text
}
# ─── Open WebUI ────────────────────────────────────────────────────────────────
# Bring up the Open WebUI container from docker/docker-compose.yml and
# block until it answers on :3030.
start_open_webui() {
  log_section "Open WebUI"
  ensure_docker_running
  local -a compose_args=(-f "$COMPOSE_FILE" --env-file "$ENV_FILE")
  log_step "Pulling Open WebUI image..."
  docker_compose "${compose_args[@]}" pull
  log_step "Starting Open WebUI..."
  docker_compose "${compose_args[@]}" up -d
  wait_for_http "http://localhost:3030" "Open WebUI" 90
}
# ─── Register services ─────────────────────────────────────────────────────────
register_services() {
# Publish this phase's endpoints to the shared ~/.env.services registry
# (write_env_service comes from scripts/common.sh).
write_env_service "OLLAMA_URL" "http://localhost:11434"
# Ollama's OpenAI-compatible API is served under /v1.
write_env_service "OLLAMA_API_URL" "http://localhost:11434/v1"
write_env_service "OPEN_WEBUI_URL" "http://localhost:3030"
log_success "Service URLs written to ~/.env.services"
}
# ─── Summary ───────────────────────────────────────────────────────────────────
# Final banner: endpoints, GPU backend, installed models, next steps.
print_llm_summary() {
  # Fix: OLLAMA_PRIMARY_MODEL is not set anywhere in this script, and under
  # `set -u` an unbound reference aborted the run right at the success
  # banner. Default to the manifest's primary model.
  local primary_model="${OLLAMA_PRIMARY_MODEL:-llama3.3:70b}"
  local model_list
  # tr only uses the first char of SET2, so this yields "a,b,c" with no
  # spaces; sed strips the trailing comma.
  model_list=$(ollama list 2>/dev/null | tail -n +2 | awk '{print $1}' | tr '\n' ', ' | sed 's/,$//')
  print_summary "P2 LLM — Ready" \
    "Ollama API" "http://localhost:11434" \
    "OpenAI compat" "http://localhost:11434/v1" \
    "Open WebUI" "http://localhost:3030" \
    "GPU" "${GPU_TYPE}" \
    "Models" "${model_list:-none pulled yet}"
  echo " Next steps:"
  echo " 1. Open http://localhost:3030 and create your admin account"
  echo " 2. Test a chat with $primary_model"
  echo " 3. Run benchmark: bash ${SCRIPT_DIR}/scripts/benchmark.sh"
  echo " 4. Run: ./setup.sh p3 (Voice pipeline)"
  echo ""
}
# ─── Main ──────────────────────────────────────────────────────────────────────
main() {
# Ordered pipeline: install → service up → GPU sanity check → models →
# frontend → registry → summary.
# NOTE(review): verify_gpu runs before pull_models, and its
# `ollama run qwen2.5:7b` smoke test will implicitly download the model
# if it is not yet present — confirm this ordering is intended.
preflight
install_ollama
setup_ollama_service
verify_gpu
pull_models
start_open_webui
register_services
print_llm_summary
}
main "$@"

View File

@@ -0,0 +1,26 @@
# Ollama systemd unit (HomeAI). install_service() copies unit files to
# /etc/systemd/system/<name>.service, i.e. as a plain (non-template) unit.
[Unit]
Description=Ollama AI inference server (HomeAI)
Documentation=https://ollama.com
# Wait for the network so binding 0.0.0.0:11434 succeeds.
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
# NOTE(review): %i only expands in templated units (name@.service).
# Installed as a plain unit, User= expands empty and the service runs
# as root — confirm the intended install name or hardcode a user.
User=%i
ExecStart=/usr/local/bin/ollama serve
Restart=always
RestartSec=5
# Environment
# Listen on all interfaces so other LAN hosts can reach the API.
Environment=OLLAMA_HOST=0.0.0.0:11434
Environment=OLLAMA_MODELS=/usr/share/ollama/.ollama/models
# Limits
LimitNOFILE=65536
# CUDA GPU support
# Uncomment and set if you have multiple GPUs:
# Environment=CUDA_VISIBLE_DEVICES=0
[Install]
# NOTE(review): default.target is unusual for a system unit —
# multi-user.target is conventional; confirm this is deliberate.
WantedBy=default.target

60
homeai-visual/setup.sh Normal file
View File

@@ -0,0 +1,60 @@
#!/usr/bin/env bash
# homeai-visual/setup.sh — P7: VTube Studio bridge + Live2D expressions
#
# Components:
# - vtube_studio.py — WebSocket client skill for OpenClaw
# - lipsync.py — amplitude-based lip sync
# - auth.py — VTube Studio token management
#
# Prerequisites:
# - P4 (homeai-agent) — OpenClaw running
# - P5 (homeai-character) — aria.json with live2d_expressions set
# - macOS: VTube Studio installed (Mac App Store)
# - Linux: N/A — VTube Studio is macOS/Windows/iOS only
# Linux dev can test the skill code but not the VTube Studio side
set -euo pipefail
# Resolve this script's directory and the repo root so the script can
# be invoked from any working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Shared helpers: logging, platform detection, service management.
source "${REPO_DIR}/scripts/common.sh"
log_section "P7: VTube Studio Bridge"
detect_platform
# Warn-only (no exit): Linux users can still develop the Python skill
# code against a mock server even though VTube Studio itself won't run.
if [[ "$OS_TYPE" == "linux" ]]; then
  log_warn "VTube Studio is not available on Linux."
  log_warn "This sub-project requires macOS (Mac Mini)."
fi
# ─── TODO: Implementation ──────────────────────────────────────────────────────
cat <<'EOF'
┌─────────────────────────────────────────────────────────────────┐
│ P7: homeai-visual — NOT YET IMPLEMENTED │
│ │
│ macOS only (VTube Studio is macOS/iOS/Windows) │
│ │
│ Implementation steps: │
│ 1. Install VTube Studio from Mac App Store │
│ 2. Enable WebSocket API in VTube Studio (Settings → port 8001) │
│ 3. Source/purchase Live2D model │
│ 4. Create expression hotkeys for 8 states │
│ 5. Implement skills/vtube_studio.py (WebSocket client) │
│ 6. Implement skills/lipsync.py (amplitude → MouthOpen param) │
│ 7. Implement skills/auth.py (token request + persistence) │
│ 8. Register vtube_studio skill with OpenClaw │
│ 9. Update aria.json live2d_expressions with hotkey IDs │
│ 10. Test all 8 expression states │
│ │
│ On Linux: implement Python skills, test WebSocket protocol │
│ with a mock server before connecting to real VTube Studio. │
│ │
│ Interface contracts: │
│ VTUBE_WS_URL=ws://localhost:8001 │
└─────────────────────────────────────────────────────────────────┘
EOF
log_info "P7 is not yet implemented. See homeai-visual/PLAN.md for details."
exit 0

93
homeai-voice/setup.sh Normal file
View File

@@ -0,0 +1,93 @@
#!/usr/bin/env bash
# homeai-voice/setup.sh — P3: Voice pipeline (STT / TTS / Wyoming)
#
# Components:
# - Whisper.cpp — speech-to-text (Apple Silicon / CUDA optimised)
# - wyoming-faster-whisper — Wyoming STT adapter (port 10300)
# - Kokoro TTS — fast text-to-speech via ONNX
# - wyoming-kokoro — Wyoming TTS adapter (port 10301)
# - Chatterbox TTS — voice cloning (MPS / CUDA)
# - openWakeWord — always-on wake word detection
#
# Prerequisites:
# - P1 (homeai-infra) completed — Home Assistant running
# - P2 (homeai-llm) completed — Ollama running
# - Python 3.10+ installed
# - macOS: Xcode Command Line Tools (for whisper.cpp compilation)
# - Linux: build-essential, cmake
set -euo pipefail
# Resolve script + repo roots so the script is location-independent.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Shared helpers (logging, platform/GPU detection, command_exists).
source "${REPO_DIR}/scripts/common.sh"
log_section "P3: Voice Pipeline"
detect_platform
# ─── Prerequisite check ────────────────────────────────────────────────────────
log_info "Checking prerequisites..."
# Soft checks: record failures but don't abort — this phase is a stub
# that only prints its implementation plan below.
prereq_ok=true
if ! curl -sf http://localhost:8123 -o /dev/null 2>/dev/null; then
  log_warn "Home Assistant (P1) not reachable at :8123"
  prereq_ok=false
fi
if ! curl -sf http://localhost:11434 -o /dev/null 2>/dev/null; then
  log_warn "Ollama (P2) not reachable at :11434"
  prereq_ok=false
fi
if ! command_exists python3; then
  log_warn "python3 not found — required for STT/TTS adapters"
  prereq_ok=false
fi
if [[ "$prereq_ok" == "false" ]]; then
  log_warn "Prerequisites not met. Complete P1 and P2 first."
fi
# ─── TODO: Implementation ──────────────────────────────────────────────────────
cat <<'EOF'
┌─────────────────────────────────────────────────────────────────┐
│ P3: homeai-voice — NOT YET IMPLEMENTED │
│ │
│ Implementation steps (see homeai-voice/PLAN.md): │
│ │
│ 1. whisper/install.sh │
│ - Clone + compile whisper.cpp (Metal/CUDA flags) │
│ - Download models: large-v3, medium.en │
│ - Install wyoming-faster-whisper Python package │
│ │
│ 2. tts/install-kokoro.sh │
│ - pip install kokoro-onnx │
│ - Install wyoming-kokoro adapter │
│ │
│ 3. tts/install-chatterbox.sh │
│ - pip install chatterbox-tts │
│ - Verify MPS (macOS) or CUDA (Linux) acceleration │
│ │
│ 4. wyoming/install.sh │
│ - Install wyoming-openwakeword │
│ - Configure wake word: hey_jarvis │
│ │
│ 5. scripts/launchd/ or systemd/ │
│ - wyoming-stt (port 10300) │
│ - wyoming-tts (port 10301) │
│ - wakeword daemon │
│ │
│ 6. wyoming/test-pipeline.sh │
│ - End-to-end smoke test │
│ │
│ Interface contracts: │
│ WYOMING_STT_URL=tcp://localhost:10300 │
│ WYOMING_TTS_URL=tcp://localhost:10301 │
└─────────────────────────────────────────────────────────────────┘
EOF
log_info "P3 is not yet implemented. See homeai-voice/PLAN.md for details."
exit 0

331
scripts/common.sh Normal file
View File

@@ -0,0 +1,331 @@
#!/usr/bin/env bash
# scripts/common.sh — Shared bash library for HomeAI setup scripts
# Source this file: source "$(dirname "$0")/../scripts/common.sh"
# Do NOT execute directly.
set -euo pipefail
# ─── Colours ───────────────────────────────────────────────────────────────────
# Enable ANSI colour codes only when stdout is a terminal; otherwise
# leave them empty so output redirected to files/pipes stays clean.
if [[ -t 1 ]]; then
  RED='\033[0;31m'; YELLOW='\033[0;33m'; GREEN='\033[0;32m'
  BLUE='\033[0;34m'; CYAN='\033[0;36m'; BOLD='\033[1m'; RESET='\033[0m'
else
  RED=''; YELLOW=''; GREEN=''; BLUE=''; CYAN=''; BOLD=''; RESET=''
fi
# ─── Logging ───────────────────────────────────────────────────────────────────
# Leveled log helpers. `echo -e` renders the colour escape codes;
# log_error goes to stderr; die logs then exits non-zero.
log_info() { echo -e "${BLUE}[INFO]${RESET} $*"; }
log_success() { echo -e "${GREEN}[OK]${RESET} $*"; }
log_warn() { echo -e "${YELLOW}[WARN]${RESET} $*"; }
log_error() { echo -e "${RED}[ERROR]${RESET} $*" >&2; }
log_section() { echo -e "\n${BOLD}${CYAN}══ $* ══${RESET}"; }
log_step() { echo -e "${CYAN}${RESET} $*"; }
die() { log_error "$*"; exit 1; }
# ─── OS & Architecture Detection ───────────────────────────────────────────────
detect_os() {
  # Set and export OS_TYPE (linux|macos); abort on any other kernel.
  local kernel
  kernel="$(uname -s)"
  case "$kernel" in
    Linux*)  OS_TYPE=linux ;;
    Darwin*) OS_TYPE=macos ;;
    *)       die "Unsupported OS: $kernel" ;;
  esac
  export OS_TYPE
}
detect_arch() {
  # Normalise `uname -m` into ARCH: amd64, arm64, or unknown.
  local machine
  machine="$(uname -m)"
  if [[ "$machine" == "x86_64" || "$machine" == "amd64" ]]; then
    ARCH=amd64
  elif [[ "$machine" == "arm64" || "$machine" == "aarch64" ]]; then
    ARCH=arm64
  else
    ARCH=unknown
  fi
  export ARCH
}
detect_distro() {
  # Set and export DISTRO to the /etc/os-release ID (e.g. "ubuntu",
  # "fedora"), or "unknown" on macOS / when the file is missing.
  # Requires OS_TYPE (detect_os) to have run first.
  DISTRO=unknown
  if [[ "$OS_TYPE" == "linux" && -f /etc/os-release ]]; then
    # Parse in a subshell so os-release's many other variables (NAME,
    # VERSION_ID, HOME_URL, ...) don't leak into the caller's shell,
    # where they could clobber unrelated script state.
    # shellcheck disable=SC1091
    DISTRO="$(. /etc/os-release && echo "${ID:-unknown}")"
  fi
  export DISTRO
}
detect_platform() {
  # Convenience wrapper: run all platform probes (sets OS_TYPE, ARCH,
  # DISTRO) and log a one-line summary. Call once at script start.
  detect_os
  detect_arch
  detect_distro
  log_info "Platform: ${OS_TYPE}/${ARCH} (${DISTRO})"
}
# ─── GPU Detection ─────────────────────────────────────────────────────────────
detect_gpu() {
  # Classify the accelerator (metal | cuda | rocm | none) and export
  # GPU_TYPE plus a human-readable GPU_INFO (may be empty).
  GPU_TYPE=none
  # Initialise GPU_INFO explicitly: the none/metal branches never set
  # it, and a later bare "$GPU_INFO" would trip `set -u`.
  GPU_INFO=""
  if [[ "$OS_TYPE" == "macos" && "$ARCH" == "arm64" ]]; then
    # Apple Silicon always has Metal.
    GPU_TYPE=metal
  elif command -v nvidia-smi &>/dev/null && nvidia-smi &>/dev/null; then
    # nvidia-smi must exist AND talk to a working driver.
    GPU_TYPE=cuda
    GPU_INFO=$(nvidia-smi --query-gpu=name --format=csv,noheader 2>/dev/null | head -1 || true)
  elif command -v rocm-smi &>/dev/null; then
    GPU_TYPE=rocm
    GPU_INFO=$(rocm-smi --showproductname 2>/dev/null | head -1 || true)
  fi
  export GPU_TYPE GPU_INFO
  log_info "GPU: ${GPU_TYPE}${GPU_INFO:+ — ${GPU_INFO}}"
}
# ─── Dependency Checking ───────────────────────────────────────────────────────
require_command() {
  # Abort via die() unless command $1 is on PATH; $2 overrides the
  # install hint shown in the error message.
  local cmd="$1"
  local hint="${2:-install $cmd}"
  command -v "$cmd" &>/dev/null || die "'$cmd' is required but not found. Hint: $hint"
}
# True iff $1 resolves to a command (builtin, function, or on PATH).
command_exists() {
  command -v "$1" >/dev/null 2>&1
}
require_min_version() {
  # Ensure `$1 --version` reports at least version $2 (sort -V compare).
  # Dies with a clear message when the version is too old OR cannot be
  # parsed at all.
  local cmd="$1" required="$2"
  local actual
  # `|| true`: without it, a version string with no digits makes grep
  # fail, and under `set -o pipefail` the whole assignment — and the
  # script — would die silently with no diagnostic.
  actual="$("$cmd" --version 2>&1 | grep -oE '[0-9]+\.[0-9]+(\.[0-9]+)?' | head -1 || true)"
  if [[ -z "$actual" ]]; then
    die "Could not determine version of '$cmd' (need $required+)"
  fi
  if [[ "$(printf '%s\n' "$required" "$actual" | sort -V | head -1)" != "$required" ]]; then
    die "$cmd version $required+ required, found $actual"
  fi
}
# ─── Package Management ────────────────────────────────────────────────────────
install_package() {
  # Install one package with the platform's native package manager.
  # Dies on unrecognised Linux distros and on macOS without Homebrew.
  # Requires OS_TYPE and DISTRO (detect_platform) to be set.
  local pkg="$1"
  log_step "Installing $pkg..."
  case "$OS_TYPE" in
    linux)
      case "$DISTRO" in
        ubuntu|debian|linuxmint|pop)
          sudo apt-get install -y -qq "$pkg" ;;
        fedora|rhel|centos|rocky|almalinux)
          sudo dnf install -y -q "$pkg" ;;
        arch|manjaro|endeavouros)
          sudo pacman -S --noconfirm --quiet "$pkg" ;;
        opensuse*)
          sudo zypper install -y -q "$pkg" ;;
        *)
          die "Unknown distro '$DISTRO' — install $pkg manually" ;;
      esac ;;
    macos)
      # Homebrew is a hard requirement on macOS; never auto-installed.
      if ! command_exists brew; then
        die "Homebrew not found. Install from https://brew.sh"
      fi
      brew install "$pkg" ;;
  esac
}
update_package_index() {
  # Refresh the native package index. Only apt/dnf families need it;
  # a no-op everywhere else (macOS, arch, opensuse).
  [[ "$OS_TYPE" == "linux" ]] || return 0
  case "$DISTRO" in
    ubuntu|debian|linuxmint|pop) sudo apt-get update -qq ;;
    fedora|rhel|centos|rocky|almalinux) sudo dnf check-update -q || true ;;
  esac
}
# ─── Docker ────────────────────────────────────────────────────────────────────
# True if the docker CLI is on PATH (does NOT imply the daemon runs).
check_docker_installed() {
  command_exists docker
}
# True if the Docker daemon is reachable by the current user.
check_docker_running() {
  docker info &>/dev/null
}
install_docker() {
  # Install Docker Engine on Linux via the official convenience script;
  # no-op if already installed. macOS has no headless path — users must
  # install Docker Desktop themselves, so we die with instructions.
  if check_docker_installed; then
    log_success "Docker already installed: $(docker --version)"
    return
  fi
  log_info "Installing Docker..."
  if [[ "$OS_TYPE" == "macos" ]]; then
    die "On macOS, install Docker Desktop manually: https://www.docker.com/products/docker-desktop/"
  fi
  # Linux: use the official convenience script. mktemp instead of a
  # fixed /tmp name avoids clobbering/symlink races in a world-writable
  # directory (the script is run with sudo).
  local installer
  installer="$(mktemp)" || die "mktemp failed"
  curl -fsSL https://get.docker.com -o "$installer"
  sudo sh "$installer"
  rm -f -- "$installer"
  # Add current user to docker group so docker works without sudo.
  sudo usermod -aG docker "$USER"
  log_warn "Added $USER to docker group. Log out and back in (or run 'newgrp docker') for it to take effect."
  # Start now and enable at boot.
  sudo systemctl enable --now docker
  log_success "Docker installed: $(docker --version)"
}
ensure_docker_running() {
  # Guarantee the Docker daemon is reachable or die. On Linux we try to
  # start it ourselves; on macOS only the user can launch Docker Desktop.
  if ! check_docker_running; then
    if [[ "$OS_TYPE" == "linux" ]]; then
      log_info "Starting Docker..."
      sudo systemctl start docker
      # Give the daemon a moment to accept connections before re-check.
      sleep 2
    else
      die "Docker Desktop is not running. Start it from your Applications folder."
    fi
  fi
  if ! check_docker_running; then
    die "Docker is not running. Start Docker and retry."
  fi
}
ensure_docker_network() {
  # Idempotently create the shared bridge network (default: "homeai")
  # that all HomeAI containers attach to.
  local network="${1:-homeai}"
  if docker network inspect "$network" &>/dev/null; then
    log_info "Docker network '$network' already exists."
    return
  fi
  log_step "Creating Docker network '$network'..."
  docker network create "$network"
  log_success "Network '$network' created."
}
# ─── Docker Compose ────────────────────────────────────────────────────────────
# Handles both `docker compose` (v2) and `docker-compose` (v1)
docker_compose() {
  # Dispatch to Compose v2 (`docker compose`) when available, falling
  # back to legacy v1 (`docker-compose`); dies if neither exists.
  # Exit status of the underlying compose invocation is propagated.
  if docker compose version &>/dev/null; then
    docker compose "$@"
    return
  fi
  if command_exists docker-compose; then
    docker-compose "$@"
    return
  fi
  die "docker compose not found. Ensure Docker is up to date."
}
# ─── Service Management ────────────────────────────────────────────────────────
install_service() {
  # Usage: install_service <name> <systemd_unit_file> <launchd_plist_file>
  # Install and start the platform-appropriate service definition.
  # A missing unit/plist file is a warning (skip), not an error, so
  # phases can call this unconditionally.
  local name="$1"
  local systemd_file="$2"
  local launchd_file="$3"
  if [[ "$OS_TYPE" == "linux" ]]; then
    if [[ ! -f "$systemd_file" ]]; then
      log_warn "No systemd unit file at $systemd_file — skipping service install."
      return
    fi
    log_step "Installing systemd service: $name"
    # Installed as a plain system unit named after <name>.
    sudo cp "$systemd_file" "/etc/systemd/system/${name}.service"
    sudo systemctl daemon-reload
    sudo systemctl enable --now "$name"
    log_success "Service '$name' enabled and started."
  elif [[ "$OS_TYPE" == "macos" ]]; then
    if [[ ! -f "$launchd_file" ]]; then
      log_warn "No launchd plist at $launchd_file — skipping service install."
      return
    fi
    # Installed as a per-user LaunchAgent (not a system daemon).
    local plist_dest="${HOME}/Library/LaunchAgents/$(basename "$launchd_file")"
    log_step "Installing launchd agent: $name"
    cp "$launchd_file" "$plist_dest"
    # NOTE(review): `launchctl load -w` is the legacy subcommand; newer
    # macOS prefers `launchctl bootstrap gui/$UID` — confirm target OS.
    launchctl load -w "$plist_dest"
    log_success "LaunchAgent '$name' installed and loaded."
  fi
}
uninstall_service() {
  # Stop, disable, and remove a service installed by install_service().
  # $2 optionally names the launchd plist label when it differs from $1.
  # All steps are best-effort (`|| true` / `rm -f`) so repeat runs are
  # idempotent and a half-removed service doesn't abort under set -e.
  local name="$1"
  local plist_label="${2:-$name}"
  if [[ "$OS_TYPE" == "linux" ]]; then
    sudo systemctl disable --now "$name" 2>/dev/null || true
    sudo rm -f "/etc/systemd/system/${name}.service"
    sudo systemctl daemon-reload
  elif [[ "$OS_TYPE" == "macos" ]]; then
    local plist_path="${HOME}/Library/LaunchAgents/${plist_label}.plist"
    launchctl unload -w "$plist_path" 2>/dev/null || true
    rm -f "$plist_path"
  fi
}
# ─── Environment Files ─────────────────────────────────────────────────────────
load_env() {
  # Export every KEY=VALUE assignment from an env file (default: .env)
  # into the current shell; silently no-op when the file is absent.
  local env_file="${1:-.env}"
  [[ -f "$env_file" ]] || return 0
  set -a
  # shellcheck disable=SC1090
  source "$env_file"
  set +a
}
load_env_services() {
  # Export the cross-phase service URLs recorded in ~/.env.services
  # (written by write_env_service). No-op when the file doesn't exist.
  local services_file="${HOME}/.env.services"
  if [[ -f "$services_file" ]]; then
    set -a; source "$services_file"; set +a
  fi
}
write_env_service() {
  # Append or update KEY=VALUE in ~/.env.services (idempotent).
  local key="$1" value="$2"
  local file="${HOME}/.env.services"
  touch "$file"
  if grep -q "^${key}=" "$file" 2>/dev/null; then
    # Update via a temp-file rewrite instead of `sed -i`: BSD sed on
    # macOS (the primary dev machine) requires `-i ''` and would fail
    # here, and sed metacharacters (&, |) in the value would corrupt
    # the substitution. Note: the updated key moves to the end of the
    # file; ordering in this file carries no meaning.
    local tmp
    tmp="$(mktemp)" || return 1
    grep -v "^${key}=" "$file" > "$tmp" || true
    printf '%s=%s\n' "$key" "$value" >> "$tmp"
    mv -- "$tmp" "$file"
  else
    printf '%s=%s\n' "$key" "$value" >> "$file"
  fi
}
# Bootstrap .env from .env.example if not present
bootstrap_env() {
  # Seed <dir>/.env from <dir>/.env.example on first run.
  # Never overwrites an existing .env; no-op when no template exists.
  local dir="${1:-.}"
  [[ -f "${dir}/.env" ]] && return 0
  [[ -f "${dir}/.env.example" ]] || return 0
  cp "${dir}/.env.example" "${dir}/.env"
  log_warn "Created ${dir}/.env from .env.example — fill in secrets before continuing."
}
# ─── Network Helpers ───────────────────────────────────────────────────────────
wait_for_http() {
  # Poll $1 every 3s until curl reports success or $3 seconds elapse.
  # Returns 1 on timeout — under `set -e` that aborts the caller
  # unless the call site guards it (e.g. `wait_for_http ... || true`).
  local url="$1" name="${2:-service}" timeout="${3:-60}"
  log_step "Waiting for $name at $url (up to ${timeout}s)..."
  local elapsed=0
  while ! curl -sf "$url" -o /dev/null; do
    sleep 3; elapsed=$((elapsed + 3))
    if [[ $elapsed -ge $timeout ]]; then
      log_warn "$name did not respond within ${timeout}s. It may still be starting."
      return 1
    fi
  done
  log_success "$name is up."
}
check_port_free() {
  # Return 0 if nothing is listening on TCP port $1, 1 if it's in use.
  # Uses ss where available (Linux); falls back to lsof on macOS, where
  # ss does not exist — previously the missing-ss error was swallowed
  # and every port was silently reported as free there.
  local port="$1"
  if command -v ss &>/dev/null; then
    if ss -tlnp "sport = :${port}" 2>/dev/null | grep -q ":${port}"; then
      return 1 # port in use
    fi
  elif command -v lsof &>/dev/null; then
    if lsof -nP -iTCP:"${port}" -sTCP:LISTEN >/dev/null 2>&1; then
      return 1 # port in use
    fi
  fi
  return 0
}
# ─── Utility ───────────────────────────────────────────────────────────────────
confirm() {
  # Interactive yes/no prompt; returns 0 only for "y"/"Y".
  # NOTE: ${response,,} lowercasing needs bash 4+ — macOS /bin/bash 3.2
  # cannot run this; the env-bash shebang must resolve to a newer bash.
  local msg="${1:-Continue?}"
  read -rp "$(echo -e "${YELLOW}${msg} [y/N]${RESET} ")" response
  [[ "${response,,}" == "y" ]]
}
print_summary() {
  # Render a titled key/value table from alternating arguments:
  #   print_summary "Title" "Key1" "Val1" "Key2" "Val2" ...
  # NOTE(review): an odd number of key/value args leaves $2 unset in the
  # final iteration and trips `set -u` — callers must pass full pairs.
  local title="$1"; shift
  echo ""
  log_section "$title"
  while [[ $# -gt 0 ]]; do
    local key="$1" val="$2"; shift 2
    printf "  ${BOLD}%-24s${RESET} %s\n" "$key" "$val"
  done
  echo ""
}

140
setup.sh Normal file
View File

@@ -0,0 +1,140 @@
#!/usr/bin/env bash
# setup.sh — HomeAI root orchestrator
#
# Usage:
# ./setup.sh all # run all phases in order
# ./setup.sh p1 # infra only
# ./setup.sh p2 # llm only
# ./setup.sh p1 p2 # multiple phases
# ./setup.sh status # show service health
#
# Phases: p1=infra p2=llm p3=voice p4=agent p5=character
# p6=esp32 p7=visual p8=images
set -euo pipefail
REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck source=scripts/common.sh
source "${REPO_DIR}/scripts/common.sh"
# ─── Phase definitions ─────────────────────────────────────────────────────────
# Phase ID → human-readable description (used by usage() and run_phase()).
# declare -A requires bash 4+ (macOS /bin/bash 3.2 cannot run this file).
declare -A PHASE_NAME=(
  [p1]="homeai-infra (Docker stack)"
  [p2]="homeai-llm (Ollama + Open WebUI)"
  [p3]="homeai-voice (STT / TTS / Wyoming)"
  [p4]="homeai-agent (OpenClaw + skills + mem0)"
  [p5]="homeai-character (Character Manager UI)"
  [p6]="homeai-esp32 (ESPHome firmware)"
  [p7]="homeai-visual (VTube Studio bridge)"
  [p8]="homeai-images (ComfyUI workflows)"
)
# Canonical execution order for `./setup.sh all`.
PHASE_ORDER=(p1 p2 p3 p4 p5 p6 p7 p8)
run_phase() {
  # Run a single sub-project's setup.sh, identified by phase ID p1..p8.
  # Dies on an unknown phase or a missing setup script.
  local phase="$1"
  local subdir
  # Map phase ID → sub-project directory. (The previous code first
  # computed a bogus "homeai-<digit>" default that was always
  # overwritten by this case — that dead assignment is removed.)
  case "$phase" in
    p1) subdir="${REPO_DIR}/homeai-infra" ;;
    p2) subdir="${REPO_DIR}/homeai-llm" ;;
    p3) subdir="${REPO_DIR}/homeai-voice" ;;
    p4) subdir="${REPO_DIR}/homeai-agent" ;;
    p5) subdir="${REPO_DIR}/homeai-character" ;;
    p6) subdir="${REPO_DIR}/homeai-esp32" ;;
    p7) subdir="${REPO_DIR}/homeai-visual" ;;
    p8) subdir="${REPO_DIR}/homeai-images" ;;
    *) die "Unknown phase: $phase" ;;
  esac
  local setup_script="${subdir}/setup.sh"
  if [[ ! -f "$setup_script" ]]; then
    die "Setup script not found: $setup_script"
  fi
  log_section "Phase ${phase^^}: ${PHASE_NAME[$phase]}"
  bash "$setup_script"
}
show_status() {
  # Probe each known service URL (2s timeout each) and print one line
  # per service. URLs come from ~/.env.services when present; the
  # literal defaults below match each project's standard port.
  log_section "HomeAI Service Status"
  load_env_services
  # "Display name|URL" pairs, split on the last '|' below.
  local services=(
    "Home Assistant|${HA_URL:-http://localhost:8123}"
    "Portainer|${PORTAINER_URL:-https://localhost:9443}"
    "Uptime Kuma|${UPTIME_KUMA_URL:-http://localhost:3001}"
    "Gitea|${GITEA_URL:-http://localhost:3000}"
    "code-server|${CODE_SERVER_URL:-http://localhost:8090}"
    "n8n|${N8N_URL:-http://localhost:5678}"
    "Ollama|${OLLAMA_URL:-http://localhost:11434}"
    "Open WebUI|${OPEN_WEBUI_URL:-http://localhost:3030}"
  )
  for entry in "${services[@]}"; do
    local name="${entry%%|*}"
    local url="${entry##*|}"
    # curl -f: treat HTTP errors as down; green mark if reachable.
    if curl -sf --max-time 2 "$url" -o /dev/null 2>/dev/null; then
      printf " ${GREEN}${RESET} %-20s %s\n" "$name" "$url"
    else
      printf " ${RED}${RESET} %-20s %s\n" "$name" "$url"
    fi
  done
  echo ""
}
usage() {
  # Print CLI help: accepted arguments, the phase table, and examples.
  echo ""
  echo " Usage: $0 <phase...> | all | status"
  echo ""
  echo " Phases:"
  for p in "${PHASE_ORDER[@]}"; do
    printf " %-6s %s\n" "$p" "${PHASE_NAME[$p]}"
  done
  echo ""
  echo " Examples:"
  echo " $0 all # setup everything in order"
  echo " $0 p1 p2 # infra + llm only"
  echo " $0 status # check service health"
  echo ""
}
# ─── Main ──────────────────────────────────────────────────────────────────────
main() {
  # Entry point: validate args, collect the requested phases, then run
  # them in the order given on the command line.
  if [[ $# -eq 0 ]]; then
    usage
    exit 0
  fi
  detect_platform
  # Bootstrap root .env if missing
  bootstrap_env "${REPO_DIR}"
  local phases=()
  for arg in "$@"; do
    case "$arg" in
      all) phases=("${PHASE_ORDER[@]}") ;;
      # NOTE(review): `status` exits immediately, discarding any phase
      # args that preceded it (`./setup.sh p1 status` never runs p1) —
      # confirm that's intended.
      status) show_status; exit 0 ;;
      p[1-8]) phases+=("$arg") ;;
      help|-h|--help) usage; exit 0 ;;
      *) die "Unknown argument: $arg. Run '$0 help' for usage." ;;
    esac
  done
  if [[ ${#phases[@]} -eq 0 ]]; then
    usage; exit 0
  fi
  for phase in "${phases[@]}"; do
    run_phase "$phase"
  done
  log_section "Setup complete"
  show_status
}
main "$@"