- Root setup.sh orchestrator with per-phase dispatch (./setup.sh p1..p8 | all | status)
- Makefile convenience targets (make infra, make llm, make status, etc.)
- scripts/common.sh: shared bash library for OS detection, Docker helpers, service management (launchd/systemd), package install, env management
- .env.example + .gitignore: shared config template and secret exclusions

P1 (homeai-infra): full implementation
- docker-compose.yml: Uptime Kuma, code-server, n8n
- Note: Home Assistant, Portainer, Gitea are pre-existing instances
- setup.sh: Docker install, homeai network, container health checks

P2 (homeai-llm): full implementation
- Ollama native install with CUDA/ROCm/Metal auto-detection
- launchd plist (macOS) + systemd service (Linux) for auto-start
- scripts/pull-models.sh: idempotent model puller from manifest
- scripts/benchmark.sh: tokens/sec measurement per model
- Open WebUI on port 3030 (avoids Gitea :3000 conflict)

P3-P8: working stubs with prerequisite checks and TODO sections

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
66 lines
3.3 KiB
Bash
66 lines
3.3 KiB
Bash
#!/usr/bin/env bash
# homeai-images/setup.sh — P8: ComfyUI + SDXL/Flux + character LoRA
#
# Components:
# - ComfyUI — image generation UI + API (port 8188)
# - SDXL, Flux.1 — base checkpoints
# - ControlNet — pose/depth guidance
# - aria LoRA — character-consistent fine-tune
# - comfyui.py skill — OpenClaw integration
# - workflows/ — saved workflows (quick, portrait, scene, upscale)
#
# Prerequisites:
# - P4 (homeai-agent) — OpenClaw running (for skill integration)
# - Python 3.10+
# - macOS: Metal GPU (MPS) — runs natively
# - Linux: CUDA GPU recommended (NVIDIA); CPU is very slow for image gen
#
# ComfyUI runs NATIVELY (not Docker) for GPU acceleration.

set -euo pipefail

# Resolve this script's directory and the repo root so the script works no
# matter where it is invoked from.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
readonly SCRIPT_DIR REPO_DIR

# Guard the sourcing of the shared library: under `set -e` a bare `source`
# of a missing file aborts with an unhelpful error message.
if [[ ! -f "${REPO_DIR}/scripts/common.sh" ]]; then
  printf 'error: %s not found — run from a full repo checkout\n' \
    "${REPO_DIR}/scripts/common.sh" >&2
  exit 1
fi
# Provides log_section/log_info/log_warn, detect_platform, detect_gpu
# (which set GPU_TYPE and GPU_INFO) — see scripts/common.sh.
# shellcheck source=/dev/null
source "${REPO_DIR}/scripts/common.sh"

log_section "P8: Image Generation (ComfyUI)"
detect_platform
detect_gpu

# GPU_INFO may be empty; ${GPU_INFO:+ — ${GPU_INFO}} omits the separator then.
log_info "GPU: ${GPU_TYPE}${GPU_INFO:+ — ${GPU_INFO}}"

if [[ "$GPU_TYPE" == "none" ]]; then
  log_warn "No GPU detected. ComfyUI will use CPU — image generation will be very slow."
  log_warn "On Linux: install CUDA drivers if you have an NVIDIA card."
fi

# ─── TODO: Implementation ──────────────────────────────────────────────────────
# Quoted heredoc delimiter ('EOF') — banner text is printed literally,
# with no parameter or command expansion.
cat <<'EOF'

┌─────────────────────────────────────────────────────────────────┐
│ P8: homeai-images — NOT YET IMPLEMENTED │
│ │
│ Implementation steps: │
│ 1. Clone ComfyUI: git clone https://github.com/comfyanonymous/ComfyUI ~/ComfyUI │
│ 2. Create venv + install PyTorch (MPS for macOS, CUDA for Linux) │
│ 3. scripts/download-models.sh — SDXL, Flux.1, VAE, ControlNet │
│ 4. Create launchd/systemd service (port 8188) │
│ 5. Create workflows/quick.json, portrait.json, scene.json │
│ 6. Implement skills/comfyui.py OpenClaw integration │
│ 7. (Later) Train aria LoRA with kohya_ss │
│ │
│ Model sizes (download bandwidth required): │
│ SDXL base ~6.5 GB │
│ Flux.1-dev ~24 GB │
│ Flux.1-schnell ~24 GB │
│ ControlNet ~1.5 GB each │
│ │
│ Interface contracts: │
│ COMFYUI_URL=http://localhost:8188 │
└─────────────────────────────────────────────────────────────────┘

EOF

log_info "P8 is not yet implemented. See homeai-images/PLAN.md for details."
exit 0
|