Add self-deploying setup scripts for all sub-projects (P1-P8)

- Root setup.sh orchestrator with per-phase dispatch (./setup.sh p1..p8 | all | status)
- Makefile convenience targets (make infra, make llm, make status, etc.)
- scripts/common.sh: shared bash library for OS detection, Docker helpers,
  service management (launchd/systemd), package install, env management
- .env.example + .gitignore: shared config template and secret exclusions

P1 (homeai-infra): full implementation
- docker-compose.yml: Uptime Kuma, code-server, n8n
- Note: Home Assistant, Portainer, Gitea are pre-existing instances
- setup.sh: Docker install, homeai network, container health checks

P2 (homeai-llm): full implementation
- Ollama native install with CUDA/ROCm/Metal auto-detection
- launchd plist (macOS) + systemd service (Linux) for auto-start
- scripts/pull-models.sh: idempotent model puller from manifest
- scripts/benchmark.sh: tokens/sec measurement per model
- Open WebUI on port 3030 (avoids Gitea :3000 conflict)

P3-P8: working stubs with prerequisite checks and TODO sections

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Aodhan Collins
2026-03-04 21:10:53 +00:00
parent 38247d7cc4
commit 7978eaea14
23 changed files with 2525 additions and 0 deletions

View File

@@ -0,0 +1,88 @@
#!/usr/bin/env bash
# scripts/benchmark.sh — Measure inference speed of installed Ollama models.
#
# Reports tokens/sec per model; results are recorded in benchmark-results.md.
#
# Usage:
#   bash scripts/benchmark.sh                # benchmark every installed model
#   bash scripts/benchmark.sh qwen2.5:7b     # benchmark one model
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
source "${REPO_DIR}/scripts/common.sh"

RESULTS_FILE="${SCRIPT_DIR}/../benchmark-results.md"
PROMPT="Tell me a short story about a robot who loves cooking. Keep it to exactly 200 words."

# Preconditions: the ollama CLI must be installed and the daemon reachable.
command_exists ollama || die "Ollama not found."
curl -sf http://localhost:11434 -o /dev/null || die "Ollama is not running."
# _now_ms — print the current wall-clock time in whole milliseconds.
# Portability fix: the original used `date +%s%3N`, but %N is a GNU extension;
# BSD/macOS date prints the literal "3N", producing garbage arithmetic (and
# this project explicitly targets macOS). Prefer bash 5's EPOCHREALTIME, fall
# back to GNU date, and degrade to whole-second resolution on BSD.
_now_ms() {
  if [[ -n "${EPOCHREALTIME:-}" ]]; then
    local sec=${EPOCHREALTIME%.*} frac=${EPOCHREALTIME#*.}
    # 10# forces base-10 so a fraction like "094" is not parsed as octal
    echo $(( sec * 1000 + 10#${frac:0:3} ))
  else
    local ns
    ns=$(date +%s%N 2>/dev/null || true)
    if [[ "$ns" =~ ^[0-9]+$ ]]; then
      echo $(( ns / 1000000 ))
    else
      # BSD date: no sub-second support — fall back to seconds
      echo $(( $(date +%s) * 1000 ))
    fi
  fi
}

# benchmark_model MODEL — time one generation of $PROMPT through MODEL.
# Globals:  PROMPT (read)
# Outputs:  markdown table row on stdout (the caller tees stdout into
#           RESULTS_FILE); the human-readable summary goes to stderr so it
#           does not pollute the markdown table.
# Returns:  0 always (a failed model is reported as an ERROR row)
benchmark_model() {
  local model="$1"
  log_step "Benchmarking $model..."
  local start end elapsed
  start=$(_now_ms)
  local response
  response=$(ollama run "$model" "$PROMPT" 2>&1) || {
    log_error "Model $model failed to run."
    echo "| $model | ERROR | — |"
    return
  }
  end=$(_now_ms)
  elapsed=$(( end - start ))
  (( elapsed > 0 )) || elapsed=1  # guard divide-by-zero on sub-ms timings
  local word_count
  word_count=$(echo "$response" | wc -w)
  local tokens_est=$(( word_count * 4 / 3 )) # rough estimate: 1 token ≈ 0.75 words
  local elapsed_sec
  elapsed_sec=$(echo "scale=1; $elapsed / 1000" | bc)
  local tps
  tps=$(echo "scale=1; $tokens_est / ($elapsed / 1000)" | bc 2>/dev/null || echo "?")
  # Human-readable line → stderr; only the markdown row belongs on stdout.
  printf " %-30s %6s tok/s (%ss)\n" "$model" "$tps" "$elapsed_sec" >&2
  echo "| \`$model\` | ${tps} tok/s | ${elapsed_sec}s |"
}
log_section "Ollama Benchmark"
log_info "Prompt: '$PROMPT'"
echo ""

# A model name on the command line overrides the installed-model scan.
if [[ -n "${1:-}" ]]; then
  models=("$@")
else
  # NAME is the first column of `ollama list`; skip the header row.
  mapfile -t models < <(ollama list 2>/dev/null | tail -n +2 | awk '{print $1}')
fi

[[ ${#models[@]} -gt 0 ]] || die "No models installed. Run: bash scripts/pull-models.sh"

# Start a fresh results file with the markdown table header.
{
  printf '%s\n' "# Ollama Benchmark Results"
  printf '%s\n' "> Generated: $(date)"
  printf '\n'
  printf '%s\n' "| Model | Speed | Time for ~200 tok |"
  printf '%s\n' "|---|---|---|"
} > "$RESULTS_FILE"

# Each benchmark row is shown on the console and appended to the file.
for m in "${models[@]}"; do
  benchmark_model "$m" | tee -a "$RESULTS_FILE"
done
echo "" >> "$RESULTS_FILE"

log_success "Results written to $RESULTS_FILE"
echo ""
cat "$RESULTS_FILE"

View File

@@ -0,0 +1,86 @@
#!/usr/bin/env bash
# scripts/pull-models.sh — Pull all Ollama models from the manifest
#
# Usage:
#   bash scripts/pull-models.sh                    # pull all models
#   bash scripts/pull-models.sh nomic-embed-text   # pull specific model
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
source "${REPO_DIR}/scripts/common.sh"

MANIFEST="${SCRIPT_DIR}/../ollama-models.txt"

# Preconditions: CLI installed and daemon answering on its default port.
command_exists ollama || die "Ollama not found. Run: bash homeai-llm/setup.sh first."
curl -sf http://localhost:11434 -o /dev/null || die "Ollama is not running. Start it first."

# Explicit model names on the command line bypass the manifest entirely.
if [[ $# -gt 0 ]]; then
  for requested in "$@"; do
    log_info "Pulling $requested..."
    ollama pull "$requested"
    log_success "Pulled $requested"
  done
  exit 0
fi
# Pull all models from manifest
log_section "Pulling Ollama models"

# Fail fast with a clear message instead of set -e tripping on the redirect.
[[ -f "$MANIFEST" ]] || die "Model manifest not found: $MANIFEST"

# Snapshot the installed-model NAME column once (e.g. "qwen2.5:7b"), rather
# than spawning `ollama list` for every manifest entry.
installed=$(ollama list 2>/dev/null | tail -n +2 | awk '{print $1}')

# is_installed MODEL — exact (fixed-string) match against the snapshot.
# The original used unanchored regex prefix matching, which false-positives:
# "llama" would match "llama3", and dots in names like "qwen2.5" match any
# character. An entry without an explicit tag matches any installed tag of
# that name (preserving the original's intent).
is_installed() {
  local want="$1" have
  while IFS= read -r have; do
    [[ -z "$have" ]] && continue
    if [[ "$want" == *:* ]]; then
      [[ "$have" == "$want" ]] && return 0
    else
      [[ "${have%%:*}" == "$want" ]] && return 0
    fi
  done <<< "$installed"
  return 1
}

total=0; pulled=0; skipped=0; failed=0
while IFS= read -r line || [[ -n "$line" ]]; do
  line="${line%$'\r'}"   # tolerate CRLF manifests
  # Skip comments and blank lines
  [[ "$line" =~ ^[[:space:]]*# ]] && continue
  [[ -z "${line// }" ]] && continue
  model="${line%% *}"  # strip any trailing comment
  total=$((total + 1))
  if is_installed "$model"; then
    log_info "Already present: $model — skipping"
    skipped=$((skipped + 1))
    continue
  fi
  log_step "Pulling $model..."
  if ollama pull "$model"; then
    log_success "Pulled $model"
    pulled=$((pulled + 1))
    # Record the pull so a duplicate manifest entry is skipped, not re-pulled.
    installed+=$'\n'"$model"
  else
    log_error "Failed to pull $model"
    failed=$((failed + 1))
  fi
done < "$MANIFEST"

echo ""
log_info "Pull complete: ${pulled} pulled, ${skipped} already present, ${failed} failed (of ${total} total)"
if [[ $failed -gt 0 ]]; then
  log_warn "Some models failed to pull. Check your internet connection and retry."
  exit 1
fi
echo ""
log_info "Installed models:"
ollama list