---
# homeai-llm/docker/docker-compose.yml
# P2 — Open WebUI
#
# Ollama runs NATIVELY (not in Docker) for GPU acceleration.
# This compose file only starts the Open WebUI frontend.
#
# Prerequisites:
#   - Ollama installed and running on the host at port 11434
#   - `homeai` Docker network exists (created by P1 setup)
#
# Usage:
#   docker compose -f docker/docker-compose.yml up -d

name: homeai-llm

services:
  # ─── Open WebUI ──────────────────────────────────────────────────────────
  open-webui:
    container_name: homeai-open-webui
    image: ghcr.io/open-webui/open-webui:main
    restart: unless-stopped
    ports:
      - "3030:8080"  # Exposed on 3030 to avoid conflict with Gitea (3000)
    volumes:
      # Persist WebUI data (users, chats, settings) on the host
      - ${DATA_DIR:-~/homeai-data}/open-webui:/app/backend/data
    environment:
      # Connect to Ollama on the host (reachable via host-gateway mapping below)
      - OLLAMA_BASE_URL=http://host.docker.internal:11434
      # NOTE(security): override WEBUI_SECRET_KEY in the environment or a
      # .env file — do not ship the placeholder default to production.
      - WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY:-changeme_random_32_char}
      - ENABLE_SIGNUP=true
      - DEFAULT_MODELS=llama3.3:70b
    extra_hosts:
      # Makes host.docker.internal resolve on Linux (built-in on Mac/Windows)
      - "host.docker.internal:host-gateway"
    networks:
      - homeai
    labels:
      - homeai.service=open-webui
      - homeai.url=http://localhost:3030

networks:
  # Shared external network created by P1 setup — not managed by this file
  homeai:
    external: true
    name: homeai