# docker-compose.yml — self-hosted LLM stack (inference, TTS, STT) behind an
# nginx reverse proxy with API-key auth.
#
# Provenance: reconstructed from commit 778188ed95ccf50d2e21938bf5b542d76e066f63
# ("Initial commit, first public version.", ben, Sun 12 Jan 2025).
services:
  # Ollama LLM inference server (GPU-backed); model store persisted in the
  # `ollama` named volume.
  ollama:
    image: ollama/ollama
    volumes:
      - ollama:/root/.ollama
    restart: always
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    healthcheck:
      # String form is run via CMD-SHELL inside the container.
      test: ollama --version && ollama ps || exit 1
      interval: 60s
      retries: 5
      start_period: 20s
      timeout: 10s

  # OpenAI-compatible text-to-speech server, built from the local Dockerfile.
  openedai-speech:
    build:
      dockerfile: src/tts/Dockerfile
    environment:
      - TTS_HOME=voices
    volumes:
      - voices:/app/voices
      - speech-config:/app/config
    restart: unless-stopped
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    healthcheck:
      test: curl --fail http://localhost:8000 || exit 1
      interval: 60s
      retries: 5
      start_period: 10s
      timeout: 10s

  # One-shot provisioning job: pulls the listed models into the (healthy)
  # ollama service, then exits.
  llm_provision:
    build:
      dockerfile: src/llm_provision/Dockerfile
    environment:
      - MODELS=qwen2.5:latest,qwen2.5-coder:32b,nomic-embed-text:latest
    # FIX: must be quoted — bare `no` is YAML 1.1 boolean false, which is not
    # a valid Compose restart policy; the string "no" is what Compose expects.
    restart: "no"
    depends_on:
      ollama:
        # Wait for ollama's healthcheck before provisioning; restart this job
        # whenever ollama is restarted (Compose spec depends_on long syntax).
        condition: service_healthy
        restart: true
    links:
      - ollama

  # Build-only entry for the aichat client image (no runtime config here).
  aichat-build:
    build:
      dockerfile: src/aichat/Dockerfile

  # Speech-to-text (Whisper) server with CUDA; Hugging Face model downloads
  # cached in the `hf-hub-cache` named volume.
  faster-whisper-server:
    image: fedirz/faster-whisper-server:latest-cuda
    environment:
      - WHISPER__MODEL=Systran/faster-whisper-large-v3
    volumes:
      - hf-hub-cache:/home/ubuntu/.cache/huggingface/hub
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    healthcheck:
      # Bash /dev/tcp probe of port 8000 — presumably used because the image
      # ships no curl/wget; TODO confirm.
      test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8000' || exit 1
      interval: 30s
      timeout: 15s
      retries: 3

  # Reverse proxy fronting all services. The config template is rendered by
  # the nginx image's envsubst support, injecting API_KEY from the host's
  # LLM_API_KEY environment variable.
  nginx:
    image: nginx
    volumes:
      - ./src/nginx/nginx.conf:/etc/nginx/templates/nginx.conf.template
    environment:
      - NGINX_ENVSUBST_OUTPUT_DIR=/etc/nginx
      - API_KEY=${LLM_API_KEY}
    depends_on:
      - openedai-speech
      - faster-whisper-server
      - ollama
    links:
      - ollama
      - faster-whisper-server
      - openedai-speech
    # Quoted so port pairs are never misread as YAML 1.1 sexagesimal ints.
    # NOTE(review): which backend each port proxies to is defined in
    # src/nginx/nginx.conf — verify there, not here.
    ports:
      - "11434:11434"
      - "8000:8000"
      - "8001:8001"

volumes:
  ollama:
  voices:
  speech-config:
  hf-hub-cache: