From 778188ed95ccf50d2e21938bf5b542d76e066f63 Mon Sep 17 00:00:00 2001
From: ben
Date: Sun, 12 Jan 2025 14:37:13 +0100
Subject: Initial commit, first public version.

---
 src/aichat/Dockerfile                 |  7 ++++
 src/aichat/config.yaml                |  8 +++++
 src/llm_provision/Dockerfile          | 12 +++++++
 src/llm_provision/entrypoint.sh       |  4 +++
 src/llm_provision/init_models.sh      | 17 ++++++++++
 src/nginx/nginx.conf                  | 61 +++++++++++++++++++++++++++++++++++
 src/tts/Dockerfile                    | 47 +++++++++++++++++++++++++++
 src/tts/download_voices_tts-1.sh      |  8 +++++
 src/tts/voice_to_speaker.default.yaml | 36 +++++++++++++++++++++
 src/whisper/Dockerfile                | 13 ++++++++
 10 files changed, 213 insertions(+)
 create mode 100644 src/aichat/Dockerfile
 create mode 100644 src/aichat/config.yaml
 create mode 100644 src/llm_provision/Dockerfile
 create mode 100644 src/llm_provision/entrypoint.sh
 create mode 100755 src/llm_provision/init_models.sh
 create mode 100644 src/nginx/nginx.conf
 create mode 100644 src/tts/Dockerfile
 create mode 100644 src/tts/download_voices_tts-1.sh
 create mode 100644 src/tts/voice_to_speaker.default.yaml
 create mode 100644 src/whisper/Dockerfile

(limited to 'src')

diff --git a/src/aichat/Dockerfile b/src/aichat/Dockerfile
new file mode 100644
index 0000000..df13f63
--- /dev/null
+++ b/src/aichat/Dockerfile
@@ -0,0 +1,7 @@
+FROM rust:latest
+
+RUN rustup target add x86_64-unknown-linux-musl
+RUN apt update && apt install -y musl-tools musl-dev
+RUN update-ca-certificates
+
+RUN cargo install --target x86_64-unknown-linux-musl aichat
diff --git a/src/aichat/config.yaml b/src/aichat/config.yaml
new file mode 100644
index 0000000..a74af2c
--- /dev/null
+++ b/src/aichat/config.yaml
@@ -0,0 +1,8 @@
+# see https://github.com/sigoden/aichat/blob/main/config.example.yaml
+
+model: ollama
+clients:
+- type: openai-compatible
+  name: ollama
+  api_base: http://localhost:11434/v1
+  api_key: __LLM_API_KEY__
diff --git a/src/llm_provision/Dockerfile b/src/llm_provision/Dockerfile
new file mode 100644
index 0000000..77701fe
--- /dev/null
+++ b/src/llm_provision/Dockerfile
@@ -0,0 +1,12 @@
+FROM debian:bookworm-slim
+
+ENV DEBIAN_FRONTEND=noninteractive
+RUN apt-get update
+RUN apt-get --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confnew" install bash curl jq
+
+ADD ./src/llm_provision/init_models.sh /init_models.sh
+ADD ./src/llm_provision/entrypoint.sh /entrypoint.sh
+RUN chmod 755 /entrypoint.sh
+
+ENTRYPOINT ["/entrypoint.sh"]
+#ENTRYPOINT ["tail", "-f", "/dev/null"] # to debug
diff --git a/src/llm_provision/entrypoint.sh b/src/llm_provision/entrypoint.sh
new file mode 100644
index 0000000..d0b6e85
--- /dev/null
+++ b/src/llm_provision/entrypoint.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+echo "pull models into ollama volumes"
+bash /init_models.sh
diff --git a/src/llm_provision/init_models.sh b/src/llm_provision/init_models.sh
new file mode 100755
index 0000000..0afbbd0
--- /dev/null
+++ b/src/llm_provision/init_models.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+OLLAMA_HOST="http://ollama:11434"
+
+IFS=',' read -r -a models_arr <<< "${MODELS}"
+
+## now loop through the above array
+for m in "${models_arr[@]}"
+do
+  curl -s "${OLLAMA_HOST}/api/tags" | jq '.models[].name' | grep ${m} > /dev/null
+  if [[ $? -ne 0 ]]
+  then
+    curl -s "${OLLAMA_HOST}/api/pull" -d "{\"model\": \"${m}\"}"
+  else
+    echo "${m} already installed"
+  fi
+done
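Aside (not part of the patch): init_models.sh above reads a comma-separated MODELS variable, asks the Ollama API which models are already present via /api/tags, and only calls /api/pull for the ones that are missing. A minimal single-model sketch of that check-then-pull step follows; the host value and the model name llama3 are illustrative assumptions, not taken from the repository.

#!/usr/bin/env bash
# Sketch only: the same check-then-pull logic as init_models.sh, for one model.
# OLLAMA_HOST and the model name are assumptions for illustration.
OLLAMA_HOST="http://ollama:11434"
m="llama3"

# /api/tags lists installed models; grep -q keeps the check quiet
if ! curl -s "${OLLAMA_HOST}/api/tags" | jq -r '.models[].name' | grep -q "^${m}"
then
    # /api/pull downloads the model into the Ollama volume
    curl -s "${OLLAMA_HOST}/api/pull" -d "{\"model\": \"${m}\"}"
else
    echo "${m} already installed"
fi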
diff --git a/src/nginx/nginx.conf b/src/nginx/nginx.conf
new file mode 100644
index 0000000..2dc6d52
--- /dev/null
+++ b/src/nginx/nginx.conf
@@ -0,0 +1,61 @@
+events{}
+http {
+  server_tokens off;
+  client_max_body_size 200m;
+
+  server {
+    listen 11434;
+    set $deny 1;
+    if ($http_authorization = "Bearer $API_KEY") {
+      set $deny 0;
+    }
+    if ($deny) {
+      return 403;
+    }
+    location / {
+      proxy_pass http://ollama:11434;
+      proxy_set_header Host $host;
+      proxy_set_header X-Real-IP $remote_addr;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto $scheme;
+    }
+  }
+  server {
+    listen 8000;
+    set $deny 1;
+    if ($http_authorization = "Bearer $API_KEY") {
+      set $deny 0;
+    }
+    if ($deny) {
+      return 403;
+    }
+    location / {
+      proxy_pass http://openedai-speech:8000;
+      proxy_set_header Host $host;
+      proxy_set_header X-Real-IP $remote_addr;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto $scheme;
+    }
+  }
+  server {
+    listen 8001;
+    set $deny 1;
+    if ($http_authorization = "Bearer $API_KEY") {
+      set $deny 0;
+    }
+    if ($deny) {
+      return 403;
+    }
+    location / {
+      proxy_pass http://faster-whisper-server:8000;
+      proxy_set_header Host $host;
+      proxy_set_header X-Real-IP $remote_addr;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto $scheme;
+      proxy_read_timeout 180;
+      proxy_http_version 1.1;
+      proxy_set_header Upgrade $http_upgrade;
+      proxy_set_header Connection "upgrade";
+    }
+  }
+}
diff --git a/src/tts/Dockerfile b/src/tts/Dockerfile
new file mode 100644
index 0000000..1636bd2
--- /dev/null
+++ b/src/tts/Dockerfile
@@ -0,0 +1,47 @@
+FROM python:3.11-slim
+
+RUN --mount=type=cache,target=/root/.cache/pip pip install -U pip
+
+ARG TARGETPLATFORM
+RUN < /dev/null
+for i in $models ; do
+  [ ! -e "voices/$i.onnx" ] && piper --data-dir voices --download-dir voices --model $i < /dev/null > /dev/null
+done
+
diff --git a/src/tts/voice_to_speaker.default.yaml b/src/tts/voice_to_speaker.default.yaml
new file mode 100644
index 0000000..53acda6
--- /dev/null
+++ b/src/tts/voice_to_speaker.default.yaml
@@ -0,0 +1,36 @@
+# Use https://rhasspy.github.io/piper-samples/ to configure
+tts-1:
+  alloy:
+    model: voices/en_US-libritts_r-medium.onnx
+    speaker: 79
+  siwis:
+    model: voices/fr_FR-siwis-medium.onnx
+    speaker: 0
+  tom:
+    model: voices/fr_FR-tom-medium.onnx
+    speaker: 0
+  pierre:
+    model: voices/fr_FR-upmc-medium.onnx
+    speaker: 1
+  jessica:
+    model: voices/fr_FR-upmc-medium.onnx
+    speaker: 0
+  alba:
+    model: voices/en_GB-alba-medium.onnx
+    speaker: 0
+  jack:
+    model: voices/en_GB-northern_english_male-medium.onnx
+    speaker: 0
+  john:
+    model: voices/en_US-john-medium.onnx
+    speaker: 0
+  bryce:
+    model: voices/en_US-bryce-medium.onnx
+    speaker: 0
+  ryan:
+    model: voices/en_US-ryan-high.onnx
+    speaker: 0
+  echo:
+    model: voices/en_US-libritts_r-medium.onnx
+    speaker: 134
+
diff --git a/src/whisper/Dockerfile b/src/whisper/Dockerfile
new file mode 100644
index 0000000..2909803
--- /dev/null
+++ b/src/whisper/Dockerfile
@@ -0,0 +1,13 @@
+FROM debian:bookworm-slim
+
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
+    sudo \
+    python3 \
+    python3-distutils \
+    python3-pip \
+    ffmpeg
+
+RUN pip install -U openai-whisper --break-system-packages
+WORKDIR /app
+
+CMD ["whisper"]
--
cgit v1.2.3
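Usage note (not part of the patch): nginx.conf returns 403 unless the Authorization header equals "Bearer $API_KEY", and otherwise forwards port 11434 to ollama, 8000 to openedai-speech and 8001 to faster-whisper-server. The sketch below shows how a client might call the three proxied services; it assumes the proxy ports are published on localhost, that API_KEY holds the key substituted into the nginx config, and that the backends expose their usual OpenAI-compatible routes.

#!/usr/bin/env bash
# Sketch only: example calls through the nginx reverse proxy. Hostname, key
# value and the exact backend routes are assumptions, not defined in this commit.
API_KEY="change-me"

# LLM: Ollama's OpenAI-compatible API behind port 11434
curl -s http://localhost:11434/v1/models \
     -H "Authorization: Bearer ${API_KEY}"

# TTS: openedai-speech behind port 8000, using the "alloy" voice mapped in
# voice_to_speaker.default.yaml
curl -s http://localhost:8000/v1/audio/speech \
     -H "Authorization: Bearer ${API_KEY}" \
     -H "Content-Type: application/json" \
     -d '{"model": "tts-1", "voice": "alloy", "input": "Hello from the proxy"}' \
     -o speech.mp3

# STT: faster-whisper-server behind port 8001, transcribing the file generated above
curl -s http://localhost:8001/v1/audio/transcriptions \
     -H "Authorization: Bearer ${API_KEY}" \
     -F file=@speech.mp3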