Add whisper launch args
Dockerfile  CHANGED  +3 -1
@@ -1,6 +1,7 @@
 FROM nvidia/cuda:12.1.1-devel-ubuntu22.04
 ARG MODEL
 ARG IMGMODEL
+ARG WHISPERMODEL
 ARG MMPROJ
 ARG MODEL_NAME
 ARG ADDITIONAL
@@ -13,5 +14,6 @@ RUN make -j$(nproc) LLAMA_OPENBLAS=1 LLAMA_CUBLAS=1 LLAMA_PORTABLE=1 LLAMA_COLAB
 RUN wget -O model.ggml $MODEL || true
 RUN wget -O imgmodel.ggml $IMGMODEL || true
 RUN wget -O mmproj.ggml $MMPROJ || true
-
+RUN wget -O whispermodel.ggml $WHISPERMODEL || true
+CMD /bin/python3 ./koboldcpp.py --model model.ggml --whispermodel whispermodel.ggml --sdmodel imgmodel.ggml --sdthreads 4 --sdquant --sdclamped --mmproj mmproj.ggml $ADDITIONAL --port 7860 --hordemodelname $MODEL_NAME --hordemaxctx 1 --hordegenlen 1 --preloadstory default.json --ignoremissing
 
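For context, a minimal sketch of how the new build argument could be supplied when building this image yourself; the URLs and values below are placeholder assumptions, not values from this commit, and the remaining build args (IMGMODEL, MMPROJ, MODEL_NAME, ADDITIONAL) are passed the same way:

    # Sketch only: example build invocation with placeholder (assumed) model URLs
    docker build \
      --build-arg MODEL="https://example.com/text-model.gguf" \
      --build-arg WHISPERMODEL="https://example.com/whisper-model.ggml" \
      -t koboldcpp-space .

Because each wget ends in `|| true` and the launch command passes --ignoremissing, a build arg left unset simply results in that model download being skipped and the missing file being ignored at launch, rather than a failed build.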