# PMAlpha / start.py
# Author: Sergidev — commit f3be2ab (verified), 507 bytes
# (Header recovered from Hugging Face page chrome; kept as comments so the file parses.)
"""Bootstrap a Hugging Face Space: rebuild llama-cpp-python with CUDA, then serve.

Steps:
1. Uninstall any pre-built (CPU-only) llama-cpp-python wheel.
2. Set CMake flags so the reinstall compiles llama.cpp with cuBLAS support.
3. Reinstall llama-cpp-python from source with those flags.
4. Launch the ASGI app (``app:app``) on port 7860, the port Spaces routes to.
"""
import os
import subprocess
import sys

# Use argument lists (shell=False) and this interpreter's own pip so we never
# depend on shell parsing or on whichever `pip` happens to be first on PATH.
# Best-effort: the package may not be installed yet, so don't check the result.
subprocess.run(
    [sys.executable, "-m", "pip", "uninstall", "-y", "llama-cpp-python"],
)

# Environment variables read by llama-cpp-python's build: enable cuBLAS and
# force CMake to re-run so the flags actually take effect.
os.environ["CMAKE_ARGS"] = "-DLLAMA_CUBLAS=on"
os.environ["FORCE_CMAKE"] = "1"

# check=True: if the CUDA build fails we must abort startup rather than launch
# the app with no llama-cpp-python installed (we just removed the old wheel).
subprocess.run(
    [sys.executable, "-m", "pip", "install", "llama-cpp-python"],
    check=True,
)

# Start the server; check=True surfaces a non-zero exit in the Space logs.
subprocess.run(
    [sys.executable, "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"],
    check=True,
)