Sergidev committed on
Commit 5325562
1 Parent(s): 365dec9

Update start.py

Files changed (1)
  1. start.py +2 -36
start.py CHANGED
@@ -1,41 +1,7 @@
- import os
  import subprocess
- import platform
- import urllib.request
- import tarfile

- # Uninstall the existing llama-cpp-python package
+ # commented because the existing llama-cpp-python package was removed from requirements.txt
  subprocess.run("pip uninstall -y llama-cpp-python", shell=True)

- # Check if CUDA compiler is available
- cuda_path = os.environ.get("CUDA_PATH", "/usr/local/cuda")
- nvcc_path = os.path.join(cuda_path, "bin", "nvcc")
-
- if not os.path.exists(nvcc_path):
-     # CUDA compiler not found, download and install CUDA toolkit
-     print("CUDA compiler not found, downloading and installing CUDA toolkit...")
-
-     # Determine the appropriate CUDA toolkit URL based on the platform
-     system = platform.system()
-     machine = platform.machine()
-     if system == "Linux" and machine == "x86_64":
-         cuda_url = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"
-     else:
-         raise ValueError(f"Unsupported platform: {system} {machine}")
-
-     # Download the CUDA toolkit installer
-     cuda_installer, _ = urllib.request.urlretrieve(cuda_url)
-
-     # Install the CUDA toolkit
-     subprocess.run(f"sh {cuda_installer} --silent --toolkit --override", shell=True)
-
-     # Update the CUDA path and compiler path
-     cuda_path = "/usr/local/cuda"
-     nvcc_path = os.path.join(cuda_path, "bin", "nvcc")
-
- # Set the CMAKE_CUDA_COMPILER environment variable
- os.environ["CMAKE_CUDA_COMPILER"] = nvcc_path
-
- # Install llama-cpp-python with CUDA support
- install_command = "pip install llama-cpp-python --install-option='--cmake-args=-DLLAMA_CUDA=on'"
+ install_command = "CMAKE_ARGS='-DLLAMA_CUDA=on -DCMAKE_CUDA_COMPILER=/home/user/local/cuda/bin/nvcc' pip install llama-cpp-python"
  subprocess.run(install_command, shell=True)
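
For reference, a minimal sketch of the same two-step flow the new start.py performs, with the CMake flags passed through subprocess.run's env parameter instead of the shell-prefixed assignment. This is not the committed code, and it assumes the nvcc path used in the commit (/home/user/local/cuda/bin/nvcc) exists on the target host.

```python
import os
import subprocess

# Remove any prebuilt CPU-only wheel first (ignore failure if it is not installed).
subprocess.run("pip uninstall -y llama-cpp-python", shell=True, check=False)

# Build llama-cpp-python against CUDA by passing CMake flags through CMAKE_ARGS.
# Flags and nvcc path taken from the committed one-liner; adjust for your machine.
env = os.environ.copy()
env["CMAKE_ARGS"] = "-DLLAMA_CUDA=on -DCMAKE_CUDA_COMPILER=/home/user/local/cuda/bin/nvcc"
subprocess.run("pip install llama-cpp-python", shell=True, env=env, check=True)
```

The committed one-liner works because, with shell=True, the shell treats the leading CMAKE_ARGS='...' as an environment assignment for the pip process; the env= form only makes that dependency on shell parsing explicit.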