Update README.md
README.md
CHANGED
@@ -1,35 +1,10 @@
-
-
-
-
-
-
-
-
-
-
-## Full docs:
-```
-# Base llama-cpp-python with no GPU acceleration
-pip install llama-cpp-python
-# With NVidia CUDA acceleration
-CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
-# Or with OpenBLAS acceleration
-CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python
-# Or with CLBlast acceleration
-CMAKE_ARGS="-DLLAMA_CLBLAST=on" pip install llama-cpp-python
-# Or with AMD ROCm GPU acceleration (Linux only)
-CMAKE_ARGS="-DLLAMA_HIPBLAS=on" pip install llama-cpp-python
-# Or with Metal GPU acceleration (macOS only)
-CMAKE_ARGS="-DLLAMA_METAL=on" pip install llama-cpp-python
-
-# On Windows, set CMAKE_ARGS in PowerShell like this; e.g. for NVidia CUDA:
-$env:CMAKE_ARGS = "-DLLAMA_CUBLAS=on"
-pip install llama-cpp-python
-
-huggingface-cli download TheBloke/Silicon-Maid-7B-GGUF silicon-maid-7b.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
-
-huggingface-cli download lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF Meta-Llama-3-8B-Instruct-Q8_0.gguf --local-dir . --local-dir-use-symlinks False
-
-
-```
+title: Chat With PMB
+emoji: 🧠
+colorFrom: red
+colorTo: purple
+sdk: other
+sdk_version:
+app_file: app.py
+pinned: true
+license: mit
+short_description: Persistent Memory Bot powered by an open-source model.
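On a Hugging Face Space, this metadata block has to sit at the very top of README.md between `---` delimiters for the Hub to pick it up. A minimal sketch of the new header in place (the `---` fences are standard Space front-matter syntax and are assumed here, since the diff does not show them):

```yaml
---
title: Chat With PMB
emoji: 🧠
colorFrom: red
colorTo: purple
sdk: other
sdk_version:
app_file: app.py
pinned: true
license: mit
short_description: Persistent Memory Bot powered by an open-source model.
---
```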
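The removed instructions stop at installing llama-cpp-python and fetching a GGUF file. For reference, here is a minimal sketch of loading one of those files, assuming the Meta-Llama-3 download above landed in the current directory; the path, context size, and prompt are illustrative and not taken from app.py:

```python
from llama_cpp import Llama

# Load the GGUF file fetched by the huggingface-cli command above.
# n_gpu_layers=-1 offloads all layers if a GPU-accelerated build
# (CUDA, Metal, ROCm, ...) was installed; the CPU build ignores it.
llm = Llama(
    model_path="./Meta-Llama-3-8B-Instruct-Q8_0.gguf",
    n_ctx=4096,          # context window; illustrative value
    n_gpu_layers=-1,
)

# One chat-style completion round.
out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=64,
)
print(out["choices"][0]["message"]["content"])
```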