limhyeonseok committed on
Commit eb39122 · 1 parent: e10b8a8

Update README.md

Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -86,12 +86,12 @@ Refer to the [original model card](https://huggingface.co/MLP-KTLim/llama-3-Kore
  ## Example code
  ```python
  !CMAKE_ARGS="-DLLAMA_CUDA=on" pip install llama-cpp-python
- !huggingface-cli download MLP-KTLim/llama-3-Korean-Bllossom-8B-4bit --local-dir='YOUR-LOCAL-FOLDER-PATH'
+ !huggingface-cli download MLP-KTLim/llama-3-Korean-Bllossom-8B-gguf-Q4_K_M --local-dir='YOUR-LOCAL-FOLDER-PATH'

  from llama_cpp import Llama
  from transformers import AutoTokenizer

- model_id = 'MLP-KTLim/llama-3-Korean-Bllossom-8B-4bit'
+ model_id = 'MLP-KTLim/llama-3-Korean-Bllossom-8B-gguf-Q4_K_M'
  tokenizer = AutoTokenizer.from_pretrained(model_id)
  model = Llama(
      model_path='YOUR-LOCAL-FOLDER-PATH/llama-3-Korean-Bllossom-8B-Q4_K_M.gguf',
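
The README excerpt above stops inside the `Llama(...)` call. For reference, here is a minimal sketch of how the updated snippet could continue; the `n_ctx` and `n_gpu_layers` values, the English example prompt, and the `<|eot_id|>` stop token are illustrative assumptions based on typical llama-cpp-python and Llama 3 chat-template usage, not part of the diff itself.

```python
# Sketch only: continues the README example under assumed settings.
from llama_cpp import Llama
from transformers import AutoTokenizer

model_id = 'MLP-KTLim/llama-3-Korean-Bllossom-8B-gguf-Q4_K_M'
tokenizer = AutoTokenizer.from_pretrained(model_id)

model = Llama(
    model_path='YOUR-LOCAL-FOLDER-PATH/llama-3-Korean-Bllossom-8B-Q4_K_M.gguf',
    n_ctx=2048,       # assumed context length; adjust to your hardware
    n_gpu_layers=-1,  # offload all layers when built with -DLLAMA_CUDA=on
    verbose=False,
)

# Build a chat-style prompt using the tokenizer's chat template.
messages = [
    {"role": "user", "content": "Suggest a one-day sightseeing itinerary for Seoul."}
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

# Generate; the stop string assumes the Llama 3 <|eot_id|> end-of-turn marker.
output = model(prompt, max_tokens=512, stop=["<|eot_id|>"], echo=False)
print(output["choices"][0]["text"])
```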