---
library_name: peft
base_model: yanolja/EEVE-Korean-Instruct-10.8B-v1.0
---
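
This repository contains a PEFT LoRA adapter for English-Korean translation. The snippet below loads the yanolja/EEVE-Korean-10.8B-v1.0 base model in 4-bit NF4, attaches the adapter weights, and translates one sentence in each direction using ALMA-style prompts.
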
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import prepare_model_for_kbit_training, PeftModel

model_path = 'yanolja/EEVE-Korean-10.8B-v1.0'
lora_path = './ALMA/checkpoint-378/adapter_model'

# Load the base model in 4-bit NF4 so it fits on a single GPU.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(model_path, quantization_config=bnb_config, trust_remote_code=True)
model.config.use_cache = False  # training-time setting; set True for faster generation

# Attach the LoRA adapter to the quantized base model.
model = PeftModel.from_pretrained(model, lora_path)
model = prepare_model_for_kbit_training(model)  # only needed if you continue training the adapter

# Left padding so prompts end exactly where generation begins.
tokenizer = AutoTokenizer.from_pretrained(model_path, padding_side='left')
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

en_text = 'Hi.'
ko_text = '안녕하세요.'
# ALMA-style prompts; the first instruction is Korean for
# "Translate this from English to Korean:".
en_prompt = f"이것을 영어에서 한국어로 번역하세요:\n영어: {en_text}\n한국어:"
ko_prompt = f"Translate this from Korean to English:\nKorean: {ko_text}\nEnglish:"

# English -> Korean
input_ids = tokenizer(en_prompt, return_tensors="pt", padding=True, max_length=256, truncation=True).input_ids.cuda()
with torch.no_grad():
    generated_ids = model.generate(input_ids=input_ids, num_beams=5, max_new_tokens=20, do_sample=True, temperature=0.6, top_p=0.9)
outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
print(outputs)

# Korean -> English
input_ids = tokenizer(ko_prompt, return_tensors="pt", padding=True, max_length=256, truncation=True).input_ids.cuda()
with torch.no_grad():
    generated_ids = model.generate(input_ids=input_ids, num_beams=5, max_new_tokens=20, do_sample=True, temperature=0.6, top_p=0.9)
outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
print(outputs)
```
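
Because the tokenizer is loaded with `padding_side='left'`, the same setup extends to batched translation. Here is a minimal sketch, reusing the `model` and `tokenizer` from the snippet above; the sentence list is only a made-up example:

```python
# Batched English -> Korean translation; `model` and `tokenizer` are assumed
# to come from the snippet above, and `en_texts` is a made-up example list.
en_texts = ['Hi.', 'How are you?', 'See you tomorrow.']
prompts = [
    f"이것을 영어에서 한국어로 번역하세요:\n영어: {t}\n한국어:"  # "Translate this from English to Korean:"
    for t in en_texts
]

# Left padding aligns the ends of all prompts, so generation continues each
# sequence in the batch; the attention mask marks the padded positions.
batch = tokenizer(prompts, return_tensors="pt", padding=True, max_length=256, truncation=True)
with torch.no_grad():
    generated_ids = model.generate(
        input_ids=batch.input_ids.cuda(),
        attention_mask=batch.attention_mask.cuda(),
        num_beams=5,
        max_new_tokens=20,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )

# generate() returns prompt + continuation; slice off the prompt tokens so
# only the translations are decoded.
translations = tokenizer.batch_decode(generated_ids[:, batch.input_ids.shape[1]:], skip_special_tokens=True)
print(translations)
```

Slicing at `batch.input_ids.shape[1]` works because, with left padding, every sequence in the batch has the same prompt length after padding, so everything past that index is generated text.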