Update README.md
README.md CHANGED
@@ -12,3 +12,43 @@ tags:
---

Experimental Repository :)
+
+Here's a quick test:
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+model = AutoModelForCausalLM.from_pretrained(
+    'beomi/Mistral-Ko-Inst-dev',
+    torch_dtype='auto',
+    device_map='auto',
+)
+tokenizer = AutoTokenizer.from_pretrained('beomi/Mistral-Ko-Inst-dev')
+
+pipe = pipeline(
+    'text-generation',
+    model=model,
+    tokenizer=tokenizer,
+    do_sample=True,
+    max_new_tokens=350,
+    return_full_text=False,
+    no_repeat_ngram_size=6,
+    eos_token_id=1,  # model is not yet tuned to generate </s>; use <s> (token id 1) as the stop token instead
+)
+
+
+def gen(x):
+    chat = tokenizer.apply_chat_template([
+        {"role": "user", "content": x},
+        # {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"},
+        # {"role": "user", "content": "Do you have mayonnaise recipes? please say in Korean."}
+    ], tokenize=False)
+    print(pipe(chat)[0]['generated_text'].strip())
+
+gen("스타벅스와 스타벅스 코리아의 차이는?")  # "What is the difference between Starbucks and Starbucks Korea?"
+
+# (Sample generation)
+# 스타벅스는 전세계적으로 운영하고 있는 커피 전문사이다. 한국에는 스타벅스 코리아라는 이름으로 운영되고 있다.
+# ("Starbucks is a coffee company operating worldwide. In Korea, it operates under the name Starbucks Korea.")
+# 스타벅스 코리아는 대한민국에 진출한 이후 2009년과 2010년에 두 차례의 브랜드과의 재검토 및 새로운 디자인을 통해 새로운 브랜드다. 커피 전문점 프리미엄 이미지를 유지하고 있고, 스타벅스 코리아는 한국을 대표하는 프리미엄 커피 전문 브랜드을 만들고 있다.
+# ("Since entering South Korea, Starbucks Korea is a new brand, created through two rounds of brand review and redesign in 2009 and 2010. It maintains the premium image of a coffee house, and Starbucks Korea is building a premium coffee brand that represents Korea.")
+```
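The commented-out messages in `gen` hint at multi-turn use. Here is a minimal sketch of that pattern, assuming the snippet above has already been run (so `pipe` and `tokenizer` exist) and that the model's chat template accepts alternating user/assistant turns; the `gen_chat` helper and the opening question are illustrative, not part of the repository:

```python
# Sketch only: multi-turn variant of gen(), reusing pipe and tokenizer from above.
# gen_chat is a hypothetical helper; the first user question is an assumed opener.
def gen_chat(messages):
    # Render the whole conversation with the model's chat template, then generate.
    chat = tokenizer.apply_chat_template(messages, tokenize=False)
    return pipe(chat)[0]['generated_text'].strip()

print(gen_chat([
    {"role": "user", "content": "What is your favourite condiment?"},
    {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"},
    {"role": "user", "content": "Do you have mayonnaise recipes? please say in Korean."},
]))
```

With `tokenize=False`, `apply_chat_template` returns the rendered conversation as a single string (for Mistral-style templates, roughly `<s>[INST] … [/INST] … </s>[INST] … [/INST]`), which the pipeline then completes.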