Update README.md
Browse files
README.md
CHANGED
@@ -108,9 +108,10 @@ model = LlamaForCausalLM.from_pretrained(ckpt, device_map='auto', low_cpu_mem_us
 tokenizer = AutoTokenizer.from_pretrained(ckpt)
 prompt = "Human: 写一首中文歌曲,赞美大自然 \n\nAssistant: "
 input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
-generate_ids = model.generate(input_ids, max_new_tokens=
+generate_ids = model.generate(input_ids, max_new_tokens=300, do_sample = True, top_k = 30, top_p = 0.85, temperature = 0.5,repetition_penalty=1.2, eos_token_id=2, bos_token_id=1, pad_token_id=0)
 output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
 response = output[len(prompt):]
+print(response)

 ```
