jay68 commited on
Commit
d010905
1 Parent(s): 9ab3f4e

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -1
README.md CHANGED
@@ -108,9 +108,10 @@ model = LlamaForCausalLM.from_pretrained(ckpt, device_map='auto', low_cpu_mem_us
108
  tokenizer = AutoTokenizer.from_pretrained(ckpt)
109
  prompt = "Human: 写一首中文歌曲,赞美大自然 \n\nAssistant: "
110
  input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
111
- generate_ids = model.generate(input_ids, max_new_tokens=500, do_sample = True, top_k = 30, top_p = 0.85, temperature = 0.5, repetition_penalty=1., eos_token_id=2, bos_token_id=1, pad_token_id=0)
112
  output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
113
  response = output[len(prompt):]
 
114
 
115
  ```
116
 
 
108
  tokenizer = AutoTokenizer.from_pretrained(ckpt)
109
  prompt = "Human: 写一首中文歌曲,赞美大自然 \n\nAssistant: "
110
  input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
111
+ generate_ids = model.generate(input_ids, max_new_tokens=300, do_sample = True, top_k = 30, top_p = 0.85, temperature = 0.5, repetition_penalty=1.2, eos_token_id=2, bos_token_id=1, pad_token_id=0)
112
  output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
113
  response = output[len(prompt):]
114
+ print(response)
115
 
116
  ```
117