Update README.md
1) return_full_text=False returns only the generated part of the text, so the change produces the same result as the previous manual slicing (see the sketch after the diff).
Source:
https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.TextGenerationPipeline.example
2) The print(prompt) call in test_inference can be misleading, so it was removed.
3) Fixed a typo in Колмогоров (Kolmogorov).
README.md CHANGED
@@ -27,13 +27,12 @@ from transformers import AutoTokenizer, pipeline
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 prompts = [
     "В чем разница между фруктом и овощем?",
-    "Годы жизни
+    "Годы жизни колмогорова?"]

 def test_inference(prompt):
     prompt = pipe.tokenizer.apply_chat_template([{"role": "user", "content": prompt}], tokenize=False, add_generation_prompt=True)
-    print(prompt)
-    outputs
-    return outputs[0]['generated_text'][len(prompt):].strip()
+    outputs = pipe(prompt, max_new_tokens=512, return_full_text=False, do_sample=True, num_beams=1, temperature=0.25, top_k=50, top_p=0.98, eos_token_id=79097)
+    return outputs[0]['generated_text'].strip()


 for prompt in prompts:
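For reference, a minimal sketch of why the two variants are equivalent. This is not from the README: the model name "gpt2", the short prompt, and the generation parameters below are placeholder assumptions, used only to illustrate the return_full_text flag documented at the link above.

```python
# Minimal sketch, not part of the README: "gpt2" and the generation
# parameters are illustrative assumptions only.
from transformers import pipeline

pipe = pipeline("text-generation", model="gpt2")
prompt = "The capital of France is"

# Default (return_full_text=True): 'generated_text' is prompt + continuation,
# so the prompt must be sliced off manually, as the old README code did.
full = pipe(prompt, max_new_tokens=20, do_sample=False)
continuation_old_style = full[0]["generated_text"][len(prompt):].strip()

# return_full_text=False: 'generated_text' contains only the newly generated
# part, which is what the updated README code relies on.
gen_only = pipe(prompt, max_new_tokens=20, do_sample=False, return_full_text=False)
continuation_new_style = gen_only[0]["generated_text"].strip()

print(continuation_old_style)
print(continuation_new_style)
```

With greedy decoding (do_sample=False) both printed strings should match, which is what the commit note means by "the same result".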