---
library_name: transformers
language:
- ru
- en
---

# Vikhr 0.3-0.4 Release

We poured considerably more data into SFT, so JSON output and multi-turn dialogue now work more stably, and we slightly tuned the model's pretraining parameters.

 - [Google Colab](https://colab.research.google.com/drive/15O9LwZhVUa1LWhZa2UKr_B-KOKenJBvv#scrollTo=5EeNFU2-9ERi)
 - [GGUF](https://huggingface.co/Vikhrmodels/Vikhr-7B-instruct_0.4-GGUF)
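
If you only need the quantized GGUF build, a minimal sketch with `llama-cpp-python` could look like the following; the local filename and quantization level are assumptions, so use whichever file you actually download from the GGUF repo linked above.

```python
# Hypothetical sketch: running the quantized GGUF build with llama-cpp-python.
# The filename below is an assumption; substitute the file you downloaded from
# the Vikhrmodels/Vikhr-7B-instruct_0.4-GGUF repository.
from llama_cpp import Llama

llm = Llama(
    model_path="./Vikhr-7B-instruct_0.4-Q4_K_M.gguf",  # assumed local path
    n_ctx=4096,        # context window
    n_gpu_layers=-1,   # offload all layers if built with GPU support
)

out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "В чем разница между фруктом и овощем?"}],
    max_tokens=512,
    temperature=0.25,
)
print(out["choices"][0]["message"]["content"])
```

For full-precision inference with transformers: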

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the model in bfloat16 with Flash Attention 2 (requires the flash-attn
# package and a compatible GPU; drop the argument to fall back to the default).
model = AutoModelForCausalLM.from_pretrained("Vikhrmodels/Vikhr-7B-instruct_0.4",
                                             device_map="auto",
                                             attn_implementation="flash_attention_2",
                                             torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained("Vikhrmodels/Vikhr-7B-instruct_0.4")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

prompts = [
    "В чем разница между фруктом и овощем?",  # "What is the difference between a fruit and a vegetable?"
    "Годы жизни Колмогорова?",  # "Kolmogorov's years of life?"
]

def test_inference(prompt):
    # Wrap the user message in the model's chat template and generate a reply.
    prompt = pipe.tokenizer.apply_chat_template([{"role": "user", "content": prompt}],
                                                tokenize=False, add_generation_prompt=True)
    outputs = pipe(prompt, max_new_tokens=512, return_full_text=False, do_sample=True,
                   num_beams=1, temperature=0.25, top_k=50, top_p=0.98,
                   eos_token_id=79097)  # model-specific end-of-turn token id
    return outputs[0]["generated_text"].strip()

for prompt in prompts:
    print(f"    prompt:\n{prompt}")
    print(f"    response:\n{test_inference(prompt)}")
    print("-" * 50)

```
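
Since this release specifically targets more stable multi-turn behaviour, here is a minimal follow-up sketch that reuses `pipe` and the sampling settings from the snippet above; the second question is an illustrative assumption, not part of the original card.

```python
# Minimal multi-turn sketch, reusing `pipe` from the snippet above.
messages = [{"role": "user", "content": "Годы жизни Колмогорова?"}]

def chat(messages):
    # Render the full conversation with the chat template, then generate.
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False,
                                                add_generation_prompt=True)
    out = pipe(prompt, max_new_tokens=512, return_full_text=False, do_sample=True,
               temperature=0.25, top_k=50, top_p=0.98, eos_token_id=79097)
    return out[0]["generated_text"].strip()

# First turn
answer = chat(messages)
print(answer)

# Second turn: keep the history and ask a follow-up (illustrative question)
messages.append({"role": "assistant", "content": answer})
messages.append({"role": "user", "content": "А чем он наиболее известен?"})  # "What is he best known for?"
print(chat(messages))
```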