Spaces:
Sleeping
Update app.py
app.py CHANGED
@@ -1,31 +1,16 @@
-# from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
-#
-#
-# model = GPT2LMHeadModel.from_pretrained("rugpt3large_for_qna_120k10")
-# tokenizer = GPT2Tokenizer.from_pretrained("rugpt3large_for_qna_120k10")
-#
-# print(tokenizer.decode(model.generate(
-#     tokenizer.encode('<s> [user] Ты кто? [assistant]',
-#     return_tensors="pt"),
-#     max_new_tokens=100, no_repeat_ngram_size=2, temperature=0.7, do_sample=True)[0]))
-
 import gradio as gr
 import torch
 from transformers import GPT2Tokenizer, GPT2LMHeadModel
 
-# Load the model and tokenizer
-tokenizer = GPT2Tokenizer.from_pretrained("ERmak1581/rugpt3large_for_qna_400k")
-model = GPT2LMHeadModel.from_pretrained("ERmak1581/rugpt3large_for_qna_400k")
 
-
+tokenizer = GPT2Tokenizer.from_pretrained("ERmak1581/rugpt3large_for_qna_400k1")
+model = GPT2LMHeadModel.from_pretrained("ERmak1581/rugpt3large_for_qna_400k1")
+
 def gen(request, temperature, maxnewtokens):
     input_text = f"<s> [user] {request} [assistant]"
     max_new_tokens = maxnewtokens
-
-    # Convert the input string into tokens
     input_ids = tokenizer.encode(input_text, return_tensors='pt')
-
-    # Generate the text
+
     output = model.generate(
         input_ids,
         do_sample=True,
@@ -34,14 +19,14 @@ def gen(request, temperature, maxnewtokens):
         no_repeat_ngram_size=3
     )
 
-
+
     decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
     res = decoded_output.split("[assistant]")[1]
     res.removesuffix("</s>")
     res = res.strip()
     return res
 
-
+
 inputs = [
     gr.Textbox(lines=5, label="Input Text"),
     gr.Slider(minimum=0.1, maximum=1.9, value=1.0, label="Temperature", step=0.05),
@@ -49,5 +34,5 @@ inputs = [
 ]
 output = gr.Textbox(label="Output Text")
 
-interface = gr.Interface(gen, inputs, output, title="GPT-2 Text Generation", theme="compact", description="Демонстрация <a href=https://huggingface.co/ERmak1581/rugpt3large_for_qna_400k>модели</a> для задачи Question-Answer")
+interface = gr.Interface(gen, inputs, output, title="GPT-2 Text Generation", theme="compact", description="Демонстрация <a href=https://huggingface.co/ERmak1581/rugpt3large_for_qna_400k1>модели</a> для задачи Question-Answer")
 interface.launch(share=True)
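Two small follow-up fixes suggest themselves in gen() as committed: str.removesuffix() returns a new string rather than modifying res in place, so the bare res.removesuffix("</s>") call has no effect, and import torch appears unused in the file as shown. A minimal sketch of the corrected post-processing tail of gen(), using the same names as the commit:

# Corrected post-processing for gen(): str.removesuffix() (Python 3.9+)
# returns a new string, so its result must be reassigned.
decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
res = decoded_output.split("[assistant]")[1]  # assumes the marker occurs in the output
res = res.removesuffix("</s>")                # reassigned; the bare call was a no-op
res = res.strip()
return res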
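For a quick sanity check of the renamed checkpoint outside Gradio, the commented-out block deleted at the top of this diff can be revived as a standalone script. The sketch below follows that block, with only the model id swapped for the new checkpoint:

# Standalone smoke test, adapted from the commented-out block this commit
# removes; only the model id has been changed to the new checkpoint.
from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained("ERmak1581/rugpt3large_for_qna_400k1")
model = GPT2LMHeadModel.from_pretrained("ERmak1581/rugpt3large_for_qna_400k1")

input_ids = tokenizer.encode("<s> [user] Ты кто? [assistant]", return_tensors="pt")  # prompt: "Who are you?"
output = model.generate(
    input_ids,
    max_new_tokens=100,
    no_repeat_ngram_size=2,
    temperature=0.7,
    do_sample=True,
)
print(tokenizer.decode(output[0]))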