import gradio as gr
import torch
from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration
# Gradio on Spaces: https://huggingface.co/docs/hub/spaces-sdks-gradio
# Use the first GPU if one is available (device=0); otherwise fall back to CPU (device=-1).
device = 0 if torch.cuda.is_available() else -1
# The checkpoint is a Mengzi-T5 model fine-tuned to restore punctuation in classical
# Chinese text; it reuses the tokenizer of the base model.
tokenizer = T5Tokenizer.from_pretrained("Langboat/mengzi-t5-base")
model = T5ForConditionalGeneration.from_pretrained("binxu/mengzi-t5-base-finetuned-punctuation")
text2text_generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer, device=device)
def generate(prompt):
    """Return the input text with punctuation restored."""
    torch.manual_seed(42)  # fix the seed so repeated runs produce the same output
    max_length = 150
    # The pipeline returns a list with one dict per input, e.g. [{"generated_text": "..."}].
    outputs = text2text_generator(prompt, max_length=max_length)
    return outputs[0]["generated_text"]
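
# Optional sanity check (my addition, not part of the Space UI); uncomment to try the
# model directly with the first demo example:
# print(generate("子曰学而时习之不亦说乎有朋自远方来不亦乐乎"))

# Example passages of unpunctuated classical Chinese shown in the demo.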
examples = ["子曰学而时习之不亦说乎有朋自远方来不亦乐乎",
"赐以优言问所好尚励短引长莫不恳笃",
"范文正为秀才时即以天下为己任程子曰一命之士苟存心于利物于人必有所济",
"昔楚庄王之霸也以民生在勤箴其民以日讨军实儆其军以祸至无日训其国人",
]
iface = gr.Interface(fn=generate,
                     inputs=gr.Textbox(lines=3, label="Prompt"),
                     outputs=gr.Textbox(label="Generated Text"),
                     examples=examples)
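# launch() is all that is needed on Hugging Face Spaces; when run locally, Gradio serves
# the app at http://127.0.0.1:7860 by default (assumption: no custom server settings here).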
iface.launch()