|
import gradio as gr |
|
from transformers import AutoTokenizer |
|
import re |
|
from peft import PeftModel, PeftConfig |
|
from transformers import AutoModelForCausalLM |
|
|
|
# Base checkpoint shared by the causal-LM and its tokenizer.  Using one
# constant keeps the two from silently drifting apart (the original code
# assigned model_name but never used it, repeating the literal instead).
model_name = "bigscience/bloomz-3b"

# LoRA/PEFT adapter fine-tuned for Arabic meeting summarization.
config = PeftConfig.from_pretrained("mohamedemam/Arabic-meeting-summarization")

model = AutoModelForCausalLM.from_pretrained(model_name)

# Attach the adapter weights on top of the frozen base model.
model = PeftModel.from_pretrained(model, "mohamedemam/Arabic-meeting-summarization")

tokenizer = AutoTokenizer.from_pretrained(model_name)

# Inference only: switch off dropout / training-mode layers.
model.eval()

import wikipediaapi
|
|
|
|
|
|
|
# Fetch a Wikipedia article to seed the demo's example dropdown.
wiki_wiki = wikipediaapi.Wikipedia('MyProjectName ([email protected])', 'en')

page_py = wiki_wiki.page('Leo messi')

# One dropdown choice per paragraph.  Splitting on "\n" already strips every
# newline from the pieces, so the original follow-up pass that re.sub'ed
# "\n" out of each element was a no-op and has been removed.
example_contexts = page_py.text.split("\n")
|
|
|
|
|
|
|
|
|
def generate_qa(context, temperature, top_p, num_seq, l_p, num_b):
    """Generate text continuations for *context* with the adapted model.

    Args:
        context: Input text (the example paragraph chosen in the UI).
        temperature: Sampling temperature forwarded to ``model.generate``.
        top_p: Nucleus-sampling probability mass.
        num_seq: Number of sequences to return.
        l_p: Length penalty used during beam search.
        num_b: Number of beams.

    Returns:
        The de-duplicated generated sequences joined by blank lines.
    """
    input_ids = tokenizer(context, return_tensors='pt')

    output = model.generate(
        **input_ids,
        temperature=temperature,
        top_p=top_p,
        num_return_sequences=num_seq,
        max_length=100,
        num_beams=num_b,
        length_penalty=l_p,
        do_sample=True,
    )

    generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)

    # dict.fromkeys de-duplicates while preserving generation order; the
    # original set() made the displayed order non-deterministic between runs.
    return "\n\n".join(dict.fromkeys(generated_text))
|
# Build and launch the demo UI.  Gradio's ``gr.inputs`` / ``gr.outputs``
# namespaces were deprecated in 3.x and removed in 4.x; components now live
# at the top level and the ``default=`` kwarg was renamed ``value=``.
iface = gr.Interface(
    fn=generate_qa,
    inputs=[
        gr.Dropdown(choices=example_contexts, label="Choose an Example"),
        gr.Slider(minimum=0.0, maximum=5, value=2.1, step=0.01, label="Temperature"),
        gr.Slider(minimum=0.0, maximum=1, value=0.5, step=0.01, label="Top-p"),
        gr.Slider(minimum=1, maximum=20, value=3, step=1, label="num of sequance"),
        gr.Slider(minimum=0.01, maximum=5, value=3, step=0.01, label="l_p"),
        gr.Slider(minimum=1, maximum=20, value=3, step=1, label="num of beams"),
    ],
    outputs=gr.Textbox(label="Generated Output"),
    title="Question Generation and Answering",
    description="Select an example context, choose a recommended word, adjust temperature and top-p. The model will generate questions and answers.",
)

iface.launch()