import gradio as gr
import torch
import wikipediaapi
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the PEFT adapter config, the base model it was trained on
# (bigscience/bloomz-3b), and the matching tokenizer
config = PeftConfig.from_pretrained("mohamedemam/Arabic-meeting-summarization")
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, "mohamedemam/Arabic-meeting-summarization")
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model.eval()


# Create a Wikipedia API instance and split an article into
# paragraph-sized example contexts for the dropdown
wiki_wiki = wikipediaapi.Wikipedia('MyProjectName ([email protected])', 'en')
page_py = wiki_wiki.page('Lionel Messi')
example_contexts = [line.strip() for line in page_py.text.split("\n") if line.strip()]


# Generate text for a chosen context with configurable decoding parameters
def generate_qa(context, temperature, top_p, num_seq, l_p, num_b):
    inputs = tokenizer(context, return_tensors='pt')

    # Sliders return floats, and beam search requires
    # num_return_sequences <= num_beams, so cast and clamp
    num_b = int(num_b)
    num_seq = min(int(num_seq), num_b)

    with torch.no_grad():
        output = model.generate(
            **inputs,
            temperature=temperature,
            top_p=top_p,
            num_return_sequences=num_seq,
            max_length=100,
            num_beams=num_b,
            length_penalty=l_p,
            do_sample=True,
        )

    generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)
    # Deduplicate identical sequences before displaying
    return "\n\n".join(set(generated_text))
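
# Example direct call with hypothetical parameter values, bypassing the UI:
#   print(generate_qa(example_contexts[0], temperature=1.0, top_p=0.9,
#                     num_seq=2, l_p=1.0, num_b=4))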
iface = gr.Interface(
    fn=generate_qa,
    inputs=[
        gr.Dropdown(choices=example_contexts, label="Choose an example context"),
        gr.Slider(minimum=0.0, maximum=5, value=2.1, step=0.01, label="Temperature"),
        gr.Slider(minimum=0.0, maximum=1, value=0.5, step=0.01, label="Top-p"),
        gr.Slider(minimum=1, maximum=20, value=3, step=1, label="Number of sequences"),
        gr.Slider(minimum=0.01, maximum=5, value=3, step=0.01, label="Length penalty"),
        gr.Slider(minimum=1, maximum=20, value=3, step=1, label="Number of beams"),
    ],
    outputs=gr.Textbox(label="Generated Output"),
    title="Question Generation and Answering",
    description="Select an example context and adjust the generation parameters. The model will generate questions and answers.",
)
# Launch the interface
iface.launch()