# NOTE: the three lines that followed ("Spaces: / Sleeping / Sleeping") were
# Hugging Face Spaces page-status residue from scraping, not part of the app.
import streamlit as st
import torch
from transformers import AutoModelForQuestionAnswering, AutoTokenizer
def load_model():
    """Load the fine-tuned QA model and its tokenizer from the Hub.

    Returns:
        tuple: ``(model, tokenizer)`` — an ``AutoModelForQuestionAnswering``
        and its matching ``AutoTokenizer``.

    NOTE(review): this is called at module level on every Streamlit rerun;
    consider decorating with ``@st.cache_resource`` so the model is loaded
    only once per process — confirm against the deployed Streamlit version.
    """
    model_path = "YasirAbdali/roberta_qoura"  # Replace with your actual model path
    model = AutoModelForQuestionAnswering.from_pretrained(model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    return model, tokenizer
def answer_question(question, model, tokenizer):
    """Run extractive QA on the question text and return the predicted span.

    NOTE(review): extractive QA models are normally fed a (question, context)
    pair; here only the question is encoded, so the span is extracted from the
    question itself — confirm this matches how the model was fine-tuned.

    Args:
        question: Raw question string typed by the user.
        model: A ``*ForQuestionAnswering`` model whose output exposes
            ``start_logits`` and ``end_logits``.
        tokenizer: Tokenizer matching ``model``.

    Returns:
        str: The decoded answer span, or ``""`` when the predicted end
        position precedes the start position (degenerate prediction).
    """
    inputs = tokenizer(
        question,
        return_tensors="pt",
        max_length=512,
        truncation=True,
        padding="max_length",
    )
    with torch.no_grad():  # inference only — no gradients needed
        outputs = model(**inputs)

    start_index = int(torch.argmax(outputs.start_logits))
    end_index = int(torch.argmax(outputs.end_logits))

    # Guard against a degenerate span where the end lands before the start;
    # the original code silently returned an empty/garbage slice here.
    if end_index < start_index:
        return ""

    # decode(..., skip_special_tokens=True) keeps [CLS]/[SEP]/<pad> out of the
    # answer; with padding="max_length" pad tokens are guaranteed to be present
    # in input_ids, so the original convert_* round-trip could leak them.
    span_ids = inputs["input_ids"][0][start_index : end_index + 1]
    return tokenizer.decode(span_ids, skip_special_tokens=True)
# --- Streamlit page (the script reruns top-to-bottom on every interaction) ---
st.title("Quora Question Answering")

model, tokenizer = load_model()

st.write("Enter a question, and the model will provide an answer based on its knowledge.")
# key="question" lets the sidebar example buttons populate this box via
# session state (the original code had no key, so examples could not reach it).
question = st.text_area("Question", key="question")

if st.button("Get Answer"):
    if question:
        answer = answer_question(question, model, tokenizer)
        st.write("Answer:", answer)
    else:
        st.write("Please provide a question.")

# Optional: Add some example questions
st.sidebar.header("Example Questions")
example_questions = [
    "What is the capital of France?",
    "Who wrote 'Romeo and Juliet'?",
    "What is the boiling point of water?",
    "What year did World War II end?",
]


def _use_example(text):
    """Callback: copy the clicked example into the question box.

    Button callbacks run before the next rerun, so writing the widget's
    session-state key here is allowed even though the text_area is already
    instantiated on this run.
    """
    st.session_state["question"] = text


for example in example_questions:
    # The original created a brand-new st.text_input on click instead of
    # filling the existing text_area; an on_click callback fixes that.
    st.sidebar.button(example, on_click=_use_example, args=(example,))