# Llama3ByNvidia / app.py
import gradio as gr
from transformers import pipeline

# Load the text-generation pipeline with the NVIDIA ChatQA model
model_name = "nvidia/Llama3-ChatQA-1.5-8B"
qa_pipeline = pipeline("text-generation", model=model_name)


def generate_answer(question):
    # Generate the answer using the model, capping the total sequence length at 250 tokens
    response = qa_pipeline(question, max_length=250)
    return response[0]["generated_text"]


# Create the Gradio interface: a single text box in, generated text out
iface = gr.Interface(
    fn=generate_answer,
    inputs="text",
    outputs="text",
    title="Llama3 ChatQA",
)

# Launch the interface and set share=True to create a public URL
iface.launch(share=True)
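
# A minimal usage sketch (not part of the original app): once the interface is
# running, it can also be queried programmatically with gradio_client. The local
# URL and the "/predict" api_name below are assumptions based on Gradio's
# defaults for a single-function Interface.
#
# from gradio_client import Client
#
# client = Client("http://127.0.0.1:7860/")
# print(client.predict("What is Llama3-ChatQA?", api_name="/predict"))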