# Source: ypatel / app.py (Hugging Face Space file viewer)
# Commit 80be8a0 — "Add application file" by root
# File size: 1.01 kB
import streamlit as st
from ingest import parse_document, create_embeddings
from run_localGPT import generate_answer
# Function to run the pipeline
def run_pipeline(document):
    """Run the full local QA pipeline over a raw document.

    The document text is parsed, embedded, and handed to the local
    LLM, which produces the final answer string.

    Args:
        document: Raw document text to process.

    Returns:
        The answer produced by ``generate_answer``.
    """
    parsed = parse_document(document)
    vectors = create_embeddings(parsed)
    return generate_answer(vectors)
# Streamlit app
def main():
    """Render the Streamlit UI and run the pipeline on demand.

    Shows a title and a short explanation, collects a document from a
    text area, and — when the "Run" button is pressed — feeds it
    through ``run_pipeline`` and displays the answer.
    """
    st.title("Local GPT Pipeline")
    intro = (
        "How does it work?\n"
        "Selecting the right local models and the power of LangChain, "
        "you can run the entire pipeline locally without any data leaving your environment, and with reasonable performance."
    )
    st.write(intro)

    # User-supplied document text.
    doc = st.text_area("Document")

    # Only invoke the (potentially slow) pipeline on explicit request.
    if st.button("Run"):
        st.write("Answer:", run_pipeline(doc))
# Script entry point: launch the Streamlit app when run directly
# (e.g. via `streamlit run app.py`), not when imported as a module.
if __name__ == "__main__":
    main()