import streamlit as st
from ingest import parse_document, create_embeddings
from run_localGPT import generate_answer

# Function to run the pipeline
def run_pipeline(document):
    # Parse the document and create embeddings
    embeddings = create_embeddings(parse_document(document))

    # Generate an answer using the local LLM and the extracted context
    answer = generate_answer(embeddings)

    return answer

def main():
    """Render the Streamlit UI and wire the Run button to the pipeline."""
    st.title("Local GPT Pipeline")

    intro = (
        "How does it work?\n"
        "Selecting the right local models and the power of LangChain, "
        "you can run the entire pipeline locally without any data leaving your environment, and with reasonable performance."
    )
    st.write(intro)

    # Free-text input that will be fed through the pipeline.
    document = st.text_area("Document")

    # Guard clause: do nothing until the user clicks "Run".
    if not st.button("Run"):
        return

    st.write("Answer:", run_pipeline(document))

# Entry point when executed as a script (e.g. via `streamlit run`).
if __name__ == "__main__":
    main()