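"""Streamlit app for chatting with a single Confluence page via Google Gemini.

The page is fetched with ConfluenceLoader, split into chunks, embedded with
Google's embedding model, and indexed in a local FAISS store; answers come
from a "stuff" question-answering chain over the retrieved chunks.

Required environment variables: CONFLUENCE_API_KEY, CONFLUENCE_URL, USERNAME,
and PAGE_ID. GOOGLE_API_KEY is prompted for if it is not already set.
Run with: streamlit run <path-to-this-file>
"""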
import os
import getpass
from langchain_community.document_loaders import ConfluenceLoader
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
import google.generativeai as genai
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
import streamlit as st


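# Confluence credentials come from the environment; a missing token raises
# KeyError immediately. The Google API key is prompted for if it is unset.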
confluence_api_key = os.environ["CONFLUENCE_API_KEY"]

if "GOOGLE_API_KEY" not in os.environ:
    os.environ["GOOGLE_API_KEY"] = getpass.getpass("Please provide Google API Key")

google_api_key = os.environ['GOOGLE_API_KEY']
genai.configure(api_key=google_api_key)


# Pull the single target page from Confluence. page_ids expects a list of page
# IDs, and loading by page ID alone avoids fetching the entire space.
loader = ConfluenceLoader(
    url=os.environ["CONFLUENCE_URL"],
    username=os.environ["USERNAME"],
    api_key=confluence_api_key,
    page_ids=[os.environ["PAGE_ID"]],
)

conf_docs = loader.load()


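# Split the page into large overlapping chunks for embedding; only one page
# was loaded, so conf_docs[-1] is that page.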
text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
chunks = text_splitter.split_text(conf_docs[-1].page_content)

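# Gemini embedding model for indexing and Gemini chat model for answering.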
embeddings = GoogleGenerativeAIEmbeddings(model='models/embedding-001')
llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash-latest")

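# Build the FAISS index once at startup and persist it for get_response to reload.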
vector_store = FAISS.from_texts(chunks, embedding=embeddings)
vector_store.save_local("faiss_index")


def get_response(query, chat_history):
    """Answer `query` from chunks retrieved out of the FAISS index, given the chat history."""
    prompt_template = """
        Answer the question in as much detail as possible based on the conversation history and the provided context, and include all relevant details. If the answer is not in the
        provided context, just say "I am not able to help. Please contact the Platform Support Team at [email protected]" rather than giving a wrong answer.\n\n
        Conversation History:\n {chat_history}\n
        Context:\n {context}\n
        Question: \n{question}\n

        Answer:
    """
    prompt = PromptTemplate(template=prompt_template, input_variables=["chat_history", "context", "question"])
    # "stuff" packs all retrieved chunks into a single prompt for the model.
    chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt)
    # Reload the persisted index; the flag is needed because FAISS's docstore is pickled.
    db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
    docs = db.similarity_search(query)
    response = chain({"input_documents": docs, "question": query, "chat_history": chat_history}, return_only_outputs=True)
    return response["output_text"]


if __name__ == '__main__':
    st.set_page_config("Chat with Confluence Page")
    st.header("Chat with Confluence Page using AI")

    # Streamlit reruns the script on every interaction, so the transcript is
    # kept in session state and replayed on each run.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if question := st.chat_input("Ask questions related to login and registration"):
        st.session_state.messages.append({"role": "user", "content": question})
        with st.chat_message("user"):
            st.markdown(question)

        with st.chat_message("assistant"):
            # Pass only the prior turns as history; the new question is sent separately.
            answer = get_response(question, st.session_state.messages[:-1])
            st.write(answer)
        st.session_state.messages.append({"role": "assistant", "content": answer})