ankush-003 committed on
Commit
03b8ca7
β€’
1 Parent(s): a27fa49

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +174 -0
app.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""ASMR Query Bot — Streamlit RAG chatbot over Alertmanager alert embeddings.

Pipeline: Streamlit UI -> history-aware retriever (MongoDB Atlas vector
search + BGE embeddings) -> Groq-hosted Mixtral LLM.
"""

import os
from collections.abc import Collection  # NOTE(review): unused here — verify before removing

import nest_asyncio
import streamlit as st
from langchain.chains import (
    RetrievalQA,  # NOTE(review): unused here — verify before removing
    create_history_aware_retriever,
    create_retrieval_chain,
)
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.memory import ChatMessageHistory  # NOTE(review): unused here
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain_community.chat_message_histories import (
    StreamlitChatMessageHistory,
)
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_core.chat_history import BaseChatMessageHistory  # NOTE(review): unused here
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_groq import ChatGroq

# Patch the already-running event loop so langchain's async internals can be
# re-entered from inside Streamlit's own loop.
nest_asyncio.apply()
st.title('ASMR Query Bot 🔔')

# --- Configuration -----------------------------------------------------------
# MongoDB Atlas namespace holding the alert embeddings.
database = "AlertSimAndRemediation"
collection = "alert_embed"
index_name = "alert_index"

# --- LLM ---------------------------------------------------------------------
# Groq-hosted Mixtral; temperature 0 for deterministic answers.
chat = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768")

# --- Embedding model ---------------------------------------------------------
# Query-side embedder; must match the model that produced the stored alert
# embeddings for similarity search to be meaningful. Normalized embeddings so
# cosine/dot-product scores are comparable.
embedding_args = {
    "model_name": "BAAI/bge-large-en-v1.5",
    "model_kwargs": {"device": "cpu"},
    "encode_kwargs": {"normalize_embeddings": True},
}
embedding_model = HuggingFaceEmbeddings(**embedding_args)
# --- Vector search -----------------------------------------------------------
# Connects to MongoDB Atlas; requires the MONGO_URI environment variable
# (raises KeyError at startup if it is missing).
vector_search = MongoDBAtlasVectorSearch.from_connection_string(
    os.environ["MONGO_URI"],
    f"{database}.{collection}",
    embedding_model,
    index_name=index_name,
)

# Plain similarity search returning the 5 closest alert documents per query.
qa_retriever = vector_search.as_retriever(
    search_type="similarity",
    search_kwargs={"k": 5},
)
# --- Contextualising previous chats ------------------------------------------
# Rewrites a follow-up question into a standalone one using the chat history,
# so retrieval works even when the user says e.g. "what about that alert?".
contextualize_q_system_prompt = """Given a chat history and the latest user question \
which might reference context in the chat history, formulate a standalone question \
which can be understood without the chat history. Do NOT answer the question, \
just reformulate it if needed and otherwise return it as is."""
contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
# Retriever that first reformulates the question with the LLM, then searches.
history_aware_retriever = create_history_aware_retriever(
    chat, qa_retriever, contextualize_q_prompt
)
# --- Answering prompt + RAG chain --------------------------------------------
system_prompt = """
You are a helpful query assistant for Alertmanager, an open-source system for monitoring and alerting on system metrics. Your goal is to accurately answer questions related to alerts triggered within the Alertmanager system based on the alert information provided to you. \
You will be given details about specific alerts, including the alert source, severity, category, and any other relevant metadata. Using this information, you should be able to respond to queries about the nature of the alert, what it signifies, potential causes, and recommended actions or troubleshooting steps. \
Your responses should be clear, concise, and tailored to the specific alert details provided, while also drawing from your broader knowledge about Alertmanager and monitoring best practices when relevant. If you cannot provide a satisfactory answer due to insufficient information, politely indicate that and ask for any additional context needed. \

<context>
{context}
</context>
"""

# NOTE(review): only referenced by removed experiments; the live UI uses
# StreamlitChatMessageHistory instead — candidate for deletion.
chat_history = []

qa_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
# Stuffs the retrieved alert documents into {context} and asks the LLM.
question_answer_chain = create_stuff_documents_chain(chat, qa_prompt)

# --- Output parser ------------------------------------------------------------
# NOTE(review): built but never wired into the chain below — either attach it
# (format instructions in the prompt + parse the answer) or remove it.
response_schemas = [
    ResponseSchema(name="answer", description="answer to the user's question"),
    ResponseSchema(
        name="source",
        description="source used to answer the user's question, should be a website.",
    ),
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)

# Full RAG pipeline: reformulate question -> retrieve alerts -> answer.
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
# (Removed: commented-out experiments — a dict-backed per-session message
# store with get_session_history(), a RunnableWithMessageHistory built on it,
# schema prints, and manual retriever/chain invocations. The live
# StreamlitChatMessageHistory wiring below supersedes all of them; recover
# from version control if needed.)
# --- Streamlit chat UI --------------------------------------------------------
# Message history persisted in st.session_state under "chat_messages".
if "chat_messages" not in st.session_state:
    st.session_state.chat_messages = []

history = StreamlitChatMessageHistory(key="chat_messages")

# Seed the conversation with a greeting on first load.
if len(history.messages) == 0:
    history.add_ai_message("How can I help you?")

# Single-session app: the session_id argument is deliberately ignored and the
# one Streamlit-backed history is always returned. The wrapper appends each
# new human/AI turn to `history` automatically when the chain is invoked.
conversational_rag_chain = RunnableWithMessageHistory(
    rag_chain,
    lambda session_id: history,
    input_messages_key="input",
    history_messages_key="chat_history",
    output_messages_key="answer",
)

# Replay prior turns on every rerun.
for msg in history.messages:
    st.chat_message(msg.type).write(msg.content)


if prompt := st.chat_input():
    st.chat_message("human").write(prompt)

    # Any session_id works here — get_session_history above ignores it.
    config = {"configurable": {"session_id": "any"}}
    response = conversational_rag_chain.invoke({"input": prompt}, config)
    st.chat_message("ai").write(response["answer"])