Asankhaya Sharma committed
Commit 033cc04
1 Parent(s): 189073a

update co pilot
Files changed (2):
  1. main.py +29 -100
  2. question.py +40 -56
main.py CHANGED
@@ -3,15 +3,10 @@ import os
 import tempfile
 
 import streamlit as st
-from files import file_uploader, url_uploader
 from question import chat_with_doc
-from brain import brain
 from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings
 from langchain.vectorstores import SupabaseVectorStore
 from supabase import Client, create_client
-from explorer import view_document
-from stats import get_usage_today
-from st_login_form import login_form
 
 supabase_url = st.secrets.SUPABASE_URL
 supabase_key = st.secrets.SUPABASE_KEY
@@ -20,6 +15,7 @@ anthropic_api_key = st.secrets.anthropic_api_key
 hf_api_key = st.secrets.hf_api_key
 supabase: Client = create_client(supabase_url, supabase_key)
 self_hosted = st.secrets.self_hosted
+username = st.secrets.username
 
 # embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
 
@@ -42,103 +38,36 @@ if anthropic_api_key:
 
 # Set the theme
 st.set_page_config(
-    page_title="meraKB",
-    layout="wide",
-    initial_sidebar_state="expanded",
+    page_title="Securade.ai - Safety Copilot",
+    page_icon="https://securade.ai/favicon.ico",
+    layout="centered",
+    initial_sidebar_state="collapsed",
+    menu_items={
+        "About": "# Securade.ai Safety Copilot v0.1\n [https://securade.ai](https://securade.ai)",
+        "Get Help": "https://securade.ai",
+        "Report a Bug": "mailto:[email protected]"
+    }
 )
 
-
-st.title("🧠 meraKB - Your digital brain 🧠")
-st.markdown("Store your knowledge in a vector store and chat with it.")
-if self_hosted == "false":
-    st.markdown('**📢 Note: In the public demo, access to functionality is restricted. You can only use the GPT-3.5-turbo model and upload files up to 1Mb. To use more models and upload larger files, consider self-hosting meraKB.**')
+st.title("👷‍♂️ Safety Copilot 🦺")
+st.markdown("Chat with your personal assistant about health and safety information.")
 
 st.markdown("---\n\n")
 
-st.session_state["overused"] = False
-if self_hosted == "false":
-    usage = get_usage_today(supabase)
-    if usage > st.secrets.usage_limit:
-        st.markdown(
-            f"<span style='color:red'>You have used {usage} tokens today, which is more than your daily limit of {st.secrets.usage_limit} tokens. Please come back later or consider self-hosting.</span>", unsafe_allow_html=True)
-        st.session_state["overused"] = True
-    else:
-        st.markdown(f"<span style='color:blue'>Usage today: {usage} tokens out of {st.secrets.usage_limit}</span>", unsafe_allow_html=True)
-    st.write("---")
-
-client = login_form()
-
-if st.session_state["authenticated"]:
-    if st.session_state["username"]:
-        st.success(f"Welcome {st.session_state['username']}")
-    else:
-        st.session_state["username"] = 'guest'
-        st.success("Welcome guest")
-
-    # Initialize session state variables
-    if 'model' not in st.session_state:
-        st.session_state['model'] = "meta-llama/Llama-2-70b-chat-hf"
-    if 'temperature' not in st.session_state:
-        st.session_state['temperature'] = 0.1
-    if 'chunk_size' not in st.session_state:
-        st.session_state['chunk_size'] = 500
-    if 'chunk_overlap' not in st.session_state:
-        st.session_state['chunk_overlap'] = 0
-    if 'max_tokens' not in st.session_state:
-        st.session_state['max_tokens'] = 500
-
-    # Create a radio button for user to choose between adding knowledge or asking a question
-    user_choice = st.radio(
-        "Choose an action", ('Add Knowledge', 'Chat with your Brain', 'Forget', "Explore"))
-
-    st.markdown("---\n\n")
-
-    if user_choice == 'Add Knowledge':
-        # Display chunk size and overlap selection only when adding knowledge
-        st.sidebar.title("Configuration")
-        st.sidebar.markdown(
-            "Choose your chunk size and overlap for adding knowledge.")
-        st.session_state['chunk_size'] = st.sidebar.slider(
-            "Select Chunk Size", 100, 1000, st.session_state['chunk_size'], 50)
-        st.session_state['chunk_overlap'] = st.sidebar.slider(
-            "Select Chunk Overlap", 0, 100, st.session_state['chunk_overlap'], 10)
-
-        # Create two columns for the file uploader and URL uploader
-        col1, col2 = st.columns(2)
-
-        with col1:
-            file_uploader(supabase, vector_store)
-        with col2:
-            url_uploader(supabase, vector_store)
-    elif user_choice == 'Chat with your Brain':
-        # Display model and temperature selection only when asking questions
-        st.sidebar.title("Configuration")
-        st.sidebar.markdown(
-            "Choose your model and temperature for asking questions.")
-        if self_hosted != "false":
-            st.session_state['model'] = st.sidebar.selectbox(
-                "Select Model", models, index=(models).index(st.session_state['model']))
-        else:
-            st.sidebar.write("**Model**: gpt-3.5-turbo")
-            st.sidebar.write("**Self Host to unlock more models such as claude-v1 and GPT4**")
-            st.session_state['model'] = "gpt-3.5-turbo"
-        st.session_state['temperature'] = st.sidebar.slider(
-            "Select Temperature", 0.1, 1.0, st.session_state['temperature'], 0.1)
-        if st.secrets.self_hosted != "false":
-            st.session_state['max_tokens'] = st.sidebar.slider(
-                "Select Max Tokens", 500, 4000, st.session_state['max_tokens'], 500)
-        else:
-            st.session_state['max_tokens'] = 500
-
-        chat_with_doc(st.session_state['model'], vector_store, stats_db=supabase)
-    elif user_choice == 'Forget':
-        st.sidebar.title("Configuration")
-
-        brain(supabase)
-    elif user_choice == 'Explore':
-        st.sidebar.title("Configuration")
-        view_document(supabase)
-
-    st.markdown("---\n\n")
-else:
-    st.error("Not authenticated")
+# Initialize session state variables
+if 'model' not in st.session_state:
+    st.session_state['model'] = "meta-llama/Llama-2-70b-chat-hf"
+if 'temperature' not in st.session_state:
+    st.session_state['temperature'] = 0.1
+if 'chunk_size' not in st.session_state:
+    st.session_state['chunk_size'] = 500
+if 'chunk_overlap' not in st.session_state:
+    st.session_state['chunk_overlap'] = 0
+if 'max_tokens' not in st.session_state:
+    st.session_state['max_tokens'] = 500
+if 'username' not in st.session_state:
+    st.session_state['username'] = username
+
+chat_with_doc(st.session_state['model'], vector_store, stats_db=supabase)
+
+st.markdown("---\n\n")
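Note: the hunks above elide the block where `vector_store` is built. Based on the imports that survive the change (`HuggingFaceInferenceAPIEmbeddings`, `SupabaseVectorStore`), a minimal sketch of that setup might look like the following; the embedding model, table name, and query name are assumed placeholders, not values taken from this commit:

    # Minimal sketch of the elided vector-store setup (not part of this diff).
    embeddings = HuggingFaceInferenceAPIEmbeddings(
        api_key=hf_api_key,
        model_name="sentence-transformers/all-MiniLM-L6-v2",  # assumed model
    )
    vector_store = SupabaseVectorStore(
        client=supabase,
        embedding=embeddings,
        table_name="documents",        # assumed table name
        query_name="match_documents",  # assumed RPC name
    )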
question.py CHANGED
@@ -16,30 +16,19 @@ hf_api_key = st.secrets.hf_api_key
 logger = get_logger(__name__)
 
 
-def count_tokens(question, model):
-    count = f'Words: {len(question.split())}'
-    if model.startswith("claude"):
-        count += f' | Tokens: {anthropic.count_tokens(question)}'
-    return count
-
-
 def chat_with_doc(model, vector_store: SupabaseVectorStore, stats_db):
 
     if 'chat_history' not in st.session_state:
        st.session_state['chat_history'] = []
 
-
-
     question = st.text_area("## Ask a question")
-    columns = st.columns(3)
+    columns = st.columns(2)
     with columns[0]:
         button = st.button("Ask")
     with columns[1]:
-        count_button = st.button("Count Tokens", type='secondary')
-    with columns[2]:
         clear_history = st.button("Clear History", type='secondary')
-
-
+
+    st.markdown("---\n\n")
 
     if clear_history:
         # Clear memory in Langchain
@@ -49,48 +38,43 @@ def chat_with_doc(model, vector_store: SupabaseVectorStore, stats_db):
 
     if button:
         qa = None
-        if not st.session_state["overused"]:
-            add_usage(stats_db, "chat", "prompt" + question, {"model": model, "temperature": st.session_state['temperature']})
-            if model.startswith("gpt"):
-                logger.info('Using OpenAI model %s', model)
-                qa = ConversationalRetrievalChain.from_llm(
-                    OpenAI(
-                        model_name=st.session_state['model'], openai_api_key=openai_api_key, temperature=st.session_state['temperature'], max_tokens=st.session_state['max_tokens']), vector_store.as_retriever(), memory=memory, verbose=True)
-            elif anthropic_api_key and model.startswith("claude"):
-                logger.info('Using Anthropics model %s', model)
-                qa = ConversationalRetrievalChain.from_llm(
-                    ChatAnthropic(
-                        model=st.session_state['model'], anthropic_api_key=anthropic_api_key, temperature=st.session_state['temperature'], max_tokens_to_sample=st.session_state['max_tokens']), vector_store.as_retriever(), memory=memory, verbose=True, max_tokens_limit=102400)
-            elif hf_api_key:
-                logger.info('Using HF model %s', model)
-                # print(st.session_state['max_tokens'])
-                endpoint_url = ("https://api-inference.huggingface.co/models/" + model)
-                model_kwargs = {"temperature": st.session_state['temperature'],
-                                "max_new_tokens": st.session_state['max_tokens'],
-                                "return_full_text": False}
-                hf = HuggingFaceEndpoint(
-                    endpoint_url=endpoint_url,
-                    task="text-generation",
-                    huggingfacehub_api_token=hf_api_key,
-                    model_kwargs=model_kwargs
-                )
-                qa = ConversationalRetrievalChain.from_llm(hf, retriever=vector_store.as_retriever(), memory=memory, verbose=True, return_source_documents=True)
-
-            st.session_state['chat_history'].append(("You", question))
-
-            # Generate model's response and add it to chat history
-            model_response = qa({"question": question})
-            logger.info('Result: %s', model_response["answer"])
-
-            st.session_state['chat_history'].append(("meraKB", model_response["answer"]))
-            logger.info('Sources: %s', model_response["source_documents"])
-
-            # Display chat history
-            st.empty()
-            for speaker, text in st.session_state['chat_history']:
-                st.markdown(f"**{speaker}:** {text}")
-        else:
-            st.error("You have used all your free credits. Please try again later or self host.")
-
-    if count_button:
-        st.write(count_tokens(question, model))
+        add_usage(stats_db, "chat", "prompt" + question, {"model": model, "temperature": st.session_state['temperature']})
+        if model.startswith("gpt"):
+            logger.info('Using OpenAI model %s', model)
+            qa = ConversationalRetrievalChain.from_llm(
+                OpenAI(
+                    model_name=st.session_state['model'], openai_api_key=openai_api_key, temperature=st.session_state['temperature'], max_tokens=st.session_state['max_tokens']), vector_store.as_retriever(), memory=memory, verbose=True)
+        elif anthropic_api_key and model.startswith("claude"):
+            logger.info('Using Anthropics model %s', model)
+            qa = ConversationalRetrievalChain.from_llm(
+                ChatAnthropic(
+                    model=st.session_state['model'], anthropic_api_key=anthropic_api_key, temperature=st.session_state['temperature'], max_tokens_to_sample=st.session_state['max_tokens']), vector_store.as_retriever(), memory=memory, verbose=True, max_tokens_limit=102400)
+        elif hf_api_key:
+            logger.info('Using HF model %s', model)
+            # print(st.session_state['max_tokens'])
+            endpoint_url = ("https://api-inference.huggingface.co/models/" + model)
+            model_kwargs = {"temperature": st.session_state['temperature'],
+                            "max_new_tokens": st.session_state['max_tokens'],
+                            "return_full_text": False}
+            hf = HuggingFaceEndpoint(
+                endpoint_url=endpoint_url,
+                task="text-generation",
+                huggingfacehub_api_token=hf_api_key,
+                model_kwargs=model_kwargs
+            )
+            qa = ConversationalRetrievalChain.from_llm(hf, retriever=vector_store.as_retriever(search_kwargs={"score_threshold": 0.6, "k": 4, "filter": {"user": st.session_state["username"]}}), memory=memory, verbose=True, return_source_documents=True)
+
+        st.session_state['chat_history'].append(("You", question))
+
+        # Generate model's response and add it to chat history
+        model_response = qa({"question": question})
+        logger.info('Result: %s', model_response["answer"])
+
+        st.session_state['chat_history'].append(("Safety Copilot", model_response["answer"]))
+        logger.info('Sources: %s', model_response["source_documents"])
+
+        # Display chat history
+        st.empty()
+        chat_history = st.session_state['chat_history']
+        for speaker, text in chat_history:
+            st.markdown(f"**{speaker}:** {text}")
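The functional change to note here, beyond the rebranding, is that the HuggingFace branch now scopes retrieval per user: `as_retriever` receives `search_kwargs` with a score threshold, `k=4`, and a metadata `filter` on `st.session_state["username"]`. That filter can only match documents that were ingested with a corresponding `user` key in their metadata; a minimal sketch of such ingestion using the standard LangChain `add_texts` API (the helper name is hypothetical, not part of this commit):

    # Hypothetical helper: store each chunk with a "user" metadata key so that
    # as_retriever(search_kwargs={"filter": {"user": username}}) can match it.
    def add_texts_for_user(vector_store, texts, username):
        metadatas = [{"user": username} for _ in texts]
        vector_store.add_texts(texts=texts, metadatas=metadatas)

One caveat worth verifying: in LangChain, a `score_threshold` inside `search_kwargs` is generally honored only when the retriever is created with `search_type="similarity_score_threshold"`; with the default similarity search it may simply be ignored.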