ali121300 committed on
Commit
cbf031d
1 Parent(s): 806107f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -11
app.py CHANGED
@@ -14,6 +14,7 @@ from deep_translator import GoogleTranslator
14
  import pandas as pd
15
  from langchain_groq import ChatGroq
16
  from openai import OpenAI
 
17
  # set this key as an environment variable
18
  os.environ["HUGGINGFACEHUB_API_TOKEN"] = st.secrets['Key2']
19
  ###########################################################################################
@@ -52,23 +53,20 @@ def get_vectorstore(text_chunks : list) -> FAISS:
52
  return vectorstore
53
 
54
 
55
- def get_conversation_chain(vectorstore: FAISS) -> ConversationalRetrievalChain:
56
- client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")
57
- llm = client.chat.completions.create(
58
- model="lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF",
59
- messages=[
60
- {"role": "system", "content": "Always answer in rhymes."},
61
- {"role": "user", "content": "Introduce yourself."}
62
- ],
63
- temperature=0.5,
64
- )
65
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
66
  conversation_chain = ConversationalRetrievalChain.from_llm(
67
- llm=llm, retriever=vectorstore.as_retriever(), memory=memory
 
 
 
68
  )
69
  return conversation_chain
70
 
71
 
 
72
  def handle_userinput(user_question:str):
73
  response = st.session_state.conversation({"question": user_question})
74
  st.session_state.chat_history = response["chat_history"]
 
14
  import pandas as pd
15
  from langchain_groq import ChatGroq
16
  from openai import OpenAI
17
+ from langchain.chat_models import ChatOpenAI
18
  # set this key as an environment variable
19
  os.environ["HUGGINGFACEHUB_API_TOKEN"] = st.secrets['Key2']
20
  ###########################################################################################
 
53
  return vectorstore
54
 
55
 
56
+ def get_conversation_chain(vectorstore):
57
+ llm = ChatOpenAI(temperature=0.2)
58
+ # llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.2, "max_length":512})
 
 
 
 
 
 
 
59
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
60
  conversation_chain = ConversationalRetrievalChain.from_llm(
61
+ llm=llm,
62
+ retriever=vectorstore.as_retriever(),
63
+ memory=memory,
64
+ # retriever_kwargs={"k": 1},
65
  )
66
  return conversation_chain
67
 
68
 
69
+
70
  def handle_userinput(user_question:str):
71
  response = st.session_state.conversation({"question": user_question})
72
  st.session_state.chat_history = response["chat_history"]