ali121300 committed on
Commit
26a5f45
1 Parent(s): f8b9743

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -12,6 +12,7 @@ from htmlTemplates import css, bot_template, user_template
12
  from langchain.llms import HuggingFaceHub
13
  from deep_translator import GoogleTranslator
14
  import pandas as pd
 
15
  # set this key as an environment variable
16
  os.environ["HUGGINGFACEHUB_API_TOKEN"] = st.secrets['Key2']
17
  ###########################################################################################
@@ -52,14 +53,14 @@ def get_vectorstore(text_chunks : list) -> FAISS:
52
 
53
  def get_conversation_chain(vectorstore:FAISS) -> ConversationalRetrievalChain:
54
  # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
55
- llm = HuggingFaceHub(
56
  #repo_id="mistralai/Mistral-7B-Instruct-v0.2",
57
  #repo_id="cognitivecomputations/Llama-3-70B-Gradient-1048k-adapter",
58
  #repo_id="TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
59
- repo_id="mostafaamiri/persian-llama-7b-GGUF-Q4",
60
- model_kwargs={"temperature": 0.1, "max_length": 2048},
61
- )
62
-
63
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
64
  conversation_chain = ConversationalRetrievalChain.from_llm(
65
  llm=llm, retriever=vectorstore.as_retriever(),memory=memory
 
12
  from langchain.llms import HuggingFaceHub
13
  from deep_translator import GoogleTranslator
14
  import pandas as pd
15
+ from langchain_groq import ChatGroq
16
  # set this key as an environment variable
17
  os.environ["HUGGINGFACEHUB_API_TOKEN"] = st.secrets['Key2']
18
  ###########################################################################################
 
53
 
54
  def get_conversation_chain(vectorstore:FAISS) -> ConversationalRetrievalChain:
55
  # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
56
+ #llm = HuggingFaceHub(
57
  #repo_id="mistralai/Mistral-7B-Instruct-v0.2",
58
  #repo_id="cognitivecomputations/Llama-3-70B-Gradient-1048k-adapter",
59
  #repo_id="TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
60
+ # repo_id="mostafaamiri/persian-llama-7b-GGUF-Q4",
61
+ # model_kwargs={"temperature": 0.1, "max_length": 2048},
62
+ #)
63
+ llm = ChatGroq(temperature=0.5, model_name="llama3-8b-8192", groq_api_key=st.secrets['GROQ_API_KEY'])  # SECURITY: the original commit hardcoded a live Groq API key ("gsk_..."); it is redacted here and must be revoked/rotated immediately — load it from st.secrets like the HF token above
64
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
65
  conversation_chain = ConversationalRetrievalChain.from_llm(
66
  llm=llm, retriever=vectorstore.as_retriever(),memory=memory