Amirizaniani committed on
Commit
bb3f3bb
1 Parent(s): f3cbe36

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -14,8 +14,8 @@ def generate_prompts(user_input):
14
  input_variables=["Question"],
15
  template=f"list 10 quetion prompts for {user_input}"
16
  )
17
- config = {'max_new_tokens': 512, 'temperature': 0.7, 'context_length': 512}
18
- llm = CTransformers(model="TheBloke/zephyr-7B-alpha-GGUF",
19
  config=config)
20
  hub_chain = LLMChain(prompt = prompt_template, llm = llm)
21
  input_data = {"Question": user_input}
@@ -33,8 +33,8 @@ def answer_question(prompt):
33
  input_variables=["Question"],
34
  template=f"Answer '{prompt} 'and do not consider the number behind it."
35
  )
36
- config = {'max_new_tokens': 512, 'temperature': 0.7, 'context_length': 512}
37
- llm = CTransformers(model="mistralai/Mistral-7B-v0.1",
38
  config=config)
39
  hub_chain = LLMChain(prompt = prompt_template, llm = llm)
40
  input_data = {"Question": prompt}
 
14
  input_variables=["Question"],
15
  template=f"list 10 quetion prompts for {user_input}"
16
  )
17
+ config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
18
+ llm = CTransformers(model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
19
  config=config)
20
  hub_chain = LLMChain(prompt = prompt_template, llm = llm)
21
  input_data = {"Question": user_input}
 
33
  input_variables=["Question"],
34
  template=f"Answer '{prompt} 'and do not consider the number behind it."
35
  )
36
+ config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
37
+ llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGML",
38
  config=config)
39
  hub_chain = LLMChain(prompt = prompt_template, llm = llm)
40
  input_data = {"Question": prompt}