Spaces:
Sleeping
Sleeping
Amirizaniani
committed on
Commit
•
bb3f3bb
1
Parent(s):
f3cbe36
Update app.py
Browse files
app.py
CHANGED
@@ -14,8 +14,8 @@ def generate_prompts(user_input):
|
|
14 |
input_variables=["Question"],
|
15 |
template=f"list 10 quetion prompts for {user_input}"
|
16 |
)
|
17 |
-
config = {'max_new_tokens':
|
18 |
-
llm = CTransformers(model="TheBloke/
|
19 |
config=config)
|
20 |
hub_chain = LLMChain(prompt = prompt_template, llm = llm)
|
21 |
input_data = {"Question": user_input}
|
@@ -33,8 +33,8 @@ def answer_question(prompt):
|
|
33 |
input_variables=["Question"],
|
34 |
template=f"Answer '{prompt} 'and do not consider the number behind it."
|
35 |
)
|
36 |
-
config = {'max_new_tokens':
|
37 |
-
llm = CTransformers(model="
|
38 |
config=config)
|
39 |
hub_chain = LLMChain(prompt = prompt_template, llm = llm)
|
40 |
input_data = {"Question": prompt}
|
|
|
14 |
input_variables=["Question"],
|
15 |
template=f"list 10 quetion prompts for {user_input}"
|
16 |
)
|
17 |
+
config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
|
18 |
+
llm = CTransformers(model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
|
19 |
config=config)
|
20 |
hub_chain = LLMChain(prompt = prompt_template, llm = llm)
|
21 |
input_data = {"Question": user_input}
|
|
|
33 |
input_variables=["Question"],
|
34 |
template=f"Answer '{prompt} 'and do not consider the number behind it."
|
35 |
)
|
36 |
+
config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
|
37 |
+
llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGML",
|
38 |
config=config)
|
39 |
hub_chain = LLMChain(prompt = prompt_template, llm = llm)
|
40 |
input_data = {"Question": prompt}
|