Amirizaniani committed
Commit 76e70fb
1 Parent(s): d4b3a9a
Update app.py
app.py CHANGED
@@ -1,25 +1,23 @@
 import gradio as gr
+from dotenv import load_dotenv
 from langchain import PromptTemplate, LLMChain, HuggingFaceHub
 from langchain.llms import CTransformers
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers import pipeline
 from langchain.llms.huggingface_pipeline import HuggingFacePipeline
 
+load_dotenv()
 
 def generate_prompts(user_input):
 
-    model_name = "TheBloke/Mistral-7B-Instruct-v0.1-GGUF"
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-
-    config = {"temperature": 0.7, "max_length": 50} # Example configuration
-    llm = CTransformers(model=model, tokenizer=tokenizer, config=config)
-
     prompt_template = PromptTemplate(
         input_variables=["Question"],
-        template=f"Just list 10 quetion prompts for {user_input} and don't put number before each of the prompts."
-
-
+        template=f"Just list 10 quetion prompts for {user_input} and don't put number before each of the prompts."
+    )
+    config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
+    llm = CTransformers(model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
+                        config=config,
+                        threads=os.cpu_count())
     hub_chain = LLMChain(prompt = prompt_template, llm = llm)
     input_data = {"Question": user_input}
 
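For reference, a minimal runnable sketch of the updated generate_prompts is below. It is not part of the commit: it assumes an `import os` line (the committed hunk calls os.cpu_count() without showing one), corrects the "quetion" typo, passes user_input through the "Question" template variable instead of an f-string, moves threads into the ctransformers config dict, names a quantized model_file from the GGUF repo, and returns the chain output so the Gradio UI has something to display. Model name and the other config values are taken from the diff above.

import os

from dotenv import load_dotenv
from langchain import LLMChain, PromptTemplate
from langchain.llms import CTransformers

load_dotenv()

def generate_prompts(user_input: str) -> str:
    # Route user_input through the "Question" variable rather than interpolating
    # it with an f-string as the committed code does.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template=("Just list 10 question prompts for {Question} "
                  "and don't put a number before each of the prompts."),
    )

    # ctransformers settings from the diff; 'threads' is placed in the config
    # dict, which ctransformers accepts, instead of as a keyword argument.
    config = {
        "max_new_tokens": 2048,
        "temperature": 0.7,
        "context_length": 4096,
        "threads": os.cpu_count(),
    }
    llm = CTransformers(
        model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
        model_file="mistral-7b-instruct-v0.1.Q4_K_M.gguf",  # assumed: one of the quantized files in that repo
        model_type="mistral",
        config=config,
    )

    hub_chain = LLMChain(prompt=prompt_template, llm=llm)
    input_data = {"Question": user_input}
    return hub_chain.run(input_data)  # return the text so Gradio can display it

In the full app this function is presumably wired to the imported Gradio module with something like gr.Interface(fn=generate_prompts, inputs="text", outputs="text").launch(), but that part of app.py falls outside this hunk.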