Amirizaniani committed on
Commit
1e30676
1 Parent(s): 0e2c7c3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +82 -0
app.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

import gradio as gr
from langchain import PromptTemplate, LLMChain
from langchain.llms import CTransformers
def generate_prompts(user_input):
    """Generate a list of question prompts about *user_input*.

    Runs a local Mistral-7B GGUF model through LangChain and parses its
    newline-separated output into a list of prompt strings.

    Args:
        user_input: Topic or question to generate prompts for.

    Returns:
        list[str]: The generated prompts (each item is the text following a
        "Question:" marker; leading/trailing context before the first marker
        is dropped).
    """
    # Use a real {Question} placeholder so LangChain performs the
    # substitution. The original f-string baked user_input into the template,
    # leaving input_variables=["Question"] with no matching placeholder.
    # Also fixes the "quetion" typo in the prompt text.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="Just list 10 question prompts for {Question} and don't put number before each of the prompts."
    )
    config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
    llm = CTransformers(
        model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
        config=config,
        threads=os.cpu_count(),  # use all cores for CPU inference
    )
    hub_chain = LLMChain(prompt=prompt_template, llm=llm)

    generated_prompts = hub_chain.run({"Question": user_input})

    # Keep only non-empty lines, tag each with a "Question:" marker, then
    # split on that marker so each list item is one clean prompt.
    lines = generated_prompts.split('\n')
    formatted_questions = "\n".join(
        f"Question: {question}" for question in lines if question.strip()
    )
    questions_list = formatted_questions.split("Question:")[1:]
    return questions_list
def answer_question(prompt):
    """Generate a single answer for *prompt* with a local Llama-2 model.

    Args:
        prompt: The question text to answer.

    Returns:
        str: The model's generated answer.
    """
    # Use a real {Question} placeholder so LangChain substitutes the value;
    # the original f-string pre-filled the template and left
    # input_variables=["Question"] with nothing to bind.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="give one answer for {Question} and do not consider the number behind it."
    )
    config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
    llm = CTransformers(
        model="TheBloke/Llama-2-7B-Chat-GGML",
        config=config,
        threads=os.cpu_count(),  # use all cores for CPU inference
    )
    hub_chain = LLMChain(prompt=prompt_template, llm=llm)
    generated_answer = hub_chain.run({"Question": prompt})
    return generated_answer
# Module-level registry of the 10 answer Textbox widgets created inside the
# Blocks layout below; passed as the outputs list of btnExec.click so
# setTextVisibility can show/hide them.
text_list = []
def updateChoices(prompt):
    """Rebuild the prompt checkbox group from freshly generated prompts."""
    fresh_choices = generate_prompts(prompt)
    return gr.CheckboxGroup(choices=fresh_choices)
def setTextVisibility(cbg):
    """Build 10 Textbox updates for the selected prompts.

    For each prompt ticked in *cbg*, produce a visible Textbox labelled with
    the prompt and pre-filled with its generated answer; pad the remainder of
    the 10 slots with hidden, unlabelled boxes.
    """
    visible_boxes = []
    for selected in cbg:
        visible_boxes.append(
            gr.Textbox(visible=True, label=selected, value=answer_question(selected))
        )
    hidden_count = 10 - len(cbg)
    hidden_boxes = [gr.Textbox(visible=False, label="") for _ in range(hidden_count)]
    return visible_boxes + hidden_boxes
# Gradio UI: widget creation order and context-manager nesting define the
# page layout, so statements below are kept in their original order.
with gr.Blocks() as demo:

    # Static page header.
    gr.HTML("""
    <div style="text-align: center; max-width: 1240px; margin: 0 auto;">
        <h1 style="font-weight: 200; font-size: 20px; margin-bottom:8px; margin-top:0px;">
            Auditing LLMs
        </h1>
        <hr style="margin-bottom:5px; margin-top:5px;">

    </div>
    """)

    with gr.Row():
        prompt_input = gr.Textbox(label="Enter your question", placeholder="Enter Your Question")
    with gr.Row():
        generate_button = gr.Button("Generate")
    with gr.Column():
        # Starts empty; filled by updateChoices after "Generate" is clicked.
        cbg = gr.CheckboxGroup(choices=[], label="List of the prompts", interactive=True)

    # Regenerate the checkbox choices from the user's question.
    generate_button.click(updateChoices, inputs=[prompt_input], outputs=[cbg])

    # NOTE(review): "exec" shadows the builtin of the same name — consider
    # renaming this Row handle.
    with gr.Row(variant="compact") as exec:
        btnExec = gr.Button("Execute")
    with gr.Column() as texts:
        # Pre-create 10 hidden answer boxes; setTextVisibility toggles them.
        for i in range(10):
            text = gr.Textbox(label="_", visible=False)
            text_list.append(text)

    # Answer each selected prompt and reveal the corresponding boxes.
    btnExec.click(setTextVisibility, inputs=cbg, outputs=text_list)

    # "Clear" navigates to the locally hosted app root rather than clearing
    # components — presumably a page-reset shortcut; verify the port.
    clear = gr.ClearButton(link="http://127.0.0.1:8160")

# Launch the Gradio app
demo.launch()