Amirizaniani committed on
Commit d90f64f
1 parent: 7ad3815

Update app.py

Files changed (1)
  1. app.py +164 -143
app.py CHANGED
@@ -21,126 +21,137 @@ import csv
 
 load_dotenv()
 
 def generate_prompts(user_input):
     prompt_template = PromptTemplate(
         input_variables=["Question"],
-        template=f"Just list 10 question prompts for {user_input} and don't put number before each of the prompts."
     )
-    config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
     llm = CTransformers(model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
                         config=config)
-    hub_chain = LLMChain(prompt = prompt_template, llm = llm)
     input_data = {"Question": user_input}
 
     generated_prompts = hub_chain.run(input_data)
     questions_list = generated_prompts.split('\n')
-
 
-    formatted_questions = "\n".join(f"Question: {question}" for i, question in enumerate(questions_list) if question.strip())
     questions_list = formatted_questions.split("Question:")[1:]
     return questions_list
 
-def answer_question(prompt):
     prompt_template = PromptTemplate(
         input_variables=["Question"],
-        template=f"give one answer for {prompt} and do not consider the number behind it."
     )
-    config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
-    llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGML",
-                        config=config)
-    hub_chain = LLMChain(prompt = prompt_template, llm = llm)
-    input_data = {"Question": prompt}
     generated_answer = hub_chain.run(input_data)
     return generated_answer
 
-def calculate_similarity(word, other_words, model, threshold=0.5):
-    embeddings_word = model.encode([word])
-    embeddings_other_words = model.encode(other_words)
-    for i, embedding in enumerate(embeddings_other_words):
-        similarity = 1 - scipy.spatial.distance.cosine(embeddings_word[0], embedding)
-        if similarity > threshold and similarity < 0.85:
-            return i, similarity
-    return None, None
-
-
-def highlight_similar_paragraphs_with_colors(paragraphs, similarity_threshold=0.75):
     model = SentenceTransformer('all-MiniLM-L6-v2')
 
-    # Split each paragraph into sentences
-    all_sentences = [tokenize.sent_tokenize(paragraph) for paragraph in paragraphs]
-
-    # Initialize storage for highlighted sentences
-    highlighted_sentences = [['' for sentence in para] for para in all_sentences]
-    colors = ['yellow', 'lightgreen', 'lightblue', 'pink', 'lavender', 'salmon', 'peachpuff', 'powderblue', 'khaki', 'wheat']
 
-    # Track which sentences belong to which paragraph
-    sentence_to_paragraph_index = [idx for idx, para in enumerate(all_sentences) for sentence in para]
-
-    # Encode all sentences into vectors
-    flattened_sentences = [sentence for para in all_sentences for sentence in para]
-    sentence_embeddings = model.encode(flattened_sentences)
-
-    # Calculate cosine similarities between all pairs of sentences
-    cosine_similarities = util.pytorch_cos_sim(sentence_embeddings, sentence_embeddings)
-
-    # Iterate through each sentence pair and highlight if they are similar but from different paragraphs
-    color_index = 0
-    for i, embedding_i in enumerate(sentence_embeddings):
-        for j, embedding_j in enumerate(sentence_embeddings):
-            if i != j and cosine_similarities[i, j] > similarity_threshold and sentence_to_paragraph_index[i] != sentence_to_paragraph_index[j]:
-                color = colors[color_index % len(colors)]
-                if highlighted_sentences[sentence_to_paragraph_index[i]][i % len(all_sentences[sentence_to_paragraph_index[i]])] == '':
-                    highlighted_sentences[sentence_to_paragraph_index[i]][i % len(all_sentences[sentence_to_paragraph_index[i]])] = ("<span style='color: " + color + "'>" + flattened_sentences[i] + "</span>")
-                if highlighted_sentences[sentence_to_paragraph_index[j]][j % len(all_sentences[sentence_to_paragraph_index[j]])] == '':
-                    highlighted_sentences[sentence_to_paragraph_index[j]][j % len(all_sentences[sentence_to_paragraph_index[j]])] = ("<span style='color: " + color + "'>" + flattened_sentences[j] + "</span>")
-                color_index += 1  # Move to the next color
-
-    # Combine sentences back into paragraphs
-    highlighted_paragraphs = [' '.join(para) for para in highlighted_sentences]
-
-    # Combine all paragraphs into one HTML string
-    html_output = '<div>' + '<br/><br/>'.join(highlighted_paragraphs) + '</div>'
-    return highlighted_paragraphs
 
-
-def calculate_similarity_score(sentences):
-    # Encode all sentences to get their embeddings
-    model = SentenceTransformer('all-MiniLM-L6-v2')
-    embeddings = model.encode(sentences)
-
-    # Calculate average cosine similarity
-    total_similarity = 0
-    comparisons = 0
-    for i in range(len(embeddings)):
-        for j in range(i+1, len(embeddings)):
-            # Cosine similarity between embeddings
-            similarity = 1 - cosine(embeddings[i], embeddings[j])
-            total_similarity += similarity
-            comparisons += 1
-
-    # Average similarity
-    average_similarity = total_similarity / comparisons if comparisons > 0 else 0
-
-    # Scale from [-1, 1] to [0, 100]
-    score_out_of_100 = (average_similarity + 1) / 2 * 100
-    return score_out_of_100
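For context on the scoring being removed here: the scaler maps a mean pairwise cosine similarity s in [-1, 1] linearly onto [0, 100]. A minimal standalone check (plain Python, no app dependencies):

```python
# Linear rescale from [-1, 1] to [0, 100], as in the removed calculate_similarity_score.
def scale_to_100(avg_similarity: float) -> float:
    return (avg_similarity + 1) / 2 * 100

assert scale_to_100(-1.0) == 0.0
assert scale_to_100(0.7) == 85.0  # e.g. a mean similarity of 0.7 scores 85
```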
-
-def answer_question1(prompt):
-    prompt_template = PromptTemplate.from_template(
-        input_variables=["Question"],
-        template=f"give one answer for {prompt} and do not consider the number behind it."
-    )
-    config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
-    llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGML",
-                        config=config,
-                        threads=os.cpu_count())
-    hub_chain = LLMChain(prompt = prompt_template, llm = llm)
-    input_data = {"Question": prompt}
-    generated_answer = hub_chain.run(input_data)
-    return generated_answer
 
-def process_inputs(llm, file, relevance, diversity):
     # Check if file is uploaded
     if file is not None:
         # Read questions from the uploaded Excel file
@@ -156,11 +167,11 @@ def process_inputs(llm, file, relevance, diversity):
         # Extract the first column
         questions_list = df.iloc[:, 0]
 
-        # Initialize lists to store the expanded data
         # Initialize lists to store the expanded data
         expanded_questions = []
         expanded_prompts = []
         expanded_answers = []
 
         # Generate prompts for each question and expand the data
         for question in questions_list:
@@ -169,14 +180,43 @@ def process_inputs(llm, file, relevance, diversity):
             expanded_prompts.extend(prompts)
 
             # Generate answers for each prompt
-            answers = [answer_question(prompt) for prompt in prompts]
             expanded_answers.extend(answers)
 
         # Combine the expanded data into a DataFrame
         output_df = pd.DataFrame({
             'Questions': expanded_questions,
             'Generated Prompts': expanded_prompts,
-            'Answers': expanded_answers
         })
 
         # Save the DataFrame to a new Excel file
@@ -186,40 +226,20 @@ def process_inputs(llm, file, relevance, diversity):
         return "No questions provided.", None
 
     return "Processing complete. Download the file below.", output_file
-
 
 text_list = []
 
 def updateChoices(prompt):
     newChoices = generate_prompts(prompt)
     return gr.CheckboxGroup(choices=newChoices)
 
-def setTextVisibility(cbg, model_name_input):
-
-    sentences = [answer_question(text, model_name_input) for text in cbg]
-
-    # Apply highlighting to all processed sentences, receiving one complete HTML string.
-    highlighted_html = []
-    highlighted_html = highlight_similar_paragraphs_with_colors(sentences, similarity_threshold=0.75)
-
-    result = []
-    # Iterate through each original 'cbg' sentence and pair it with the entire highlighted block.
-    for idx, sentence in enumerate(highlighted_html):
-        result.append("<p><strong>" + cbg[idx] + "</strong></p><p>" + sentence + "</p><br/>")
-
-    score = round(calculate_similarity_score(highlighted_html))
-
-    final_html = f"""<div>{result}<div style="text-align: center; font-size: 24px; font-weight: bold;">Similarity Score: {score}</div></div>"""
-
-    return final_html
-
-    # update_show = [gr.Textbox(visible=True, label=text, value=answer_question(text, model_name_input)) for text in cbg]
-    # update_hide = [gr.Textbox(visible=False, label="") for _ in range(10-len(cbg))]
-    # return update_show + update_hide
 
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
     with gr.Tab("Live Mode"):
@@ -228,7 +248,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Markdown("In Live Auditing Mode, you gain the ability to probe the LLM directly.")
         gr.Markdown("First, select the LLM you wish to audit. Then, enter your question. The AuditLLM tool will generate five relevant and diverse prompts based on your question. You can now select these prompts for auditing the LLMs. Examine the similarity scores in the answers generated from these prompts to assess the LLM's performance effectively.")
         with gr.Row():
-            model_name_input = gr.Dropdown([("Llama-2-7B", "TheBloke/Llama-2-7B-Chat-GGML"), ("Falcon-180B", "TheBloke/Falcon-180B-Chat-GGUF"), ("Zephyr-7B", "TheBloke/zephyr-quiklang-3b-4K-GGUF"), ("Vicuna-33B", "TheBloke/vicuna-33B-GGUF"), ("Claude2", "TheBloke/claude2-alpaca-13B-GGUF"), ("Alpaca-7B", "TheBloke/LeoScorpius-GreenNode-Alpaca-7B-v1-GGUF")], label="Large Language Model")
         with gr.Row():
             prompt_input = gr.Textbox(label="Enter your question", placeholder="Enter Your Question")
         with gr.Row():
@@ -241,22 +261,17 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         with gr.Row() as exec:
             btnExec = gr.Button("Execute", variant="primary", min_width=200)
 
-
         with gr.Column() as texts:
             for i in range(10):
                 text = gr.Textbox(label="_", visible=False)
                 text_list.append(text)
 
         with gr.Column():
-            html_result = gr.HTML("""<div style="color: red"></div>""")
 
-        # btnExec.click(setTextVisibility, inputs=[cbg, model_name_input], outputs=text_list)
         btnExec.click(setTextVisibility, inputs=[cbg, model_name_input], outputs=html_result)
-        gr.HTML("""
-        <div style="text-align: center; font-size: 24px; font-weight: bold;">Similarity Score: </div>
-        """)
 
-        clear = gr.ClearButton(link = "http://127.0.0.1:7865")
 
     with gr.Tab("Batch Mode"):
 
@@ -265,14 +280,18 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Markdown("To tailor the generation of these five prompts from your original question, you can adjust the relevance and diversity scores. The relevance score determines how closely the generated prompts should align with the original question, while the diversity score dictates the variance among the prompts themselves.")
         gr.Markdown("Upon completion, please provide your email address. We will compile and send the answers to you promptly.")
 
-        llm_dropdown = gr.Dropdown(choices=[
-            ("Llama-2-7B", "TheBloke/Llama-2-7B-Chat-GGML"),
-            ("Falcon-180B", "TheBloke/Falcon-180B-Chat-GGUF"),
-            ("Zephyr-7B", "TheBloke/zephyr-quiklang-3b-4K-GGUF"),
-            ("Vicuna-33B", "TheBloke/vicuna-33B-GGUF"),
-            ("Claude2", "TheBloke/claude2-alpaca-13B-GGUF"),
-            ("Alpaca-7B", "TheBloke/LeoScorpius-GreenNode-Alpaca-7B-v1-GGUF")],
-            label="Large Language Model")
         file_upload = gr.File(label="Upload an Excel File with Questions", file_types=[".xlsx"])
         with gr.Row():
             relevance_slider = gr.Slider(1, 100, value=70, label="Relevance", info="Choose between 0 and 100", interactive=True)
@@ -283,11 +302,13 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         output_textbox = gr.Textbox(label="Output")
         download_button = gr.File(label="Download Processed File")
 
-        def on_submit(llm, file, relevance, diversity):
-            result, output_file = process_inputs(llm, file, relevance, diversity)
             return result, output_file
 
-        submit_button.click(fn=on_submit, inputs=[llm_dropdown, file_upload, relevance_slider, diversity_slider], outputs=[output_textbox, download_button])
 
 # Launch the Gradio app
 demo.launch()
 
 
 load_dotenv()
 
+# Global list of the selectable LLM options, as (label, model name) pairs
+LLM_OPTIONS = [
+    ("Llama-2-7B", "TheBloke/Llama-2-7B-Chat-GGML"),
+    ("Falcon-180B", "TheBloke/Falcon-180B-Chat-GGUF"),
+    ("Zephyr-7B", "zephyr-quiklang-3b-4k.Q4_K_M.gguf"),
+    ("Vicuna-33B", "TheBloke/vicuna-33B-GGUF"),
+    ("Claude2", "TheBloke/claude2-alpaca-13B-GGUF"),
+    ("Alpaca-7B", "TheBloke/LeoScorpius-GreenNode-Alpaca-7B-v1-GGUF")
+]
+
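One entry worth flagging: unlike the other values, which are Hugging Face repo ids, the Zephyr-7B value is a bare GGUF filename, so CTransformers will presumably look for it on the local filesystem. A hedged sanity check, illustrative only and not part of the commit:

```python
import os

# Warn when an option points at a local .gguf file that does not exist;
# repo-id style values are left to be resolved from the Hub.
for label, model in LLM_OPTIONS:
    if model.endswith(".gguf") and not os.path.exists(model):
        print(f"Warning: {label} -> {model} looks like a local file that is missing")
```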
 def generate_prompts(user_input):
+    print("User input here")
+    print(user_input)
     prompt_template = PromptTemplate(
         input_variables=["Question"],
+        template=f"Just list 5 distinct and separate yet relevant question prompts for {user_input} and don't put number before any of the prompts."
     )
+    config = {'max_new_tokens': 256, 'temperature': 0.7, 'context_length': 256}
     llm = CTransformers(model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
                         config=config)
+    hub_chain = LLMChain(prompt=prompt_template, llm=llm)
     input_data = {"Question": user_input}
 
     generated_prompts = hub_chain.run(input_data)
     questions_list = generated_prompts.split('\n')
 
+    formatted_questions = "\n".join(f"Question: {question}" for question in questions_list if question.strip())
     questions_list = formatted_questions.split("Question:")[1:]
     return questions_list
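The post-processing above tags every non-empty line of the completion with `Question:` and then splits on that tag to recover a clean list. A self-contained sketch with a made-up completion; note each recovered entry keeps a leading space and, except for the last, a trailing newline:

```python
# Reproduces the parsing in generate_prompts on a hypothetical model completion.
raw = "What causes inflation?\n\nHow is inflation measured?"
lines = raw.split('\n')
formatted = "\n".join(f"Question: {q}" for q in lines if q.strip())
prompts = formatted.split("Question:")[1:]
print(prompts)  # [' What causes inflation?\n', ' How is inflation measured?']
```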
 
+def answer_question(prompt, model_name):
+    print("inside answer question function")
+    print("prompt")
+    print(prompt)
+    print("")
+    print("model name")
+    print(model_name)
+    print("")
+
     prompt_template = PromptTemplate(
         input_variables=["Question"],
+        template=f"Please provide a concise and relevant answer for {prompt} in three sentences or less and don't put Answer in front of what you return. You are a helpful and factual assistant, do not say thank you or you are happy to assist just answer the question."
     )
+    config = {'max_new_tokens': 256, 'temperature': 0.7, 'context_length': 256}
+    llm = CTransformers(model=model_name,
+                        config=config,
+                        threads=os.cpu_count())
+    hub_chain = LLMChain(prompt=prompt_template, llm=llm)
+    input_data = {"Answer the question": prompt}
     generated_answer = hub_chain.run(input_data)
+    print("generated answer")
+    print(generated_answer)
     return generated_answer
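Note that the chain declares `input_variables=["Question"]` but is run with the key `"Answer the question"`; since the f-string has already baked the prompt into the template, this mismatch may raise a missing-input error depending on the LangChain version. A hedged sketch of the more conventional pattern, assuming the imports app.py already uses, with `build_answer_chain` as a hypothetical helper name:

```python
from langchain import PromptTemplate, LLMChain

def build_answer_chain(llm):
    # Keep the {Question} placeholder in the template and match it
    # with the key passed to run(); no f-string interpolation needed.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="Please provide a concise and relevant answer for {Question} in three sentences or less."
    )
    return LLMChain(prompt=prompt_template, llm=llm)

# usage: build_answer_chain(llm).run({"Question": prompt})
```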
 
+def calculate_sentence_similarities(sentences_list):
     model = SentenceTransformer('all-MiniLM-L6-v2')
+    embeddings_list = [model.encode(sentences) for sentences in sentences_list]
+    similarity_matrices = []
 
+    for i in range(len(embeddings_list)):
+        for j in range(i + 1, len(embeddings_list)):
+            similarity_matrix = util.pytorch_cos_sim(embeddings_list[i], embeddings_list[j]).numpy()
+            similarity_matrices.append((i, j, similarity_matrix))
 
+    return similarity_matrices
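For reference, `util.pytorch_cos_sim` returns the full pairwise matrix, so comparing m sentences against n sentences yields an (m, n) tensor. A small self-contained shape check under the same model:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('all-MiniLM-L6-v2')
a = model.encode(["The sky is blue.", "Cats purr."])           # 2 sentences
b = model.encode(["Blue skies today.", "Dogs bark.", "Tea."])  # 3 sentences
sim = util.pytorch_cos_sim(a, b)
print(sim.shape)  # torch.Size([2, 3]); entry [i, j] is cos(a_i, b_j)
```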
+
+def highlight_similar_sentences(sentences_list, similarity_threshold):
+    similarity_matrices = calculate_sentence_similarities(sentences_list)
+    highlighted_sentences = [[] for _ in sentences_list]
+
+    for (i, j, similarity_matrix) in similarity_matrices:
+        for idx1 in range(similarity_matrix.shape[0]):
+            for idx2 in range(similarity_matrix.shape[1]):
+                similarity = similarity_matrix[idx1][idx2]
+                print(f"Similarity between sentence {idx1} in paragraph {i} and sentence {idx2} in paragraph {j}: {similarity:.2f}")
+                if similarity >= similarity_threshold:
+                    print("Greater than sim!")
+                    if (idx1, "powderblue", similarity) not in highlighted_sentences[i]:
+                        highlighted_sentences[i].append((idx1, "powderblue", similarity))
+                    if (idx2, "powderblue", similarity) not in highlighted_sentences[j]:
+                        highlighted_sentences[j].append((idx2, "powderblue", similarity))
+
+    for i, sentences in enumerate(sentences_list):
+        highlighted = []
+        for j, sentence in enumerate(sentences):
+            color = "none"
+            score = 0
+            for idx, col, sim in highlighted_sentences[i]:
+                if idx == j:
+                    color = col
+                    score = sim
+                    break
+            highlighted.append({"text": sentence, "background-color": color, "score": score})
+        highlighted_sentences[i] = highlighted
+
+    print(highlighted_sentences)
+    return highlighted_sentences
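An illustrative call showing the shape of the return value; the threshold 0.5 matches what the callers below pass, and the similarity numbers are whatever the model produces:

```python
# Two hypothetical answers, already split into sentences by the caller.
sentences_list = [
    ["Paris is the capital of France.", "It is famous for the Eiffel Tower."],
    ["The capital of France is Paris.", "Croissants are popular there."],
]
result = highlight_similar_sentences(sentences_list, 0.5)
# Each sentence becomes a dict:
#   {"text": <sentence>,
#    "background-color": "powderblue" if it matched across answers, else "none",
#    "score": the matched cosine similarity, or 0}
```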
 
+def setTextVisibility(cbg, model_name_input):
+    selected_prompts = cbg
+    answers = [answer_question(prompt, model_name_input) for prompt in selected_prompts]
+
+    sentences_list = [tokenize.sent_tokenize(answer) for answer in answers]
+    highlighted_sentences_list = highlight_similar_sentences(sentences_list, 0.5)
+
+    result = []
+    for idx, (prompt, highlighted_sentences) in enumerate(zip(selected_prompts, highlighted_sentences_list)):
+        result.append(f"<p><strong>Prompt: {prompt}</strong></p>")
+        for sentence_info in highlighted_sentences:
+            color = sentence_info.get('background-color', 'none')  # Read the highlight colour assigned above
+            result.append(f"<p style='background-color: {color};'><strong>{sentence_info['text']}</strong></p>")
+
+    blue_scores_list = [[info['score'] for info in highlighted_sentences if info['background-color'] == 'powderblue'] for highlighted_sentences in highlighted_sentences_list]
+    blue_scores = [score for scores in blue_scores_list for score in scores]
+
+    if blue_scores:
+        overall_score = round(np.mean(blue_scores) * 100)
+    else:
+        overall_score = 0
+
+    final_html = f"""<div>{''.join(result)}<div style="text-align: center; font-size: 24px; font-weight: bold;">Similarity Score: {overall_score}</div></div>"""
+
+    print("")
+    print("final html")
+    print(final_html)
+    return final_html
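The overall score is simply the mean of the above-threshold ("powderblue") similarities scaled to 0-100. A worked check:

```python
import numpy as np

blue_scores = [0.62, 0.78]                         # two cross-answer matches
overall_score = round(np.mean(blue_scores) * 100)  # mean 0.70 -> 70
assert overall_score == 70
```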
+
+
+def process_inputs(file, relevance, diversity, model_name):
     # Check if file is uploaded
     if file is not None:
         # Read questions from the uploaded Excel file
 
         # Extract the first column
         questions_list = df.iloc[:, 0]
 
         # Initialize lists to store the expanded data
         expanded_questions = []
         expanded_prompts = []
         expanded_answers = []
+        semantic_similarities = []
 
         # Generate prompts for each question and expand the data
         for question in questions_list:
             expanded_prompts.extend(prompts)
 
             # Generate answers for each prompt
+            answers = [answer_question(prompt, model_name) for prompt in prompts]
             expanded_answers.extend(answers)
 
+            # Calculate semantic similarity score for each answer
+            similarity_scores = []
+            for answer in answers:
+                sentences_list = tokenize.sent_tokenize(answer)
+                highlighted_sentences_list = highlight_similar_sentences([sentences_list], 0.5)
+                print("highlighted sentences list")
+                print(highlighted_sentences_list)
+
+                blue_scores_list = [[info['score'] for info in highlighted_sentences if info['background-color'] == 'powderblue'] for highlighted_sentences in highlighted_sentences_list]
+                blue_scores = [score for scores in blue_scores_list for score in scores]
+
+                if blue_scores:
+                    overall_score = round(np.mean(blue_scores) * 100)
+                else:
+                    overall_score = 0
+
+                similarity_scores.append(overall_score)
+                print("overall score")
+                print(overall_score)
+
+            # Calculate mean similarity score for each question
+            question_similarity_score = np.mean(similarity_scores)
+            print("question sim score")
+            print(question_similarity_score)
+
+            # Extend the list with the same score for all answers to this question
+            semantic_similarities.extend([question_similarity_score] * len(prompts))
+
         # Combine the expanded data into a DataFrame
         output_df = pd.DataFrame({
             'Questions': expanded_questions,
             'Generated Prompts': expanded_prompts,
+            'Answers': expanded_answers,
+            'Semantic Similarity': semantic_similarities
         })
 
         # Save the DataFrame to a new Excel file
 
         return "No questions provided.", None
 
     return "Processing complete. Download the file below.", output_file
 
 
 text_list = []
 
+def get_model_name(model_label):
+    # Retrieve the model name based on the selected label
+    for label, name in LLM_OPTIONS:
+        if label == model_label:
+            return name
+    return None
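A quick usage check, plus the equivalent dict lookup for anyone who prefers constant-time access:

```python
assert get_model_name("Llama-2-7B") == "TheBloke/Llama-2-7B-Chat-GGML"
assert get_model_name("not-a-label") is None

# Equivalent one-liner: LLM_OPTIONS is a list of (label, name) pairs.
model_name = dict(LLM_OPTIONS).get("Llama-2-7B")
```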
+
 def updateChoices(prompt):
     newChoices = generate_prompts(prompt)
     return gr.CheckboxGroup(choices=newChoices)
 
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
     with gr.Tab("Live Mode"):
 
         gr.Markdown("In Live Auditing Mode, you gain the ability to probe the LLM directly.")
         gr.Markdown("First, select the LLM you wish to audit. Then, enter your question. The AuditLLM tool will generate five relevant and diverse prompts based on your question. You can now select these prompts for auditing the LLMs. Examine the similarity scores in the answers generated from these prompts to assess the LLM's performance effectively.")
         with gr.Row():
+            model_name_input = gr.Dropdown(choices=LLM_OPTIONS, label="Large Language Model")
         with gr.Row():
             prompt_input = gr.Textbox(label="Enter your question", placeholder="Enter Your Question")
         with gr.Row():
 
         with gr.Row() as exec:
             btnExec = gr.Button("Execute", variant="primary", min_width=200)
 
         with gr.Column() as texts:
             for i in range(10):
                 text = gr.Textbox(label="_", visible=False)
                 text_list.append(text)
 
         with gr.Column():
+            html_result = gr.HTML("""<div style="background-color: powderblue"></div>""")
 
         btnExec.click(setTextVisibility, inputs=[cbg, model_name_input], outputs=html_result)
+        clear = gr.ClearButton(link="http://127.0.0.1:7860")
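Since `gr.Dropdown` is built from (label, value) tuples, the callback receives the model path (the second element), not the display label. A minimal self-contained sketch of the same wiring pattern, with hypothetical names:

```python
import gradio as gr

with gr.Blocks() as sketch:
    choice = gr.Dropdown(choices=[("Label A", "value-a"), ("Label B", "value-b")], label="Pick one")
    btn = gr.Button("Run")
    out = gr.HTML()
    # The lambda gets "value-a" / "value-b", i.e. the tuple's second element.
    btn.click(fn=lambda v: f"<b>{v}</b>", inputs=[choice], outputs=out)
```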
 
 
 
     with gr.Tab("Batch Mode"):
 
         gr.Markdown("To tailor the generation of these five prompts from your original question, you can adjust the relevance and diversity scores. The relevance score determines how closely the generated prompts should align with the original question, while the diversity score dictates the variance among the prompts themselves.")
         gr.Markdown("Upon completion, please provide your email address. We will compile and send the answers to you promptly.")
 
+        # llm_dropdown = gr.Dropdown(choices=[
+        #     ("Llama-2-7B", "TheBloke/Llama-2-7B-Chat-GGML"),
+        #     ("Falcon-180B", "TheBloke/Falcon-180B-Chat-GGUF"),
+        #     ("Zephyr-7B", "TheBloke/zephyr-quiklang-3b-4K-GGUF"),
+        #     ("Vicuna-33B", "TheBloke/vicuna-33B-GGUF"),
+        #     ("Claude2", "TheBloke/claude2-alpaca-13B-GGUF"),
+        #     ("Alpaca-7B", "TheBloke/LeoScorpius-GreenNode-Alpaca-7B-v1-GGUF")],
+        #     label="Large Language Model")
+
+        with gr.Row():
+            model_name_batch_input = gr.Dropdown(choices=LLM_OPTIONS, label="Large Language Model")
+
         file_upload = gr.File(label="Upload an Excel File with Questions", file_types=[".xlsx"])
         with gr.Row():
             relevance_slider = gr.Slider(1, 100, value=70, label="Relevance", info="Choose between 0 and 100", interactive=True)
 
         output_textbox = gr.Textbox(label="Output")
         download_button = gr.File(label="Download Processed File")
 
+        def on_submit(file, relevance, diversity, model_name_batch_input):
+            print("in on submit")
+            print(model_name_batch_input)
+            result, output_file = process_inputs(file, relevance, diversity, model_name_batch_input)
             return result, output_file
 
+        submit_button.click(fn=on_submit, inputs=[file_upload, relevance_slider, diversity_slider, model_name_batch_input], outputs=[output_textbox, download_button])
 
 # Launch the Gradio app
 demo.launch()