AuditLLM / app.py
Amirizaniani's picture
Update app.py
313e518 verified
raw
history blame
3.24 kB
import gradio as gr
from dotenv import load_dotenv
from langchain import PromptTemplate, LLMChain, HuggingFaceHub
from langchain.llms import CTransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
load_dotenv()
def generate_prompts(user_input):
    """Generate a list of question prompts related to ``user_input``.

    Runs a local GGUF Mistral-7B-Instruct model (via CTransformers) through an
    LLMChain and returns the non-empty generated lines as a list of prompt
    strings, suitable for use as CheckboxGroup choices.
    """
    # Bug fix: the template was an f-string that baked user_input directly
    # into the text, so the declared "Question" input variable never appeared
    # in the template (PromptTemplate validation rejects that mismatch) and
    # the value passed to run() was ignored. Use the placeholder instead.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template=(
            "Just list 10 question prompts for {Question} and don't put "
            "number before each of the prompts."
        ),
    )
    # NOTE(review): context_length == max_new_tokens == 64 is very tight for
    # 10 questions — presumably tuned for demo speed; confirm before raising.
    config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
    llm = CTransformers(model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
                        config=config)
    hub_chain = LLMChain(prompt=prompt_template, llm=llm)
    generated_prompts = hub_chain.run({"Question": user_input})
    # One prompt per non-blank output line. (The original code joined each
    # line with a "Question:" prefix and immediately split on it again —
    # a no-op round-trip, removed.)
    return [line.strip() for line in generated_prompts.split('\n') if line.strip()]
def answer_question(prompt):
    """Return a single generated answer string for ``prompt``.

    Runs a local GGML Llama-2-7B-Chat model (via CTransformers) through an
    LLMChain with the prompt substituted into the template.
    """
    # Bug fix: as in generate_prompts, the f-string template never used the
    # declared "Question" variable; switch to a real template placeholder so
    # the run() input is actually substituted.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="give one answer for {Question} and do not consider the number behind it.",
    )
    config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
    llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGML",
                        config=config)
    hub_chain = LLMChain(prompt=prompt_template, llm=llm)
    return hub_chain.run({"Question": prompt})
# Module-level registry of the 10 answer Textbox widgets created inside the
# Blocks layout below; used as the outputs list for the Execute button.
text_list = []
def updateChoices(prompt):
    """Rebuild the checkbox group from freshly generated prompts."""
    return gr.CheckboxGroup(choices=generate_prompts(prompt))
def setTextVisibility(cbg):
    """Show one answered textbox per selected prompt and hide the rest.

    Always returns exactly 10 Textbox updates, matching the fixed number of
    widgets registered in ``text_list``.
    """
    shown = []
    for selected_prompt in cbg:
        answer = answer_question(selected_prompt)
        shown.append(gr.Textbox(visible=True, label=selected_prompt, value=answer))
    remaining = 10 - len(shown)
    hidden = [gr.Textbox(visible=False, label="") for _ in range(remaining)]
    return shown + hidden
with gr.Blocks() as demo:
    # Static page header.
    gr.HTML("""
    <div style="text-align: center; max-width: 1240px; margin: 0 auto;">
    <h1 style="font-weight: 200; font-size: 20px; margin-bottom:8px; margin-top:0px;">
    Auditing LLMs
    </h1>
    <hr style="margin-bottom:5px; margin-top:5px;">
    </div>
    """)
    with gr.Row():
        prompt_input = gr.Textbox(label="Enter your question", placeholder="Enter Your Question")
    with gr.Row():
        generate_button = gr.Button("Generate")
    with gr.Column():
        cbg = gr.CheckboxGroup(choices=[], label="List of the prompts", interactive=True)
        # Generate fills the checkbox group with prompts for the question.
        generate_button.click(updateChoices, inputs=[prompt_input], outputs=[cbg])
    # Renamed from `exec`, which shadowed the builtin; the binding is unused.
    with gr.Row(variant="compact") as exec_row:
        btnExec = gr.Button("Execute")
    with gr.Column() as texts:
        # Pre-create the 10 (initially hidden) answer slots that
        # setTextVisibility toggles.
        for i in range(10):
            text = gr.Textbox(label="_", visible=False)
            text_list.append(text)
    btnExec.click(setTextVisibility, inputs=cbg, outputs=text_list)
    # Bug fix: the clear button previously received only `text` — the last
    # textbox created by the loop above — so 9 of the 10 answer boxes were
    # never cleared. Clear every answer textbox plus the inputs.
    Clear = gr.ClearButton([prompt_input, cbg] + text_list, scale=1)

# Launch the Gradio app
demo.launch(share=True)