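"""Gradio app for auditing LLMs.

A Mistral-7B-Instruct model (run locally via ctransformers) turns a user-supplied
topic into ten question prompts; the user ticks the prompts they care about, and a
Llama-2-7B-Chat model answers each selected prompt in its own textbox.
"""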
import gradio as gr
from dotenv import load_dotenv
from langchain import LLMChain, PromptTemplate
from langchain.llms import CTransformers

# Load API keys (e.g. HUGGINGFACEHUB_API_TOKEN) from a local .env file, if present.
load_dotenv()
def generate_prompts(user_input):
    """Generate 10 question prompts about the user's topic with Mistral-7B-Instruct."""
    # Pass the topic through a real template variable instead of baking the raw
    # user input into the template string with an f-string.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="Just list 10 question prompts for {Question} and don't put a number before each of the prompts."
    )
    # NOTE: 64 tokens of context/output is a tight budget for 10 prompts;
    # raise these values if the output comes back truncated.
    config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
    llm = CTransformers(model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
                        config=config)
    hub_chain = LLMChain(prompt=prompt_template, llm=llm)

    generated_prompts = hub_chain.run({"Question": user_input})
    # Keep one prompt per non-empty line of the model's output.
    questions_list = [line.strip() for line in generated_prompts.split('\n') if line.strip()]
    return questions_list

def answer_question(prompt):
    """Ask Llama-2-7B-Chat for a single answer to one selected prompt."""
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="Give one answer for {Question} and do not consider the number behind it."
    )
    config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
    llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGML",
                        config=config)
    hub_chain = LLMChain(prompt=prompt_template, llm=llm)

    generated_answer = hub_chain.run({"Question": prompt})
    return generated_answer

# Holds the answer textboxes created in the layout below (needed as click outputs).
text_list = []

def updateChoices(prompt):
    """Refresh the checkbox group with freshly generated prompts."""
    newChoices = generate_prompts(prompt)
    return gr.CheckboxGroup(choices=newChoices)

def setTextVisibility(cbg):
    """Show one textbox per selected prompt, filled with its answer, and hide the rest."""
    update_show = [gr.Textbox(visible=True, label=prompt, value=answer_question(prompt))
                   for prompt in cbg]
    update_hide = [gr.Textbox(visible=False, label="") for _ in range(10 - len(cbg))]
    return update_show + update_hide

with gr.Blocks() as demo:

    gr.HTML("""
    <div style="text-align: center; max-width: 1240px; margin: 0 auto;">
    <h1 style="font-weight: 200; font-size: 20px; margin-bottom:8px; margin-top:0px;">
    Auditing LLMs
    </h1>
    <hr style="margin-bottom:5px; margin-top:5px;">
    
    </div>
    """)

    with gr.Row():
        prompt_input = gr.Textbox(label="Enter your question", placeholder="Enter Your Question")
    with gr.Row():
        generate_button = gr.Button("Generate")
    with gr.Column():
        cbg = gr.CheckboxGroup(choices=[], label="List of the prompts", interactive=True)
    
    # Wire the Generate button: topic -> generate_prompts -> checkbox choices.
    generate_button.click(updateChoices, inputs=[prompt_input], outputs=[cbg])

    with gr.Row(variant="compact"):
        btnExec = gr.Button("Execute")
    with gr.Column():
        # Ten hidden textboxes that setTextVisibility() reveals and fills on demand.
        for _ in range(10):
            text = gr.Textbox(label="_", visible=False)
            text_list.append(text)

    # Wire the Execute button: selected prompts -> answers shown in the textboxes.
    btnExec.click(setTextVisibility, inputs=cbg, outputs=text_list)

    # Clear the question box, the checkbox group, and every answer textbox.
    Clear = gr.ClearButton([prompt_input, cbg] + text_list, scale=1)

# Launch the Gradio app
demo.launch(share=True)
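# Note: share=True additionally exposes the app on a temporary public gradio.live
# link; remove it to serve only on the local URL.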