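"""Gradio app for auditing LLMs: Mistral-7B generates question prompts on a
user-supplied topic, and Llama-2-7B-Chat answers the prompts the user selects."""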
import os

import gradio as gr
from langchain import PromptTemplate, LLMChain
from langchain.llms import CTransformers

def generate_prompts(user_input):
    # Ask Mistral-7B for ten question prompts about the user's topic.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="Just list 10 question prompts for {Question} and don't put a number before each of the prompts."
    )
    config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
    llm = CTransformers(model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
                        config=config,
                        threads=os.cpu_count())
    hub_chain = LLMChain(prompt=prompt_template, llm=llm)

    generated_prompts = hub_chain.run({"Question": user_input})
    # Keep one prompt per non-empty line of the model output.
    questions_list = [question.strip() for question in generated_prompts.split('\n') if question.strip()]
    return questions_list

def answer_question(prompt):
    # Answer a single selected prompt with Llama-2-7B-Chat.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="Give one answer for {Question} and do not consider the number behind it."
    )
    config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
    llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGML",
                        config=config,
                        threads=os.cpu_count())
    hub_chain = LLMChain(prompt=prompt_template, llm=llm)
    generated_answer = hub_chain.run({"Question": prompt})
    return generated_answer

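# Holds the ten answer Textbox components created in the layout below.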
text_list = []

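# Regenerate the prompt list for the entered topic and push it into the CheckboxGroup.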
def updateChoices(prompt):
    newChoices = generate_prompts(prompt)
    return gr.CheckboxGroup(choices=newChoices)

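# Reveal one answer box per selected prompt (running the answer model for each)
# and hide the unused boxes out of the ten created below.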
def setTextVisibility(cbg):
    update_show = [gr.Textbox(visible=True, label=text, value=answer_question(text)) for text in cbg]
    update_hide = [gr.Textbox(visible=False, label="") for _ in range(10-len(cbg))]
    return update_show + update_hide

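# Layout: topic input -> "Generate" fills the prompt CheckboxGroup -> "Execute"
# answers the selected prompts in the hidden Textboxes.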
with gr.Blocks() as demo:

    gr.HTML("""
    <div style="text-align: center; max-width: 1240px; margin: 0 auto;">
    <h1 style="font-weight: 200; font-size: 20px; margin-bottom: 8px; margin-top: 0px;">
    Auditing LLMs
    </h1>
    <hr style="margin-bottom: 5px; margin-top: 5px;">
    </div>
    """)

    with gr.Row():
        prompt_input = gr.Textbox(label="Enter your question", placeholder="Enter your question")
    with gr.Row():
        generate_button = gr.Button("Generate")
    with gr.Column():
        cbg = gr.CheckboxGroup(choices=[], label="List of the prompts", interactive=True)
    
    generate_button.click(updateChoices, inputs=[prompt_input], outputs=[cbg])

    with gr.Row(variant="compact") as exec_row:  # "exec" would shadow the built-in
        btnExec = gr.Button("Execute")
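    # Pre-create ten hidden Textboxes; setTextVisibility toggles them per selection.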
    with gr.Column() as texts:
        for i in range(10):
            text = gr.Textbox(label="_", visible=False)
            text_list.append(text)

    btnExec.click(setTextVisibility, inputs=cbg, outputs=text_list)

    # "Clear" resets the page by linking back to the locally hosted app.
    clear = gr.ClearButton(link="http://127.0.0.1:8160")

# Launch the Gradio app
demo.launch()