# Import Gradio for UI, along with other necessary libraries
import gradio as gr
from rag_app.loading_data.load_S3_vector_stores import get_chroma_vs
from rag_app.agents.react_agent import agent_executor
from config import db  # presumably imported for its initialization side effects; not used directly here

# Load the Chroma vector store from S3 (assumed side effect: initializes the
# local index that the agent's retrieval tools query)
get_chroma_vs()


if __name__ == "__main__":

    # Function to add a new input to the chat history
    def add_text(history, text):
        # Append the new text to the history with a placeholder for the response
        history = history + [(text, None)]
        return history, ""  # "" clears the input Textbox

    # Function representing the bot's response mechanism
    def bot(history):
        # Obtain the response from the 'infer' function using the latest input
        response = infer(history[-1][0], history)
        print(response)  # debug: log the full agent response
        history[-1][1] = response['output']
        return history
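
    # Note: gr.Chatbot re-renders from the returned history, so filling in the
    # last (user, None) pair in bot() is enough to display the new answer.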

    # Function to infer the response using the RAG agent
    def infer(question, history):
        # Pass the question and chat history to the ReAct agent
        try:
            result = agent_executor.invoke(
                {
                    "input": question,
                    "chat_history": history
                }
            )
            return result
        except Exception:
            # gr.Error is an Exception subclass, so raising it surfaces the
            # message in the UI instead of crashing the event handler
            raise gr.Error("Model is overloaded, please try again in a few minutes!")
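
    # AgentExecutor.invoke is expected to return a dict that echoes its inputs
    # and adds an "output" key holding the final answer text, which is why
    # bot() and get_examples() read response['output'].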
        
    # Callback for the chatbot's like/dislike buttons
    def vote(data: gr.LikeData):
        if data.liked:
            print("You upvoted this response:", data.value)
        else:
            print("You downvoted this response:", data.value)

    # Helper that answers an example question directly; intended for the
    # (currently disabled) cached-examples wiring of gr.Examples below
    def get_examples(input_text: str):
        tmp_history = [(input_text, None)]
        response = infer(input_text, tmp_history)
        return response['output']

    # CSS styling for the Gradio interface
    css = """
    #col-container {max-width: 1200px; margin-left: auto; margin-right: auto;}
    """

    # HTML content for the Gradio interface title
    title = """
    <div style="text-align:left;">
        <p>Hello, I BotTina 2.0, your intelligent AI assistant. I can help you explore Wuerttembergische Versicherungs products.<br />
    </div>
    """
    head_style = """
    <style>
    @media (min-width: 1536px)
    {
        .gradio-container {
            min-width: var(--size-full) !important;
        }
    }
    </style>
    """

    # Building the Gradio interface
    with gr.Blocks(theme=gr.themes.Soft(), title="InsurePal AI 🤵🏻‍♂️", head=head_style) as demo:
        with gr.Column(elem_id="col-container"):
            gr.HTML(title)  # Add the HTML title to the interface
            # Initialize the chatbot component
            chatbot = gr.Chatbot(
                [],
                elem_id="chatbot",
                label="InsurePal AI",
                bubble_full_width=False,
                avatar_images=(None, "https://dacodi-production.s3.amazonaws.com/store/87bc00b6727589462954f2e3ff6f531c.png"),
                height=680,
            )
            chatbot.like(vote, None, None)  # register vote() for like/dislike clicks

            # Create a row for the question input
            with gr.Row():
                question = gr.Textbox(label="Question", show_label=False, placeholder="Type your question and hit Enter ", scale=4)
                send_btn = gr.Button(value="Send", variant="primary", scale=0)
            with gr.Accordion(label="Beispiele", open=False):
                #examples
                examples = gr.Examples([
                    "Welche Versicherungen brauche ich als Student?", 
                    "Wie melde ich einen Schaden?",
                    "Wie kann ich mich als Selbstständiger finanziell absichern?",
                    "Welche Versicherungen sollte ich für meine Vorsorge abschliessen?"
                    ], inputs=[question], label="") #, cache_examples="lazy", fn=get_examples, outputs=[chatbot]

            with gr.Row():
                clear = gr.Button("Clear")  # Add a button to clear the chat

        # Submitting the textbox or clicking Send first appends the user's message
        # (queue=False so it shows up immediately), then runs bot to generate the reply
        question.submit(add_text, [chatbot, question], [chatbot, question], queue=False).then(
            bot, chatbot, chatbot)
        send_btn.click(add_text, [chatbot, question], [chatbot, question], queue=False).then(
            bot, chatbot, chatbot)
        # Define the action for the clear button
        clear.click(lambda: None, None, chatbot, queue=False)

    # Launch the Gradio demo interface
    demo.queue().launch(share=False, debug=True)
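
# To run locally (assuming S3 credentials and any API keys are provided via the
# environment that config.py reads):
#   python <path-to-this-file>
# Gradio serves the UI at http://127.0.0.1:7860 by default.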