File size: 6,088 Bytes
c83181d
390f830
 
 
c83181d
 
 
 
 
 
b6e72f6
37f14ac
c525e42
c83181d
 
37f14ac
c83181d
 
 
 
 
 
 
 
 
 
390f830
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c83181d
 
b6e72f6
 
 
c83181d
 
b6e72f6
 
 
 
c83181d
 
b6e72f6
 
 
 
 
390f830
857f140
 
 
 
 
 
 
 
 
 
 
 
 
b6e72f6
 
c525e42
b6e72f6
 
 
 
 
c525e42
b6e72f6
 
 
 
 
 
 
 
 
 
 
 
 
390f830
b6e72f6
 
c425f6c
c83181d
c425f6c
c525e42
c425f6c
 
c525e42
 
c83181d
c425f6c
c525e42
 
 
 
 
 
 
 
 
 
 
 
390f830
 
 
c525e42
c425f6c
 
390f830
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c425f6c
 
 
390f830
c425f6c
390f830
 
 
 
c425f6c
390f830
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c525e42
c83181d
390f830
 
 
 
 
 
 
 
c83181d
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
import base64
import sys
from datetime import datetime
from io import StringIO

import gradio as gr
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

from agent import function_caller, retriever
from client import HybridClient
from sarvam import save_audio, speaker, translator

# FastAPI application and a single shared hybrid-search client for the process.
app = FastAPI()
hclient = HybridClient()

# Allow cross-origin requests from any origin so browser frontends hosted on
# another domain can call the API.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# very permissive — confirm this is intended for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class DebugCapture(StringIO):
    """In-memory stdout tee that groups consecutive writes into timestamped entries.

    A whitespace-only write closes the current entry; the next non-blank write
    opens a fresh one prefixed with a timestamp. At most 10 entries are kept.
    All input is still forwarded to the underlying StringIO buffer.
    """

    def __init__(self):
        super().__init__()
        self.debug_history = []  # rolling list of "[timestamp] text" entries
        self.new_entry = True    # True -> next non-blank write starts a new entry

    def write(self, s):
        text = s.strip()
        if not text:
            # Blank write acts as an entry separator.
            self.new_entry = True
        elif self.new_entry:
            stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            self.debug_history.append(f"[{stamp}] {text}")
            self.new_entry = False
        else:
            # Continuation line: extend the most recent entry.
            self.debug_history[-1] = f"{self.debug_history[-1]}\n{text}"

        while len(self.debug_history) > 10:  # Limit log history memory consumption
            self.debug_history.pop(0)

        return super().write(s)


# Redirect stdout into the capture buffer so print() output from the agent
# pipeline can be shown in the Gradio debug modal.
debug_capture = DebugCapture()
sys.stdout = debug_capture


class ChatQuery(BaseModel):
    """Request payload for the /agent and /rag endpoints."""
    query: str    # the user's question
    grade: str    # school grade, e.g. "9"
    subject: str  # subject name, e.g. "Science"; lowercased when building the collection name
    chapter: str  # chapter number, e.g. "11"


class TranslateQuery(BaseModel):
    """Request payload for the /translate endpoint."""
    text: str  # text to translate
    src: str   # source language code
    dest: str  # destination language code


class TTSQuery(BaseModel):
    """Request payload for the /tts endpoint."""
    text: str  # text to synthesize
    src: str   # language code of the text


# API Endpoints
@app.get("/")
def root():
    """Service index: greeting plus the names of the available endpoints.

    Uses a list (the original used a set literal) so the JSON array order
    is deterministic across responses.
    """
    return {
        "message": "Welcome!",
        "endpoints": ["status", "query", "agent", "rag", "translate", "tts"],
    }


@app.get("/status")
async def status():
    """Health-check endpoint; always reports the service as up."""
    payload = {"status": "200 OK"}
    return payload


@app.get("/agent")
async def agent(query: ChatQuery):
    """Route the question through the function-calling agent for the chapter's collection."""
    collection = "_".join([query.grade, query.subject.lower(), query.chapter])
    return await function_caller(query.query, collection, hclient)


@app.get("/rag")
async def rag(query: ChatQuery):
    """Answer via plain retrieval over the chapter's collection (no agent)."""
    grade, subject, chapter = query.grade, query.subject.lower(), query.chapter
    collection = f"{grade}_{subject}_{chapter}"
    return await retriever(query.query, collection, hclient)


@app.get("/translate")
async def translate(query: TranslateQuery):
    """Translate text from the source to the destination language."""
    result = await translator(query.text, query.src, query.dest)
    return result


@app.get("/tts")
async def tts(query: TTSQuery):
    """Synthesize speech for the given text in the given language."""
    spoken = await speaker(query.text, query.src)
    return spoken


# Gradio interface
async def gradio_interface(input_text, grade, subject, chapter, history):
    """Bridge between the Gradio chat UI and the /agent handler.

    Appends (user_message, {"type": ..., "content": ...}) to *history* and
    returns ("", history) so the input textbox is cleared after submit.
    """
    chat_query = ChatQuery(query=input_text, grade=grade, subject=subject, chapter=chapter)
    response = await agent(chat_query)

    if "text" in response:
        entry = {"type": "text", "content": response["text"]}
    elif "audios" in response:
        # Agent returned base64-encoded audio; persist it and keep the file path.
        raw_audio = base64.b64decode(response["audios"][0])
        entry = {"type": "audio", "content": save_audio(raw_audio)}
    else:
        entry = {"type": "text", "content": "Unexpected response format"}

    history.append((input_text, entry))
    return "", history


def format_history(history):
    """Convert (human, assistant_entry) pairs into Gradio Chatbot row tuples.

    Each turn expands to two rows: (human, None) then (None, reply), where the
    reply is plain text or a gr.Audio component for audio entries.

    Bug fix: the original popped at most ONE row per call (the check was a
    single `if` after the loop), so the intended history cap never actually
    bounded the list. Now the most recent 10 rows are kept. Entries produced
    by gradio_interface always add exactly two rows per turn, so an even cap
    of 10 trims whole turns and never splits a human/assistant pair.
    """
    formatted_history = []
    for human, assistant in history:
        formatted_history.append((human, None))
        if assistant["type"] == "text":
            formatted_history.append((None, assistant["content"]))
        elif assistant["type"] == "audio":
            formatted_history.append((None, gr.Audio(value=assistant["content"], visible=True)))

    # Limit history memory consumption: keep only the latest 10 rows.
    return formatted_history[-10:]


# Debug functions
def update_debug_output():
    """Return the captured debug entries as one newline-joined string."""
    entries = debug_capture.debug_history
    return "\n".join(entries)


def clear_debug_history():
    """Drop all captured debug entries and report the action for the text area."""
    message = "Debug history cleared."
    # Rebind (rather than mutate in place) to match the original's behavior.
    debug_capture.debug_history = []
    return message


def toggle_debug_modal(visible):
    """Show or hide the debug modal by updating its `visible` property."""
    visibility_update = gr.update(visible=visible)
    return visibility_update


# Gradio UI setup
# Blocks layout: header row with a debug toggle, the chat area (grade /
# subject / chapter selectors + chatbot + textbox), and a hidden debug modal.
with gr.Blocks() as iface:
    gr.Markdown("# Agentic RAG Chatbot")

    # Main header row
    with gr.Row():
        with gr.Column(scale=19):
            gr.Markdown("Ask a question and get an answer from the chatbot. The response may be text or audio.")
        with gr.Column(scale=1, min_width=50):
            # Small button that opens the debug terminal modal below.
            debug_button = gr.Button("🖥️", size="sm")

    # Chat input and interaction
    with gr.Row():
        with gr.Column(scale=20):
            with gr.Row():
                # NOTE(review): "8" is absent from both the Grade and Chapter
                # choice lists — confirm this is intentional.
                grade = gr.Dropdown(choices=["1", "2", "3", "4", "5", "6", "7", "9", "10", "11", "12"], label="Grade", value="9", interactive=True)
                subject = gr.Dropdown(choices=["Math", "Science", "History"], label="Subject", value="Science", interactive=True)
                chapter = gr.Dropdown(choices=["1", "2", "3", "4", "5", "6", "7", "9", "10", "11", "12", "13", "14", "15", "16"], label="Chapter", value="11", interactive=True)

            chatbot = gr.Chatbot(label="Chat History")
            msg = gr.Textbox(label="Your message", placeholder="Type your message here...")
            # Raw (input, entry-dict) history shared between callbacks; it is
            # rendered into chatbot tuples by format_history.
            state = gr.State([])

    # Debugging modal
    with gr.Group(visible=False) as debug_modal:
        debug_output = gr.TextArea(label="Debug Terminal", interactive=False)
        with gr.Row():
            refresh_button = gr.Button("Refresh Debug History")
            clear_button = gr.Button("Clear Debug History")
            close_button = gr.Button("Close")

    # Submit action: run the agent, clear the textbox, then re-render the chat.
    msg.submit(gradio_interface, inputs=[msg, grade, subject, chapter, state], outputs=[msg, state]).then(format_history, inputs=[state], outputs=[chatbot])

    # Debug button click: show the modal, then populate it with captured stdout.
    debug_button.click(lambda: toggle_debug_modal(True), outputs=debug_modal).then(update_debug_output, inputs=[], outputs=[debug_output])

    # Debug modal buttons
    refresh_button.click(update_debug_output, inputs=[], outputs=[debug_output])
    clear_button.click(clear_debug_history, inputs=[], outputs=[debug_output])
    close_button.click(lambda: toggle_debug_modal(False), outputs=debug_modal)

# Mount the Gradio UI at the root path of the FastAPI app.
app = gr.mount_gradio_app(app, iface, path="/")

if __name__ == "__main__":
    import uvicorn

    # Serve the combined FastAPI + Gradio app on all interfaces.
    uvicorn.run(app, host="0.0.0.0", port=8000)