rafaldembski
committed on
Commit
•
006c316
1
Parent(s):
9492783
Update app.py
Browse files
app.py
CHANGED
@@ -4,7 +4,7 @@ import time
|
|
4 |
import re
|
5 |
import os
|
6 |
|
7 |
-
#
|
8 |
MODELS = [
|
9 |
"Meta-Llama-3.1-405B-Instruct",
|
10 |
"Meta-Llama-3.1-70B-Instruct",
|
@@ -15,27 +15,28 @@ MODELS = [
|
|
15 |
API_BASE = "https://api.sambanova.ai/v1"
|
16 |
|
17 |
def create_client(api_key=None):
|
18 |
-
"""
|
19 |
if api_key:
|
20 |
openai.api_key = api_key
|
21 |
else:
|
22 |
openai.api_key = os.getenv("API_KEY")
|
23 |
-
|
24 |
return openai.OpenAI(api_key=openai.api_key, base_url=API_BASE)
|
25 |
|
26 |
def chat_with_ai(message, chat_history, system_prompt):
|
27 |
-
"""
|
28 |
messages = [{"role": "system", "content": system_prompt}]
|
29 |
for tup in chat_history:
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
|
|
|
|
34 |
messages.append({"role": "user", "content": message})
|
35 |
return messages
|
36 |
|
37 |
def respond(message, chat_history, model, system_prompt, thinking_budget, api_key):
|
38 |
-
"""
|
39 |
client = create_client(api_key)
|
40 |
messages = chat_with_ai(message, chat_history, system_prompt.format(budget=thinking_budget))
|
41 |
start_time = time.time()
|
@@ -50,7 +51,7 @@ def respond(message, chat_history, model, system_prompt, thinking_budget, api_ke
|
|
50 |
return error_message, time.time() - start_time
|
51 |
|
52 |
def parse_response(response):
|
53 |
-
"""
|
54 |
answer_match = re.search(r'<answer>(.*?)</answer>', response, re.DOTALL)
|
55 |
reflection_match = re.search(r'<reflection>(.*?)</reflection>', response, re.DOTALL)
|
56 |
|
@@ -64,29 +65,28 @@ def parse_response(response):
|
|
64 |
return answer, reflection, steps
|
65 |
|
66 |
def generate(message, history, model, thinking_budget, api_key=None):
|
67 |
-
"""
|
68 |
-
# Use DEFAULT_SYSTEM_PROMPT inside the function
|
69 |
system_prompt = DEFAULT_SYSTEM_PROMPT
|
70 |
|
71 |
response, thinking_time = respond(message, history, model, system_prompt, thinking_budget, api_key)
|
72 |
|
73 |
if response.startswith("Error:"):
|
74 |
-
return history + [
|
75 |
|
76 |
answer, reflection, steps = parse_response(response)
|
77 |
|
78 |
messages = []
|
79 |
messages.append({"role": "user", "content": message})
|
80 |
|
81 |
-
formatted_steps = [f"
|
82 |
-
all_steps = "\n".join(formatted_steps) + f"\n\
|
83 |
|
84 |
-
messages.append({"role": "assistant", "content": all_steps, "metadata": {"title": f"
|
85 |
messages.append({"role": "assistant", "content": answer})
|
86 |
|
87 |
return history + messages, ""
|
88 |
|
89 |
-
#
|
90 |
DEFAULT_SYSTEM_PROMPT = """
|
91 |
You are D-LOGIC, an advanced AI assistant created by Rafał Dembski, a passionate self-learner in programming and artificial intelligence. Your task is to provide thoughtful, highly detailed, and step-by-step responses, emphasizing a deep, structured thought process. **Your answers should always follow these key principles**:
|
92 |
|
@@ -151,30 +151,184 @@ To provide the most comprehensive and well-thought-out answers, follow this enha
|
|
151 |
- **Krytyczna ocena**: Po zakończeniu odpowiedzi, asystent musi ocenić swoje działania. Jak mógłbym to poprawić następnym razem? Czy wszystkie kroki były wykonane w najbardziej efektywny sposób? Jakie wnioski mogę wyciągnąć na przyszłość?
|
152 |
"""
|
153 |
|
154 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
155 |
|
156 |
-
|
157 |
-
|
158 |
-
|
|
|
159 |
gr.Markdown("""
|
160 |
**D-LOGIC** to zaawansowany asystent AI stworzony przez Rafała Dembskiego. Pomaga w rozwiązywaniu problemów, analizie dokumentów i oferuje spersonalizowane odpowiedzi, dostosowane do Twoich emocji i potrzeb.
|
161 |
""")
|
162 |
-
|
|
|
163 |
with gr.Row():
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
|
178 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
179 |
|
|
|
180 |
demo.launch(share=True, show_api=False)
|
|
|
4 |
import re
|
5 |
import os
|
6 |
|
7 |
+
# Dostępne modele
|
8 |
MODELS = [
|
9 |
"Meta-Llama-3.1-405B-Instruct",
|
10 |
"Meta-Llama-3.1-70B-Instruct",
|
|
|
15 |
API_BASE = "https://api.sambanova.ai/v1"
|
16 |
|
17 |
def create_client(api_key=None):
    """Create an OpenAI client pointed at the SambaNova-compatible endpoint.

    Args:
        api_key: Explicit API key. When falsy, falls back to the
            ``API_KEY`` environment variable.

    Returns:
        openai.OpenAI: Client configured with ``API_BASE`` as its base URL.
    """
    # Resolve the key locally instead of mutating the module-global
    # ``openai.api_key`` — the client carries its own key, and avoiding the
    # shared-state write prevents one request's key from leaking into another.
    key = api_key or os.getenv("API_KEY")
    return openai.OpenAI(api_key=key, base_url=API_BASE)
|
24 |
|
25 |
def chat_with_ai(message, chat_history, system_prompt):
    """Build the OpenAI-style message list for an API call.

    Args:
        message: The new user message to append last.
        chat_history: Prior turns. Accepts either Gradio "messages" entries
            (``{"role": ..., "content": ...}`` — the format ``generate``
            produces in this file) or legacy pair dicts
            (``{"user": ..., "assistant": ...}``).
        system_prompt: Content of the leading system message.

    Returns:
        list[dict]: ``[system, *history, user]`` role/content dicts.
    """
    messages = [{"role": "system", "content": system_prompt}]
    for entry in chat_history:
        if "role" in entry and "content" in entry:
            # Gradio "messages" format — forward role/content as-is.
            # (The original only looked up "user"/"assistant" keys, which
            # silently dropped every prior turn stored in this format.)
            messages.append({"role": entry["role"], "content": entry["content"]})
        else:
            # Legacy pair format: one dict may hold both sides of a turn.
            user_message = entry.get("user")
            assistant_message = entry.get("assistant")
            if user_message:
                messages.append({"role": "user", "content": user_message})
            if assistant_message:
                messages.append({"role": "assistant", "content": assistant_message})
    messages.append({"role": "user", "content": message})
    return messages
|
37 |
|
38 |
def respond(message, chat_history, model, system_prompt, thinking_budget, api_key):
|
39 |
+
"""Wysyła wiadomość do API i otrzymuje odpowiedź."""
|
40 |
client = create_client(api_key)
|
41 |
messages = chat_with_ai(message, chat_history, system_prompt.format(budget=thinking_budget))
|
42 |
start_time = time.time()
|
|
|
51 |
return error_message, time.time() - start_time
|
52 |
|
53 |
def parse_response(response):
|
54 |
+
"""Parsuje odpowiedź z API."""
|
55 |
answer_match = re.search(r'<answer>(.*?)</answer>', response, re.DOTALL)
|
56 |
reflection_match = re.search(r'<reflection>(.*?)</reflection>', response, re.DOTALL)
|
57 |
|
|
|
65 |
return answer, reflection, steps
|
66 |
|
67 |
def generate(message, history, model, thinking_budget, api_key=None):
    """Produce the chatbot's next turn and return the updated history.

    Sends ``message`` through ``respond`` using the module-level
    ``DEFAULT_SYSTEM_PROMPT``, parses the structured reply, and returns
    ``(updated_history, "")`` — the empty string clears the input box.
    """
    response, thinking_time = respond(
        message, history, model, DEFAULT_SYSTEM_PROMPT, thinking_budget, api_key
    )

    # Surface API failures as a system message instead of crashing the UI.
    if response.startswith("Error:"):
        return history + [{"role": "system", "content": response}], ""

    answer, reflection, steps = parse_response(response)

    # Render the reasoning steps plus the reflection as one markdown blob.
    step_lines = [f"**Krok {i}:** {step}" for i, step in enumerate(steps, 1)]
    all_steps = "\n".join(step_lines) + f"\n\n**Refleksja:** {reflection}"

    new_turns = [
        {"role": "user", "content": message},
        {
            "role": "assistant",
            "content": all_steps,
            # The metadata title shows elapsed thinking time in the UI.
            "metadata": {"title": f"Czas myślenia: {thinking_time:.2f} sek"},
        },
        {"role": "assistant", "content": answer},
    ]
    return history + new_turns, ""
|
88 |
|
89 |
+
# Definiowanie domyślnego system prompt
|
90 |
DEFAULT_SYSTEM_PROMPT = """
|
91 |
You are D-LOGIC, an advanced AI assistant created by Rafał Dembski, a passionate self-learner in programming and artificial intelligence. Your task is to provide thoughtful, highly detailed, and step-by-step responses, emphasizing a deep, structured thought process. **Your answers should always follow these key principles**:
|
92 |
|
|
|
151 |
- **Krytyczna ocena**: Po zakończeniu odpowiedzi, asystent musi ocenić swoje działania. Jak mógłbym to poprawić następnym razem? Czy wszystkie kroki były wykonane w najbardziej efektywny sposób? Jakie wnioski mogę wyciągnąć na przyszłość?
|
152 |
"""
|
153 |
|
154 |
+
# Custom CSS giving the Gradio UI a polished look (background, cards,
# buttons, chat area, footer). Selectors in the string are CSS, not Python.
custom_css = """
/* Ogólne tło aplikacji */
body {
    background-color: #f4f6f9;
    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}

/* Główny kontener */
.gradio-container {
    max-width: 900px;
    margin: auto;
    padding: 20px;
}

/* Nagłówek */
h1, .gr-markdown h1 {
    color: #4a4a4a;
    text-align: center;
    margin-bottom: 10px;
}

h2, .gr-markdown h2 {
    color: #333333;
}

/* Karty i panele */
#component-0, #component-1, #component-2, #component-3 {
    background-color: #ffffff;
    border-radius: 12px;
    padding: 20px;
    box-shadow: 0 4px 12px rgba(0, 0, 0, 0.05);
    margin-bottom: 20px;
}

/* Przycisk Wyślij */
button[type="submit"], .gr-button.primary {
    background-color: #4a90e2;
    color: #ffffff;
    border: none;
    border-radius: 8px;
    padding: 10px 20px;
    font-size: 16px;
    cursor: pointer;
    transition: background-color 0.3s ease;
}

button[type="submit"]:hover, .gr-button.primary:hover {
    background-color: #357ab8;
}

/* Przycisk Wyczyść */
button.secondary {
    background-color: #e0e0e0;
    color: #333333;
    border: none;
    border-radius: 8px;
    padding: 10px 20px;
    font-size: 16px;
    cursor: pointer;
    transition: background-color 0.3s ease;
}

button.secondary:hover {
    background-color: #cfcfcf;
}

/* Pole tekstowe wiadomości */
textarea {
    border: 1px solid #dcdcdc;
    border-radius: 8px;
    padding: 10px;
    font-size: 16px;
    resize: none;
    transition: border-color 0.3s ease;
}

textarea:focus {
    border-color: #4a90e2;
    outline: none;
}

/* Chatbot */
.gr-chatbot {
    height: 500px;
    overflow-y: auto;
    padding: 10px;
    border: 1px solid #dcdcdc;
    border-radius: 8px;
    background-color: #ffffff;
}

/* Stopka */
.footer {
    text-align: center;
    color: #888888;
    margin-top: 20px;
    font-size: 14px;
}
"""
|
254 |
|
255 |
+
# Build the Gradio interface using the custom CSS defined above.
with gr.Blocks(css=custom_css) as demo:
    # Header / app description (user-facing text stays in Polish).
    gr.Markdown("# 🧠 **D-LOGIC: Twój Inteligentny Asystent AI**")
    gr.Markdown("""
    **D-LOGIC** to zaawansowany asystent AI stworzony przez Rafała Dembskiego. Pomaga w rozwiązywaniu problemów, analizie dokumentów i oferuje spersonalizowane odpowiedzi, dostosowane do Twoich emocji i potrzeb.
    """)

    # Model picker and the thinking-budget slider, side by side.
    with gr.Row():
        with gr.Column(scale=1):
            model = gr.Dropdown(
                choices=MODELS,
                label="🔧 **Wybierz Model**",
                value=MODELS[0],
                interactive=True
            )
        with gr.Column(scale=1):
            thinking_budget = gr.Slider(
                minimum=1,
                maximum=100,
                value=25,
                step=1,
                label="🧩 **Budżet Myślenia**",
                info="Maksymalna liczba kroków, które model może przemyśleć"
            )

    # Chat transcript area.
    chatbot = gr.Chatbot(
        label="💬 **Chat**",
        show_label=False,
        show_share_button=False,
        show_copy_button=True,
        likeable=True,
        layout="vertical",
        height=500
    )

    # Message input box.
    with gr.Row():
        msg = gr.Textbox(
            label="✉️ **Wpisz swoją wiadomość...**",
            placeholder="Wprowadź swoją wiadomość...",
            lines=1
        )

    # Send and Clear buttons.
    with gr.Row():
        submit_button = gr.Button("🚀 **Wyślij**", variant="primary")
        clear_button = gr.Button("🧹 **Wyczyść Chat**", variant="secondary")

    # Clear resets both the transcript and the input box.
    clear_button.click(
        fn=lambda: ([], ""),
        inputs=None,
        outputs=[chatbot, msg]
    )

    # Submit a message via Enter or by clicking the Send button.
    # NOTE(review): `generate` accepts an optional api_key that is not wired
    # here, so the environment-variable fallback in create_client is used.
    msg.submit(
        fn=generate,
        inputs=[msg, chatbot, model, thinking_budget],
        outputs=[chatbot, msg]
    )
    submit_button.click(
        fn=generate,
        inputs=[msg, chatbot, model, thinking_budget],
        outputs=[chatbot, msg]
    )

    # Footer.
    gr.Markdown("""
    ---
    <div class="footer">
    *Stworzony przez [Rafał Dembski](https://github.com/rafaldembski) © 2024*
    </div>
    """)

# Launch the Gradio app (public share link, API docs hidden).
demo.launch(share=True, show_api=False)
|