Update app.py
app.py CHANGED
@@ -104,19 +104,17 @@ class GradioInterface:
     def __init__(self, prompt_refiner: PromptRefiner):
         self.prompt_refiner = prompt_refiner
 
-        # Define custom CSS for containers
+        # Define custom CSS for containers
         custom_css = """
         .container {
-            border: 2px solid #2196F3;
+            border: 2px solid #2196F3;
             border-radius: 10px;
             padding: 20px;
             margin: 15px;
             background: white;
-            box-shadow: 0 4px 6px rgba(33, 150, 243, 0.1);
             position: relative;
         }
 
-        /* Container titles */
         .container::before {
             position: absolute;
             top: -12px;
@@ -128,10 +126,11 @@ class GradioInterface:
             font-size: 1.2em;
         }
 
-        /* Remove default Gradio
-        .
-        border:
-        background:
+        /* Remove default Gradio styles */
+        .no-background > div:first-child {
+            border: none !important;
+            background: transparent !important;
+            box-shadow: none !important;
         }
 
         .title-container::before { content: 'PROMPT++'; }
@@ -140,9 +139,16 @@ class GradioInterface:
         .model-container::before { content: 'MODEL APPLICATION'; }
         .results-container::before { content: 'RESULTS'; }
         .examples-container::before { content: 'EXAMPLES'; }
+
+        /* Custom styling for radio buttons */
+        .radio-group {
+            display: flex;
+            gap: 10px;
+            margin: 10px 0;
+        }
         """
 
-        with gr.Blocks(css=custom_css) as self.interface:
+        with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
             # Title Container
             with gr.Column(elem_classes=["container", "title-container"]):
                 gr.Markdown("# PROMPT++")
@@ -153,7 +159,7 @@ class GradioInterface:
             with gr.Column(elem_classes=["container", "input-container"]):
                 prompt_text = gr.Textbox(
                     label="Type the prompt (or let it empty to see metaprompt)",
-                    elem_classes="
+                    elem_classes="no-background"
                 )
                 with gr.Accordion("Meta Prompt explanation", open=False):
                     gr.Markdown(explanation_markdown)
@@ -161,7 +167,7 @@ class GradioInterface:
                     ["star","done","physics","morphosis", "verse", "phor","bolism","math","arpe"],
                     label="Choose Meta Prompt",
                     value="star",
-                    elem_classes="
+                    elem_classes=["no-background", "radio-group"]
                 )
                 refine_button = gr.Button("Refine Prompt")
 
@@ -172,7 +178,7 @@ class GradioInterface:
                 gr.Markdown("### Refined Prompt")
                 refined_prompt = gr.Textbox(
                     interactive=False,
-                    elem_classes="
+                    elem_classes="no-background"
                 )
                 gr.Markdown("### Explanation of Refinements")
                 explanation_of_refinements = gr.Markdown()
@@ -183,28 +189,30 @@ class GradioInterface:
             # Model Application Container
             with gr.Column(elem_classes=["container", "model-container"]):
                 gr.Markdown("## See MetaPrompt Impact")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                with gr.Row():
+                    apply_model = gr.Dropdown(
+                        [
+                            "Qwen/Qwen2.5-72B-Instruct",
+                            "meta-llama/Meta-Llama-3-70B-Instruct",
+                            "meta-llama/Llama-3.1-8B-Instruct",
+                            "NousResearch/Hermes-3-Llama-3.1-8B",
+                            "HuggingFaceH4/zephyr-7b-alpha",
+                            "meta-llama/Llama-2-7b-chat-hf",
+                            "microsoft/Phi-3.5-mini-instruct"
+                        ],
+                        value="meta-llama/Meta-Llama-3-70B-Instruct",
+                        label="Choose the Model to apply to the prompts (the one you will used)",
+                        elem_classes="no-background"
+                    )
+                    apply_button = gr.Button("Apply MetaPrompt")
 
             # Results Container
             with gr.Column(elem_classes=["container", "results-container"]):
-                with gr.
-
-
-
+                with gr.Tabs():
+                    with gr.TabItem("Original Prompt Output"):
+                        original_output = gr.Markdown()
+                    with gr.TabItem("Refined Prompt Output"):
+                        refined_output = gr.Markdown()
 
             # Examples Container
             with gr.Column(elem_classes=["container", "examples-container"]):
@@ -225,8 +233,20 @@ class GradioInterface:
                     inputs=[prompt_text, meta_prompt_choice]
                 )
 
-
+            # Connect the buttons to their functions
+            refine_button.click(
+                fn=self.refine_prompt,
+                inputs=[prompt_text, meta_prompt_choice],
+                outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
+            )
+
+            apply_button.click(
+                fn=self.apply_prompts,
+                inputs=[prompt_text, refined_prompt, apply_model],
+                outputs=[original_output, refined_output]
+            )
 
+    # Your existing methods remain the same
     def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
         input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
         result = self.prompt_refiner.refine_prompt(input_data)
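The `.no-background` rule added in this commit works because `elem_classes` attaches the listed class names to a component's outer wrapper, so CSS passed to `gr.Blocks(css=...)` can reach the inner `<div>` that Gradio draws its default border and background on. A minimal, self-contained sketch of the same mechanism (the component and label here are illustrative, not taken from app.py):

```python
import gradio as gr

# CSS handed to gr.Blocks(css=...) can target any class
# attached to a component via elem_classes.
css = """
.no-background > div:first-child {
    border: none !important;
    background: transparent !important;
    box-shadow: none !important;
}
"""

with gr.Blocks(css=css) as demo:
    # elem_classes accepts a single class name or a list of names,
    # as the diff uses in both forms.
    gr.Textbox(label="Styled via elem_classes", elem_classes="no-background")

if __name__ == "__main__":
    demo.launch()
```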
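The new `apply_button.click` wiring references `self.apply_prompts`, a method this diff does not show. A hedged sketch of what such a method could look like, assuming the Space queries the selected model through `huggingface_hub.InferenceClient` (the client, the token handling, and the `max_tokens` value are assumptions, not part of this commit):

```python
from huggingface_hub import InferenceClient  # assumed dependency, not shown in the diff

def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str) -> tuple:
    """Run both prompts through the selected model and return (original, refined) outputs."""
    client = InferenceClient(model=model)  # assumes HF_TOKEN is set in the environment
    outputs = []
    for prompt in (original_prompt, refined_prompt):
        response = client.chat_completion(
            messages=[{"role": "user", "content": prompt}],
            max_tokens=512,  # assumed limit
        )
        outputs.append(response.choices[0].message.content)
    # Order matches apply_button.click(outputs=[original_output, refined_output]).
    return tuple(outputs)
```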
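Note the contract that the new `refine_button.click` imposes: `refine_prompt` must return exactly four values, in the order of `outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]`. A sketch of a return shape that would satisfy the wiring, assuming `result` behaves like a dict (the key names are assumptions):

```python
def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
    input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
    result = self.prompt_refiner.refine_prompt(input_data)
    # Gradio unpacks the tuple positionally into the four output components.
    return (
        result.get("initial_prompt_evaluation", ""),   # -> analysis_evaluation
        result.get("refined_prompt", ""),              # -> refined_prompt
        result.get("explanation_of_refinements", ""),  # -> explanation_of_refinements
        result,                                        # -> full_response_json
    )
```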
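Since `__init__` only builds `self.interface`, nothing is served until it is launched; a typical entry point would be (constructor arguments for `PromptRefiner` are assumed):

```python
if __name__ == "__main__":
    app = GradioInterface(PromptRefiner())
    app.interface.launch()
```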