baconnier committed on
Commit 09cb397
1 Parent(s): 046b80d

Upload 8 files

Files changed (3)
  1. app.py +131 -104
  2. metaprompt_router.py +4 -3
  3. prompt_refiner.py +98 -5
app.py CHANGED
@@ -5,115 +5,142 @@ from variables import *
 from custom_css import custom_css
 
 class GradioInterface:
-    def __init__(self, prompt_refiner: PromptRefiner, custom_css):
-        self.prompt_refiner = prompt_refiner
-        with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
-            with gr.Column(elem_classes=["container", "title-container"]):
-                gr.Markdown("# PROMPT++")
-                gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
-                gr.Markdown("Learn how to generate an improved version of your prompts.")
-
-            with gr.Column(elem_classes=["container", "input-container"]):
-                prompt_text = gr.Textbox(
-                    label="Type your prompt (or let it empty to see metaprompt)",
-                    lines=5
-                )
-                meta_prompt_choice = gr.Radio(
-                    metaprompt_list,
-                    label="Choose Meta Prompt",
-                    value=metaprompt_list[0],
-                    elem_classes=["no-background", "radio-group"]
-                )
-                refine_button = gr.Button("Refine Prompt")
-
-            with gr.Row(elem_classes=["container2"]):
-                with gr.Accordion("Examples", open=False):
-                    gr.Examples(
-                        examples=examples,
-                        inputs=[prompt_text, meta_prompt_choice]
-                    )
-                with gr.Accordion("Meta Prompt explanation", open=False):
-                    gr.Markdown(explanation_markdown)
-
-            with gr.Column(elem_classes=["container", "analysis-container"]):
-                gr.Markdown(' ')
-                gr.Markdown("### Initial prompt analysis")
-                analysis_evaluation = gr.Markdown()
-                gr.Markdown("### Refined Prompt")
-                refined_prompt = gr.Textbox(
-                    label="Refined Prompt",
-                    interactive=True,
-                    show_label=True,
-                    show_copy_button=True,
-                )
-                gr.Markdown("### Explanation of Refinements")
-                explanation_of_refinements = gr.Markdown()
-
-            with gr.Column(elem_classes=["container", "model-container"]):
-                with gr.Row():
-                    apply_model = gr.Dropdown(models,
-                        value="meta-llama/Llama-3.1-8B-Instruct",
-                        label="Choose the Model",
-                        container=False,
-                        scale=1,
-                        min_width=300
-                    )
-                    apply_button = gr.Button("Apply MetaPrompt")
-
-                gr.Markdown("### Prompts on choosen model")
-                with gr.Tabs():
-                    with gr.TabItem("Original Prompt Output"):
-                        original_output = gr.Markdown()
-                    with gr.TabItem("Refined Prompt Output"):
-                        refined_output = gr.Markdown()
-                with gr.Accordion("Full Response JSON", open=False, visible=True):
-                    full_response_json = gr.JSON()
-
-            refine_button.click(
-                fn=self.refine_prompt,
-                inputs=[prompt_text, meta_prompt_choice],
-                outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
-            )
-
-            apply_button.click(
-                fn=self.apply_prompts,
-                inputs=[prompt_text, refined_prompt, apply_model],
-                outputs=[original_output, refined_output],
-                api_name="apply_prompts"
-            )
-            gr.HTML(
-                "<p style='text-align: center; color:orange;'>⚠ This space is in progress, and we're actively working on it, so you might find some bugs! Please report any issues you have in the Community tab to help us make it better for all.</p>"
-            )
-
-    def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
-        initial_prompt_evaluation, refined_prompt, explanation_refinements, full_response = self.prompt_refiner.refine_prompt(prompt, meta_prompt_choice)
-        analysis_evaluation = f"\n\n{initial_prompt_evaluation}"
-        return (
-            analysis_evaluation,
-            refined_prompt,
-            explanation_refinements,
-            full_response
-        )
-
-    def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
-        try:
-            original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
-            refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
-            return original_output, refined_output
-        except Exception as e:
-            return f"Error: {str(e)}", f"Error: {str(e)}"
-
-    def launch(self, share=False):
-        self.interface.launch(share=share)
+    def __init__(self, prompt_refiner: PromptRefiner, custom_css):
+        self.prompt_refiner = prompt_refiner
+        with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
+            with gr.Column(elem_classes=["container", "title-container"]):
+                gr.Markdown("# PROMPT++")
+                gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
+                gr.Markdown("Learn how to generate an improved version of your prompts.")
+
+            with gr.Column(elem_classes=["container", "input-container"]):
+                prompt_text = gr.Textbox(
+                    label="Type your prompt (or leave it empty to see the metaprompt)",
+                    lines=5
+                )
+                with gr.Row(elem_classes=["container2"]):
+                    automatic_metaprompt_button = gr.Button("Refine Prompt using automatic MetaPrompt choice")
+
+                with gr.Accordion("Manual Choice of Meta Prompt", open=False):
+                    meta_prompt_choice = gr.Radio(
+                        metaprompt_list,
+                        label="Choose Meta Prompt",
+                        value=metaprompt_list[0],
+                        elem_classes=["no-background", "radio-group"]
+                    )
+                    refine_button = gr.Button("Refine Prompt using manual choice")
+                    gr.Markdown(explanation_markdown)
+                gr.Examples(
+                    examples=examples,
+                    inputs=[prompt_text, meta_prompt_choice]
+                )
+
+            with gr.Column(elem_classes=["container", "analysis-container"]):
+                gr.Markdown(' ')
+                gr.Markdown("### Original Prompt analysis")
+                analysis_evaluation = gr.Markdown()
+                gr.Markdown("---")
+                # MetaPrompt_analysis_evaluation = gr.Markdown("### MetaPrompt used")
+                MetaPrompt_analysis = gr.Markdown("### MetaPrompt used")
+                gr.Markdown("### Refined Prompt")
+                refined_prompt = gr.Textbox(
+                    label="Refined Prompt",
+                    interactive=True,
+                    show_label=True,
+                    show_copy_button=True,
+                )
+                gr.Markdown("### Explanation of Refinements")
+                explanation_of_refinements = gr.Markdown()
+
+            with gr.Column(elem_classes=["container", "model-container"]):
+                with gr.Row():
+                    apply_model = gr.Dropdown(models,
+                        value="meta-llama/Llama-3.1-8B-Instruct",
+                        label="Choose the Model",
+                        container=False,
+                        scale=1,
+                        min_width=300
+                    )
+                    apply_button = gr.Button("Apply Prompts")
+
+                gr.Markdown("### Prompts on chosen model")
+                with gr.Tabs():
+                    with gr.TabItem("Original Prompt Output"):
+                        original_output = gr.Markdown()
+                    with gr.TabItem("Refined Prompt Output"):
+                        refined_output = gr.Markdown()
+                with gr.Accordion("Full Response JSON", open=False, visible=True):
+                    full_response_json = gr.JSON()
+
+            automatic_metaprompt_button.click(
+                fn=self.automatic_metaprompt,
+                inputs=[prompt_text, meta_prompt_choice],
+                outputs=[MetaPrompt_analysis, analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
+            )
+
+            refine_button.click(
+                fn=self.refine_prompt,
+                inputs=[prompt_text, meta_prompt_choice],
+                outputs=[MetaPrompt_analysis, analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
+            )
+
+            apply_button.click(
+                fn=self.apply_prompts,
+                inputs=[prompt_text, refined_prompt, apply_model],
+                outputs=[original_output, refined_output],
+                api_name="apply_prompts"
+            )
+            gr.HTML(
+                "<p style='text-align: center; color:orange;'>⚠ This space is in progress, and we're actively working on it, so you might find some bugs! Please report any issues you have in the Community tab to help us make it better for all.</p>"
+            )
+
+    def automatic_metaprompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
+        """Wrapper method that calls prompt_refiner's automatic_metaprompt."""
+        metaprompt_analysis_evaluation, initial_prompt_evaluation, refined_prompt, explanation_refinements, full_response = (
+            self.prompt_refiner.automatic_metaprompt(prompt, meta_prompt_choice)
+        )
+        analysis_evaluation = f"\n\n{initial_prompt_evaluation}"
+        return (
+            metaprompt_analysis_evaluation,
+            analysis_evaluation,
+            refined_prompt,
+            explanation_refinements,
+            full_response
+        )
+
+    def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
+        """Refine the given prompt using the selected meta prompt."""
+        metaprompt_analysis_evaluation, initial_prompt_evaluation, refined_prompt, explanation_refinements, full_response = (
+            self.prompt_refiner.refine_prompt(prompt, meta_prompt_choice)
+        )
+        analysis_evaluation = f"\n\n{initial_prompt_evaluation}"
+        return (
+            metaprompt_analysis_evaluation,
+            analysis_evaluation,
+            refined_prompt,
+            explanation_refinements,
+            full_response
+        )
+
+    def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
+        try:
+            original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
+            refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
+            return original_output, refined_output
+        except Exception as e:
+            return f"Error: {str(e)}", f"Error: {str(e)}"
+
+    def launch(self, share=False):
+        self.interface.launch(share=share)
 
 
 if __name__ == '__main__':
-    # Initialize the prompt refiner with API token
-    prompt_refiner = PromptRefiner(api_token,meta_prompts)
-
-    # Create the Gradio interface
-    gradio_interface = GradioInterface(prompt_refiner, custom_css)
-
-    # Launch the interface
-    gradio_interface.launch(share=True)
+    # Initialize the prompt refiner with API token
+    prompt_refiner = PromptRefiner(api_token, meta_prompts, metaprompt_explanations)
+
+    # Create the Gradio interface
+    gradio_interface = GradioInterface(prompt_refiner, custom_css)
+
+    # Launch the interface
+    gradio_interface.launch(share=True)
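Note on the new wiring: both refinement buttons now return a 5-tuple that Gradio maps positionally onto the same five output components (MetaPrompt analysis, prompt evaluation, refined prompt, explanation, full JSON). A minimal sketch of that pattern, with illustrative names rather than this app's own components:

import gradio as gr

def handler(prompt: str, choice: str):
    # One value per output component, in declaration order.
    return "analysis", "evaluation", "refined prompt", "explanation", {"raw": "response"}

with gr.Blocks() as demo:
    prompt = gr.Textbox(lines=5)
    choice = gr.Radio(["a", "b"], value="a")
    run = gr.Button("Run")
    analysis, evaluation, explanation = gr.Markdown(), gr.Markdown(), gr.Markdown()
    refined = gr.Textbox(show_copy_button=True)
    raw = gr.JSON()
    # Tuple element i is routed to outputs[i].
    run.click(fn=handler, inputs=[prompt, choice],
              outputs=[analysis, evaluation, refined, explanation, raw])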
metaprompt_router.py CHANGED
@@ -1,5 +1,4 @@
-metaprompt_router=
-"""
+metaprompt_router="""
 You are an AI Prompt Selection Assistant. Your task is to analyze the user's query and recommend the most appropriate metaprompt from the following list based on the nature of the request. Provide your response in a structured JSON format.
 
 **Metaprompt List:**
@@ -48,7 +47,9 @@
 - **Description**: Advanced reasoning and proof engineering approach. Focuses on sequential processing and context awareness.
 - **Sample**: "Analyze the historical context and impact of the Industrial Revolution on modern economic systems."
 
-For any given user query, provide your response in the following JSON format enclosed in <json> tags:
+For this given user query:
+[Insert initial prompt here]
+provide your response in the following JSON format enclosed in <json> tags:
 
 <json>
 {
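The <json> template is cut off in this view, but the fields that automatic_metaprompt in prompt_refiner.py dereferences pin down the minimum shape the router must return. A reconstruction from that consuming code (the values are illustrative placeholders, not the verbatim template):

# Minimum router output implied by the keys read in prompt_refiner.py:
router_result = {
    "recommended_metaprompt": {
        "key": "star",            # must match a key in meta_prompts
        "name": "...",            # rendered as "Primary Choice"
        "description": "...",
        "explanation": "..."      # rendered as "Why This Choice"
    },
    "alternative_recommendation": {
        "name": "...",            # rendered as "Secondary Choice"
        "explanation": "..."      # rendered as "Why Consider This"
    }
}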
prompt_refiner.py CHANGED
@@ -5,6 +5,7 @@ from pydantic import BaseModel, Field, validator
 from huggingface_hub import InferenceClient
 from huggingface_hub.errors import HfHubHTTPError
 from variables import *
+from metaprompt_router import metaprompt_router
 
 class LLMResponse(BaseModel):
     initial_prompt_evaluation: str = Field(..., description="Evaluation of the initial prompt")
@@ -37,9 +38,10 @@
         return v
 
 class PromptRefiner:
-    def __init__(self, api_token: str, meta_prompts: dict):
+    def __init__(self, api_token: str, meta_prompts: dict, metaprompt_explanations: dict):
         self.client = InferenceClient(token=api_token, timeout=120)
         self.meta_prompts = meta_prompts
+        self.metaprompt_explanations = metaprompt_explanations
 
     def _clean_json_string(self, content: str) -> str:
         """Clean and prepare JSON string for parsing."""
@@ -116,14 +118,103 @@
             "explanation_of_refinements": "",
             "response_content": {"error": error_message}
         }
+
+    def automatic_metaprompt(self, prompt: str, meta_prompt_choice: str) -> Tuple[str, str, str, str, dict]:
+        """Automatically select and apply the most appropriate metaprompt for the given prompt."""
+        try:
+            # First, use the router to determine the best metaprompt
+            router_messages = [
+                {
+                    "role": "system",
+                    "content": "You are an AI Prompt Selection Assistant that helps choose the most appropriate metaprompt based on the user's query."
+                },
+                {
+                    "role": "user",
+                    "content": metaprompt_router.replace("[Insert initial prompt here]", prompt)
+                }
+            ]
+
+            # Get router response
+            router_response = self.client.chat_completion(
+                model=prompt_refiner_model,
+                messages=router_messages,
+                max_tokens=3000,
+                temperature=0.2
+            )
+
+            router_content = router_response.choices[0].message.content.strip()
+
+            # Extract JSON from router response
+            json_match = re.search(r'<json>(.*?)</json>', router_content, re.DOTALL)
+            if not json_match:
+                raise ValueError("No JSON found in router response")
+
+            router_result = json.loads(json_match.group(1))
+
+            # Get the recommended metaprompt key
+            recommended_key = router_result["recommended_metaprompt"]["key"]
+
+            # Use the recommended metaprompt to refine the prompt
+            selected_meta_prompt = self.meta_prompts.get(recommended_key)
+            selected_meta_prompt_explanations = self.metaprompt_explanations.get(recommended_key)
+
+            # Now use the selected metaprompt to refine the original prompt
+            refine_messages = [
+                {
+                    "role": "system",
+                    "content": 'You are an expert at refining and extending prompts. Given a basic prompt, provide a more relevant and detailed prompt.'
+                },
+                {
+                    "role": "user",
+                    "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt)
+                }
+            ]
+
+            response = self.client.chat_completion(
+                model=prompt_refiner_model,
+                messages=refine_messages,
+                max_tokens=3000,
+                temperature=0.8
+            )
+
+            response_content = response.choices[0].message.content.strip()
+            result = self._parse_response(response_content)
+
+            try:
+                llm_response = LLMResponse(**result)
+                metaprompt_analysis = f"""
+#### Selected MetaPrompt Analysis
+- <span style="color: grey; font-style: italic;">**Primary Choice**</span>: *{router_result["recommended_metaprompt"]["name"]}*
+- *Description*: *{router_result["recommended_metaprompt"]["description"]}*
+- *Why This Choice*: *{router_result["recommended_metaprompt"]["explanation"]}*
+
+#### Alternative Option
+- <span style="color: grey; font-style: italic;">**Secondary Choice**</span>: *{router_result["alternative_recommendation"]["name"]}*
+- *Why Consider This*: *{router_result["alternative_recommendation"]["explanation"]}*
+"""
+                return (
+                    metaprompt_analysis,
+                    llm_response.initial_prompt_evaluation,
+                    llm_response.refined_prompt,
+                    llm_response.explanation_of_refinements,
+                    llm_response.dict()
+                )
+            except Exception as e:
+                print(f"Error creating LLMResponse: {e}")
+                return self._create_error_response(f"Error validating response: {str(e)}")
+
+        except HfHubHTTPError as e:
+            return self._create_error_response("Model timeout. Please try again later.")
+        except Exception as e:
+            return self._create_error_response(f"Unexpected error: {str(e)}")
 
     def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> Tuple[str, str, str, dict]:
         """Refine the given prompt using the selected meta prompt."""
         try:
-            selected_meta_prompt = self.meta_prompts.get(
-                meta_prompt_choice,
-                self.meta_prompts["star"]
-            )
+            selected_meta_prompt = self.meta_prompts.get(meta_prompt_choice)
+            selected_meta_prompt_explanations = self.metaprompt_explanations.get(meta_prompt_choice)
 
             messages = [
                 {
@@ -149,6 +240,7 @@
             try:
                 llm_response = LLMResponse(**result)
                 return (
+                    f"- **{meta_prompt_choice}**: {selected_meta_prompt_explanations}",
                     llm_response.initial_prompt_evaluation,
                     llm_response.refined_prompt,
                     llm_response.explanation_of_refinements,
@@ -166,6 +258,7 @@
     def _create_error_response(self, error_message: str) -> Tuple[str, str, str, dict]:
         """Create a standardized error response tuple."""
         return (
+            f"Error: {error_message}",
            f"Error: {error_message}",
            "The selected model is currently unavailable.",
            "An error occurred during processing.",