Spaces: Upload 8 files
- app.py +92 -72
- custom_css.py +8 -4
- metaprompt_router.py +108 -72
- prompt_refiner.py +71 -141
app.py
CHANGED
@@ -1,146 +1,166 @@
import gradio as gr
from prompt_refiner import PromptRefiner
from variables import models, explanation_markdown, metaprompt_list, examples
from custom_css import custom_css

class GradioInterface:
    def __init__(self, prompt_refiner: PromptRefiner, custom_css):
        self.prompt_refiner = prompt_refiner
        with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
            # CONTAINER 1
            with gr.Column(elem_classes=["container", "title-container"]):
                gr.Markdown("# PROMPT++")
                gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
                gr.Markdown("Learn how to generate an improved version of your prompts.")

            # CONTAINER 2
            with gr.Column(elem_classes=["container", "input-container"]):
                prompt_text = gr.Textbox(label="Type your prompt (or leave empty to see metaprompt)", lines=5)
                automatic_metaprompt_button = gr.Button("Automatic Choice for Refinement Method")
                # with gr.Row(elem_classes=["container2"]):
                MetaPrompt_analysis = gr.Markdown()

            # CONTAINER 3
            # with gr.Column(elem_classes=["container"]):
            with gr.Column(elem_classes=["container", "meta-container"]):
                meta_prompt_choice = gr.Radio(
                    choices=metaprompt_list,
                    label="Choose Meta Prompt",
                    value=metaprompt_list[0],
                    elem_classes=["no-background", "radio-group"]
                )
                refine_button = gr.Button("Refine Prompt")
                with gr.Accordion("Metaprompt Explanation", open=False, visible=True):
                    gr.Markdown(explanation_markdown)
                gr.Examples(
                    examples=examples,
                    inputs=[prompt_text, meta_prompt_choice]
                )

            with gr.Column(elem_classes=["container", "analysis-container"]):
                gr.Markdown(" ")
                prompt_evaluation = gr.Markdown()  # Added this component
                gr.Markdown("### Refined Prompt")
                refined_prompt = gr.Textbox(
                    label=" ",
                    interactive=True,
                    show_label=True,
                    show_copy_button=True,
                )
                # gr.Markdown("### Explanation of Refinements")
                explanation_of_refinements = gr.Markdown()

            with gr.Column(elem_classes=["container", "model-container"]):
                with gr.Row():
                    apply_model = gr.Dropdown(
                        choices=models,
                        value=models[0] if models else None,
                        label="Choose the Model",
                        container=False,
                        scale=1,
                        min_width=300
                    )
                    apply_button = gr.Button("Apply Prompts")

                gr.Markdown("### Prompts on Chosen Model")
                with gr.Tabs():
                    with gr.TabItem("Original Prompt Output"):
                        original_output = gr.Markdown()
                    with gr.TabItem("Refined Prompt Output"):
                        refined_output = gr.Markdown()

            with gr.Accordion("Full Response JSON", open=False, visible=True):
                full_response_json = gr.JSON()

            # Button click handlers
            automatic_metaprompt_button.click(
                fn=self.automatic_metaprompt,
                inputs=[prompt_text],
                outputs=[MetaPrompt_analysis, meta_prompt_choice]
            )

            refine_button.click(
                fn=self.refine_prompt,
                inputs=[prompt_text, meta_prompt_choice],
                outputs=[
                    prompt_evaluation,
                    refined_prompt,
                    explanation_of_refinements,
                    full_response_json
                ]
            )

            apply_button.click(
                fn=self.apply_prompts,
                inputs=[prompt_text, refined_prompt, apply_model],
                outputs=[original_output, refined_output]
            )

            gr.HTML(
                "<p style='text-align: center; color:orange;'>⚠ This space is in progress, and we're actively working on it, so you might find some bugs! Please report any issues you have in the Community tab to help us make it better for all.</p>"
            )

    def automatic_metaprompt(self, prompt: str) -> tuple:
        """Handle automatic metaprompt selection."""
        try:
            if not prompt.strip():
                return "Please enter a prompt to analyze.", None

            metaprompt_analysis, recommended_key = self.prompt_refiner.automatic_metaprompt(prompt)
            return metaprompt_analysis, recommended_key

        except Exception as e:
            error_message = f"Error in automatic metaprompt: {str(e)}"
            return error_message, None

    def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
        """Handle manual prompt refinement."""
        try:
            if not prompt.strip():
                return (
                    "No prompt provided.",
                    "",
                    "",
                    {}
                )

            result = self.prompt_refiner.refine_prompt(prompt, meta_prompt_choice)
            return (
                result[0],  # initial_prompt_evaluation
                result[1],  # refined_prompt
                result[2],  # explanation_of_refinements
                result[3]   # full_response
            )
        except Exception as e:
            error_message = f"Error in refine_prompt: {str(e)}"
            return error_message, "", "", {}

    def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str) -> tuple:
        """Apply both original and refined prompts to the selected model."""
        try:
            if not original_prompt.strip() or not refined_prompt.strip():
                return "No prompt provided.", "No prompt provided."

            original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
            refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)

            return original_output, refined_output
        except Exception as e:
            error_message = f"Error applying prompts: {str(e)}"
            return error_message, error_message

    def launch(self, share=False):
        """Launch the Gradio interface."""
        self.interface.launch(share=share)


if __name__ == '__main__':
    from variables import api_token, meta_prompts, metaprompt_explanations

    # Initialize the prompt refiner
    prompt_refiner = PromptRefiner(api_token, meta_prompts, metaprompt_explanations)

    # Create and launch the Gradio interface
    gradio_interface = GradioInterface(prompt_refiner, custom_css)
    gradio_interface.launch(share=True)
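Note: app.py imports models, explanation_markdown, metaprompt_list and examples at module level, and api_token, meta_prompts and metaprompt_explanations under __main__, all from a variables.py that is not part of this diff. A minimal sketch of what that module is assumed to expose; the names mirror the imports above, but every value is illustrative, not the actual file:

# variables.py -- hypothetical stand-in for the module imported by app.py (values are illustrative)
import os

api_token = os.getenv("HF_TOKEN")                          # Inference API token, assumed to come from an env var
prompt_refiner_model = "meta-llama/Llama-3.1-8B-Instruct"  # illustrative model id used by prompt_refiner.py

models = [prompt_refiner_model]                            # choices for the "Choose the Model" dropdown
meta_prompts = {                                           # key -> metaprompt template containing the placeholder PromptRefiner replaces
    "quick_simplified": "Improve the following prompt:\n[Insert initial prompt here]",
}
metaprompt_explanations = {                                # key -> short description shown in the explanation accordion
    "quick_simplified": "Fast, lightweight refinement for straightforward prompts.",
}
metaprompt_list = list(meta_prompts.keys())                # choices for the "Choose Meta Prompt" radio group
explanation_markdown = "\n".join(f"- **{k}**: {v}" for k, v in metaprompt_explanations.items())
examples = [["Tell me about climate change", "quick_simplified"]]  # rows for gr.Examples: [prompt_text, meta_prompt_choice]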
custom_css.py
CHANGED
@@ -13,7 +13,7 @@ custom_css = """

.container::before {
    position: absolute;
    top: -14px;
    left: 20px;
    background: white;
    padding: 0 10px;

@@ -53,15 +53,19 @@ custom_css = """
}

.input-container::before {
    content: 'PROMPT ANALYSIS';
}

.analysis-container::before {
    content: 'PROMPT REFINEMENT';
}

.meta-container::before {
    content: 'REFINEMENT METHOD';
}

.model-container::before {
    content: 'PROMPTS APPLICATION';
}

.examples-container::before {
metaprompt_router.py
CHANGED
@@ -1,94 +1,130 @@

def generate_metaprompt_router(methods_dict):
    # Start with the base template
    router_template = """
You are an AI Prompt Selection Assistant. Your task is to analyze the user's query and recommend the most appropriate metaprompt from the following list based on the nature of the request. Provide your response in a structured JSON format.

**Metaprompt List:**
"""

    # Add each method to the template
    for i, (key, method) in enumerate(methods_dict.items(), 1):
        method_template = f"""
{i}. **{key}**
   - **Name**: {method['name']}
   - **Description**: {method['description']}
   - **Sample**: {', '.join(f'"{example}"' for example in method.get('examples', []))}
"""
        router_template += method_template

    # Add the output format template
    router_template += """
For this given user query:
[Insert initial prompt here]

Analyze the query and provide your recommendation in the following JSON format enclosed in <json> tags:

<json>
{
    "user_query": "The original query from the user",
    "recommended_metaprompt": {
        "key": "Key of the recommended metaprompt",
        "name": "Name of the recommended metaprompt",
        "description": "Brief description of the metaprompt's purpose",
        "explanation": "Detailed explanation of why this metaprompt is the best fit for this specific query, including how it addresses the query's unique requirements and challenges",
        "similar_sample": "If available, a sample use case from the list that's most similar to the user's query",
        "customized_sample": "A new sample specifically tailored to the user's query using this metaprompt approach"
    },
    "alternative_recommendation": {
        "key": "Key of the second-best metaprompt option",
        "name": "Name of the second-best metaprompt option",
        "explanation": "Brief explanation of why this could be an alternative choice and what specific benefits it might offer for this query"
    }
}
</json>
"""

    return router_template

# Generate the router configuration
# metaprompt_router = generate_metaprompt_router(methods_dict)  # methods_dict is your full file dictionary

metaprompt_router = """
You are an AI Prompt Selection Assistant. Your task is to analyze the user's query and recommend the most appropriate metaprompt from the available methods. Each method has specific strengths and use cases.

**Metaprompt List:**
1. **comprehensive_multistage**
   - **Name**: Comprehensive Multi-Stage Refinement
   - **Description**: Use this method for a thorough, multi-stage refinement process. Ideal for complex prompts requiring in-depth analysis, exploration of alternatives, and synthesis of ideas. Best when time allows for detailed refinement and consideration of various aspects.
   - **Sample**: "Design a comprehensive educational curriculum for teaching artificial intelligence to high school students", "Develop a detailed analysis of climate change impacts on global agriculture over the next 50 years"

2. **structured_roleplaying**
   - **Name**: Structured Role-Playing Enhancement
   - **Description**: Opt for this when you need a structured approach with emphasis on role-playing and advanced techniques. Useful for tasks benefiting from diverse perspectives and complex reasoning.
   - **Sample**: "Create a dialogue between Einstein and a modern AI researcher discussing the future of quantum computing", "Simulate a strategic planning meeting between historical business leaders solving current tech industry challenges"

3. **balanced_scientific**
   - **Name**: Balanced Scientific Structuring
   - **Description**: Choose this for a balance between structure and advanced techniques, with a focus on role-playing. Suitable for scientific or technical prompts.
   - **Sample**: "Explain how CRISPR gene editing technology works and its potential applications in medicine", "Analyze the psychological and neurological factors that influence decision-making in high-pressure situations"

4. **quick_simplified**
   - **Name**: Quick Simplified Refinement
   - **Description**: Use this simplified approach for straightforward prompts or when time is limited. Focuses on essential improvements without complex techniques.
   - **Sample**: "What are the key differences between renewable and non-renewable energy sources?", "Explain the basic principles of machine learning in simple terms"

5. **logical_flow**
   - **Name**: Logical Flow Enhancement
   - **Description**: Choose this method to analyze and improve a prompt's strengths and weaknesses, focusing on information flow. Useful for enhancing the logical structure of prompts.
   - **Sample**: "Break down the process of implementing a sustainable urban transportation system", "Analyze the cause-and-effect relationship between social media use and mental health"

6. **flexible_technique**
   - **Name**: Flexible Technique Integration
   - **Description**: Employ this advanced approach to combine multiple prompt engineering techniques. Ideal for complex tasks requiring both clarity and sophisticated methods.
   - **Sample**: "Create a comprehensive guide for starting a tech startup, including business, technical, and marketing aspects", "Design a multi-phase approach to teaching critical thinking skills in different educational contexts"

7. **autoregressive_reasoning**
   - **Name**: Autoregressive Reasoning Optimization
   - **Description**: Utilize this method for tasks requiring careful reasoning before conclusions. Best for prompts needing detailed output formatting.
   - **Sample**: "Develop a step-by-step analysis of market trends to predict future investment opportunities", "Create a systematic approach to debugging complex software systems"

8. **mathematical_proof**
   - **Name**: Mathematical Proof Structuring
   - **Description**: Specialized approach for mathematical and formal proofs. Use this for tasks requiring a logical, step-by-step prompt engineering process.
   - **Sample**: "Prove the relationship between energy and mass in Einstein's E=mc²", "Demonstrate the mathematical principles behind modern encryption methods"

9. **sequential_contextual**
   - **Name**: Sequential Contextual Enhancement
   - **Description**: Advanced reasoning and proof engineering approach. Focuses on systematic prompt enhancement through structured analysis, enhancement protocols, and validation. Ideal for complex tasks requiring thorough documentation and systematic improvements.
   - **Sample**: "Create a framework for analyzing the long-term societal impacts of artificial intelligence", "Develop a systematic approach to evaluating and improving corporate sustainability practices"

10. **attention_aware**
    - **Name**: Attention-Aware Positioning
    - **Description**: Token-efficient prompt optimization focusing on attention positioning and context management. Best for tasks requiring careful information placement and progressive context building while maintaining efficiency.
    - **Sample**: "Design a progressive learning curriculum that builds complex concepts from fundamental principles", "Create a narrative structure for explaining quantum physics concepts to general audiences"

For this given user query:
[Insert initial prompt here]

Analyze the query and provide your recommendation in the following JSON format enclosed in <json> tags:

<json>
{
    "user_query": "The original query from the user",
    "recommended_metaprompt": {
        "key": "Key of the recommended metaprompt",
        "name": "Name of the recommended metaprompt",
        "description": "Brief description of the metaprompt's purpose",
        "explanation": "Detailed explanation of why this metaprompt is the best fit for this specific query, including how it addresses the query's unique requirements and challenges",
        "similar_sample": "If available, a sample use case from the list that's most similar to the user's query",
        "customized_sample": "A new sample specifically tailored to the user's query using this metaprompt approach"
    },
    "alternative_recommendation": {
        "key": "Key of the second-best metaprompt option",
        "name": "Name of the second-best metaprompt option",
        "explanation": "Brief explanation of why this could be an alternative choice and what specific benefits it might offer for this query"
    }
}
</json>
"""
prompt_refiner.py
CHANGED
@@ -22,14 +22,8 @@ class LLMResponse(BaseModel):
                return {"raw_content": v}
        return v

    @validator('initial_prompt_evaluation', 'refined_prompt', 'explanation_of_refinements')
    def clean_text_fields(cls, v):
        if isinstance(v, str):
            return v.strip().replace('\\n', '\n').replace('\\"', '"')
        elif isinstance(v, list):
@@ -38,54 +32,55 @@ class LLMResponse(BaseModel):
        return v

class PromptRefiner:
    def __init__(self, api_token: str, meta_prompts: dict, metaprompt_explanations: dict):
        self.client = InferenceClient(token=api_token, timeout=120)
        self.meta_prompts = meta_prompts
        self.metaprompt_explanations = metaprompt_explanations

    def _clean_json_string(self, content: str) -> str:
        """Clean and prepare JSON string for parsing."""
        content = content.replace('•', '-')
        content = re.sub(r'\s+', ' ', content)
        content = content.replace('\\"', '"')
        return content.strip()

    def _parse_response(self, response_content: str) -> dict:
        """Parse the LLM response with enhanced error handling."""
        try:
            json_match = re.search(r'<json>\s*(.*?)\s*</json>', response_content, re.DOTALL)
            if json_match:
                json_str = self._clean_json_string(json_match.group(1))
                try:
                    parsed_json = json.loads(json_str)
                    if isinstance(parsed_json, str):
                        parsed_json = json.loads(parsed_json)
                    prompt_analysis = f"""
#### Original prompt analysis
- {parsed_json.get("initial_prompt_evaluation", "")}
"""
                    explanation_of_refinements = f"""
#### Refinement Explanation
- {parsed_json.get("explanation_of_refinements", "")}
"""
                    return {
                        "initial_prompt_evaluation": prompt_analysis,
                        "refined_prompt": parsed_json.get("refined_prompt", ""),
                        "explanation_of_refinements": explanation_of_refinements,
                        "response_content": parsed_json
                    }
                except json.JSONDecodeError:
                    return self._parse_with_regex(json_str)

            return self._parse_with_regex(response_content)

        except Exception as e:
            print(f"Error parsing response: {str(e)}")
            return self._create_error_dict(str(e))

    def _parse_with_regex(self, content: str) -> dict:
        """Parse content using regex when JSON parsing fails."""
        output = {}

        refinements_match = re.search(r'"explanation_of_refinements":\s*\[(.*?)\]', content, re.DOTALL)
        if refinements_match:
            refinements_str = refinements_match.group(1)
@@ -95,18 +90,15 @@ class PromptRefiner:
            ]
            output["explanation_of_refinements"] = refinements
        else:
            pattern = r'"explanation_of_refinements":\s*"(.*?)"(?:,|\})'
            match = re.search(pattern, content, re.DOTALL)
            output["explanation_of_refinements"] = match.group(1).strip() if match else ""

        for key in ["initial_prompt_evaluation", "refined_prompt"]:
            pattern = rf'"{key}":\s*"(.*?)"(?:,|\}})'
            match = re.search(pattern, content, re.DOTALL)
            output[key] = match.group(1).strip() if match else ""

        output["response_content"] = {"raw_content": content}
        return output

@@ -118,11 +110,10 @@ class PromptRefiner:
            "explanation_of_refinements": "",
            "response_content": {"error": error_message}
        }

    def automatic_metaprompt(self, prompt: str) -> Tuple[str, str]:
        """Automatically select the most appropriate metaprompt."""
        try:
            router_messages = [
                {
                    "role": "system",
@@ -134,7 +125,6 @@ class PromptRefiner:
                }
            ]

            router_response = self.client.chat_completion(
                model=prompt_refiner_model,
                messages=router_messages,
@@ -143,72 +133,31 @@ class PromptRefiner:
            )

            router_content = router_response.choices[0].message.content.strip()
            json_match = re.search(r'<json>(.*?)</json>', router_content, re.DOTALL)

            if not json_match:
                raise ValueError("No JSON found in router response")

            router_result = json.loads(json_match.group(1))
            recommended_key = router_result["recommended_metaprompt"]["key"]
            metaprompt_analysis = f"""
#### Selected MetaPrompt
- **Primary Choice**: {router_result["recommended_metaprompt"]["name"]}
- *Description*: {router_result["recommended_metaprompt"]["description"]}
- *Why This Choice*: {router_result["recommended_metaprompt"]["explanation"]}
- *Similar Sample*: {router_result["recommended_metaprompt"]["similar_sample"]}
- *Customized Sample*: {router_result["recommended_metaprompt"]["customized_sample"]}

#### Alternative Option
- **Secondary Choice**: {router_result["alternative_recommendation"]["name"]}
- *Why Consider This*: {router_result["alternative_recommendation"]["explanation"]}
"""

            return metaprompt_analysis, recommended_key

        except Exception as e:
            return f"Error in automatic metaprompt: {str(e)}", ""


    def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> Tuple[str, str, str, dict]:
        """Refine the given prompt using the selected meta prompt."""
@@ -218,47 +167,48 @@ class PromptRefiner:

            messages = [
                {
                    "role": "system",
                    "content": 'You are an expert at refining and extending prompts.'
                },
                {
                    "role": "user",
                    "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt)
                }
            ]

            response = self.client.chat_completion(
                model=prompt_refiner_model,
                messages=messages,
                max_tokens=3000,
                temperature=0.8
            )

            result = self._parse_response(response.choices[0].message.content.strip())
            llm_response = LLMResponse(**result)
            llm_response_dico = {}
            llm_response_dico['initial_prompt'] = prompt
            llm_response_dico['meta_prompt'] = meta_prompt_choice
            llm_response_dico = llm_response_dico | llm_response.dict()

            return (
                llm_response.initial_prompt_evaluation,
                llm_response.refined_prompt,
                llm_response.explanation_of_refinements,
                llm_response_dico
            )

        except Exception as e:
            return (
                f"Error: {str(e)}",
                "",
                "",
                {}
            )

    def _create_error_response(self, error_message: str) -> Tuple[str, str, str, str, dict]:
        """Create a standardized error response tuple."""
        return (
            # f"Error: {error_message}",
            f"Error: {error_message}",
            "The selected model is currently unavailable.",
            "An error occurred during processing.",
@@ -271,32 +221,14 @@ class PromptRefiner:
            messages = [
                {
                    "role": "system",
                    "content": "You are a markdown formatting expert."
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ]

            response = self.client.chat_completion(
                model=model,
                messages=messages,
@@ -304,13 +236,11 @@ class PromptRefiner:
                temperature=0.8,
                stream=True
            )

            return "".join(
                chunk.choices[0].delta.content or ""
                for chunk in response
            ).strip()

        except Exception as e:
            return f"Error: {str(e)}"
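The refiner methods return plain tuples, so they can be exercised without the Gradio UI. A minimal sketch, assuming variables.py provides api_token, meta_prompts, and metaprompt_explanations as in app.py; the example prompt and model id are illustrative:

# Hypothetical standalone use of PromptRefiner, mirroring what app.py wires into Gradio.
from prompt_refiner import PromptRefiner
from variables import api_token, meta_prompts, metaprompt_explanations

refiner = PromptRefiner(api_token, meta_prompts, metaprompt_explanations)
query = "Explain quantum entanglement to a 10-year-old"

# 1. Let the router pick a metaprompt key for the query.
analysis_md, key = refiner.automatic_metaprompt(query)

# 2. Refine the prompt with that key (or any key present in meta_prompts).
evaluation_md, refined, refinements_md, full_response = refiner.refine_prompt(query, key)

# 3. Apply the original and refined prompts to a model (model id is illustrative).
model_id = "meta-llama/Llama-3.1-8B-Instruct"
original_out = refiner.apply_prompt(query, model_id)
refined_out = refiner.apply_prompt(refined, model_id)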