File size: 6,959 Bytes
0bb3006
a217992
ad2df9a
2e9f353
8100125
 
09cb397
 
 
ad2df9a
09cb397
 
 
 
ad2df9a
 
09cb397
ad2df9a
 
 
 
 
 
 
 
 
 
 
 
 
09cb397
ad2df9a
 
 
09cb397
 
 
ad2df9a
 
 
 
 
09cb397
 
ad2df9a
09cb397
 
 
 
ad2df9a
09cb397
ad2df9a
09cb397
 
 
ad2df9a
 
 
09cb397
 
 
 
 
 
ad2df9a
 
09cb397
 
 
 
 
ad2df9a
09cb397
 
ad2df9a
 
09cb397
 
ad2df9a
 
09cb397
 
 
 
 
ad2df9a
 
 
 
 
 
09cb397
ad2df9a
09cb397
 
 
ad2df9a
09cb397
ad2df9a
09cb397
 
 
8100125
ad2df9a
 
 
 
 
 
 
 
 
 
 
 
09cb397
 
ad2df9a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
09cb397
ad2df9a
 
09cb397
ad2df9a
 
 
09cb397
 
ad2df9a
09cb397
 
ad2df9a
 
09cb397
 
ad2df9a
09cb397
b68c9a6
 
 
ad2df9a
 
 
09cb397
 
ad2df9a
09cb397
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
import gradio as gr
from prompt_refiner import PromptRefiner
from variables import models, explanation_markdown, metaprompt_list, examples
from custom_css import custom_css

class GradioInterface:
  """Gradio web UI wrapper around a PromptRefiner backend.

  Builds a Blocks layout (input box, metaprompt chooser, refinement
  results, model-application tabs) and wires three button callbacks to
  the corresponding PromptRefiner operations. The constructed interface
  is kept on ``self.interface`` and served via :meth:`launch`.
  """

  def __init__(self, prompt_refiner: PromptRefiner, custom_css) -> None:
      """Build the Blocks interface and register all event handlers.

      Args:
          prompt_refiner: Backend performing metaprompt selection,
              prompt refinement, and model application.
          custom_css: CSS passed to ``gr.Blocks(css=...)`` — presumably
              a CSS string from the ``custom_css`` module; confirm there.
      """
      self.prompt_refiner = prompt_refiner
      with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
          # CONTAINER 1
          with gr.Column(elem_classes=["container", "title-container"]):
              gr.Markdown("# PROMPT++")
              gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
              gr.Markdown("Learn how to generate an improved version of your prompts.")

          # CONTAINER 2: prompt input + automatic metaprompt choice
          with gr.Column(elem_classes=["container", "input-container"]):
            prompt_text = gr.Textbox(label="Type your prompt (or leave empty to see metaprompt)",lines=5)
            automatic_metaprompt_button = gr.Button("Automatic Choice for Refinement Method ")
            #with gr.Row(elem_classes=["container2"]):                
            MetaPrompt_analysis = gr.Markdown()

          # CONTAINER 3: manual metaprompt selection + refine trigger
         # with gr.Column(elem_classes=["container"]):    
          with gr.Column(elem_classes=["container","meta-container"]):
              meta_prompt_choice = gr.Radio(
                  choices=metaprompt_list,
                  label="Choose Meta Prompt",
                  value=metaprompt_list[0],
                  elem_classes=["no-background", "radio-group"]
              )
              refine_button = gr.Button("Refine Prompt")
              with gr.Accordion("Metaprompt Explanation", open=False, visible=True): 
                  gr.Markdown(explanation_markdown)         
                  gr.Examples(
                      examples=examples,
                      inputs=[prompt_text, meta_prompt_choice]
                  )               

          # Refinement results: evaluation markdown + editable refined prompt
          with gr.Column(elem_classes=["container", "analysis-container"]):           
              gr.Markdown(" ")
              prompt_evaluation = gr.Markdown()  # Added this component
              gr.Markdown("### Refined Prompt")
              refined_prompt = gr.Textbox(
                  label=" ",
                  interactive=True,
                  show_label=True,
                  show_copy_button=True,
              )
              #gr.Markdown("### Explanation of Refinements")
              explanation_of_refinements = gr.Markdown()


          # Model selection + side-by-side original/refined outputs
          with gr.Column(elem_classes=["container", "model-container"]):
              with gr.Row():
                  apply_model = gr.Dropdown(
                      choices=models,
                      value=models[0] if models else None,
                      label="Choose the Model",
                      container=False,
                      scale=1,
                      min_width=300
                  )
                  apply_button = gr.Button("Apply Prompts")

              gr.Markdown("### Prompts on Chosen Model")
              with gr.Tabs():
                  with gr.TabItem("Original Prompt Output"):
                      original_output = gr.Markdown()
                  with gr.TabItem("Refined Prompt Output"):
                      refined_output = gr.Markdown()

          # Raw backend response, for debugging/inspection
          with gr.Accordion("Full Response JSON", open=False, visible=True):
              full_response_json = gr.JSON()

          # Button click handlers
          automatic_metaprompt_button.click(
              fn=self.automatic_metaprompt,
              inputs=[prompt_text],
              outputs=[MetaPrompt_analysis, meta_prompt_choice]
          )

          refine_button.click(
              fn=self.refine_prompt,
              inputs=[prompt_text, meta_prompt_choice],
              outputs=[
                  prompt_evaluation,
                  refined_prompt,
                  explanation_of_refinements,
                  full_response_json
              ]
          )

          apply_button.click(
              fn=self.apply_prompts,
              inputs=[prompt_text, refined_prompt, apply_model],
              outputs=[original_output, refined_output]
          )

          gr.HTML(
              "<p style='text-align: center; color:orange;'>⚠ This space is in progress, and we're actively working on it, so you might find some bugs! Please report any issues you have in the Community tab to help us make it better for all.</p>"
          )

  def automatic_metaprompt(self, prompt: str) -> tuple:
      """Handle automatic metaprompt selection.

      Args:
          prompt: Raw user prompt to analyze.

      Returns:
          ``(analysis_markdown, recommended_key)``; the key is ``None``
          when the prompt is empty/whitespace or an error occurred.
      """
      try:
          if not prompt.strip():
              return "Please enter a prompt to analyze.", None

          # Backend returns (analysis markdown, metaprompt key) —
          # matches the (MetaPrompt_analysis, meta_prompt_choice) outputs.
          metaprompt_analysis, recommended_key = self.prompt_refiner.automatic_metaprompt(prompt)
          return metaprompt_analysis, recommended_key

      except Exception as e:
          error_message = f"Error in automatic metaprompt: {str(e)}"
          return error_message, None

  def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
      """Handle manual prompt refinement.

      Args:
          prompt: Raw user prompt.
          meta_prompt_choice: Selected metaprompt key from the radio group.

      Returns:
          4-tuple ``(initial_evaluation, refined_prompt,
          explanation_of_refinements, full_response)``; on empty input
          or error the trailing elements are empty strings and ``{}``.
      """
      try:
          if not prompt.strip():
              return (
                  "No prompt provided.",
                  "",
                  "",
                  {}
              )

          result = self.prompt_refiner.refine_prompt(prompt, meta_prompt_choice)
          return (
              result[0],  # initial_prompt_evaluation
              result[1],  # refined_prompt
              result[2],  # explanation_of_refinements
              result[3]   # full_response
          )
      except Exception as e:
          error_message = f"Error in refine_prompt: {str(e)}"
          return error_message, "", "", {}

  def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str) -> tuple:
      """Apply both original and refined prompts to the selected model.

      Args:
          original_prompt: The user's untouched prompt.
          refined_prompt: The refined prompt (possibly edited in the UI).
          model: Model identifier chosen in the dropdown.

      Returns:
          ``(original_output, refined_output)``; on error both elements
          carry the same error message.
      """
      try:
          if not original_prompt.strip() or not refined_prompt.strip():
              return "No prompt provided.", "No prompt provided."

          original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
          refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)

          return original_output, refined_output
      except Exception as e:
          error_message = f"Error applying prompts: {str(e)}"
          return error_message, error_message

  def launch(self, share=False) -> None:
      """Launch the Gradio interface.

      Args:
          share: Forwarded to ``Blocks.launch``; when True, Gradio
              creates a public share link.
      """
      self.interface.launch(share=share)


if __name__ == '__main__':
  from variables import api_token, meta_prompts, metaprompt_explanations

  # Wire the refiner backend into the UI and serve it with a public link.
  refiner = PromptRefiner(api_token, meta_prompts, metaprompt_explanations)
  app = GradioInterface(refiner, custom_css)
  app.launch(share=True)