baconnier committed on
Commit
6babc2d
1 Parent(s): 143e72c

Update variables.py

Browse files
Files changed (1) hide show
  1. variables.py +25 -1
variables.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
 
2
  metaprompt_explanations = {
3
  "star": "Use ECHO when you need a comprehensive, multi-stage approach for complex prompts. It's ideal for tasks requiring in-depth analysis, exploration of multiple alternatives, and synthesis of ideas. Choose this over others when you have time for a thorough refinement process and need to consider various aspects of the prompt.",
@@ -9,6 +11,16 @@ metaprompt_explanations = {
9
  "bolism": "Utilize this method when working with autoregressive language models and when the task requires careful reasoning before conclusions. It's best for prompts that need detailed output formatting. Choose this over others when the prompt's structure and reasoning order are crucial."
10
  }
11
 
 
 
 
 
 
 
 
 
 
 
12
  models = [
13
  # Meta-Llama models (all support system)
14
  "meta-llama/Meta-Llama-3-70B-Instruct",
@@ -32,15 +44,27 @@ models = [
32
  ]
33
 
34
  explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()])
 
 
 
 
 
35
 
36
 
37
- import os
38
 
39
  meta_info=""
40
  api_token = os.getenv('HF_API_TOKEN')
41
  if not api_token:
42
  raise ValueError("HF_API_TOKEN not found in environment variables")
43
 
 
 
 
 
 
 
 
 
44
  metadone = os.getenv('metadone')
45
  prompt_refiner_model = os.getenv('prompt_refiner_model')
46
  echo_prompt_refiner = os.getenv('echo_prompt_refiner')
 
1
+ import json
2
+ import os
3
 
4
  metaprompt_explanations = {
5
  "star": "Use ECHO when you need a comprehensive, multi-stage approach for complex prompts. It's ideal for tasks requiring in-depth analysis, exploration of multiple alternatives, and synthesis of ideas. Choose this over others when you have time for a thorough refinement process and need to consider various aspects of the prompt.",
 
11
  "bolism": "Utilize this method when working with autoregressive language models and when the task requires careful reasoning before conclusions. It's best for prompts that need detailed output formatting. Choose this over others when the prompt's structure and reasoning order are crucial."
12
  }
13
 
14
# Prompt templates are injected through the PROMPT_TEMPLATES environment
# variable as a JSON array of objects; default to an empty list when unset
# so the module still imports in a bare environment.
PROMPT_DATA = json.loads(os.environ.get('PROMPT_TEMPLATES', '[]'))

# Build the explanation lookup: each template is keyed by the first word
# of its lowercased display name, mapped to its description text.
metaprompt_explanations = {}
for template_entry in PROMPT_DATA:
    short_key = template_entry["name"].lower().split()[0]
    metaprompt_explanations[short_key] = template_entry["description"]
22
+
23
+
24
  models = [
25
  # Meta-Llama models (all support system)
26
  "meta-llama/Meta-Llama-3-70B-Instruct",
 
44
  ]
45
 
46
# Markdown bullet list of the refiner explanations, one
# "- **key**: description" line per template in PROMPT_DATA.
# NOTE: a previous revision first computed this from metaprompt_explanations
# and then immediately overwrote it from PROMPT_DATA; the dead first
# assignment is removed here and only the live JSON-derived value is kept.
explanation_markdown = "".join([
    f"- **{prompt['name'].lower().split()[0]}**: {prompt['description']}\n"
    for prompt in PROMPT_DATA
])
52
 
53
 
 
54
 
55
# Placeholder string for metadata filled in elsewhere in the application.
meta_info = ""

# A Hugging Face API token is mandatory; fail fast at import time so a
# misconfigured deployment is caught before any request is attempted.
api_token = os.getenv('HF_API_TOKEN')
if api_token is None or api_token == "":
    raise ValueError("HF_API_TOKEN not found in environment variables")
59
 
60
+
61
# Template-text lookup: the first word of each template's lowercased name
# maps onto the full prompt template string.
meta_prompts = dict(
    (entry["name"].lower().split()[0], entry["template"])
    for entry in PROMPT_DATA
)
66
+
67
+
68
# Optional configuration values read from the environment; any of these
# is None when the corresponding variable is not set.
metadone = os.environ.get('metadone')
prompt_refiner_model = os.environ.get('prompt_refiner_model')
echo_prompt_refiner = os.environ.get('echo_prompt_refiner')