Update variables.py

variables.py  CHANGED  (+37 -65)
@@ -1,80 +1,52 @@
 import json
 import os
 
-[… 3 lines (old 4-6) not captured in this view …]
-    "physics": "Select this when you need a balance between structure and advanced techniques, with a focus on role-playing. It's similar to 'done' but may be more suitable for scientific or technical prompts. Choose this over 'done' for a slightly less complex approach.",
-    "morphosis": "Use this simplified approach for straightforward prompts or when time is limited. It focuses on essential improvements without complex techniques. Prefer this over other methods when you need quick, clear refinements without extensive analysis.",
-    "verse": "Choose this method when you need to analyze and improve a prompt's strengths and weaknesses, with a focus on information flow. It's particularly useful for enhancing the logical structure of prompts. Use this over 'morphosis' when you need more depth but less complexity than 'star'.",
-    "phor": "Employ this advanced approach when you need to combine multiple prompt engineering techniques. It's ideal for complex tasks requiring both clarity and sophisticated prompting methods. Select this over 'star' when you want a more flexible, technique-focused approach.",
-    "bolism": "Utilize this method when working with autoregressive language models and when the task requires careful reasoning before conclusions. It's best for prompts that need detailed output formatting. Choose this over others when the prompt's structure and reasoning order are crucial."
-}
-
-# Load JSON data directly
-PROMPT_DATA = [
-    {
-        "name": name,
-        "description": data["description"],
-        "template": data["template"],
-        "examples": data.get("examples", [])  # Using get() to handle optional examples
-    }
-    for name, data in json.loads(os.getenv('PROMPT_TEMPLATES', '[]')).items()
-]
+# Load JSON data from environment variable and parse it
+json_data = os.getenv('PROMPT_TEMPLATES', '{}')
+prompt_data = json.loads(json_data)
 
-# Create […]
+# Create dictionaries from the JSON data
 metaprompt_explanations = {
-[… line not captured …]
-    for […]
+    key: data["description"]
+    for key, data in prompt_data.items()
 }
 
-[… 25 lines (old 31-55) not captured in this view …]
+# Generate markdown explanation
+explanation_markdown = "".join([
+    f"- **{key}**: {value}\n"
+    for key, value in metaprompt_explanations.items()
+])
+
+# Define models list
+models = [
+    # Meta-Llama models (all support system)
+    "meta-llama/Meta-Llama-3-70B-Instruct",
+    "meta-llama/Meta-Llama-3-8B-Instruct",
+    "meta-llama/Llama-3.1-70B-Instruct",
+    "meta-llama/Llama-3.1-8B-Instruct",
+    "meta-llama/Llama-3.2-3B-Instruct",
+    "meta-llama/Llama-3.2-1B-Instruct",
+    "meta-llama/Llama-2-13b-chat-hf",
+    "meta-llama/Llama-2-7b-chat-hf",
+
+    # HuggingFaceH4 models (support system)
+    "HuggingFaceH4/zephyr-7b-beta",
+    "HuggingFaceH4/zephyr-7b-alpha",
+
+    # Qwen models (support system)
+    "Qwen/Qwen2.5-72B-Instruct",
+    "Qwen/Qwen2.5-1.5B",
+
+    "microsoft/Phi-3.5-mini-instruct"
+]
 
-
+# Check for API token
 api_token = os.getenv('HF_API_TOKEN')
 if not api_token:
     raise ValueError("HF_API_TOKEN not found in environment variables")
 
-
 # Store templates in a dictionary
 meta_prompts = {
-[… line not captured …]
-    for […]
+    key: data["template"]
+    for key, data in prompt_data.items()
 }
-
-
-metadone = os.getenv('metadone')
-prompt_refiner_model = os.getenv('prompt_refiner_model')
-echo_prompt_refiner = os.getenv('echo_prompt_refiner')
-metaprompt1 = os.getenv('metaprompt1')
-loic_metaprompt = os.getenv('loic_metaprompt')
-openai_metaprompt = os.getenv('openai_metaprompt')
-original_meta_prompt = os.getenv('original_meta_prompt')
-new_meta_prompt = os.getenv('new_meta_prompt')
-advanced_meta_prompt = os.getenv('advanced_meta_prompt')
-math_meta_prompt = os.getenv('metamath')
-autoregressive_metaprompt = os.getenv('autoregressive_metaprompt')
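In short, the refactor replaces the hard-coded explanation strings and the eleven per-template os.getenv calls with a single PROMPT_TEMPLATES environment variable holding a JSON object keyed by metaprompt name. A minimal sketch of how the new module consumes such a value follows; the sample keys and strings are illustrative stand-ins, since the Space's real PROMPT_TEMPLATES secret is not part of this commit.

import json
import os

# Illustrative value only: the real secret is configured in the Space's
# settings, not in this commit. The "description"/"template" fields match
# what the new comprehensions read; the template text here is made up.
os.environ['PROMPT_TEMPLATES'] = json.dumps({
    "morphosis": {
        "description": "Simplified approach for straightforward prompts.",
        "template": "Refine the following prompt: {prompt}"
    },
    "verse": {
        "description": "Improves strengths, weaknesses, and information flow.",
        "template": "Analyze and improve the following prompt: {prompt}"
    }
})

# Same logic as the new variables.py
prompt_data = json.loads(os.getenv('PROMPT_TEMPLATES', '{}'))
metaprompt_explanations = {key: data["description"] for key, data in prompt_data.items()}
meta_prompts = {key: data["template"] for key, data in prompt_data.items()}
explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()])

print(explanation_markdown)
# - **morphosis**: Simplified approach for straightforward prompts.
# - **verse**: Improves strengths, weaknesses, and information flow.

Note that the fallback also changed from '[]' to '{}': with the secret unset, the old code would have crashed calling .items() on an empty list, while the new code degrades to empty dictionaries.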