Spaces:
Running
Running
File size: 2,262 Bytes
6babc2d b25d878 53f56d7 6babc2d 53f56d7 420f262 53f56d7 420f262 16d2c99 ec08f4a 53f56d7 6babc2d 420f262 53f56d7 6babc2d 70fc62f 420f262 70fc62f 420f262 70fc62f a3d4f67 420f262 1584099 53f56d7 b25d878 420f262 b25d878 53f56d7 6babc2d 420f262 53f56d7 3fb7bf3 420f262 4bba9ed 5e0769d 4bba9ed 420f262 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 |
import json
import os
# Load prompt templates from the PROMPT_TEMPLATES environment variable.
# The '{}' default means a missing variable yields an empty template set
# rather than a JSON parse error.
templates_json = os.getenv('PROMPT_TEMPLATES', '{}')
try:
    prompt_data = json.loads(templates_json)
except json.JSONDecodeError:
    # Malformed JSON degrades gracefully to "no templates available".
    prompt_data = {}
# Template names. list(prompt_data) already yields [] for an empty dict,
# so the original `... if prompt_data else []` fallback was redundant.
metaprompt_list = list(prompt_data)
print(metaprompt_list)
# Map each template name to its human-readable description.
# A dict comprehension over an empty dict is already {}, so the original
# trailing `if prompt_data else {}` guard was redundant.
metaprompt_explanations = {
    key: data.get("description", "No description available")
    for key, data in prompt_data.items()
}
# Render the explanations as a markdown bullet list (one "- **name**: desc"
# line per template). A generator avoids building a throwaway list.
explanation_markdown = "".join(
    f"- **{key}**: {value}\n"
    for key, value in metaprompt_explanations.items()
)
# Hugging Face model ids offered as generation backends: a mix of
# Llama 2 / 3 / 3.1 / 3.2, Zephyr, Qwen 2.5, and Phi-3.5 checkpoints,
# mostly chat/instruct variants, ordered roughly by family and size.
models = [
    "meta-llama/Meta-Llama-3-70B-Instruct",
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "meta-llama/Llama-3.1-70B-Instruct",
    "meta-llama/Llama-3.1-8B-Instruct",
    "meta-llama/Llama-3.2-3B-Instruct",
    "meta-llama/Llama-3.2-1B-Instruct",
    "meta-llama/Llama-2-13b-chat-hf",
    "meta-llama/Llama-2-7b-chat-hf",
    "HuggingFaceH4/zephyr-7b-beta",
    "HuggingFaceH4/zephyr-7b-alpha",
    "Qwen/Qwen2.5-72B-Instruct",
    "Qwen/Qwen2.5-1.5B",  # NOTE(review): base model, not an -Instruct variant — confirm intended
    "microsoft/Phi-3.5-mini-instruct"
]
# Collect UI example rows from the templates: each row is
# [example_text, template_key]. Some templates store an example as a list
# whose first element is the text; take element 0 in that case.
# NOTE(review): assumes a list-shaped example is non-empty — confirm the
# PROMPT_TEMPLATES schema upstream.
# The original `if template_examples:` guard was redundant (iterating an
# empty list contributes nothing), so a single comprehension suffices.
examples = [
    [example[0] if isinstance(example, list) else example, key]
    for key, data in prompt_data.items()
    for example in data.get("examples", [])
]
# A Hugging Face API token is mandatory: fail fast at startup with a clear
# message instead of letting later API calls fail mysteriously.
api_token = os.environ.get('HF_API_TOKEN')
if api_token in (None, ''):
    raise ValueError("HF_API_TOKEN not found in environment variables")
# Map each template name to its prompt template body.
# A dict comprehension over an empty dict already yields {}, so the original
# trailing `if prompt_data else {}` guard was redundant.
meta_prompts = {
    key: data.get("template", "No template available")
    for key, data in prompt_data.items()
}
# Runtime overrides pulled from the environment. The refiner model falls
# back to a Llama 3.1 8B default; the prompt-text overrides below default
# to None when their variables are unset.
_env = os.environ.get
prompt_refiner_model = _env('prompt_refiner_model', 'meta-llama/Llama-3.1-8B-Instruct')
print(f"prompt_refiner_model used :{prompt_refiner_model}")
echo_prompt_refiner = _env('echo_prompt_refiner')
openai_metaprompt = _env('openai_metaprompt')
advanced_meta_prompt = _env('advanced_meta_prompt')