eduagarcia committed on
Commit
86b590f
1 Parent(s): d077302

Update status of v000000/SwallowMaid-8B-L3-SPPO-abliterated_eval_request_False_bfloat16_Original to FINISHED

Browse files
v000000/SwallowMaid-8B-L3-SPPO-abliterated_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,24 @@
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-21T16:41:19Z",
13
  "model_type": "🤝 : base merges and moerges",
14
  "source": "leaderboard",
15
- "job_id": 1134,
16
- "job_start_time": "2024-10-02T05-55-04.159229",
17
- "error_msg": "[Errno 2] No such file or directory: '/workspace/datasets/hf_cache/models--v000000--SwallowMaid-8B-L3-SPPO-abliterated/snapshots/cf14898c7241314fbd9477d48a5c190d3ce02712/config.json'",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 291, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 545, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py\", line 1008, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 567, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 661, in _get_config_dict\n config_dict = cls._dict_from_json_file(resolved_config_file)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 763, in _dict_from_json_file\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nFileNotFoundError: [Errno 2] No such file or directory: '/workspace/datasets/hf_cache/models--v000000--SwallowMaid-8B-L3-SPPO-abliterated/snapshots/cf14898c7241314fbd9477d48a5c190d3ce02712/config.json'\n"
 
 
 
 
 
 
 
 
 
 
 
 
19
  }
 
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "FINISHED",
12
  "submitted_time": "2024-09-21T16:41:19Z",
13
  "model_type": "🤝 : base merges and moerges",
14
  "source": "leaderboard",
15
+ "job_id": 1135,
16
+ "job_start_time": "2024-10-02T05-55-04.169030",
17
+ "eval_version": "1.1.0",
18
+ "result_metrics": {
19
+ "enem_challenge": 0.7116864940517844,
20
+ "bluex": 0.588317107093185,
21
+ "oab_exams": 0.4997722095671982,
22
+ "assin2_rte": 0.9127627391239099,
23
+ "assin2_sts": 0.7293756978101533,
24
+ "faquad_nli": 0.6843906848976986,
25
+ "hatebr_offensive": 0.8631774516440309,
26
+ "portuguese_hate_speech": 0.6597524501389483,
27
+ "tweetsentbr": 0.6415428853547284
28
+ },
29
+ "result_metrics_average": 0.6989753021868486,
30
+ "result_metrics_npm": 0.5506712505370146
31
  }