eduagarcia committed
Commit 0b962c6
Parent: 733bd61

Update status of vilm/Quyen-Pro-Max-v0.1_eval_request_1366c92_False_bfloat16_Original to FAILED

vilm/Quyen-Pro-Max-v0.1_eval_request_1366c92_False_bfloat16_Original.json CHANGED
@@ -8,10 +8,12 @@
   "architectures": "Qwen2ForCausalLM",
   "weight_type": "Original",
   "main_language": "English",
-  "status": "RUNNING",
+  "status": "FAILED",
   "submitted_time": "2024-09-15T04:13:04Z",
   "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
   "source": "leaderboard",
   "job_id": 1070,
-  "job_start_time": "2024-09-19T02-39-58.642137"
+  "job_start_time": "2024-09-19T02-39-58.642137",
+  "error_msg": "CUDA out of memory. Tried to allocate 384.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.49 GiB is free. Process 1106294 has 68.85 GiB memory in use. Of the allocated memory 67.33 GiB is allocated by PyTorch, and 56.75 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
+  "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3903, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4377, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 933, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 384.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.49 GiB is free. Process 1106294 has 68.85 GiB memory in use. Of the allocated memory 67.33 GiB is allocated by PyTorch, and 56.75 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
 }
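
The error_msg points at the allocator knobs `max_split_size_mb` / `PYTORCH_CUDA_ALLOC_CONF`, but note the numbers in the message itself: only 56.75 MiB is reserved-but-unallocated, while process 1106294 already holds 68.85 GiB of the 79.35 GiB card, so the GPU was simply close to full rather than fragmented. For reference, a minimal sketch of the mitigations the message names, assuming the model is loaded through transformers as in the traceback; the `device_map` argument is illustrative, not necessarily what the eval bot passes:

```python
import os

# Named in the error message: cap the allocator's block size to reduce
# fragmentation. Must be set before the first CUDA allocation, so before
# torch initializes CUDA. The 512 MiB value is an assumption.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"

import torch
from transformers import AutoModelForCausalLM

# bfloat16 matches the "_bfloat16_" tag in this eval request's filename.
# device_map="auto" (via accelerate) can spill layers to CPU instead of
# failing outright when GPU 0 is nearly full -- an assumption here, not
# confirmed behavior of the leaderboard bot.
model = AutoModelForCausalLM.from_pretrained(
    "vilm/Quyen-Pro-Max-v0.1",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
```

Given the memory already in use by another process in this failure, freeing or isolating that process would likely matter more than allocator tuning.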
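The diff itself is a mechanical status flip plus two new diagnostic fields. A hypothetical sketch of that update, using only the fields visible above (the helper name and file handling are assumptions, not the actual evaluate_llms.py code):

```python
import json

def mark_failed(request_path: str, error_msg: str, traceback_str: str) -> None:
    """Flip an eval request from RUNNING to FAILED and attach diagnostics.

    Hypothetical helper mirroring the fields changed in this commit; the
    real llm_leaderboard_eval_bot may structure this differently.
    """
    with open(request_path, encoding="utf-8") as f:
        request = json.load(f)

    request["status"] = "FAILED"          # was "RUNNING"
    request["error_msg"] = error_msg      # new field in this commit
    request["traceback"] = traceback_str  # new field in this commit

    with open(request_path, "w", encoding="utf-8") as f:
        json.dump(request, f, indent=2, ensure_ascii=False)
```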