eduagarcia committed
Commit c821a81 • 1 Parent(s): 556bd8c

fix dtype of models

Files changed (43)
  1. J-LAB/{BRisa-7B-Instruct-v0.2_eval_request_False_float16_Original.json → BRisa-7B-Instruct-v0.2_eval_request_False_bfloat16_Original.json} +2 -2
  2. JJhooww/Mistral-7B-v0.2-Base_ptbr_eval_request_False_bfloat16_Original.json +17 -0
  3. JJhooww/Mistral_Relora_Step2k_eval_request_False_bfloat16_Original.json +17 -0
  4. Qwen/Qwen-14B_eval_request_False_float16_Original.json +17 -0
  5. Qwen/Qwen-1_8B-Chat_eval_request_False_float16_Original.json +17 -0
  6. Qwen/Qwen-1_8B_eval_request_False_float16_Original.json +17 -0
  7. Qwen/Qwen-72B-Chat_eval_request_False_float16_Original.json +17 -0
  8. Qwen/Qwen-72B_eval_request_False_float16_Original.json +17 -0
  9. Qwen/Qwen-7B-Chat_eval_request_False_float16_Original.json +17 -0
  10. Qwen/Qwen-7B_eval_request_False_float16_Original.json +17 -0
  11. Weni/WeniGPT-2.8.1-Zephyr-7B-zephyr-prompt-binarized_eval_request_False_4bit_Adapter.json +1 -1
  12. Weni/WeniGPT-Agents-Zephyr-1.0.17-KTO_eval_request_False_bfloat16_Adapter.json +1 -1
  13. bardsai/jaskier-7b-dpo-v5.6_eval_request_False_float16_Original.json +17 -0
  14. cnmoro/Mistral-7B-Portuguese_eval_request_False_bfloat16_Original.json +17 -0
  15. google/mt5-base_eval_request_False_float16_Original.json +17 -0
  16. google/mt5-large_eval_request_False_bfloat16_Original.json +0 -17
  17. google/mt5-large_eval_request_False_float16_Original.json +17 -0
  18. google/mt5-small_eval_request_False_float16_Original.json +17 -0
  19. google/{mt5-xl_eval_request_False_bfloat16_Original.json → mt5-xl_eval_request_False_float16_Original.json} +2 -2
  20. google/{mt5-xxl_eval_request_False_bfloat16_Original.json → mt5-xxl_eval_request_False_float16_Original.json} +2 -2
  21. google/{t5-v1_1-base_eval_request_False_bfloat16_Original.json → t5-v1_1-base_eval_request_False_float16_Original.json} +2 -2
  22. google/{t5-v1_1-large_eval_request_False_bfloat16_Original.json → t5-v1_1-large_eval_request_False_float16_Original.json} +2 -2
  23. google/{t5-v1_1-small_eval_request_False_bfloat16_Original.json → t5-v1_1-small_eval_request_False_float16_Original.json} +2 -2
  24. google/{t5-v1_1-xl_eval_request_False_bfloat16_Original.json → t5-v1_1-xl_eval_request_False_float16_Original.json} +2 -2
  25. google/{t5-v1_1-xxl_eval_request_False_bfloat16_Original.json → t5-v1_1-xxl_eval_request_False_float16_Original.json} +2 -2
  26. google/umt5-base_eval_request_False_bfloat16_Original.json +0 -17
  27. google/umt5-base_eval_request_False_float16_Original.json +17 -0
  28. google/umt5-small_eval_request_False_bfloat16_Original.json +0 -17
  29. google/umt5-small_eval_request_False_float16_Original.json +17 -0
  30. google/{umt5-xxl_eval_request_False_bfloat16_Original.json → umt5-xxl_eval_request_False_float16_Original.json} +2 -2
  31. internlm/internlm2-chat-1_8b_eval_request_False_bfloat16_Original.json +17 -0
  32. internlm/internlm2-chat-20b_eval_request_False_bfloat16_Original.json +17 -0
  33. internlm/internlm2-chat-7b_eval_request_False_bfloat16_Original.json +17 -0
  34. maritaca-ai/sabia-7b_eval_request_False_bfloat16_Original.json +17 -0
  35. mistral-community/{Mixtral-8x22B-v0.1_eval_request_False_float16_Original.json → Mixtral-8x22B-v0.1_eval_request_False_bfloat16_Original.json} +2 -2
  36. nicolasdec/CabraMistral7b_eval_request_False_bfloat16_Original.json +5 -19
  37. pfnet/plamo-13b_eval_request_False_bfloat16_Original.json +17 -0
  38. pfnet/plamo-13b_eval_request_False_float16_Original.json +0 -19
  39. rhaymison/Mistral-portuguese-luana-7b-Mathematics_eval_request_False_bfloat16_Original.json +17 -0
  40. rhaymison/Mistral-portuguese-luana-7b-mental-health_eval_request_False_bfloat16_Original.json +17 -0
  41. rhaymison/Qwen-portuguese-luana-7b_eval_request_False_bfloat16_Original.json +17 -0
  42. rhaymison/gemma-portuguese-luana-2b_eval_request_False_bfloat16_Original.json +17 -0
  43. stabilityai/stablelm-2-1_6b_eval_request_False_float16_Original.json +17 -0
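The edits below all follow one pattern: the `precision` field of each eval request JSON (and the precision suffix in its filename) is corrected to the dtype the model actually declares, and the corrected requests are reset to `PENDING` with a fresh `submitted_time`. A minimal sketch of how such a fix could be automated is shown here; it assumes the dtype can be read from each model's `config.json` on the Hugging Face Hub, and the helper names (`model_dtype`, `fix_request_file`) are hypothetical, not the script actually used for this commit.

```python
# Hypothetical sketch (not the script behind this commit): align each eval
# request file's "precision" with the torch_dtype declared by the model on
# the Hugging Face Hub, and rename the file to match.
import json
import os

from huggingface_hub import hf_hub_download


def model_dtype(repo_id: str):
    """Return the torch_dtype string from the model's config.json, if any."""
    config_path = hf_hub_download(repo_id=repo_id, filename="config.json")
    with open(config_path, encoding="utf-8") as f:
        return json.load(f).get("torch_dtype")  # e.g. "bfloat16" or "float16"


def fix_request_file(path: str) -> None:
    """Rewrite the precision field in place and rename the request file."""
    with open(path, encoding="utf-8") as f:
        request = json.load(f)

    dtype = model_dtype(request["model"])
    if not dtype or dtype == request["precision"]:
        return  # nothing to fix

    old_precision = request["precision"]
    request["precision"] = dtype
    with open(path, "w", encoding="utf-8") as f:
        json.dump(request, f, indent=4, ensure_ascii=False)
        f.write("\n")

    # Filenames encode the precision, e.g. ..._False_float16_Original.json
    new_path = path.replace(f"_{old_precision}_", f"_{dtype}_")
    if new_path != path:
        os.rename(path, new_path)
```

Applied over every `*_eval_request_*.json` in the repository, a pass like this would produce exactly the kind of precision edits and renames listed above.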
J-LAB/{BRisa-7B-Instruct-v0.2_eval_request_False_float16_Original.json → BRisa-7B-Instruct-v0.2_eval_request_False_bfloat16_Original.json} RENAMED
@@ -3,13 +3,13 @@
  "base_model": "",
  "revision": "main",
  "private": false,
- "precision": "float16",
+ "precision": "bfloat16",
  "params": 7.242,
  "architectures": "MistralForCausalLM",
  "weight_type": "Original",
  "main_language": "Portuguese",
  "status": "PENDING",
- "submitted_time": "2024-04-18T13:21:31Z",
+ "submitted_time": "2024-04-18T23:08:40Z",
  "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": -1,
JJhooww/Mistral-7B-v0.2-Base_ptbr_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "JJhooww/Mistral-7B-v0.2-Base_ptbr",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.242,
+ "architectures": "MistralForCausalLM",
+ "weight_type": "Original",
+ "main_language": "Portuguese",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🆎 : language adapted models (FP, FT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null
+ }
JJhooww/Mistral_Relora_Step2k_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "JJhooww/Mistral_Relora_Step2k",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.242,
+ "architectures": "MistralForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🆎 : language adapted models (FP, FT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "Portuguese"
+ }
Qwen/Qwen-14B_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen-14B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 14.167,
+ "architectures": "QWenLMHeadModel",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🟢 : pretrained",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "Chinese"
+ }
Qwen/Qwen-1_8B-Chat_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen-1_8B-Chat",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 1.837,
+ "architectures": "QWenLMHeadModel",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "Chinese"
+ }
Qwen/Qwen-1_8B_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen-1_8B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 1.837,
+ "architectures": "QWenLMHeadModel",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🟢 : pretrained",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "Chinese"
+ }
Qwen/Qwen-72B-Chat_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen-72B-Chat",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 72.288,
+ "architectures": "QWenLMHeadModel",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "Chinese"
+ }
Qwen/Qwen-72B_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen-72B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 72.288,
+ "architectures": "QWenLMHeadModel",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🟢 : pretrained",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "Chinese"
+ }
Qwen/Qwen-7B-Chat_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen-7B-Chat",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 7.721,
+ "architectures": "QWenLMHeadModel",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "Chinese"
+ }
Qwen/Qwen-7B_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen-7B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 7.721,
+ "architectures": "QWenLMHeadModel",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🟢 : pretrained",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "Chinese"
+ }
Weni/WeniGPT-2.8.1-Zephyr-7B-zephyr-prompt-binarized_eval_request_False_4bit_Adapter.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.0,
  "architectures": "?",
  "weight_type": "Adapter",
- "main_language": "Other",
+ "main_language": "Portuguese",
  "status": "FINISHED",
  "submitted_time": "2024-04-09T03:00:51Z",
  "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
Weni/WeniGPT-Agents-Zephyr-1.0.17-KTO_eval_request_False_bfloat16_Adapter.json CHANGED
@@ -7,7 +7,7 @@
  "params": 0,
  "architectures": "?",
  "weight_type": "Adapter",
- "main_language": "Other",
+ "main_language": "Portuguese",
  "status": "FINISHED",
  "submitted_time": "2024-04-09T02:55:19Z",
  "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
bardsai/jaskier-7b-dpo-v5.6_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "bardsai/jaskier-7b-dpo-v5.6",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 7.242,
+ "architectures": "MistralForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🤝 : base merges and moerges",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
cnmoro/Mistral-7B-Portuguese_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "cnmoro/Mistral-7B-Portuguese",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.242,
+ "architectures": "MistralForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "Portuguese"
+ }
google/mt5-base_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "google/mt5-base",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 0,
+ "architectures": "MT5ForConditionalGeneration",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🟢 : pretrained",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
google/mt5-large_eval_request_False_bfloat16_Original.json DELETED
@@ -1,17 +0,0 @@
- {
- "model": "google/mt5-large",
- "base_model": "",
- "revision": "main",
- "private": false,
- "precision": "bfloat16",
- "params": 0,
- "architectures": "MT5ForConditionalGeneration",
- "weight_type": "Original",
- "status": "RUNNING",
- "submitted_time": "2024-02-11T13:35:35Z",
- "model_type": "🟢 : pretrained",
- "source": "script",
- "job_id": 481,
- "job_start_time": "2024-04-18T06-38-12.484907",
- "main_language": "English"
- }
google/mt5-large_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "google/mt5-large",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 0,
+ "architectures": "MT5ForConditionalGeneration",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🟢 : pretrained",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
google/mt5-small_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "google/mt5-small",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 0,
+ "architectures": "MT5ForConditionalGeneration",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🟢 : pretrained",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
google/{mt5-xl_eval_request_False_bfloat16_Original.json → mt5-xl_eval_request_False_float16_Original.json} RENAMED
@@ -3,12 +3,12 @@
  "base_model": "",
  "revision": "main",
  "private": false,
- "precision": "bfloat16",
+ "precision": "float16",
  "params": 0,
  "architectures": "MT5ForConditionalGeneration",
  "weight_type": "Original",
  "status": "PENDING",
- "submitted_time": "2024-02-11T13:36:37Z",
+ "submitted_time": "2024-04-18T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
google/{mt5-xxl_eval_request_False_bfloat16_Original.json → mt5-xxl_eval_request_False_float16_Original.json} RENAMED
@@ -3,12 +3,12 @@
  "base_model": "",
  "revision": "main",
  "private": false,
- "precision": "bfloat16",
+ "precision": "float16",
  "params": 0,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
  "status": "PENDING",
- "submitted_time": "2024-02-11T13:36:42Z",
+ "submitted_time": "2024-04-18T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
google/{t5-v1_1-base_eval_request_False_bfloat16_Original.json → t5-v1_1-base_eval_request_False_float16_Original.json} RENAMED
@@ -3,12 +3,12 @@
  "base_model": "",
  "revision": "main",
  "private": false,
- "precision": "bfloat16",
+ "precision": "float16",
  "params": 0,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
  "status": "PENDING",
- "submitted_time": "2024-02-11T13:36:14Z",
+ "submitted_time": "2024-04-18T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
google/{t5-v1_1-large_eval_request_False_bfloat16_Original.json → t5-v1_1-large_eval_request_False_float16_Original.json} RENAMED
@@ -3,12 +3,12 @@
  "base_model": "",
  "revision": "main",
  "private": false,
- "precision": "bfloat16",
+ "precision": "float16",
  "params": 0,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
  "status": "PENDING",
- "submitted_time": "2024-02-11T13:36:23Z",
+ "submitted_time": "2024-04-18T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
google/{t5-v1_1-small_eval_request_False_bfloat16_Original.json → t5-v1_1-small_eval_request_False_float16_Original.json} RENAMED
@@ -3,12 +3,12 @@
  "base_model": "",
  "revision": "main",
  "private": false,
- "precision": "bfloat16",
+ "precision": "float16",
  "params": 0,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
  "status": "PENDING",
- "submitted_time": "2024-02-11T13:36:20Z",
+ "submitted_time": "2024-04-18T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
google/{t5-v1_1-xl_eval_request_False_bfloat16_Original.json → t5-v1_1-xl_eval_request_False_float16_Original.json} RENAMED
@@ -3,12 +3,12 @@
  "base_model": "",
  "revision": "main",
  "private": false,
- "precision": "bfloat16",
+ "precision": "float16",
  "params": 0,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
  "status": "PENDING",
- "submitted_time": "2024-02-11T13:36:27Z",
+ "submitted_time": "2024-04-18T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
google/{t5-v1_1-xxl_eval_request_False_bfloat16_Original.json → t5-v1_1-xxl_eval_request_False_float16_Original.json} RENAMED
@@ -3,12 +3,12 @@
  "base_model": "",
  "revision": "main",
  "private": false,
- "precision": "bfloat16",
+ "precision": "float16",
  "params": 0,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
  "status": "PENDING",
- "submitted_time": "2024-02-11T13:36:31Z",
+ "submitted_time": "2024-04-18T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
google/umt5-base_eval_request_False_bfloat16_Original.json DELETED
@@ -1,17 +0,0 @@
- {
- "model": "google/umt5-base",
- "base_model": "",
- "revision": "main",
- "private": false,
- "precision": "bfloat16",
- "params": 0,
- "architectures": "UMT5ForConditionalGeneration",
- "weight_type": "Original",
- "status": "RUNNING",
- "submitted_time": "2024-02-05T23:18:55Z",
- "model_type": "🟢 : pretrained",
- "source": "script",
- "job_id": 474,
- "job_start_time": "2024-04-17T13-12-53.518606",
- "main_language": "English"
- }
google/umt5-base_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "google/umt5-base",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 0,
+ "architectures": "UMT5ForConditionalGeneration",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🟢 : pretrained",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
google/umt5-small_eval_request_False_bfloat16_Original.json DELETED
@@ -1,17 +0,0 @@
- {
- "model": "google/umt5-small",
- "base_model": "",
- "revision": "main",
- "private": false,
- "precision": "bfloat16",
- "params": 0,
- "architectures": "UMT5ForConditionalGeneration",
- "weight_type": "Original",
- "status": "RUNNING",
- "submitted_time": "2024-02-05T23:18:45Z",
- "model_type": "🟢 : pretrained",
- "source": "script",
- "job_id": 474,
- "job_start_time": "2024-04-17T02-59-23.529830",
- "main_language": "English"
- }
google/umt5-small_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "google/umt5-small",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 0,
+ "architectures": "UMT5ForConditionalGeneration",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🟢 : pretrained",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
google/{umt5-xxl_eval_request_False_bfloat16_Original.json → umt5-xxl_eval_request_False_float16_Original.json} RENAMED
@@ -3,12 +3,12 @@
  "base_model": "",
  "revision": "main",
  "private": false,
- "precision": "bfloat16",
+ "precision": "float16",
  "params": 0,
  "architectures": "UMT5ForConditionalGeneration",
  "weight_type": "Original",
  "status": "PENDING",
- "submitted_time": "2024-02-11T13:36:51Z",
+ "submitted_time": "2024-04-18T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
internlm/internlm2-chat-1_8b_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "internlm/internlm2-chat-1_8b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 1.889,
+ "architectures": "InternLM2ForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "?"
+ }
internlm/internlm2-chat-20b_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "internlm/internlm2-chat-20b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 20.0,
+ "architectures": "InternLM2ForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "?"
+ }
internlm/internlm2-chat-7b_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "internlm/internlm2-chat-7b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.0,
+ "architectures": "InternLM2ForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "?"
+ }
maritaca-ai/sabia-7b_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "maritaca-ai/sabia-7b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 6.738,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🆎 : language adapted models (FP, FT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "Portuguese"
+ }
mistral-community/{Mixtral-8x22B-v0.1_eval_request_False_float16_Original.json → Mixtral-8x22B-v0.1_eval_request_False_bfloat16_Original.json} RENAMED
@@ -3,13 +3,13 @@
  "base_model": "",
  "revision": "main",
  "private": false,
- "precision": "float16",
+ "precision": "bfloat16",
  "params": 140.621,
  "architectures": "MixtralForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
  "status": "PENDING",
- "submitted_time": "2024-04-12T13:24:57Z",
+ "submitted_time": "2024-04-18T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "leaderboard",
  "job_id": -1,
nicolasdec/CabraMistral7b_eval_request_False_bfloat16_Original.json CHANGED
@@ -1,5 +1,5 @@
  {
- "model": "nicolasdec/CabraMistral7b",
+ "model": "nicolasdec/Cabramistral7b",
  "base_model": "",
  "revision": "main",
  "private": false,
@@ -7,25 +7,11 @@
  "params": 7.242,
  "architectures": "MistralForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
- "submitted_time": "2024-04-02T01:16:18Z",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
- "job_id": 352,
- "job_start_time": "2024-04-02T10-38-23.288377",
- "eval_version": "1.1.0",
- "result_metrics": {
- "enem_challenge": 0.6081175647305809,
- "bluex": 0.46870653685674546,
- "oab_exams": 0.38587699316628704,
- "assin2_rte": 0.9027403846153846,
- "assin2_sts": 0.7224760231184958,
- "faquad_nli": 0.64349376114082,
- "hatebr_offensive": 0.8314517624968287,
- "portuguese_hate_speech": 0.6482110188573036,
- "tweetsentbr": 0.647986430647758
- },
- "result_metrics_average": 0.6510067195144671,
- "result_metrics_npm": 0.48247175601183667,
+ "job_id": -1,
+ "job_start_time": null,
  "main_language": "Portuguese"
  }
pfnet/plamo-13b_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "pfnet/plamo-13b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 13.1,
+ "architectures": "PlamoForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🟢 : pretrained",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
pfnet/plamo-13b_eval_request_False_float16_Original.json DELETED
@@ -1,19 +0,0 @@
- {
- "model": "pfnet/plamo-13b",
- "base_model": "",
- "revision": "main",
- "private": false,
- "precision": "float16",
- "params": 13.1,
- "architectures": "PlamoForCausalLM",
- "weight_type": "Original",
- "status": "FAILED",
- "submitted_time": "2024-02-05T23:15:14Z",
- "model_type": "🟢 : pretrained",
- "source": "script",
- "job_id": 386,
- "job_start_time": "2024-04-05T12-41-12.299366",
- "main_language": "English",
- "error_msg": "The model class you are passing has a `config_class` attribute that is not consistent with the config class you passed (model has <class 'transformers_modules.pfnet.plamo-13b.88237e8483cdf6672faf3144f76f73f89b96d30c.modeling_plamo.PlamoConfig'> and you passed <class 'transformers_modules.pfnet.plamo-13b.88237e8483cdf6672faf3144f76f73f89b96d30c.modeling_plamo.PlamoConfig'>. Fix one of those so they match!",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 240, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 557, in from_pretrained\n cls.register(config.__class__, model_class, exist_ok=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 583, in register\n raise ValueError(\nValueError: The model class you are passing has a `config_class` attribute that is not consistent with the config class you passed (model has <class 'transformers_modules.pfnet.plamo-13b.88237e8483cdf6672faf3144f76f73f89b96d30c.modeling_plamo.PlamoConfig'> and you passed <class 'transformers_modules.pfnet.plamo-13b.88237e8483cdf6672faf3144f76f73f89b96d30c.modeling_plamo.PlamoConfig'>. Fix one of those so they match!\n"
- }
rhaymison/Mistral-portuguese-luana-7b-Mathematics_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "rhaymison/Mistral-portuguese-luana-7b-Mathematics",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.242,
+ "architectures": "MistralForCausalLM",
+ "weight_type": "Original",
+ "main_language": "Portuguese",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null
+ }
rhaymison/Mistral-portuguese-luana-7b-mental-health_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "rhaymison/Mistral-portuguese-luana-7b-mental-health",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.242,
+ "architectures": "MistralForCausalLM",
+ "weight_type": "Original",
+ "main_language": "Portuguese",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null
+ }
rhaymison/Qwen-portuguese-luana-7b_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "rhaymison/Qwen-portuguese-luana-7b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.721,
+ "architectures": "Qwen2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "Portuguese",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🆎 : language adapted models (FP, FT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null
+ }
rhaymison/gemma-portuguese-luana-2b_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "rhaymison/gemma-portuguese-luana-2b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 2.506,
+ "architectures": "GemmaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "Portuguese",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🆎 : language adapted models (FP, FT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null
+ }
stabilityai/stablelm-2-1_6b_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "stabilityai/stablelm-2-1_6b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 1.645,
+ "architectures": "StableLMEpochForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-04-18T23:08:40Z",
+ "model_type": "🟢 : pretrained",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }