{
  "best_metric": 1.3635696172714233,
  "best_model_checkpoint": "./llama3/21-04-24-Weni-WeniGPT-Agents-Llama3-1.0.9-SFT_Experiment with SFT and Llama3 and updates in requirements-2_max_steps-330_batch_8_2024-04-21_ppid_9/checkpoint-90",
  "epoch": 1.6071428571428572,
  "eval_steps": 30,
  "global_step": 90,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17857142857142858,
      "grad_norm": 0.6299236416816711,
      "learning_rate": 0.00015555555555555556,
      "loss": 1.9266,
      "step": 10
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.4194231629371643,
      "learning_rate": 0.00019969365006623072,
      "loss": 1.5439,
      "step": 20
    },
    {
      "epoch": 0.5357142857142857,
      "grad_norm": 0.44722992181777954,
      "learning_rate": 0.00019845231970029773,
      "loss": 1.3488,
      "step": 30
    },
    {
      "epoch": 0.5357142857142857,
      "eval_loss": 1.3763371706008911,
      "eval_runtime": 4.9516,
      "eval_samples_per_second": 9.29,
      "eval_steps_per_second": 2.423,
      "step": 30
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.4362790286540985,
      "learning_rate": 0.00019626873324023915,
      "loss": 1.3256,
      "step": 40
    },
    {
      "epoch": 0.8928571428571429,
      "grad_norm": 0.435635507106781,
      "learning_rate": 0.00019316378910323102,
      "loss": 1.2663,
      "step": 50
    },
    {
      "epoch": 1.0714285714285714,
      "grad_norm": 0.5550065636634827,
      "learning_rate": 0.00018916720373012426,
      "loss": 1.1798,
      "step": 60
    },
    {
      "epoch": 1.0714285714285714,
      "eval_loss": 1.3427196741104126,
      "eval_runtime": 4.9453,
      "eval_samples_per_second": 9.302,
      "eval_steps_per_second": 2.427,
      "step": 60
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.571713387966156,
      "learning_rate": 0.00018431722717876384,
      "loss": 1.0592,
      "step": 70
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.5714108943939209,
      "learning_rate": 0.0001786602770447513,
      "loss": 0.98,
      "step": 80
    },
    {
      "epoch": 1.6071428571428572,
      "grad_norm": 0.5294617414474487,
      "learning_rate": 0.00017225049421328023,
      "loss": 0.9664,
      "step": 90
    },
    {
      "epoch": 1.6071428571428572,
      "eval_loss": 1.3635696172714233,
      "eval_runtime": 4.9445,
      "eval_samples_per_second": 9.303,
      "eval_steps_per_second": 2.427,
      "step": 90
    }
  ],
  "logging_steps": 10,
  "max_steps": 330,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 90,
  "total_flos": 7.318524682829824e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}