zerozeroz committed (verified)
Commit e281806 · 1 parent: bbcc8d7

Model save
README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ base_model: Qwen/Qwen2.5-Coder-7B
+ library_name: transformers
+ model_name: Qwen2.5-Coder-7B
+ tags:
+ - generated_from_trainer
+ - trl
+ - grpo
+ licence: license
+ ---
+
+ # Model Card for Qwen2.5-Coder-7B
+
+ This model is a fine-tuned version of [Qwen/Qwen2.5-Coder-7B](https://huggingface.co/Qwen/Qwen2.5-Coder-7B).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="zerozeroz/Qwen2.5-Coder-7B", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+
+
+
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
+
+ ### Framework versions
+
+ - TRL: 0.14.0
+ - Transformers: 4.48.1
+ - Pytorch: 2.5.1+cu121
+ - Datasets: 3.1.0
+ - Tokenizers: 0.21.0
+
+ ## Citations
+
+ Cite GRPO as:
+
+ ```bibtex
+ @article{zhihong2024deepseekmath,
+ title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+ author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+ year = 2024,
+ eprint = {arXiv:2402.03300},
+ }
+
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+ title = {{TRL: Transformer Reinforcement Learning}},
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+ year = 2020,
+ journal = {GitHub repository},
+ publisher = {GitHub},
+ howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
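
The model card above only states that training used GRPO via TRL 0.14. For orientation, here is a minimal, hypothetical sketch of how such a run is typically launched with TRL's `GRPOTrainer`. The dataset, the reward function body, and `num_generations` are assumptions (the training log below records two rewards, `correct_code_reward_func` and `len_reward_func`, whose implementations are not part of this commit); the learning rate and the logging/save intervals follow the values recorded in `trainer_state.json`.

```python
# Hypothetical sketch of a GRPO run with TRL 0.14 -- not the author's actual
# script. Grounded bits: base model, TRL version, peak learning rate 5e-7,
# logging_steps=1, save_steps=25 (all from trainer_state.json). Everything
# else (dataset, reward function body, num_generations) is an assumption.
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer

def len_reward_func(completions, **kwargs):
    # Placeholder length-shaping reward; the repo's real "len_reward_func"
    # and "correct_code_reward_func" are not included in this commit.
    return [max(0.0, 1.0 - len(c) / 2048) for c in completions]

train_dataset = load_dataset("trl-lib/tldr", split="train")  # placeholder dataset

training_args = GRPOConfig(
    output_dir="Qwen2.5-Coder-7B-GRPO",
    learning_rate=5e-7,          # peak LR seen in the training log
    logging_steps=1,
    save_steps=25,
    num_generations=8,           # completions sampled per prompt (assumed)
    max_completion_length=2048,  # matches generation_config.json
)

trainer = GRPOTrainer(
    model="Qwen/Qwen2.5-Coder-7B",
    reward_funcs=[len_reward_func],
    args=training_args,
    train_dataset=train_dataset,
)
trainer.train()
```

The logs below indicate the actual run covered 374 training samples for roughly two epochs (125 optimizer steps).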
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 0.0,
+ "train_loss": 2.596636219443127e-05,
+ "train_runtime": 4869.9523,
+ "train_samples": 374,
+ "train_samples_per_second": 0.154,
+ "train_steps_per_second": 0.026
+ }
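
A quick arithmetic cross-check of these throughput numbers against the 125 optimizer steps recorded in `trainer_state.json` (the per-step sample count is an inference, not stated anywhere in this commit):

```python
# Pure arithmetic on the reported metrics; no model code involved.
train_runtime = 4869.9523                  # seconds, from all_results.json
steps = 125                                # global_step in trainer_state.json
print(round(steps / train_runtime, 3))     # 0.026 -> matches train_steps_per_second
samples_processed = 0.154 * train_runtime  # ~750 samples in total
print(round(samples_processed / steps))    # ~6 samples per optimizer step (inferred)
```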
config.json CHANGED
@@ -23,7 +23,7 @@
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.48.1",
-  "use_cache": false,
+  "use_cache": true,
   "use_sliding_window": false,
   "vocab_size": 152064
  }
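
The only substantive change in this file is `use_cache` flipping from `false` back to `true`: it is commonly disabled while training (for example when gradient checkpointing is on) and re-enabled in the saved config so that inference uses the KV cache. A minimal sketch, in case you want to set it explicitly at load time (the dtype choice is just an example):

```python
from transformers import AutoModelForCausalLM

# Keyword arguments passed to from_pretrained override the stored config,
# so use_cache=True (re-)enables the KV cache for fast autoregressive decoding.
model = AutoModelForCausalLM.from_pretrained(
    "zerozeroz/Qwen2.5-Coder-7B",
    torch_dtype="bfloat16",
    use_cache=True,
)
```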
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "max_new_tokens": 2048,
+ "transformers_version": "4.48.1"
+ }
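
These values act as repository-wide defaults for `generate()`: with no explicit arguments, decoding produces up to 2048 new tokens and stops at token id 151643. A brief sketch relying on those defaults (the prompt is illustrative):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("zerozeroz/Qwen2.5-Coder-7B")
model = AutoModelForCausalLM.from_pretrained(
    "zerozeroz/Qwen2.5-Coder-7B", torch_dtype="bfloat16", device_map="auto"
)

inputs = tok("def quicksort(arr):", return_tensors="pt").to(model.device)
# No max_new_tokens / eos_token_id passed here: generate() falls back to the
# values in generation_config.json shown above.
out = model.generate(**inputs)
print(tok.decode(out[0], skip_special_tokens=True))
```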
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 0.0,
+ "train_loss": 2.596636219443127e-05,
+ "train_runtime": 4869.9523,
+ "train_samples": 374,
+ "train_samples_per_second": 0.154,
+ "train_steps_per_second": 0.026
+ }
trainer_state.json ADDED
@@ -0,0 +1,1667 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.992,
5
+ "eval_steps": 500,
6
+ "global_step": 125,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "completion_length": 142.08333587646484,
13
+ "epoch": 0.016,
14
+ "grad_norm": 0.9348842823066913,
15
+ "kl": 0.0,
16
+ "learning_rate": 1.25e-07,
17
+ "loss": -0.0,
18
+ "reward": 0.6988552212715149,
19
+ "reward_std": 0.5108021795749664,
20
+ "rewards/correct_code_reward_func": 0.4583333432674408,
21
+ "rewards/len_reward_func": 0.24052191525697708,
22
+ "step": 1
23
+ },
24
+ {
25
+ "completion_length": 126.56250762939453,
26
+ "epoch": 0.032,
27
+ "grad_norm": 0.8243337506143691,
28
+ "kl": 0.0,
29
+ "learning_rate": 2.5e-07,
30
+ "loss": -0.0,
31
+ "reward": 0.6203328222036362,
32
+ "reward_std": 0.45215627551078796,
33
+ "rewards/correct_code_reward_func": 0.3541666865348816,
34
+ "rewards/len_reward_func": 0.26616617292165756,
35
+ "step": 2
36
+ },
37
+ {
38
+ "completion_length": 108.79166793823242,
39
+ "epoch": 0.048,
40
+ "grad_norm": 0.9128499702856003,
41
+ "kl": 7.30752944946289e-05,
42
+ "learning_rate": 3.75e-07,
43
+ "loss": 0.0,
44
+ "reward": 0.6249927878379822,
45
+ "reward_std": 0.5788741409778595,
46
+ "rewards/correct_code_reward_func": 0.3958333432674408,
47
+ "rewards/len_reward_func": 0.22915946692228317,
48
+ "step": 3
49
+ },
50
+ {
51
+ "completion_length": 142.2291717529297,
52
+ "epoch": 0.064,
53
+ "grad_norm": 0.9975440173663436,
54
+ "kl": 0.00012683868408203125,
55
+ "learning_rate": 5e-07,
56
+ "loss": 0.0,
57
+ "reward": 0.4923219382762909,
58
+ "reward_std": 0.5618958175182343,
59
+ "rewards/correct_code_reward_func": 0.2708333358168602,
60
+ "rewards/len_reward_func": 0.2214886099100113,
61
+ "step": 4
62
+ },
63
+ {
64
+ "completion_length": 167.9166717529297,
65
+ "epoch": 0.08,
66
+ "grad_norm": 0.6875029348712502,
67
+ "kl": 7.176399230957031e-05,
68
+ "learning_rate": 4.999157413258781e-07,
69
+ "loss": 0.0,
70
+ "reward": 0.3392188400030136,
71
+ "reward_std": 0.3500474542379379,
72
+ "rewards/correct_code_reward_func": 0.0833333358168602,
73
+ "rewards/len_reward_func": 0.255885511636734,
74
+ "step": 5
75
+ },
76
+ {
77
+ "completion_length": 132.9166717529297,
78
+ "epoch": 0.096,
79
+ "grad_norm": 0.7328221161754551,
80
+ "kl": 0.0001456737518310547,
81
+ "learning_rate": 4.996630220997057e-07,
82
+ "loss": 0.0,
83
+ "reward": 0.7516234815120697,
84
+ "reward_std": 0.5683842748403549,
85
+ "rewards/correct_code_reward_func": 0.4375000298023224,
86
+ "rewards/len_reward_func": 0.3141235262155533,
87
+ "step": 6
88
+ },
89
+ {
90
+ "completion_length": 178.0416717529297,
91
+ "epoch": 0.112,
92
+ "grad_norm": 0.7294488262352424,
93
+ "kl": 0.00011301040649414062,
94
+ "learning_rate": 4.992420126717784e-07,
95
+ "loss": 0.0,
96
+ "reward": 0.4081832021474838,
97
+ "reward_std": 0.4984392523765564,
98
+ "rewards/correct_code_reward_func": 0.1666666716337204,
99
+ "rewards/len_reward_func": 0.24151653796434402,
100
+ "step": 7
101
+ },
102
+ {
103
+ "completion_length": 190.81250762939453,
104
+ "epoch": 0.128,
105
+ "grad_norm": 0.774451422596215,
106
+ "kl": 0.00012731552124023438,
107
+ "learning_rate": 4.986529968316653e-07,
108
+ "loss": 0.0,
109
+ "reward": 0.5889585018157959,
110
+ "reward_std": 0.5029059052467346,
111
+ "rewards/correct_code_reward_func": 0.2500000149011612,
112
+ "rewards/len_reward_func": 0.3389585465192795,
113
+ "step": 8
114
+ },
115
+ {
116
+ "completion_length": 203.1041717529297,
117
+ "epoch": 0.144,
118
+ "grad_norm": 0.7610759960843761,
119
+ "kl": 0.0001308917999267578,
120
+ "learning_rate": 4.978963716169165e-07,
121
+ "loss": 0.0,
122
+ "reward": 0.5821144282817841,
123
+ "reward_std": 0.5114115178585052,
124
+ "rewards/correct_code_reward_func": 0.3333333432674408,
125
+ "rewards/len_reward_func": 0.24878107011318207,
126
+ "step": 9
127
+ },
128
+ {
129
+ "completion_length": 119.56250762939453,
130
+ "epoch": 0.16,
131
+ "grad_norm": 0.9290248509168798,
132
+ "kl": 0.00010132789611816406,
133
+ "learning_rate": 4.969726470454313e-07,
134
+ "loss": 0.0,
135
+ "reward": 0.7238170504570007,
136
+ "reward_std": 0.5344631671905518,
137
+ "rewards/correct_code_reward_func": 0.4166666716337204,
138
+ "rewards/len_reward_func": 0.30715033411979675,
139
+ "step": 10
140
+ },
141
+ {
142
+ "completion_length": 150.9166717529297,
143
+ "epoch": 0.176,
144
+ "grad_norm": 1.3590266587813116,
145
+ "kl": 0.00018453598022460938,
146
+ "learning_rate": 4.958824457716706e-07,
147
+ "loss": 0.0,
148
+ "reward": 0.6649805009365082,
149
+ "reward_std": 0.4508441388607025,
150
+ "rewards/correct_code_reward_func": 0.375,
151
+ "rewards/len_reward_func": 0.289980486035347,
152
+ "step": 11
153
+ },
154
+ {
155
+ "completion_length": 206.8541717529297,
156
+ "epoch": 0.192,
157
+ "grad_norm": 0.6744815448192079,
158
+ "kl": 0.000133514404296875,
159
+ "learning_rate": 4.946265026669454e-07,
160
+ "loss": 0.0,
161
+ "reward": 0.44046473503112793,
162
+ "reward_std": 0.4918256551027298,
163
+ "rewards/correct_code_reward_func": 0.2083333432674408,
164
+ "rewards/len_reward_func": 0.23213139921426773,
165
+ "step": 12
166
+ },
167
+ {
168
+ "completion_length": 100.33333587646484,
169
+ "epoch": 0.208,
170
+ "grad_norm": 1.1473694035346043,
171
+ "kl": 0.00011897087097167969,
172
+ "learning_rate": 4.932056643240618e-07,
173
+ "loss": 0.0,
174
+ "reward": 0.8099721968173981,
175
+ "reward_std": 0.5177792608737946,
176
+ "rewards/correct_code_reward_func": 0.4791666716337204,
177
+ "rewards/len_reward_func": 0.3308054953813553,
178
+ "step": 13
179
+ },
180
+ {
181
+ "completion_length": 174.52084350585938,
182
+ "epoch": 0.224,
183
+ "grad_norm": 0.7173952224852224,
184
+ "kl": 0.00014257431030273438,
185
+ "learning_rate": 4.916208884866592e-07,
186
+ "loss": 0.0,
187
+ "reward": 0.5219025313854218,
188
+ "reward_std": 0.43796107172966003,
189
+ "rewards/correct_code_reward_func": 0.229166679084301,
190
+ "rewards/len_reward_func": 0.29273584485054016,
191
+ "step": 14
192
+ },
193
+ {
194
+ "completion_length": 118.60417175292969,
195
+ "epoch": 0.24,
196
+ "grad_norm": 1.2540281005004774,
197
+ "kl": 0.00018072128295898438,
198
+ "learning_rate": 4.898732434036243e-07,
199
+ "loss": 0.0,
200
+ "reward": 0.7924503684043884,
201
+ "reward_std": 0.506152406334877,
202
+ "rewards/correct_code_reward_func": 0.4375000149011612,
203
+ "rewards/len_reward_func": 0.3549504280090332,
204
+ "step": 15
205
+ },
206
+ {
207
+ "completion_length": 105.31250381469727,
208
+ "epoch": 0.256,
209
+ "grad_norm": 0.9431489918373908,
210
+ "kl": 0.00018644332885742188,
211
+ "learning_rate": 4.879639071090173e-07,
212
+ "loss": 0.0,
213
+ "reward": 0.5001727938652039,
214
+ "reward_std": 0.45510660111904144,
215
+ "rewards/correct_code_reward_func": 0.2291666716337204,
216
+ "rewards/len_reward_func": 0.27100610733032227,
217
+ "step": 16
218
+ },
219
+ {
220
+ "completion_length": 130.85416793823242,
221
+ "epoch": 0.272,
222
+ "grad_norm": 0.7237713131170656,
223
+ "kl": 0.00011801719665527344,
224
+ "learning_rate": 4.858941666279955e-07,
225
+ "loss": 0.0,
226
+ "reward": 0.7888750731945038,
227
+ "reward_std": 0.5659129619598389,
228
+ "rewards/correct_code_reward_func": 0.5208333432674408,
229
+ "rewards/len_reward_func": 0.2680417224764824,
230
+ "step": 17
231
+ },
232
+ {
233
+ "completion_length": 134.89583587646484,
234
+ "epoch": 0.288,
235
+ "grad_norm": 0.8717728732040717,
236
+ "kl": 0.000164031982421875,
237
+ "learning_rate": 4.836654171092682e-07,
238
+ "loss": 0.0,
239
+ "reward": 0.5576134622097015,
240
+ "reward_std": 0.5418040752410889,
241
+ "rewards/correct_code_reward_func": 0.3125000149011612,
242
+ "rewards/len_reward_func": 0.24511344730854034,
243
+ "step": 18
244
+ },
245
+ {
246
+ "completion_length": 116.54167175292969,
247
+ "epoch": 0.304,
248
+ "grad_norm": 0.9377250817808462,
249
+ "kl": 0.0002789497375488281,
250
+ "learning_rate": 4.812791608846709e-07,
251
+ "loss": 0.0,
252
+ "reward": 0.7832874357700348,
253
+ "reward_std": 0.5703642070293427,
254
+ "rewards/correct_code_reward_func": 0.4583333432674408,
255
+ "rewards/len_reward_func": 0.3249540776014328,
256
+ "step": 19
257
+ },
258
+ {
259
+ "completion_length": 186.06250762939453,
260
+ "epoch": 0.32,
261
+ "grad_norm": 0.9812718853662215,
262
+ "kl": 0.00038433074951171875,
263
+ "learning_rate": 4.787370064564882e-07,
264
+ "loss": 0.0,
265
+ "reward": 0.5579635202884674,
266
+ "reward_std": 0.519572913646698,
267
+ "rewards/correct_code_reward_func": 0.3125,
268
+ "rewards/len_reward_func": 0.2454635202884674,
269
+ "step": 20
270
+ },
271
+ {
272
+ "completion_length": 110.75,
273
+ "epoch": 0.336,
274
+ "grad_norm": 1.0782358974361461,
275
+ "kl": 0.00030231475830078125,
276
+ "learning_rate": 4.7604066741321253e-07,
277
+ "loss": 0.0,
278
+ "reward": 0.9209870100021362,
279
+ "reward_std": 0.5739021599292755,
280
+ "rewards/correct_code_reward_func": 0.6458333432674408,
281
+ "rewards/len_reward_func": 0.2751536965370178,
282
+ "step": 21
283
+ },
284
+ {
285
+ "completion_length": 162.08334350585938,
286
+ "epoch": 0.352,
287
+ "grad_norm": 0.874006092063079,
288
+ "kl": 0.00022363662719726562,
289
+ "learning_rate": 4.731919612744659e-07,
290
+ "loss": 0.0,
291
+ "reward": 0.7241076529026031,
292
+ "reward_std": 0.5581964254379272,
293
+ "rewards/correct_code_reward_func": 0.4791666865348816,
294
+ "rewards/len_reward_func": 0.24494098126888275,
295
+ "step": 22
296
+ },
297
+ {
298
+ "completion_length": 204.14584350585938,
299
+ "epoch": 0.368,
300
+ "grad_norm": 0.82204687952658,
301
+ "kl": 0.00034046173095703125,
302
+ "learning_rate": 4.7019280826586604e-07,
303
+ "loss": 0.0,
304
+ "reward": 0.532179206609726,
305
+ "reward_std": 0.34643039107322693,
306
+ "rewards/correct_code_reward_func": 0.1666666716337204,
307
+ "rewards/len_reward_func": 0.36551257967948914,
308
+ "step": 23
309
+ },
310
+ {
311
+ "completion_length": 151.0208396911621,
312
+ "epoch": 0.384,
313
+ "grad_norm": 0.9960569412421229,
314
+ "kl": 0.00042057037353515625,
315
+ "learning_rate": 4.6704523002466094e-07,
316
+ "loss": 0.0,
317
+ "reward": 0.5193642377853394,
318
+ "reward_std": 0.40249721705913544,
319
+ "rewards/correct_code_reward_func": 0.2083333395421505,
320
+ "rewards/len_reward_func": 0.31103089451789856,
321
+ "step": 24
322
+ },
323
+ {
324
+ "completion_length": 187.20834350585938,
325
+ "epoch": 0.4,
326
+ "grad_norm": 0.6829787161734556,
327
+ "kl": 0.0003147125244140625,
328
+ "learning_rate": 4.6375134823700503e-07,
329
+ "loss": 0.0,
330
+ "reward": 0.43366140127182007,
331
+ "reward_std": 0.4143451601266861,
332
+ "rewards/correct_code_reward_func": 0.1666666716337204,
333
+ "rewards/len_reward_func": 0.2669947147369385,
334
+ "step": 25
335
+ },
336
+ {
337
+ "completion_length": 99.33333587646484,
338
+ "epoch": 0.416,
339
+ "grad_norm": 1.0546549364927,
340
+ "kl": 0.0007066726684570312,
341
+ "learning_rate": 4.603133832077953e-07,
342
+ "loss": 0.0,
343
+ "reward": 0.6213856935501099,
344
+ "reward_std": 0.5858824849128723,
345
+ "rewards/correct_code_reward_func": 0.395833358168602,
346
+ "rewards/len_reward_func": 0.22555235773324966,
347
+ "step": 26
348
+ },
349
+ {
350
+ "completion_length": 119.04167175292969,
351
+ "epoch": 0.432,
352
+ "grad_norm": 1.0435674697004191,
353
+ "kl": 0.000743865966796875,
354
+ "learning_rate": 4.5673365236403216e-07,
355
+ "loss": 0.0,
356
+ "reward": 0.6226212680339813,
357
+ "reward_std": 0.515766441822052,
358
+ "rewards/correct_code_reward_func": 0.4791666865348816,
359
+ "rewards/len_reward_func": 0.14345459267497063,
360
+ "step": 27
361
+ },
362
+ {
363
+ "completion_length": 204.02083587646484,
364
+ "epoch": 0.448,
365
+ "grad_norm": 1.1685217734586066,
366
+ "kl": 0.000659942626953125,
367
+ "learning_rate": 4.530145686927125e-07,
368
+ "loss": 0.0,
369
+ "reward": 0.6347978413105011,
370
+ "reward_std": 0.4856678545475006,
371
+ "rewards/correct_code_reward_func": 0.3333333432674408,
372
+ "rewards/len_reward_func": 0.3014644980430603,
373
+ "step": 28
374
+ },
375
+ {
376
+ "completion_length": 149.87500762939453,
377
+ "epoch": 0.464,
378
+ "grad_norm": 0.7804112511261988,
379
+ "kl": 0.0006847381591796875,
380
+ "learning_rate": 4.4915863911430897e-07,
381
+ "loss": 0.0,
382
+ "reward": 0.4445287883281708,
383
+ "reward_std": 0.4996263086795807,
384
+ "rewards/correct_code_reward_func": 0.1458333358168602,
385
+ "rewards/len_reward_func": 0.29869547486305237,
386
+ "step": 29
387
+ },
388
+ {
389
+ "completion_length": 132.0833396911621,
390
+ "epoch": 0.48,
391
+ "grad_norm": 0.8535635529221804,
392
+ "kl": 0.0006618499755859375,
393
+ "learning_rate": 4.45168462792932e-07,
394
+ "loss": 0.0,
395
+ "reward": 0.6259033381938934,
396
+ "reward_std": 0.4563398212194443,
397
+ "rewards/correct_code_reward_func": 0.3750000149011612,
398
+ "rewards/len_reward_func": 0.2509033679962158,
399
+ "step": 30
400
+ },
401
+ {
402
+ "completion_length": 126.79166793823242,
403
+ "epoch": 0.496,
404
+ "grad_norm": 0.8761490464403178,
405
+ "kl": 0.0010280609130859375,
406
+ "learning_rate": 4.4104672938431223e-07,
407
+ "loss": 0.0,
408
+ "reward": 0.7820720672607422,
409
+ "reward_std": 0.4476567506790161,
410
+ "rewards/correct_code_reward_func": 0.583333358168602,
411
+ "rewards/len_reward_func": 0.1987387351691723,
412
+ "step": 31
413
+ },
414
+ {
415
+ "completion_length": 95.87500381469727,
416
+ "epoch": 0.512,
417
+ "grad_norm": 1.0760263673729638,
418
+ "kl": 0.0013580322265625,
419
+ "learning_rate": 4.367962172227866e-07,
420
+ "loss": 0.0,
421
+ "reward": 0.7095580399036407,
422
+ "reward_std": 0.4921827018260956,
423
+ "rewards/correct_code_reward_func": 0.5000000149011612,
424
+ "rewards/len_reward_func": 0.20955805480480194,
425
+ "step": 32
426
+ },
427
+ {
428
+ "completion_length": 155.02083587646484,
429
+ "epoch": 0.528,
430
+ "grad_norm": 0.9305216291102568,
431
+ "kl": 0.001430511474609375,
432
+ "learning_rate": 4.324197914485075e-07,
433
+ "loss": 0.0,
434
+ "reward": 0.6416721642017365,
435
+ "reward_std": 0.4772767722606659,
436
+ "rewards/correct_code_reward_func": 0.3958333432674408,
437
+ "rewards/len_reward_func": 0.24583880603313446,
438
+ "step": 33
439
+ },
440
+ {
441
+ "completion_length": 215.0416717529297,
442
+ "epoch": 0.544,
443
+ "grad_norm": 0.8211396277152214,
444
+ "kl": 0.0010585784912109375,
445
+ "learning_rate": 4.2792040207614e-07,
446
+ "loss": 0.0,
447
+ "reward": 0.7173371911048889,
448
+ "reward_std": 0.5010073632001877,
449
+ "rewards/correct_code_reward_func": 0.3958333432674408,
450
+ "rewards/len_reward_func": 0.3215038478374481,
451
+ "step": 34
452
+ },
453
+ {
454
+ "completion_length": 128.8958396911621,
455
+ "epoch": 0.56,
456
+ "grad_norm": 0.9266230852523272,
457
+ "kl": 0.001445770263671875,
458
+ "learning_rate": 4.2330108200634723e-07,
459
+ "loss": 0.0,
460
+ "reward": 0.6981382668018341,
461
+ "reward_std": 0.5356450974941254,
462
+ "rewards/correct_code_reward_func": 0.4166666716337204,
463
+ "rewards/len_reward_func": 0.2814715951681137,
464
+ "step": 35
465
+ },
466
+ {
467
+ "completion_length": 174.62500762939453,
468
+ "epoch": 0.576,
469
+ "grad_norm": 0.8774350304368993,
470
+ "kl": 0.00160980224609375,
471
+ "learning_rate": 4.185649449814045e-07,
472
+ "loss": 0.0,
473
+ "reward": 0.819614589214325,
474
+ "reward_std": 0.5062630474567413,
475
+ "rewards/correct_code_reward_func": 0.5000000223517418,
476
+ "rewards/len_reward_func": 0.3196146488189697,
477
+ "step": 36
478
+ },
479
+ {
480
+ "completion_length": 79.29166793823242,
481
+ "epoch": 0.592,
482
+ "grad_norm": 1.1529766368540975,
483
+ "kl": 0.0024871826171875,
484
+ "learning_rate": 4.137151834863213e-07,
485
+ "loss": 0.0,
486
+ "reward": 0.615891844034195,
487
+ "reward_std": 0.6233284175395966,
488
+ "rewards/correct_code_reward_func": 0.4166666716337204,
489
+ "rewards/len_reward_func": 0.19922512769699097,
490
+ "step": 37
491
+ },
492
+ {
493
+ "completion_length": 85.97917175292969,
494
+ "epoch": 0.608,
495
+ "grad_norm": 1.2549480144397245,
496
+ "kl": 0.0028076171875,
497
+ "learning_rate": 4.087550665968846e-07,
498
+ "loss": 0.0,
499
+ "reward": 0.5825270563364029,
500
+ "reward_std": 0.45819054543972015,
501
+ "rewards/correct_code_reward_func": 0.35416667722165585,
502
+ "rewards/len_reward_func": 0.2283603698015213,
503
+ "step": 38
504
+ },
505
+ {
506
+ "completion_length": 85.68750381469727,
507
+ "epoch": 0.624,
508
+ "grad_norm": 1.4005267604268767,
509
+ "kl": 0.003387451171875,
510
+ "learning_rate": 4.036879377760752e-07,
511
+ "loss": 0.0,
512
+ "reward": 0.8216443359851837,
513
+ "reward_std": 0.548437625169754,
514
+ "rewards/correct_code_reward_func": 0.5833333432674408,
515
+ "rewards/len_reward_func": 0.23831100016832352,
516
+ "step": 39
517
+ },
518
+ {
519
+ "completion_length": 113.16667175292969,
520
+ "epoch": 0.64,
521
+ "grad_norm": 1.0341725323218647,
522
+ "kl": 0.003143310546875,
523
+ "learning_rate": 3.9851721262034157e-07,
524
+ "loss": 0.0,
525
+ "reward": 0.520989790558815,
526
+ "reward_std": 0.47860175371170044,
527
+ "rewards/correct_code_reward_func": 0.25,
528
+ "rewards/len_reward_func": 0.270989790558815,
529
+ "step": 40
530
+ },
531
+ {
532
+ "completion_length": 109.70833587646484,
533
+ "epoch": 0.656,
534
+ "grad_norm": 1.0974308815666283,
535
+ "kl": 0.00255584716796875,
536
+ "learning_rate": 3.932463765572505e-07,
537
+ "loss": 0.0,
538
+ "reward": 0.5007437467575073,
539
+ "reward_std": 0.4586540758609772,
540
+ "rewards/correct_code_reward_func": 0.3333333432674408,
541
+ "rewards/len_reward_func": 0.16741041094064713,
542
+ "step": 41
543
+ },
544
+ {
545
+ "completion_length": 113.50000381469727,
546
+ "epoch": 0.672,
547
+ "grad_norm": 0.8007467983812053,
548
+ "kl": 0.00237274169921875,
549
+ "learning_rate": 3.8787898249606767e-07,
550
+ "loss": 0.0,
551
+ "reward": 0.4927902817726135,
552
+ "reward_std": 0.4271247088909149,
553
+ "rewards/correct_code_reward_func": 0.2708333395421505,
554
+ "rewards/len_reward_func": 0.22195692360401154,
555
+ "step": 42
556
+ },
557
+ {
558
+ "completion_length": 79.85416793823242,
559
+ "epoch": 0.688,
560
+ "grad_norm": 1.2179865851315288,
561
+ "kl": 0.00423431396484375,
562
+ "learning_rate": 3.8241864843284964e-07,
563
+ "loss": 0.0,
564
+ "reward": 0.8101656138896942,
565
+ "reward_std": 0.4996851086616516,
566
+ "rewards/correct_code_reward_func": 0.5416666716337204,
567
+ "rewards/len_reward_func": 0.2684989869594574,
568
+ "step": 43
569
+ },
570
+ {
571
+ "completion_length": 91.54167175292969,
572
+ "epoch": 0.704,
573
+ "grad_norm": 1.240439347309207,
574
+ "kl": 0.005615234375,
575
+ "learning_rate": 3.768690550116639e-07,
576
+ "loss": 0.0,
577
+ "reward": 0.7196292579174042,
578
+ "reward_std": 0.5478431880474091,
579
+ "rewards/correct_code_reward_func": 0.4166666865348816,
580
+ "rewards/len_reward_func": 0.3029625713825226,
581
+ "step": 44
582
+ },
583
+ {
584
+ "completion_length": 121.97916793823242,
585
+ "epoch": 0.72,
586
+ "grad_norm": 0.9464336788144472,
587
+ "kl": 0.0048980712890625,
588
+ "learning_rate": 3.712339430435792e-07,
589
+ "loss": 0.0,
590
+ "reward": 0.8251023292541504,
591
+ "reward_std": 0.5142593383789062,
592
+ "rewards/correct_code_reward_func": 0.4791666716337204,
593
+ "rewards/len_reward_func": 0.34593565762043,
594
+ "step": 45
595
+ },
596
+ {
597
+ "completion_length": 88.20833587646484,
598
+ "epoch": 0.736,
599
+ "grad_norm": 0.944700095686709,
600
+ "kl": 0.0039215087890625,
601
+ "learning_rate": 3.65517110985099e-07,
602
+ "loss": 0.0,
603
+ "reward": 0.6876890659332275,
604
+ "reward_std": 0.4308091402053833,
605
+ "rewards/correct_code_reward_func": 0.4166666716337204,
606
+ "rewards/len_reward_func": 0.27102240175008774,
607
+ "step": 46
608
+ },
609
+ {
610
+ "completion_length": 69.75000381469727,
611
+ "epoch": 0.752,
612
+ "grad_norm": 1.2034725464007063,
613
+ "kl": 0.0064544677734375,
614
+ "learning_rate": 3.597224123777389e-07,
615
+ "loss": 0.0,
616
+ "reward": 0.9030305743217468,
617
+ "reward_std": 0.5125944316387177,
618
+ "rewards/correct_code_reward_func": 0.6458333730697632,
619
+ "rewards/len_reward_func": 0.25719721615314484,
620
+ "step": 47
621
+ },
622
+ {
623
+ "completion_length": 84.00000381469727,
624
+ "epoch": 0.768,
625
+ "grad_norm": 1.188785130607309,
626
+ "kl": 0.0072174072265625,
627
+ "learning_rate": 3.5385375325047163e-07,
628
+ "loss": 0.0,
629
+ "reward": 0.6972790956497192,
630
+ "reward_std": 0.6147326231002808,
631
+ "rewards/correct_code_reward_func": 0.3750000149011612,
632
+ "rewards/len_reward_func": 0.32227905094623566,
633
+ "step": 48
634
+ },
635
+ {
636
+ "completion_length": 54.35416793823242,
637
+ "epoch": 0.784,
638
+ "grad_norm": 1.5392691471218216,
639
+ "kl": 0.011932373046875,
640
+ "learning_rate": 3.479150894867926e-07,
641
+ "loss": 0.0,
642
+ "reward": 0.7476979494094849,
643
+ "reward_std": 0.5232938826084137,
644
+ "rewards/correct_code_reward_func": 0.520833358168602,
645
+ "rewards/len_reward_func": 0.22686457633972168,
646
+ "step": 49
647
+ },
648
+ {
649
+ "completion_length": 105.375,
650
+ "epoch": 0.8,
651
+ "grad_norm": 0.9867137272930855,
652
+ "kl": 0.0070037841796875,
653
+ "learning_rate": 3.4191042415818e-07,
654
+ "loss": 0.0,
655
+ "reward": 0.7631438374519348,
656
+ "reward_std": 0.5120529979467392,
657
+ "rewards/correct_code_reward_func": 0.4375,
658
+ "rewards/len_reward_func": 0.3256438076496124,
659
+ "step": 50
660
+ },
661
+ {
662
+ "completion_length": 79.79166793823242,
663
+ "epoch": 0.816,
664
+ "grad_norm": 1.2765736460552886,
665
+ "kl": 0.009490966796875,
666
+ "learning_rate": 3.3584380482574717e-07,
667
+ "loss": 0.0,
668
+ "reward": 0.8190419375896454,
669
+ "reward_std": 0.5837846994400024,
670
+ "rewards/correct_code_reward_func": 0.583333358168602,
671
+ "rewards/len_reward_func": 0.23570860922336578,
672
+ "step": 51
673
+ },
674
+ {
675
+ "completion_length": 66.62500381469727,
676
+ "epoch": 0.832,
677
+ "grad_norm": 1.2357424740774945,
678
+ "kl": 0.012359619140625,
679
+ "learning_rate": 3.297193208119047e-07,
680
+ "loss": 0.0,
681
+ "reward": 0.9992968440055847,
682
+ "reward_std": 0.38213877379894257,
683
+ "rewards/correct_code_reward_func": 0.7500000298023224,
684
+ "rewards/len_reward_func": 0.24929680675268173,
685
+ "step": 52
686
+ },
687
+ {
688
+ "completion_length": 99.83333587646484,
689
+ "epoch": 0.848,
690
+ "grad_norm": 1.1189426445940507,
691
+ "kl": 0.01153564453125,
692
+ "learning_rate": 3.235411004438741e-07,
693
+ "loss": 0.0,
694
+ "reward": 0.8771131932735443,
695
+ "reward_std": 0.5585960447788239,
696
+ "rewards/correct_code_reward_func": 0.520833358168602,
697
+ "rewards/len_reward_func": 0.3562798500061035,
698
+ "step": 53
699
+ },
700
+ {
701
+ "completion_length": 115.89583587646484,
702
+ "epoch": 0.864,
703
+ "grad_norm": 1.3030164084383453,
704
+ "kl": 0.013397216796875,
705
+ "learning_rate": 3.173133082709086e-07,
706
+ "loss": 0.0,
707
+ "reward": 0.9209796786308289,
708
+ "reward_std": 0.5127619951963425,
709
+ "rewards/correct_code_reward_func": 0.5416666865348816,
710
+ "rewards/len_reward_func": 0.37931299209594727,
711
+ "step": 54
712
+ },
713
+ {
714
+ "completion_length": 45.81250190734863,
715
+ "epoch": 0.88,
716
+ "grad_norm": 1.2758138396906251,
717
+ "kl": 0.02093505859375,
718
+ "learning_rate": 3.1104014225709784e-07,
719
+ "loss": 0.0,
720
+ "reward": 1.014316976070404,
721
+ "reward_std": 0.5054647028446198,
722
+ "rewards/correct_code_reward_func": 0.6666666865348816,
723
+ "rewards/len_reward_func": 0.34765030443668365,
724
+ "step": 55
725
+ },
726
+ {
727
+ "completion_length": 177.89584350585938,
728
+ "epoch": 0.896,
729
+ "grad_norm": 0.9790523295873852,
730
+ "kl": 0.0159149169921875,
731
+ "learning_rate": 3.0472583095164873e-07,
732
+ "loss": 0.0,
733
+ "reward": 0.5877984166145325,
734
+ "reward_std": 0.4042231887578964,
735
+ "rewards/correct_code_reward_func": 0.3125000149011612,
736
+ "rewards/len_reward_func": 0.2752983868122101,
737
+ "step": 56
738
+ },
739
+ {
740
+ "completion_length": 46.97916793823242,
741
+ "epoch": 0.912,
742
+ "grad_norm": 2.352715010123481,
743
+ "kl": 0.0218505859375,
744
+ "learning_rate": 2.983746306385499e-07,
745
+ "loss": 0.0,
746
+ "reward": 0.9101540148258209,
747
+ "reward_std": 0.5364184379577637,
748
+ "rewards/correct_code_reward_func": 0.5625,
749
+ "rewards/len_reward_func": 0.3476540297269821,
750
+ "step": 57
751
+ },
752
+ {
753
+ "completion_length": 86.5,
754
+ "epoch": 0.928,
755
+ "grad_norm": 1.3590745378537943,
756
+ "kl": 0.018341064453125,
757
+ "learning_rate": 2.919908224675412e-07,
758
+ "loss": 0.0,
759
+ "reward": 0.7466456890106201,
760
+ "reward_std": 0.4038194566965103,
761
+ "rewards/correct_code_reward_func": 0.458333358168602,
762
+ "rewards/len_reward_func": 0.2883123308420181,
763
+ "step": 58
764
+ },
765
+ {
766
+ "completion_length": 93.35416793823242,
767
+ "epoch": 0.944,
768
+ "grad_norm": 1.0657115005336795,
769
+ "kl": 0.0169677734375,
770
+ "learning_rate": 2.8557870956832133e-07,
771
+ "loss": 0.0,
772
+ "reward": 0.8307068645954132,
773
+ "reward_std": 0.41568903625011444,
774
+ "rewards/correct_code_reward_func": 0.458333358168602,
775
+ "rewards/len_reward_func": 0.37237347662448883,
776
+ "step": 59
777
+ },
778
+ {
779
+ "completion_length": 96.04166793823242,
780
+ "epoch": 0.96,
781
+ "grad_norm": 1.1894982274411716,
782
+ "kl": 0.0167236328125,
783
+ "learning_rate": 2.7914261414993976e-07,
784
+ "loss": 0.0,
785
+ "reward": 0.9781838655471802,
786
+ "reward_std": 0.5707030892372131,
787
+ "rewards/correct_code_reward_func": 0.6458333730697632,
788
+ "rewards/len_reward_func": 0.33235056698322296,
789
+ "step": 60
790
+ },
791
+ {
792
+ "completion_length": 57.041669845581055,
793
+ "epoch": 0.976,
794
+ "grad_norm": 1.6373435351232113,
795
+ "kl": 0.02020263671875,
796
+ "learning_rate": 2.726868745873286e-07,
797
+ "loss": 0.0,
798
+ "reward": 0.7698030173778534,
799
+ "reward_std": 0.5640588998794556,
800
+ "rewards/correct_code_reward_func": 0.5208333432674408,
801
+ "rewards/len_reward_func": 0.2489696592092514,
802
+ "step": 61
803
+ },
804
+ {
805
+ "completion_length": 84.89583587646484,
806
+ "epoch": 0.992,
807
+ "grad_norm": 1.223004703854309,
808
+ "kl": 0.017547607421875,
809
+ "learning_rate": 2.662158424969357e-07,
810
+ "loss": 0.0,
811
+ "reward": 0.8090977072715759,
812
+ "reward_std": 0.6418364942073822,
813
+ "rewards/correct_code_reward_func": 0.4791666716337204,
814
+ "rewards/len_reward_func": 0.32993103563785553,
815
+ "step": 62
816
+ },
817
+ {
818
+ "completion_length": 52.958335876464844,
819
+ "epoch": 1.0,
820
+ "grad_norm": 1.223004703854309,
821
+ "kl": 0.03564453125,
822
+ "learning_rate": 2.597338798034344e-07,
823
+ "loss": 0.0,
824
+ "reward": 0.9149335622787476,
825
+ "reward_std": 0.3469616770744324,
826
+ "rewards/correct_code_reward_func": 0.5416666865348816,
827
+ "rewards/len_reward_func": 0.3732668161392212,
828
+ "step": 63
829
+ },
830
+ {
831
+ "completion_length": 76.31250381469727,
832
+ "epoch": 1.016,
833
+ "grad_norm": 1.234231990060427,
834
+ "kl": 0.0224609375,
835
+ "learning_rate": 2.532453557994827e-07,
836
+ "loss": 0.0,
837
+ "reward": 0.7154572010040283,
838
+ "reward_std": 0.5053917020559311,
839
+ "rewards/correct_code_reward_func": 0.3958333432674408,
840
+ "rewards/len_reward_func": 0.31962384283542633,
841
+ "step": 64
842
+ },
843
+ {
844
+ "completion_length": 116.52083587646484,
845
+ "epoch": 1.032,
846
+ "grad_norm": 1.0750099724368811,
847
+ "kl": 0.01617431640625,
848
+ "learning_rate": 2.467546442005173e-07,
849
+ "loss": 0.0,
850
+ "reward": 0.7226243913173676,
851
+ "reward_std": 0.5376951545476913,
852
+ "rewards/correct_code_reward_func": 0.3750000149011612,
853
+ "rewards/len_reward_func": 0.34762439131736755,
854
+ "step": 65
855
+ },
856
+ {
857
+ "completion_length": 116.10416793823242,
858
+ "epoch": 1.048,
859
+ "grad_norm": 1.0133791335296118,
860
+ "kl": 0.01983642578125,
861
+ "learning_rate": 2.4026612019656556e-07,
862
+ "loss": 0.0,
863
+ "reward": 0.9590997993946075,
864
+ "reward_std": 0.4536292999982834,
865
+ "rewards/correct_code_reward_func": 0.6041666865348816,
866
+ "rewards/len_reward_func": 0.35493315756320953,
867
+ "step": 66
868
+ },
869
+ {
870
+ "completion_length": 66.625,
871
+ "epoch": 1.064,
872
+ "grad_norm": 1.2625208133395418,
873
+ "kl": 0.030517578125,
874
+ "learning_rate": 2.337841575030642e-07,
875
+ "loss": 0.0,
876
+ "reward": 0.9440672397613525,
877
+ "reward_std": 0.42680656909942627,
878
+ "rewards/correct_code_reward_func": 0.583333358168602,
879
+ "rewards/len_reward_func": 0.36073388159275055,
880
+ "step": 67
881
+ },
882
+ {
883
+ "completion_length": 60.458335876464844,
884
+ "epoch": 1.08,
885
+ "grad_norm": 1.174665706452754,
886
+ "kl": 0.02490234375,
887
+ "learning_rate": 2.2731312541267143e-07,
888
+ "loss": 0.0,
889
+ "reward": 0.5735488831996918,
890
+ "reward_std": 0.48749370872974396,
891
+ "rewards/correct_code_reward_func": 0.2500000149011612,
892
+ "rewards/len_reward_func": 0.3235488831996918,
893
+ "step": 68
894
+ },
895
+ {
896
+ "completion_length": 74.10416984558105,
897
+ "epoch": 1.096,
898
+ "grad_norm": 1.5260410634207344,
899
+ "kl": 0.029052734375,
900
+ "learning_rate": 2.2085738585006021e-07,
901
+ "loss": 0.0,
902
+ "reward": 1.0659485459327698,
903
+ "reward_std": 0.3796464204788208,
904
+ "rewards/correct_code_reward_func": 0.6666666865348816,
905
+ "rewards/len_reward_func": 0.3992818146944046,
906
+ "step": 69
907
+ },
908
+ {
909
+ "completion_length": 71.89583396911621,
910
+ "epoch": 1.112,
911
+ "grad_norm": 1.059976697715823,
912
+ "kl": 0.0418701171875,
913
+ "learning_rate": 2.1442129043167873e-07,
914
+ "loss": 0.0,
915
+ "reward": 0.7276312112808228,
916
+ "reward_std": 0.5470257103443146,
917
+ "rewards/correct_code_reward_func": 0.3958333432674408,
918
+ "rewards/len_reward_func": 0.33179786801338196,
919
+ "step": 70
920
+ },
921
+ {
922
+ "completion_length": 96.29166793823242,
923
+ "epoch": 1.1280000000000001,
924
+ "grad_norm": 0.9278761590759935,
925
+ "kl": 0.02056884765625,
926
+ "learning_rate": 2.0800917753245875e-07,
927
+ "loss": 0.0,
928
+ "reward": 0.8413068056106567,
929
+ "reward_std": 0.3271617591381073,
930
+ "rewards/correct_code_reward_func": 0.4375000298023224,
931
+ "rewards/len_reward_func": 0.40380676090717316,
932
+ "step": 71
933
+ },
934
+ {
935
+ "completion_length": 79.33333587646484,
936
+ "epoch": 1.144,
937
+ "grad_norm": 0.9890378359951959,
938
+ "kl": 0.025390625,
939
+ "learning_rate": 2.0162536936145008e-07,
940
+ "loss": 0.0,
941
+ "reward": 0.7865794003009796,
942
+ "reward_std": 0.3915296047925949,
943
+ "rewards/correct_code_reward_func": 0.4166666716337204,
944
+ "rewards/len_reward_func": 0.3699127733707428,
945
+ "step": 72
946
+ },
947
+ {
948
+ "completion_length": 72.95833587646484,
949
+ "epoch": 1.16,
950
+ "grad_norm": 1.3690756427464768,
951
+ "kl": 0.0413818359375,
952
+ "learning_rate": 1.9527416904835132e-07,
953
+ "loss": 0.0,
954
+ "reward": 0.9756404757499695,
955
+ "reward_std": 0.3930533230304718,
956
+ "rewards/correct_code_reward_func": 0.5416666865348816,
957
+ "rewards/len_reward_func": 0.4339737892150879,
958
+ "step": 73
959
+ },
960
+ {
961
+ "completion_length": 79.89583396911621,
962
+ "epoch": 1.176,
963
+ "grad_norm": 1.1734737313856272,
964
+ "kl": 0.02801513671875,
965
+ "learning_rate": 1.889598577429022e-07,
966
+ "loss": 0.0,
967
+ "reward": 0.7878330945968628,
968
+ "reward_std": 0.4340344965457916,
969
+ "rewards/correct_code_reward_func": 0.4166666716337204,
970
+ "rewards/len_reward_func": 0.3711664080619812,
971
+ "step": 74
972
+ },
973
+ {
974
+ "completion_length": 58.270835876464844,
975
+ "epoch": 1.192,
976
+ "grad_norm": 1.2871892634157702,
977
+ "kl": 0.0377197265625,
978
+ "learning_rate": 1.8268669172909136e-07,
979
+ "loss": 0.0,
980
+ "reward": 1.1113883256912231,
981
+ "reward_std": 0.5605219900608063,
982
+ "rewards/correct_code_reward_func": 0.7291666865348816,
983
+ "rewards/len_reward_func": 0.3822215795516968,
984
+ "step": 75
985
+ },
986
+ {
987
+ "completion_length": 97.25,
988
+ "epoch": 1.208,
989
+ "grad_norm": 2.007411904832845,
990
+ "kl": 0.04949951171875,
991
+ "learning_rate": 1.7645889955612592e-07,
992
+ "loss": 0.0,
993
+ "reward": 1.0362713038921356,
994
+ "reward_std": 0.3777136504650116,
995
+ "rewards/correct_code_reward_func": 0.6666666865348816,
996
+ "rewards/len_reward_func": 0.3696046322584152,
997
+ "step": 76
998
+ },
999
+ {
1000
+ "completion_length": 42.60416793823242,
1001
+ "epoch": 1.224,
1002
+ "grad_norm": 1.196008727333628,
1003
+ "kl": 0.0433349609375,
1004
+ "learning_rate": 1.7028067918809535e-07,
1005
+ "loss": 0.0,
1006
+ "reward": 0.8968790173530579,
1007
+ "reward_std": 0.47050511837005615,
1008
+ "rewards/correct_code_reward_func": 0.520833358168602,
1009
+ "rewards/len_reward_func": 0.3760456293821335,
1010
+ "step": 77
1011
+ },
1012
+ {
1013
+ "completion_length": 44.97916793823242,
1014
+ "epoch": 1.24,
1015
+ "grad_norm": 1.4303810146633773,
1016
+ "kl": 0.0574951171875,
1017
+ "learning_rate": 1.6415619517425294e-07,
1018
+ "loss": 0.0001,
1019
+ "reward": 0.9696877598762512,
1020
+ "reward_std": 0.33823370933532715,
1021
+ "rewards/correct_code_reward_func": 0.5208333432674408,
1022
+ "rewards/len_reward_func": 0.4488544166088104,
1023
+ "step": 78
1024
+ },
1025
+ {
1026
+ "completion_length": 66.0625,
1027
+ "epoch": 1.256,
1028
+ "grad_norm": 1.5543919802377633,
1029
+ "kl": 0.03875732421875,
1030
+ "learning_rate": 1.5808957584181994e-07,
1031
+ "loss": 0.0,
1032
+ "reward": 0.9006170630455017,
1033
+ "reward_std": 0.37997904419898987,
1034
+ "rewards/correct_code_reward_func": 0.4791666716337204,
1035
+ "rewards/len_reward_func": 0.4214503914117813,
1036
+ "step": 79
1037
+ },
1038
+ {
1039
+ "completion_length": 76.06250381469727,
1040
+ "epoch": 1.272,
1041
+ "grad_norm": 1.8800678172876588,
1042
+ "kl": 0.047607421875,
1043
+ "learning_rate": 1.5208491051320744e-07,
1044
+ "loss": 0.0,
1045
+ "reward": 0.9962214827537537,
1046
+ "reward_std": 0.3772743344306946,
1047
+ "rewards/correct_code_reward_func": 0.5625000149011612,
1048
+ "rewards/len_reward_func": 0.4337214529514313,
1049
+ "step": 80
1050
+ },
1051
+ {
1052
+ "completion_length": 43.27083396911621,
1053
+ "epoch": 1.288,
1054
+ "grad_norm": 1.3749781365688138,
1055
+ "kl": 0.055908203125,
1056
+ "learning_rate": 1.461462467495284e-07,
1057
+ "loss": 0.0001,
1058
+ "reward": 1.1482934355735779,
1059
+ "reward_std": 0.4654877036809921,
1060
+ "rewards/correct_code_reward_func": 0.7291666865348816,
1061
+ "rewards/len_reward_func": 0.41912680864334106,
1062
+ "step": 81
1063
+ },
1064
+ {
1065
+ "completion_length": 64.25000381469727,
1066
+ "epoch": 1.304,
1067
+ "grad_norm": 1.2603902864707686,
1068
+ "kl": 0.041259765625,
1069
+ "learning_rate": 1.4027758762226107e-07,
1070
+ "loss": 0.0,
1071
+ "reward": 0.8633248507976532,
1072
+ "reward_std": 0.44106219708919525,
1073
+ "rewards/correct_code_reward_func": 0.4583333432674408,
1074
+ "rewards/len_reward_func": 0.40499147772789,
1075
+ "step": 82
1076
+ },
1077
+ {
1078
+ "completion_length": 84.87500190734863,
1079
+ "epoch": 1.32,
1080
+ "grad_norm": 1.3553050400485256,
1081
+ "kl": 0.037109375,
1082
+ "learning_rate": 1.3448288901490092e-07,
1083
+ "loss": 0.0,
1084
+ "reward": 0.930885374546051,
1085
+ "reward_std": 0.28970160335302353,
1086
+ "rewards/correct_code_reward_func": 0.4791666865348816,
1087
+ "rewards/len_reward_func": 0.45171867311000824,
1088
+ "step": 83
1089
+ },
1090
+ {
1091
+ "completion_length": 44.12500190734863,
1092
+ "epoch": 1.336,
1093
+ "grad_norm": 1.3471283821112554,
1094
+ "kl": 0.05126953125,
1095
+ "learning_rate": 1.2876605695642084e-07,
1096
+ "loss": 0.0001,
1097
+ "reward": 0.8797213435173035,
1098
+ "reward_std": 0.4040851444005966,
1099
+ "rewards/correct_code_reward_func": 0.4375000149011612,
1100
+ "rewards/len_reward_func": 0.44222137331962585,
1101
+ "step": 84
1102
+ },
1103
+ {
1104
+ "completion_length": 77.27083587646484,
1105
+ "epoch": 1.3519999999999999,
1106
+ "grad_norm": 1.3204773489251114,
1107
+ "kl": 0.0401611328125,
1108
+ "learning_rate": 1.231309449883361e-07,
1109
+ "loss": 0.0,
1110
+ "reward": 0.9848110675811768,
1111
+ "reward_std": 0.5164197236299515,
1112
+ "rewards/correct_code_reward_func": 0.5833333432674408,
1113
+ "rewards/len_reward_func": 0.40147776901721954,
1114
+ "step": 85
1115
+ },
1116
+ {
1117
+ "completion_length": 58.729169845581055,
1118
+ "epoch": 1.3679999999999999,
1119
+ "grad_norm": 1.6764054808630413,
1120
+ "kl": 0.0513916015625,
1121
+ "learning_rate": 1.1758135156715041e-07,
1122
+ "loss": 0.0001,
1123
+ "reward": 1.1368677616119385,
1124
+ "reward_std": 0.3856023848056793,
1125
+ "rewards/correct_code_reward_func": 0.7708333432674408,
1126
+ "rewards/len_reward_func": 0.3660343587398529,
1127
+ "step": 86
1128
+ },
1129
+ {
1130
+ "completion_length": 90.02083396911621,
1131
+ "epoch": 1.384,
1132
+ "grad_norm": 1.1184416966735806,
1133
+ "kl": 0.03411865234375,
1134
+ "learning_rate": 1.1212101750393235e-07,
1135
+ "loss": 0.0,
1136
+ "reward": 1.00173819065094,
1137
+ "reward_std": 0.49202577769756317,
1138
+ "rewards/correct_code_reward_func": 0.5625000298023224,
1139
+ "rewards/len_reward_func": 0.43923819065093994,
1140
+ "step": 87
1141
+ },
1142
+ {
1143
+ "completion_length": 46.60416793823242,
1144
+ "epoch": 1.4,
1145
+ "grad_norm": 1.1504631747132115,
1146
+ "kl": 0.0496826171875,
1147
+ "learning_rate": 1.0675362344274952e-07,
1148
+ "loss": 0.0,
1149
+ "reward": 0.9823802709579468,
1150
+ "reward_std": 0.46341855823993683,
1151
+ "rewards/correct_code_reward_func": 0.6041666865348816,
1152
+ "rewards/len_reward_func": 0.3782135844230652,
1153
+ "step": 88
1154
+ },
1155
+ {
1156
+ "completion_length": 59.937503814697266,
1157
+ "epoch": 1.416,
1158
+ "grad_norm": 1.3546823329176554,
1159
+ "kl": 0.0450439453125,
1160
+ "learning_rate": 1.0148278737965844e-07,
1161
+ "loss": 0.0,
1162
+ "reward": 1.116898536682129,
1163
+ "reward_std": 0.3632017821073532,
1164
+ "rewards/correct_code_reward_func": 0.6250000298023224,
1165
+ "rewards/len_reward_func": 0.4918985068798065,
1166
+ "step": 89
1167
+ },
1168
+ {
1169
+ "completion_length": 38.08333396911621,
1170
+ "epoch": 1.432,
1171
+ "grad_norm": 1.4742606396398155,
1172
+ "kl": 0.06396484375,
1173
+ "learning_rate": 9.631206222392479e-08,
1174
+ "loss": 0.0001,
1175
+ "reward": 1.079964131116867,
1176
+ "reward_std": 0.47177985310554504,
1177
+ "rewards/correct_code_reward_func": 0.6666666865348816,
1178
+ "rewards/len_reward_func": 0.4132973849773407,
1179
+ "step": 90
1180
+ },
1181
+ {
1182
+ "completion_length": 72.18750190734863,
1183
+ "epoch": 1.448,
1184
+ "grad_norm": 1.4885040482346985,
1185
+ "kl": 0.0491943359375,
1186
+ "learning_rate": 9.124493340311537e-08,
1187
+ "loss": 0.0,
1188
+ "reward": 1.0042789578437805,
1189
+ "reward_std": 0.3533863425254822,
1190
+ "rewards/correct_code_reward_func": 0.5625000298023224,
1191
+ "rewards/len_reward_func": 0.44177892804145813,
1192
+ "step": 91
1193
+ },
1194
+ {
1195
+ "completion_length": 69.35416984558105,
1196
+ "epoch": 1.464,
1197
+ "grad_norm": 1.1925308190836468,
1198
+ "kl": 0.0478515625,
1199
+ "learning_rate": 8.628481651367875e-08,
1200
+ "loss": 0.0,
1201
+ "reward": 1.0887993574142456,
1202
+ "reward_std": 0.5487662255764008,
1203
+ "rewards/correct_code_reward_func": 0.6666666865348816,
1204
+ "rewards/len_reward_func": 0.422132670879364,
1205
+ "step": 92
1206
+ },
1207
+ {
1208
+ "completion_length": 48.41666793823242,
1209
+ "epoch": 1.48,
1210
+ "grad_norm": 1.6549689653260358,
1211
+ "kl": 0.051025390625,
1212
+ "learning_rate": 8.143505501859551e-08,
1213
+ "loss": 0.0001,
1214
+ "reward": 0.920165479183197,
1215
+ "reward_std": 0.415915310382843,
1216
+ "rewards/correct_code_reward_func": 0.5208333432674408,
1217
+ "rewards/len_reward_func": 0.39933212101459503,
1218
+ "step": 93
1219
+ },
1220
+ {
1221
+ "completion_length": 75.87500381469727,
1222
+ "epoch": 1.496,
1223
+ "grad_norm": 1.586423514577547,
1224
+ "kl": 0.053466796875,
1225
+ "learning_rate": 7.669891799365282e-08,
1226
+ "loss": 0.0001,
1227
+ "reward": 0.8828703761100769,
1228
+ "reward_std": 0.49832169711589813,
1229
+ "rewards/correct_code_reward_func": 0.4375000149011612,
1230
+ "rewards/len_reward_func": 0.4453703910112381,
1231
+ "step": 94
1232
+ },
1233
+ {
1234
+ "completion_length": 79.06250190734863,
1235
+ "epoch": 1.512,
1236
+ "grad_norm": 1.7029251090199986,
1237
+ "kl": 0.0489501953125,
1238
+ "learning_rate": 7.207959792385998e-08,
1239
+ "loss": 0.0,
1240
+ "reward": 1.0712128281593323,
1241
+ "reward_std": 0.39892764389514923,
1242
+ "rewards/correct_code_reward_func": 0.6041666865348816,
1243
+ "rewards/len_reward_func": 0.4670460820198059,
1244
+ "step": 95
1245
+ },
1246
+ {
1247
+ "completion_length": 77.66666984558105,
1248
+ "epoch": 1.528,
1249
+ "grad_norm": 1.068625601865938,
1250
+ "kl": 0.03729248046875,
1251
+ "learning_rate": 6.758020855149249e-08,
1252
+ "loss": 0.0,
1253
+ "reward": 0.839818924665451,
1254
+ "reward_std": 0.4449944496154785,
1255
+ "rewards/correct_code_reward_func": 0.3958333432674408,
1256
+ "rewards/len_reward_func": 0.44398559629917145,
1257
+ "step": 96
1258
+ },
1259
+ {
1260
+ "completion_length": 46.00000190734863,
1261
+ "epoch": 1.544,
1262
+ "grad_norm": 1.6059040022360391,
1263
+ "kl": 0.0543212890625,
1264
+ "learning_rate": 6.320378277721342e-08,
1265
+ "loss": 0.0001,
1266
+ "reward": 0.8210516273975372,
1267
+ "reward_std": 0.34706588089466095,
1268
+ "rewards/correct_code_reward_func": 0.3958333432674408,
1269
+ "rewards/len_reward_func": 0.42521825432777405,
1270
+ "step": 97
1271
+ },
1272
+ {
1273
+ "completion_length": 47.35416793823242,
1274
+ "epoch": 1.56,
1275
+ "grad_norm": 1.0536706533475677,
1276
+ "kl": 0.06640625,
1277
+ "learning_rate": 5.895327061568775e-08,
1278
+ "loss": 0.0001,
1279
+ "reward": 0.9913617670536041,
1280
+ "reward_std": 0.24820256233215332,
1281
+ "rewards/correct_code_reward_func": 0.5208333432674408,
1282
+ "rewards/len_reward_func": 0.47052840888500214,
1283
+ "step": 98
1284
+ },
1285
+ {
1286
+ "completion_length": 34.83333396911621,
1287
+ "epoch": 1.576,
1288
+ "grad_norm": 1.7119720585841143,
1289
+ "kl": 0.0626220703125,
1290
+ "learning_rate": 5.483153720706798e-08,
1291
+ "loss": 0.0001,
1292
+ "reward": 0.8738410770893097,
1293
+ "reward_std": 0.5085435211658478,
1294
+ "rewards/correct_code_reward_func": 0.4375000149011612,
1295
+ "rewards/len_reward_func": 0.4363410472869873,
1296
+ "step": 99
1297
+ },
1298
+ {
1299
+ "completion_length": 57.04166793823242,
1300
+ "epoch": 1.592,
1301
+ "grad_norm": 1.3955252614043234,
1302
+ "kl": 0.036865234375,
1303
+ "learning_rate": 5.0841360885690996e-08,
1304
+ "loss": 0.0,
1305
+ "reward": 1.1610032320022583,
1306
+ "reward_std": 0.3681405633687973,
1307
+ "rewards/correct_code_reward_func": 0.6666666865348816,
1308
+ "rewards/len_reward_func": 0.4943365752696991,
1309
+ "step": 100
1310
+ },
1311
+ {
1312
+ "completion_length": 71.41666984558105,
1313
+ "epoch": 1.608,
1314
+ "grad_norm": 0.9360155945219467,
1315
+ "kl": 0.055419921875,
1316
+ "learning_rate": 4.698543130728755e-08,
1317
+ "loss": 0.0001,
1318
+ "reward": 1.013959676027298,
1319
+ "reward_std": 0.39147183299064636,
1320
+ "rewards/correct_code_reward_func": 0.625,
1321
+ "rewards/len_reward_func": 0.3889596611261368,
1322
+ "step": 101
1323
+ },
1324
+ {
1325
+ "completion_length": 43.5625,
1326
+ "epoch": 1.624,
1327
+ "grad_norm": 1.620965651402336,
1328
+ "kl": 0.054443359375,
1329
+ "learning_rate": 4.326634763596784e-08,
1330
+ "loss": 0.0001,
1331
+ "reward": 0.8748385310173035,
1332
+ "reward_std": 0.3259390592575073,
1333
+ "rewards/correct_code_reward_func": 0.4166666679084301,
1334
+ "rewards/len_reward_func": 0.45817187428474426,
1335
+ "step": 102
1336
+ },
1337
+ {
1338
+ "completion_length": 54.125003814697266,
1339
+ "epoch": 1.6400000000000001,
1340
+ "grad_norm": 2.035180505859693,
1341
+ "kl": 0.056640625,
1342
+ "learning_rate": 3.968661679220467e-08,
1343
+ "loss": 0.0001,
1344
+ "reward": 1.2046028971672058,
1345
+ "reward_std": 0.4274601340293884,
1346
+ "rewards/correct_code_reward_func": 0.7708333730697632,
1347
+ "rewards/len_reward_func": 0.433769553899765,
1348
+ "step": 103
1349
+ },
1350
+ {
1351
+ "completion_length": 37.72916793823242,
1352
+ "epoch": 1.6560000000000001,
1353
+ "grad_norm": 1.3110502429273858,
1354
+ "kl": 0.07470703125,
1355
+ "learning_rate": 3.624865176299499e-08,
1356
+ "loss": 0.0001,
1357
+ "reward": 1.3125,
1358
+ "reward_std": 0.2893980145454407,
1359
+ "rewards/correct_code_reward_func": 0.8333333432674408,
1360
+ "rewards/len_reward_func": 0.4791666716337204,
1361
+ "step": 104
1362
+ },
1363
+ {
1364
+ "completion_length": 85.20833587646484,
1365
+ "epoch": 1.6720000000000002,
1366
+ "grad_norm": 0.7548512888732658,
1367
+ "kl": 0.0379638671875,
1368
+ "learning_rate": 3.295476997533905e-08,
1369
+ "loss": 0.0,
1370
+ "reward": 0.8693651854991913,
1371
+ "reward_std": 0.467289537191391,
1372
+ "rewards/correct_code_reward_func": 0.4166666865348816,
1373
+ "rewards/len_reward_func": 0.4526985138654709,
1374
+ "step": 105
1375
+ },
1376
+ {
1377
+ "completion_length": 60.72916793823242,
1378
+ "epoch": 1.688,
1379
+ "grad_norm": 0.7605372623815415,
1380
+ "kl": 0.03662109375,
1381
+ "learning_rate": 2.980719173413396e-08,
1382
+ "loss": 0.0,
1383
+ "reward": 1.058800995349884,
1384
+ "reward_std": 0.4301101863384247,
1385
+ "rewards/correct_code_reward_func": 0.5625000298023224,
1386
+ "rewards/len_reward_func": 0.49630098044872284,
1387
+ "step": 106
1388
+ },
1389
+ {
1390
+ "completion_length": 49.08333396911621,
1391
+ "epoch": 1.704,
1392
+ "grad_norm": 1.0207996418513696,
1393
+ "kl": 0.07421875,
1394
+ "learning_rate": 2.680803872553408e-08,
1395
+ "loss": 0.0001,
1396
+ "reward": 0.9703012108802795,
1397
+ "reward_std": 0.23036788403987885,
1398
+ "rewards/correct_code_reward_func": 0.5208333432674408,
1399
+ "rewards/len_reward_func": 0.44946780800819397,
1400
+ "step": 107
1401
+ },
1402
+ {
1403
+ "completion_length": 57.62500190734863,
1404
+ "epoch": 1.72,
1405
+ "grad_norm": 1.5035291383359601,
1406
+ "kl": 0.0594482421875,
1407
+ "learning_rate": 2.395933258678745e-08,
1408
+ "loss": 0.0001,
1409
+ "reward": 1.1501469016075134,
1410
+ "reward_std": 0.34404293447732925,
1411
+ "rewards/correct_code_reward_func": 0.7083333432674408,
1412
+ "rewards/len_reward_func": 0.44181351363658905,
1413
+ "step": 108
1414
+ },
1415
+ {
1416
+ "completion_length": 31.312501907348633,
1417
+ "epoch": 1.736,
1418
+ "grad_norm": 1.6167712080198857,
1419
+ "kl": 0.0712890625,
1420
+ "learning_rate": 2.1262993543511715e-08,
1421
+ "loss": 0.0001,
1422
+ "reward": 1.3109871745109558,
1423
+ "reward_std": 0.3160089999437332,
1424
+ "rewards/correct_code_reward_func": 0.875,
1425
+ "rewards/len_reward_func": 0.4359871447086334,
1426
+ "step": 109
1427
+ },
1428
+ {
1429
+ "completion_length": 87.22916793823242,
1430
+ "epoch": 1.752,
1431
+ "grad_norm": 1.2313006391246961,
1432
+ "kl": 0.0474853515625,
1433
+ "learning_rate": 1.872083911532907e-08,
1434
+ "loss": 0.0,
1435
+ "reward": 0.8871810734272003,
1436
+ "reward_std": 0.43447698652744293,
1437
+ "rewards/correct_code_reward_func": 0.4166666865348816,
1438
+ "rewards/len_reward_func": 0.4705143868923187,
1439
+ "step": 110
1440
+ },
1441
+ {
1442
+ "completion_length": 38.52083396911621,
1443
+ "epoch": 1.768,
1444
+ "grad_norm": 1.1333291307805236,
1445
+ "kl": 0.0531005859375,
1446
+ "learning_rate": 1.6334582890731697e-08,
1447
+ "loss": 0.0001,
1448
+ "reward": 1.3089049458503723,
1449
+ "reward_std": 0.39449335634708405,
1450
+ "rewards/correct_code_reward_func": 0.8541666865348816,
1451
+ "rewards/len_reward_func": 0.45473821461200714,
1452
+ "step": 111
1453
+ },
1454
+ {
1455
+ "completion_length": 99.85416984558105,
1456
+ "epoch": 1.784,
1457
+ "grad_norm": 1.1649171723265364,
1458
+ "kl": 0.04095458984375,
1459
+ "learning_rate": 1.4105833372004523e-08,
1460
+ "loss": 0.0,
1461
+ "reward": 0.8114411234855652,
1462
+ "reward_std": 0.3455437570810318,
1463
+ "rewards/correct_code_reward_func": 0.4166666716337204,
1464
+ "rewards/len_reward_func": 0.3947744071483612,
1465
+ "step": 112
1466
+ },
1467
+ {
1468
+ "completion_length": 62.97916793823242,
1469
+ "epoch": 1.8,
1470
+ "grad_norm": 1.3270422235967783,
1471
+ "kl": 0.042236328125,
1472
+ "learning_rate": 1.2036092890982619e-08,
1473
+ "loss": 0.0,
1474
+ "reward": 0.8704836964607239,
1475
+ "reward_std": 0.37642528116703033,
1476
+ "rewards/correct_code_reward_func": 0.3958333432674408,
1477
+ "rewards/len_reward_func": 0.4746503531932831,
1478
+ "step": 113
1479
+ },
1480
+ {
1481
+ "completion_length": 61.854169845581055,
1482
+ "epoch": 1.8159999999999998,
1483
+ "grad_norm": 1.47111563325612,
1484
+ "kl": 0.0511474609375,
1485
+ "learning_rate": 1.0126756596375685e-08,
1486
+ "loss": 0.0001,
1487
+ "reward": 1.0473325848579407,
1488
+ "reward_std": 0.5612048506736755,
1489
+ "rewards/correct_code_reward_func": 0.5833333730697632,
1490
+ "rewards/len_reward_func": 0.4639992117881775,
1491
+ "step": 114
1492
+ },
1493
+ {
1494
+ "completion_length": 44.22916793823242,
1495
+ "epoch": 1.8319999999999999,
1496
+ "grad_norm": 1.3437447624267829,
1497
+ "kl": 0.0599365234375,
1498
+ "learning_rate": 8.379111513340753e-09,
1499
+ "loss": 0.0001,
1500
+ "reward": 0.9941486120223999,
1501
+ "reward_std": 0.5003164112567902,
1502
+ "rewards/correct_code_reward_func": 0.5625000149011612,
1503
+ "rewards/len_reward_func": 0.4316485822200775,
1504
+ "step": 115
1505
+ },
1506
+ {
1507
+ "completion_length": 51.45833396911621,
1508
+ "epoch": 1.8479999999999999,
1509
+ "grad_norm": 1.3897204537445644,
1510
+ "kl": 0.0416259765625,
1511
+ "learning_rate": 6.7943356759381785e-09,
1512
+ "loss": 0.0,
1513
+ "reward": 1.0625000596046448,
1514
+ "reward_std": 0.4191845655441284,
1515
+ "rewards/correct_code_reward_func": 0.5833333730697632,
1516
+ "rewards/len_reward_func": 0.4791666716337204,
1517
+ "step": 116
1518
+ },
1519
+ {
1520
+ "completion_length": 64.45833587646484,
1521
+ "epoch": 1.8639999999999999,
1522
+ "grad_norm": 2.0280580958010224,
1523
+ "kl": 0.07666015625,
1524
+ "learning_rate": 5.373497333054616e-09,
1525
+ "loss": 0.0001,
1526
+ "reward": 0.9957386255264282,
1527
+ "reward_std": 0.49953845143318176,
1528
+ "rewards/correct_code_reward_func": 0.5,
1529
+ "rewards/len_reward_func": 0.4957386255264282,
1530
+ "step": 117
1531
+ },
1532
+ {
1533
+ "completion_length": 61.833335876464844,
1534
+ "epoch": 1.88,
1535
+ "grad_norm": 1.4575505392586763,
1536
+ "kl": 0.06689453125,
1537
+ "learning_rate": 4.117554228329406e-09,
1538
+ "loss": 0.0001,
1539
+ "reward": 1.2341400384902954,
1540
+ "reward_std": 0.472938671708107,
1541
+ "rewards/correct_code_reward_func": 0.8125000298023224,
1542
+ "rewards/len_reward_func": 0.421640083193779,
1543
+ "step": 118
1544
+ },
1545
+ {
1546
+ "completion_length": 54.43750286102295,
1547
+ "epoch": 1.896,
1548
+ "grad_norm": 1.6449474004844165,
1549
+ "kl": 0.0567626953125,
1550
+ "learning_rate": 3.0273529545687125e-09,
1551
+ "loss": 0.0001,
1552
+ "reward": 1.0164108276367188,
1553
+ "reward_std": 0.4117661267518997,
1554
+ "rewards/correct_code_reward_func": 0.5208333432674408,
1555
+ "rewards/len_reward_func": 0.49557754397392273,
1556
+ "step": 119
1557
+ },
1558
+ {
1559
+ "completion_length": 59.8125,
1560
+ "epoch": 1.912,
1561
+ "grad_norm": 1.2181441627902125,
1562
+ "kl": 0.052978515625,
1563
+ "learning_rate": 2.1036283830834224e-09,
1564
+ "loss": 0.0001,
1565
+ "reward": 1.1736499071121216,
1566
+ "reward_std": 0.41473129391670227,
1567
+ "rewards/correct_code_reward_func": 0.7083333432674408,
1568
+ "rewards/len_reward_func": 0.465316578745842,
1569
+ "step": 120
1570
+ },
1571
+ {
1572
+ "completion_length": 29.125001907348633,
1573
+ "epoch": 1.928,
1574
+ "grad_norm": 1.6515660045888947,
1575
+ "kl": 0.0712890625,
1576
+ "learning_rate": 1.347003168334665e-09,
1577
+ "loss": 0.0001,
1578
+ "reward": 1.2279411554336548,
1579
+ "reward_std": 0.212213896214962,
1580
+ "rewards/correct_code_reward_func": 0.7500000298023224,
1581
+ "rewards/len_reward_func": 0.477941170334816,
1582
+ "step": 121
1583
+ },
1584
+ {
1585
+ "completion_length": 40.29166793823242,
1586
+ "epoch": 1.944,
1587
+ "grad_norm": 2.3395660695095883,
1588
+ "kl": 0.0679931640625,
1589
+ "learning_rate": 7.579873282216598e-10,
1590
+ "loss": 0.0001,
1591
+ "reward": 0.9375000298023224,
1592
+ "reward_std": 0.37034808099269867,
1593
+ "rewards/correct_code_reward_func": 0.4583333432674408,
1594
+ "rewards/len_reward_func": 0.4791666716337204,
1595
+ "step": 122
1596
+ },
1597
+ {
1598
+ "completion_length": 70.83333396911621,
1599
+ "epoch": 1.96,
1600
+ "grad_norm": 1.119825823829922,
1601
+ "kl": 0.057861328125,
1602
+ "learning_rate": 3.3697790029424413e-10,
1603
+ "loss": 0.0001,
1604
+ "reward": 1.2083333730697632,
1605
+ "reward_std": 0.39485183358192444,
1606
+ "rewards/correct_code_reward_func": 0.7083333730697632,
1607
+ "rewards/len_reward_func": 0.5,
1608
+ "step": 123
1609
+ },
1610
+ {
1611
+ "completion_length": 52.750003814697266,
1612
+ "epoch": 1.976,
1613
+ "grad_norm": 2.4270573554119026,
1614
+ "kl": 0.0513916015625,
1615
+ "learning_rate": 8.425867412190091e-11,
1616
+ "loss": 0.0001,
1617
+ "reward": 1.1584753692150116,
1618
+ "reward_std": 0.37459391355514526,
1619
+ "rewards/correct_code_reward_func": 0.7083333432674408,
1620
+ "rewards/len_reward_func": 0.4501419961452484,
1621
+ "step": 124
1622
+ },
1623
+ {
1624
+ "completion_length": 95.9375,
1625
+ "epoch": 1.992,
1626
+ "grad_norm": 1.1107259696791534,
1627
+ "kl": 0.038330078125,
1628
+ "learning_rate": 0.0,
1629
+ "loss": 0.0,
1630
+ "reward": 1.0544261932373047,
1631
+ "reward_std": 0.4110799580812454,
1632
+ "rewards/correct_code_reward_func": 0.6041666865348816,
1633
+ "rewards/len_reward_func": 0.4502594470977783,
1634
+ "step": 125
1635
+ },
1636
+ {
1637
+ "epoch": 1.992,
1638
+ "step": 125,
1639
+ "total_flos": 0.0,
1640
+ "train_loss": 2.596636219443127e-05,
1641
+ "train_runtime": 4869.9523,
1642
+ "train_samples_per_second": 0.154,
1643
+ "train_steps_per_second": 0.026
1644
+ }
1645
+ ],
1646
+ "logging_steps": 1,
1647
+ "max_steps": 125,
1648
+ "num_input_tokens_seen": 0,
1649
+ "num_train_epochs": 3,
1650
+ "save_steps": 25,
1651
+ "stateful_callbacks": {
1652
+ "TrainerControl": {
1653
+ "args": {
1654
+ "should_epoch_stop": false,
1655
+ "should_evaluate": false,
1656
+ "should_log": false,
1657
+ "should_save": true,
1658
+ "should_training_stop": true
1659
+ },
1660
+ "attributes": {}
1661
+ }
1662
+ },
1663
+ "total_flos": 0.0,
1664
+ "train_batch_size": 1,
1665
+ "trial_name": null,
1666
+ "trial_params": null
1667
+ }