Jukaboo committed on
Commit 21a1809 · 1 Parent(s): 244bd48

Upload 13 files

README.md ADDED
@@ -0,0 +1,34 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+
+ ### Framework versions
+
+ - PEFT 0.4.0
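
For reference, the quantization config above maps directly onto the `transformers` API. A minimal sketch of the likely QLoRA-style load, assuming the base model id named in adapter_config.json below; only the values listed in the card are taken as given:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the bitsandbytes values recorded in the model card above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Base model id comes from adapter_config.json in this same commit.
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",
    quantization_config=bnb_config,
    device_map="auto",
)
```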
adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
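
This is a standard LoRA setup: rank 8, alpha 32, dropout 0.05, applied to the attention query/value projections. A sketch of recreating the config and attaching the uploaded adapter with `peft`; the repo path is a hypothetical placeholder, not a name taken from this commit:

```python
from peft import LoraConfig, PeftModel

# Equivalent LoraConfig, mirroring adapter_config.json above
# (useful for re-running the training from scratch).
lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)

# For inference, attach the saved adapter weights to the quantized base
# model loaded earlier; "path/to/this/repo" is a placeholder.
model = PeftModel.from_pretrained(model, "path/to/this/repo")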
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7de667110bc25195b5295b002a18db9a16257329155a94d36eccf142a06adc98
+ size 16823434
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4906f2772d341e44fd77d06b83c6bf82ec89ddc242ad469112ff39e24d7a8080
+ size 16794200
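
The commit uploads the same adapter weights in two serializations: a pickle-based `.bin` and a `safetensors` file; the latter is generally preferred since loading it cannot execute arbitrary code. A small inspection sketch, assuming the `safetensors` package is installed:

```python
from safetensors.torch import load_file

# Loads the LoRA A/B matrices as a flat dict of tensors.
weights = load_file("adapter_model.safetensors")
for name, tensor in sorted(weights.items())[:4]:
    print(name, tuple(tensor.shape))
```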
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de10babd43e9f8e4e386d64b523ff769001361d1a7e969501aa64ca585cb6878
+ size 8556090
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c84fd65e9e48a2aaba56eb31f1c7ff721aec3e1d3a68505905e0e8048c884a3e
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2398e2f77cd71aef986dd87578cc135e4c8437f8ee57ac4403039b04b09c9f95
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
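
Note that `pad_token` reuses the EOS token `</s>`: the Llama-2 tokenizer ships without a pad token, so pointing padding at EOS is a common workaround for batched fine-tuning. A one-line sketch of the same setup done in code:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # "</s>", as in this repo
```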
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": true
+ }
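
The `chat_template` field encodes the Llama-2 `[INST]`/`<<SYS>>` prompt format. On a transformers version recent enough to read this field (roughly 4.34+), it can be applied directly; a sketch with a placeholder repo path:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # hypothetical path

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# Renders "<s>[INST] <<SYS>>\n...\n<</SYS>>\n\nHello! [/INST]" per the template.
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
```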
trainer_state.json ADDED
@@ -0,0 +1,815 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.40216550657385924,
+ "eval_steps": 65,
+ "global_step": 130,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.1764705882352942e-05,
+ "loss": 2.7493,
+ "step": 1
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.3529411764705884e-05,
+ "loss": 2.6209,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.529411764705883e-05,
+ "loss": 2.6323,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.705882352941177e-05,
+ "loss": 2.7371,
+ "step": 4
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.882352941176471e-05,
+ "loss": 2.7399,
+ "step": 5
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.058823529411765e-05,
+ "loss": 2.6775,
+ "step": 6
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 8.23529411764706e-05,
+ "loss": 2.6392,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 9.411764705882353e-05,
+ "loss": 2.6636,
+ "step": 8
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 0.00010588235294117647,
+ "loss": 2.6269,
+ "step": 9
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 0.00011764705882352942,
+ "loss": 2.6557,
+ "step": 10
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 0.00012941176470588237,
+ "loss": 2.5117,
+ "step": 11
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 0.0001411764705882353,
+ "loss": 2.54,
+ "step": 12
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 0.00015294117647058822,
+ "loss": 2.4645,
+ "step": 13
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 0.0001647058823529412,
+ "loss": 2.4728,
+ "step": 14
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 0.00017647058823529413,
+ "loss": 2.5242,
+ "step": 15
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 0.00018823529411764707,
+ "loss": 2.4592,
+ "step": 16
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 0.0002,
+ "loss": 2.364,
+ "step": 17
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 0.00019999472984871732,
+ "loss": 2.3955,
+ "step": 18
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 0.00019997891995035912,
+ "loss": 2.3193,
+ "step": 19
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 0.0001999525719713366,
+ "loss": 2.3536,
+ "step": 20
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 0.0001999156886888064,
+ "loss": 2.4059,
+ "step": 21
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.00019986827399037812,
+ "loss": 2.4518,
+ "step": 22
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.00019981033287370443,
+ "loss": 2.3709,
+ "step": 23
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.00019974187144595432,
+ "loss": 2.4401,
+ "step": 24
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 0.00019966289692316944,
+ "loss": 2.3015,
+ "step": 25
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 0.00019957341762950344,
+ "loss": 2.3688,
+ "step": 26
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 0.00019947344299634464,
+ "loss": 2.2737,
+ "step": 27
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 0.00019936298356132176,
+ "loss": 2.2844,
+ "step": 28
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 0.0001992420509671936,
+ "loss": 2.2693,
+ "step": 29
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 0.00019911065796062135,
+ "loss": 2.2691,
+ "step": 30
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00019896881839082556,
+ "loss": 2.2378,
+ "step": 31
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00019881654720812594,
+ "loss": 2.1501,
+ "step": 32
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00019865386046236596,
+ "loss": 2.0803,
+ "step": 33
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 0.00019848077530122083,
+ "loss": 2.1133,
+ "step": 34
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 0.0001982973099683902,
+ "loss": 2.0154,
+ "step": 35
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 0.00019810348380167527,
+ "loss": 1.962,
+ "step": 36
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 0.00019789931723094046,
+ "loss": 1.9794,
+ "step": 37
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.0001976848317759601,
+ "loss": 2.017,
+ "step": 38
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.00019746005004415005,
+ "loss": 1.9816,
+ "step": 39
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.00019722499572818496,
+ "loss": 1.9713,
+ "step": 40
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 0.00019697969360350098,
+ "loss": 1.9338,
+ "step": 41
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 0.00019672416952568416,
+ "loss": 1.983,
+ "step": 42
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 0.00019645845042774553,
+ "loss": 1.821,
+ "step": 43
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.00019618256431728194,
+ "loss": 1.9501,
+ "step": 44
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.00019589654027352414,
+ "loss": 1.9909,
+ "step": 45
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.0001956004084442718,
+ "loss": 1.8494,
+ "step": 46
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.00019529420004271567,
+ "loss": 1.8093,
+ "step": 47
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.0001949779473441478,
+ "loss": 1.7693,
+ "step": 48
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.00019465168368255946,
+ "loss": 1.7823,
+ "step": 49
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.00019431544344712776,
+ "loss": 1.7239,
+ "step": 50
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 0.00019396926207859084,
+ "loss": 2.2562,
+ "step": 51
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 0.00019361317606551238,
+ "loss": 2.201,
+ "step": 52
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 0.00019324722294043558,
+ "loss": 2.2162,
+ "step": 53
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.00019287144127592704,
+ "loss": 2.2388,
+ "step": 54
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0001924858706805112,
+ "loss": 2.2686,
+ "step": 55
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0001920905517944954,
+ "loss": 2.213,
+ "step": 56
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 0.00019168552628568631,
+ "loss": 2.1732,
+ "step": 57
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 0.00019127083684499806,
+ "loss": 2.2206,
+ "step": 58
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 0.00019084652718195238,
+ "loss": 2.0658,
+ "step": 59
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 0.0001904126420200716,
+ "loss": 2.1432,
+ "step": 60
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 0.00018996922709216455,
+ "loss": 2.0427,
+ "step": 61
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 0.00018951632913550626,
+ "loss": 2.1674,
+ "step": 62
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 0.00018905399588691163,
+ "loss": 2.1218,
+ "step": 63
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.00018858227607770398,
+ "loss": 2.1721,
+ "step": 64
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.00018810121942857845,
+ "loss": 2.1525,
+ "step": 65
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 2.053250789642334,
+ "eval_runtime": 25.1667,
+ "eval_samples_per_second": 3.457,
+ "eval_steps_per_second": 0.437,
+ "step": 65
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.00018761087664436138,
+ "loss": 2.1132,
+ "step": 66
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.00018711129940866575,
+ "loss": 2.127,
+ "step": 67
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.00018660254037844388,
+ "loss": 2.1563,
+ "step": 68
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.00018608465317843678,
+ "loss": 2.0825,
+ "step": 69
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.00018555769239552233,
+ "loss": 2.1625,
+ "step": 70
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.00018502171357296144,
+ "loss": 2.1801,
+ "step": 71
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.00018447677320454367,
+ "loss": 2.1931,
+ "step": 72
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 0.00018392292872863267,
+ "loss": 2.1354,
+ "step": 73
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 0.00018336023852211195,
+ "loss": 2.2592,
+ "step": 74
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 0.00018278876189423179,
+ "loss": 2.1984,
+ "step": 75
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.00018220855908035783,
+ "loss": 2.1576,
+ "step": 76
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.0001816196912356222,
+ "loss": 2.158,
+ "step": 77
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.00018102222042847737,
+ "loss": 2.1722,
+ "step": 78
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.00018041620963415417,
+ "loss": 2.144,
+ "step": 79
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 0.000179801722728024,
+ "loss": 2.1204,
+ "step": 80
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 0.00017917882447886582,
+ "loss": 2.1186,
+ "step": 81
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 0.00017854758054203988,
+ "loss": 2.1699,
+ "step": 82
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.00017790805745256704,
+ "loss": 2.0864,
+ "step": 83
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0001772603226181159,
+ "loss": 2.1616,
+ "step": 84
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0001766044443118978,
+ "loss": 2.0011,
+ "step": 85
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 0.00017594049166547073,
+ "loss": 1.9033,
+ "step": 86
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 0.00017526853466145244,
+ "loss": 1.8434,
+ "step": 87
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 0.00017458864412614434,
+ "loss": 1.9592,
+ "step": 88
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.00017390089172206592,
+ "loss": 1.892,
+ "step": 89
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.00017320534994040148,
+ "loss": 1.8285,
+ "step": 90
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.00017250209209335927,
+ "loss": 1.8733,
+ "step": 91
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.0001717911923064442,
+ "loss": 1.8142,
+ "step": 92
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.00017107272551064473,
+ "loss": 1.7412,
+ "step": 93
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.00017034676743453499,
+ "loss": 1.721,
+ "step": 94
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.0001696133945962927,
+ "loss": 1.6415,
+ "step": 95
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 0.0001688726842956339,
+ "loss": 1.7964,
+ "step": 96
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 0.0001681247146056654,
+ "loss": 1.5657,
+ "step": 97
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 0.00016736956436465573,
+ "loss": 1.6301,
+ "step": 98
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.00016660731316772505,
+ "loss": 1.738,
+ "step": 99
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.0001658380413584558,
+ "loss": 1.5882,
+ "step": 100
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.0001650618300204242,
+ "loss": 2.1436,
+ "step": 101
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.00016427876096865394,
+ "loss": 2.1338,
+ "step": 102
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.0001634889167409923,
+ "loss": 2.1798,
+ "step": 103
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.0001626923805894107,
+ "loss": 2.1958,
+ "step": 104
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.00016188923647122947,
+ "loss": 2.0973,
+ "step": 105
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 0.0001610795690402688,
+ "loss": 2.2179,
+ "step": 106
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 0.00016026346363792567,
+ "loss": 2.1803,
+ "step": 107
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 0.00015944100628417868,
+ "loss": 2.1251,
+ "step": 108
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 0.00015861228366852148,
+ "loss": 2.0697,
+ "step": 109
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 0.00015777738314082514,
+ "loss": 2.1803,
+ "step": 110
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 0.00015693639270213136,
+ "loss": 2.112,
+ "step": 111
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.000156089400995377,
+ "loss": 2.1563,
+ "step": 112
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.0001552364972960506,
+ "loss": 2.1587,
+ "step": 113
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.00015437777150278267,
+ "loss": 2.1726,
+ "step": 114
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.00015351331412787004,
+ "loss": 2.0995,
+ "step": 115
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.0001526432162877356,
+ "loss": 2.0481,
+ "step": 116
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.00015176756969332425,
+ "loss": 2.0404,
+ "step": 117
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.0001508864666404365,
+ "loss": 2.1471,
+ "step": 118
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 2.1167,
+ "step": 119
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.00014910826320828084,
+ "loss": 2.1705,
+ "step": 120
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.0001482113502570349,
+ "loss": 2.0837,
+ "step": 121
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.00014730935568360102,
+ "loss": 2.1088,
+ "step": 122
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.00014640237456093634,
+ "loss": 2.046,
+ "step": 123
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.00014549050248759547,
+ "loss": 2.1104,
+ "step": 124
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.00014457383557765386,
+ "loss": 2.0926,
+ "step": 125
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.00014365247045057734,
+ "loss": 2.094,
+ "step": 126
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.0001427265042210381,
+ "loss": 2.0852,
+ "step": 127
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 0.00014179603448867835,
+ "loss": 2.0501,
+ "step": 128
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 0.00014086115932782314,
+ "loss": 2.1239,
+ "step": 129
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 0.0001399219772771431,
+ "loss": 2.0143,
+ "step": 130
+ },
+ {
+ "epoch": 0.4,
+ "eval_loss": 1.9829031229019165,
+ "eval_runtime": 25.1483,
+ "eval_samples_per_second": 3.459,
+ "eval_steps_per_second": 0.437,
+ "step": 130
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 323,
+ "num_train_epochs": 1,
+ "save_steps": 65,
+ "total_flos": 7.631867476426752e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
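
The state records a run stopped at step 130 of 323 (about 0.4 epochs), with checkpoints and evals every 65 steps; eval loss improved from about 2.053 at step 65 to about 1.983 at step 130. A small sketch for pulling those curves out of this file:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Train entries carry "loss"; eval entries carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"trained {state['global_step']}/{state['max_steps']} steps")
print("eval loss by step:", evals)
```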
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2494663c7f0e92a4fb2f2e93a0f9ede601832248f8173403d06dc46cd2aa0dad
+ size 4536
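
`training_args.bin` is the pickled `TrainingArguments` object the HF `Trainer` saves alongside checkpoints. A hedged inspection sketch: unpickling needs `transformers` importable, and newer torch versions require `weights_only=False` to be passed explicitly:

```python
import torch

# Recover the full hyperparameter set used for this run.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.max_steps, args.save_steps)
```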