IEIT-Yuan committed (verified)
Commit b2fd15d · 1 Parent(s): a20c679

Upload 19 files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "word_embedding_dimension": 1024,
+ "pooling_mode_cls_token": false,
+ "pooling_mode_mean_tokens": false,
+ "pooling_mode_max_tokens": false,
+ "pooling_mode_mean_sqrt_len_tokens": false,
+ "pooling_mode_weightedmean_tokens": false,
+ "pooling_mode_lasttoken": true,
+ "include_prompt": true
+ }
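
This pooling config enables only pooling_mode_lasttoken, so the 1024-dimensional sentence embedding is the hidden state of the last non-padding token rather than a mean- or CLS-pooled vector (include_prompt: true keeps instruction tokens in the pooled span). A minimal sketch of that operation outside sentence-transformers, assuming right padding and an attention mask; the function and variable names are illustrative, not part of this repo:

import torch
import torch.nn.functional as F

def last_token_pool(last_hidden_state: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # last_hidden_state: [batch, seq_len, 1024]; attention_mask: [batch, seq_len]
    # Index of the last non-padding token per sequence (right padding assumed).
    last_idx = (attention_mask.sum(dim=1) - 1).long()
    batch_idx = torch.arange(last_hidden_state.size(0), device=last_hidden_state.device)
    emb = last_hidden_state[batch_idx, last_idx]   # [batch, 1024]
    return F.normalize(emb, p=2, dim=1)            # L2-normalize, as the final Normalize module does
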
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "</think>": 151668,
+ "</tool_call>": 151658,
+ "</tool_response>": 151666,
+ "<think>": 151667,
+ "<tool_call>": 151657,
+ "<tool_response>": 151665,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
args.json ADDED
@@ -0,0 +1,385 @@
1
+ {
2
+ "output_dir": "/mnt/beegfs3/liying/zhangfanhao/output1125/v1-20251125-231025",
3
+ "overwrite_output_dir": false,
4
+ "do_train": false,
5
+ "do_eval": false,
6
+ "do_predict": false,
7
+ "eval_strategy": "steps",
8
+ "prediction_loss_only": false,
9
+ "per_device_train_batch_size": 4,
10
+ "per_device_eval_batch_size": 4,
11
+ "per_gpu_train_batch_size": null,
12
+ "per_gpu_eval_batch_size": null,
13
+ "gradient_accumulation_steps": 4,
14
+ "eval_accumulation_steps": null,
15
+ "eval_delay": 0,
16
+ "torch_empty_cache_steps": null,
17
+ "learning_rate": 6e-06,
18
+ "weight_decay": 0.1,
19
+ "adam_beta1": 0.9,
20
+ "adam_beta2": 0.95,
21
+ "adam_epsilon": 1e-08,
22
+ "max_grad_norm": 1.0,
23
+ "num_train_epochs": 5.0,
24
+ "max_steps": -1,
25
+ "lr_scheduler_type": "cosine",
26
+ "lr_scheduler_kwargs": null,
27
+ "warmup_ratio": 0.0,
28
+ "warmup_steps": 0,
29
+ "log_level": "passive",
30
+ "log_level_replica": "warning",
31
+ "log_on_each_node": true,
32
+ "logging_dir": "/mnt/beegfs3/liying/zhangfanhao/output1125/v1-20251125-231025/runs",
33
+ "logging_strategy": "steps",
34
+ "logging_first_step": true,
35
+ "logging_steps": 5,
36
+ "logging_nan_inf_filter": true,
37
+ "save_strategy": "steps",
38
+ "save_steps": 100.0,
39
+ "save_total_limit": null,
40
+ "save_safetensors": true,
41
+ "save_on_each_node": false,
42
+ "save_only_model": false,
43
+ "restore_callback_states_from_checkpoint": false,
44
+ "no_cuda": false,
45
+ "use_cpu": false,
46
+ "use_mps_device": false,
47
+ "seed": 42,
48
+ "data_seed": 42,
49
+ "jit_mode_eval": false,
50
+ "bf16": true,
51
+ "fp16": false,
52
+ "fp16_opt_level": "O1",
53
+ "half_precision_backend": "auto",
54
+ "bf16_full_eval": false,
55
+ "fp16_full_eval": false,
56
+ "tf32": null,
57
+ "local_rank": 0,
58
+ "ddp_backend": null,
59
+ "tpu_num_cores": null,
60
+ "tpu_metrics_debug": false,
61
+ "debug": null,
62
+ "dataloader_drop_last": true,
63
+ "eval_steps": 100.0,
64
+ "dataloader_num_workers": null,
65
+ "dataloader_prefetch_factor": null,
66
+ "past_index": -1,
67
+ "run_name": "/mnt/beegfs3/liying/zhangfanhao/output1125/v1-20251125-231025",
68
+ "disable_tqdm": null,
69
+ "remove_unused_columns": true,
70
+ "label_names": [
71
+ "labels"
72
+ ],
73
+ "load_best_model_at_end": false,
74
+ "metric_for_best_model": "loss",
75
+ "greater_is_better": false,
76
+ "ignore_data_skip": false,
77
+ "fsdp": null,
78
+ "fsdp_min_num_params": 0,
79
+ "fsdp_config": null,
80
+ "fsdp_transformer_layer_cls_to_wrap": null,
81
+ "accelerator_config": {
82
+ "dispatch_batches": false
83
+ },
84
+ "parallelism_config": null,
85
+ "deepspeed": {
86
+ "fp16": {
87
+ "enabled": "auto",
88
+ "loss_scale": 0,
89
+ "loss_scale_window": 1000,
90
+ "initial_scale_power": 16,
91
+ "hysteresis": 2,
92
+ "min_loss_scale": 1
93
+ },
94
+ "bf16": {
95
+ "enabled": "auto"
96
+ },
97
+ "zero_optimization": {
98
+ "stage": 3,
99
+ "offload_optimizer": {
100
+ "device": "none",
101
+ "pin_memory": true
102
+ },
103
+ "offload_param": {
104
+ "device": "none",
105
+ "pin_memory": true
106
+ },
107
+ "overlap_comm": false,
108
+ "contiguous_gradients": true,
109
+ "sub_group_size": 1000000000.0,
110
+ "reduce_bucket_size": "auto",
111
+ "zero_quantized_weights": false,
112
+ "zero_quantized_gradients": false,
113
+ "stage3_prefetch_bucket_size": "auto",
114
+ "stage3_param_persistence_threshold": "auto",
115
+ "stage3_max_live_parameters": 1000000000.0,
116
+ "stage3_max_reuse_distance": 1000000000.0,
117
+ "stage3_gather_16bit_weights_on_model_save": true
118
+ },
119
+ "gradient_accumulation_steps": "auto",
120
+ "gradient_clipping": "auto",
121
+ "steps_per_print": 2000,
122
+ "train_batch_size": "auto",
123
+ "train_micro_batch_size_per_gpu": "auto",
124
+ "wall_clock_breakdown": false
125
+ },
126
+ "label_smoothing_factor": 0.0,
127
+ "optim": "adamw_torch",
128
+ "optim_args": null,
129
+ "adafactor": false,
130
+ "group_by_length": false,
131
+ "length_column_name": "length",
132
+ "report_to": [
133
+ "tensorboard"
134
+ ],
135
+ "project": "huggingface",
136
+ "trackio_space_id": "trackio",
137
+ "ddp_find_unused_parameters": null,
138
+ "ddp_bucket_cap_mb": null,
139
+ "ddp_broadcast_buffers": null,
140
+ "dataloader_pin_memory": true,
141
+ "dataloader_persistent_workers": false,
142
+ "skip_memory_metrics": true,
143
+ "use_legacy_prediction_loop": false,
144
+ "push_to_hub": false,
145
+ "resume_from_checkpoint": null,
146
+ "hub_model_id": null,
147
+ "hub_strategy": "every_save",
148
+ "hub_token": null,
149
+ "hub_private_repo": null,
150
+ "hub_always_push": false,
151
+ "hub_revision": null,
152
+ "gradient_checkpointing": true,
153
+ "gradient_checkpointing_kwargs": null,
154
+ "include_inputs_for_metrics": false,
155
+ "include_for_metrics": [],
156
+ "eval_do_concat_batches": true,
157
+ "fp16_backend": "auto",
158
+ "push_to_hub_model_id": null,
159
+ "push_to_hub_organization": null,
160
+ "push_to_hub_token": null,
161
+ "mp_parameters": "",
162
+ "auto_find_batch_size": false,
163
+ "full_determinism": false,
164
+ "torchdynamo": null,
165
+ "ray_scope": "last",
166
+ "ddp_timeout": 18000000,
167
+ "torch_compile": false,
168
+ "torch_compile_backend": null,
169
+ "torch_compile_mode": null,
170
+ "include_tokens_per_second": false,
171
+ "include_num_input_tokens_seen": false,
172
+ "neftune_noise_alpha": null,
173
+ "optim_target_modules": null,
174
+ "batch_eval_metrics": false,
175
+ "eval_on_start": false,
176
+ "use_liger_kernel": false,
177
+ "liger_kernel_config": null,
178
+ "eval_use_gather_object": false,
179
+ "average_tokens_across_devices": true,
180
+ "sortish_sampler": false,
181
+ "predict_with_generate": false,
182
+ "generation_max_length": null,
183
+ "generation_num_beams": null,
184
+ "generation_config": null,
185
+ "tuner_backend": "peft",
186
+ "vit_gradient_checkpointing": null,
187
+ "router_aux_loss_coef": 0.0,
188
+ "enable_dft_loss": false,
189
+ "enable_channel_loss": false,
190
+ "check_model": true,
191
+ "acc_strategy": "token",
192
+ "train_dataloader_shuffle": true,
193
+ "max_epochs": null,
194
+ "aligner_lr": null,
195
+ "vit_lr": null,
196
+ "use_logits_to_keep": null,
197
+ "ds3_gather_for_generation": true,
198
+ "resume_only_model": false,
199
+ "optimizer": null,
200
+ "loss_type": "infonce",
201
+ "metric": null,
202
+ "eval_use_evalscope": false,
203
+ "eval_dataset": [],
204
+ "eval_dataset_args": null,
205
+ "eval_limit": null,
206
+ "eval_generation_config": null,
207
+ "extra_eval_args": null,
208
+ "use_flash_ckpt": false,
209
+ "model": "/mnt/beegfs3/liying/zhangfanhao/Qwen3-Embedding-0.6B",
210
+ "model_type": "qwen3_emb",
211
+ "model_revision": null,
212
+ "task_type": "embedding",
213
+ "torch_dtype": "bfloat16",
214
+ "attn_impl": null,
215
+ "new_special_tokens": [],
216
+ "num_labels": null,
217
+ "problem_type": null,
218
+ "rope_scaling": null,
219
+ "device_map": null,
220
+ "max_memory": {},
221
+ "max_model_len": null,
222
+ "local_repo_path": null,
223
+ "init_strategy": null,
224
+ "template": "qwen3_emb",
225
+ "system": null,
226
+ "max_length": 2048,
227
+ "truncation_strategy": "left",
228
+ "max_pixels": null,
229
+ "agent_template": null,
230
+ "norm_bbox": null,
231
+ "use_chat_template": true,
232
+ "padding_free": false,
233
+ "padding_side": "right",
234
+ "loss_scale": "default",
235
+ "sequence_parallel_size": 1,
236
+ "response_prefix": null,
237
+ "template_backend": "swift",
238
+ "dataset": [
239
+ "/mnt/beegfs3/liying/zhangfanhao/testdata_final_all_1126.jsonl"
240
+ ],
241
+ "val_dataset": [],
242
+ "split_dataset_ratio": 0.05,
243
+ "dataset_num_proc": 32,
244
+ "load_from_cache_file": false,
245
+ "dataset_shuffle": true,
246
+ "val_dataset_shuffle": false,
247
+ "streaming": false,
248
+ "interleave_prob": null,
249
+ "stopping_strategy": "first_exhausted",
250
+ "shuffle_buffer_size": 1000,
251
+ "download_mode": "reuse_dataset_if_exists",
252
+ "columns": {},
253
+ "strict": false,
254
+ "model_name": null,
255
+ "model_author": null,
256
+ "custom_dataset_info": [],
257
+ "quant_method": null,
258
+ "quant_bits": null,
259
+ "hqq_axis": null,
260
+ "bnb_4bit_compute_dtype": "bfloat16",
261
+ "bnb_4bit_quant_type": "nf4",
262
+ "bnb_4bit_use_double_quant": true,
263
+ "bnb_4bit_quant_storage": null,
264
+ "max_new_tokens": 64,
265
+ "temperature": 0.0,
266
+ "top_k": null,
267
+ "top_p": null,
268
+ "repetition_penalty": null,
269
+ "num_beams": 1,
270
+ "stream": false,
271
+ "stop_words": [],
272
+ "logprobs": false,
273
+ "top_logprobs": null,
274
+ "ckpt_dir": null,
275
+ "lora_modules": [],
276
+ "train_type": "full",
277
+ "adapters": [],
278
+ "external_plugins": [],
279
+ "model_kwargs": {},
280
+ "load_args": false,
281
+ "load_data_args": false,
282
+ "packing": false,
283
+ "packing_length": null,
284
+ "lazy_tokenize": false,
285
+ "cached_dataset": [],
286
+ "custom_register_path": [],
287
+ "use_hf": false,
288
+ "ignore_args_error": false,
289
+ "use_swift_lora": false,
290
+ "freeze_parameters": [],
291
+ "freeze_parameters_regex": null,
292
+ "freeze_parameters_ratio": 0.0,
293
+ "trainable_parameters": [],
294
+ "trainable_parameters_regex": null,
295
+ "freeze_llm": false,
296
+ "freeze_vit": true,
297
+ "freeze_aligner": true,
298
+ "target_modules": [
299
+ "all-linear"
300
+ ],
301
+ "target_regex": null,
302
+ "target_parameters": null,
303
+ "modules_to_save": [],
304
+ "lora_rank": 8,
305
+ "lora_alpha": 32,
306
+ "lora_dropout": 0.05,
307
+ "lora_bias": "none",
308
+ "lora_dtype": null,
309
+ "lorap_lr_ratio": null,
310
+ "use_rslora": false,
311
+ "use_dora": false,
312
+ "lora_ga_batch_size": 2,
313
+ "lora_ga_iters": 2,
314
+ "lora_ga_max_length": 1024,
315
+ "lora_ga_direction": "ArB2r",
316
+ "lora_ga_scale": "stable",
317
+ "lora_ga_stable_gamma": 16,
318
+ "init_weights": true,
319
+ "fourier_n_frequency": 2000,
320
+ "fourier_scaling": 300.0,
321
+ "boft_block_size": 4,
322
+ "boft_block_num": 0,
323
+ "boft_n_butterfly_factor": 1,
324
+ "boft_dropout": 0.0,
325
+ "vera_rank": 256,
326
+ "vera_projection_prng_key": 0,
327
+ "vera_dropout": 0.0,
328
+ "vera_d_initial": 0.1,
329
+ "adapter_act": "gelu",
330
+ "adapter_length": 128,
331
+ "use_galore": false,
332
+ "galore_target_modules": null,
333
+ "galore_rank": 128,
334
+ "galore_update_proj_gap": 50,
335
+ "galore_scale": 1.0,
336
+ "galore_proj_type": "std",
337
+ "galore_optim_per_parameter": false,
338
+ "galore_with_embedding": false,
339
+ "galore_quantization": false,
340
+ "galore_proj_quant": false,
341
+ "galore_proj_bits": 4,
342
+ "galore_proj_group_size": 256,
343
+ "galore_cos_threshold": 0.4,
344
+ "galore_gamma_proj": 2,
345
+ "galore_queue_size": 5,
346
+ "adalora_target_r": 8,
347
+ "adalora_init_r": 12,
348
+ "adalora_tinit": 0,
349
+ "adalora_tfinal": 0,
350
+ "adalora_deltaT": 1,
351
+ "adalora_beta1": 0.85,
352
+ "adalora_beta2": 0.85,
353
+ "adalora_orth_reg_weight": 0.5,
354
+ "llamapro_num_new_blocks": 4,
355
+ "llamapro_num_groups": null,
356
+ "lisa_activated_layers": 0,
357
+ "lisa_step_interval": 20,
358
+ "reft_layer_key": null,
359
+ "reft_layers": null,
360
+ "reft_rank": 4,
361
+ "reft_intervention_type": "LoreftIntervention",
362
+ "reft_args": null,
363
+ "swanlab_token": null,
364
+ "swanlab_project": null,
365
+ "swanlab_workspace": null,
366
+ "swanlab_exp_name": null,
367
+ "swanlab_lark_webhook_url": null,
368
+ "swanlab_lark_secret": null,
369
+ "swanlab_mode": "cloud",
370
+ "add_version": true,
371
+ "create_checkpoint_symlink": false,
372
+ "zero_hpz_partition_size": null,
373
+ "deepspeed_autotp_size": null,
374
+ "early_stop_interval": null,
375
+ "rank": 0,
376
+ "global_world_size": 16,
377
+ "local_world_size": 8,
378
+ "model_suffix": "Qwen3-Embedding-0.6B",
379
+ "model_info": "ModelInfo(model_type='qwen3_emb', model_dir='/mnt/beegfs3/liying/zhangfanhao/Qwen3-Embedding-0.6B', torch_dtype=torch.bfloat16, max_model_len=32768, quant_method=None, quant_bits=None, rope_scaling=None, is_moe_model=False, config=None, task_type='embedding', num_labels=None)",
380
+ "model_meta": "ModelMeta(model_type='qwen3_emb', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen3-Embedding-0.6B', hf_model_id='Qwen/Qwen3-Embedding-0.6B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-Embedding-4B', hf_model_id='Qwen/Qwen3-Embedding-4B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-Embedding-8B', hf_model_id='Qwen/Qwen3-Embedding-8B', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen3_emb', get_function=<function get_model_tokenizer_with_flash_attn at 0x7fa2e318c4a0>, model_arch=None, architectures=['Qwen3ForCausalLM'], additional_saved_files=['config_sentence_transformers.json', '1_Pooling', 'modules.json'], torch_dtype=None, is_multimodal=False, is_reward=False, task_type=None, ignore_patterns=None, requires=[], tags=[])",
381
+ "model_dir": "/mnt/beegfs3/liying/zhangfanhao/Qwen3-Embedding-0.6B",
382
+ "hub": "<class 'swift.hub.hub.MSHub'>",
383
+ "evaluation_strategy": "steps",
384
+ "training_args": "TrainingArguments(output_dir='/mnt/beegfs3/liying/zhangfanhao/output1125/v1-20251125-231025', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=4, per_device_eval_batch_size=4, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=4, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=6e-06, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=5.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.0, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/beegfs3/liying/zhangfanhao/output1125/v1-20251125-231025/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=100, save_total_limit=None, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=True, eval_steps=100, dataloader_num_workers=1, dataloader_prefetch_factor=10, past_index=-1, run_name='/mnt/beegfs3/liying/zhangfanhao/output1125/v1-20251125-231025', disable_tqdm=False, remove_unused_columns=False, label_names=['labels'], load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), parallelism_config=None, deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], project='huggingface', trackio_space_id='trackio', ddp_find_unused_parameters=None, 
ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, hub_revision=None, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, liger_kernel_config=None, eval_use_gather_object=False, average_tokens_across_devices=None, tuner_backend='peft', vit_gradient_checkpointing=True, router_aux_loss_coef=0.0, enable_dft_loss=False, enable_channel_loss=False, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, use_logits_to_keep=None, ds3_gather_for_generation=True, resume_only_model=False, optimizer=None, loss_type='infonce', metric=None, eval_use_evalscope=False, eval_dataset=[], eval_dataset_args=None, eval_limit=None, eval_generation_config=None, extra_eval_args=None, use_flash_ckpt=False, sft_alpha=0, chord_sft_dataset=[], chord_sft_per_device_train_batch_size=None, chord_enable_phi_function=False, chord_mu_warmup_steps=None, chord_mu_decay_steps=None, chord_mu_peak=None, chord_mu_valley=None, train_type='full', local_repo_path=None, galore_config=None, padding_side='right', padding_free=False, task_type='embedding')"
385
+ }
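
For reference, the effective global batch size of this run works out to per_device_train_batch_size × gradient_accumulation_steps × global_world_size = 4 × 4 × 16 = 256 examples per optimizer step; training uses the InfoNCE loss ("loss_type": "infonce") with full-parameter tuning ("train_type": "full"), a 6e-6 peak learning rate on a cosine schedule, up to 5 epochs, and DeepSpeed ZeRO-3 in bf16.
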
chat_template.jinja ADDED
@@ -0,0 +1,85 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set content = message.content %}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning_content is defined and message.reasoning_content is not none %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in message.content %}
+ {%- set content = message.content.split('</think>')[-1].lstrip('\n') %}
+ {%- set reasoning_content = message.content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {%- if loop.last or (not loop.last and reasoning_content) %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if enable_thinking is defined and enable_thinking is false %}
+ {{- '<think>\n\n</think>\n\n' }}
+ {%- endif %}
+ {%- endif %}
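
This is the stock Qwen3 ChatML template (optional <think> reasoning blocks plus <tool_call>/<tool_response> handling) carried over with the base tokenizer; it is not what the embedding pipeline uses at encode time. A rough usage sketch through transformers, with the load path as a placeholder and assuming extra template variables such as enable_thinking are forwarded by apply_chat_template:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("/path/to/this/repo")  # placeholder path
messages = [{"role": "user", "content": "hello"}]
# enable_thinking=False takes the final branch above and emits an empty <think></think> block.
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True, enable_thinking=False)
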
config.json ADDED
@@ -0,0 +1,60 @@
+ {
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "eos_token_id": 151645,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 28,
+ "model_type": "qwen3",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 8,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "transformers_version": "4.57.1",
+ "use_cache": false,
+ "use_sliding_window": false,
+ "vocab_size": 151669
+ }
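
Two derived figures from this config: 16 attention heads over 8 key/value heads is grouped-query attention with 2 query heads per KV head, and head_dim 128 × 16 heads gives a 2048-wide attention block projected back to the 1024-dimensional hidden state, which is also the word_embedding_dimension the pooling module reports.
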
config_sentence_transformers.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "prompts": {
+ "query": "Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery:",
+ "document": ""
+ },
+ "default_prompt_name": null,
+ "similarity_fn_name": "cosine"
+ }
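
The prompts block prefixes queries with the retrieval instruction while documents are encoded verbatim, and similarity_fn_name fixes cosine as the scoring function. A minimal encode sketch with sentence-transformers, with the model path as a placeholder:

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("/path/to/this/repo")                           # placeholder path
q = model.encode(["what is git lfs?"], prompt_name="query")                 # applies the "query" prompt above
d = model.encode(["Git LFS stores large files outside the git history."])   # "document" prompt is empty
scores = model.similarity(q, d)                                             # cosine, per similarity_fn_name
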
generation_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "eos_token_id": [
+ 151645,
+ 151643,
+ 151645
+ ],
+ "max_new_tokens": 2048,
+ "pad_token_id": 151643,
+ "transformers_version": "4.57.1"
+ }
latest ADDED
@@ -0,0 +1 @@
+ global_step1800
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7b814857c0036884b7e6315489fd79ac32f5c755c6ce719e45c0c08d3209df0
+ size 1191588280
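
For scale: the LFS pointer records 1,191,588,280 bytes ≈ 1.11 GiB, consistent with roughly 0.6 B parameters stored as bfloat16 (2 bytes per parameter).
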
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+ {
+ "idx": 0,
+ "name": "0",
+ "path": "",
+ "type": "sentence_transformers.models.Transformer"
+ },
+ {
+ "idx": 1,
+ "name": "1",
+ "path": "1_Pooling",
+ "type": "sentence_transformers.models.Pooling"
+ },
+ {
+ "idx": 2,
+ "name": "2",
+ "path": "2_Normalize",
+ "type": "sentence_transformers.models.Normalize"
+ }
+ ]
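
modules.json wires the checkpoint into the usual three-stage sentence-transformers stack: the Transformer backbone at the repo root, the last-token Pooling module from 1_Pooling/, and a final L2 Normalize step. A sketch of the equivalent stack built by hand, using the base-model id from args.json as a stand-in for this repo:

from sentence_transformers import SentenceTransformer, models

transformer = models.Transformer("Qwen/Qwen3-Embedding-0.6B", max_seq_length=2048)              # stand-in model id
pooling = models.Pooling(transformer.get_word_embedding_dimension(), pooling_mode="lasttoken")  # 1_Pooling/config.json
normalize = models.Normalize()                                                                  # 2_Normalize
model = SentenceTransformer(modules=[transformer, pooling, normalize])
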
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3df3e7f8a6cd70268712b0cc8a07baf070597f77383ff0df8999c53214c03ba8
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:def76fb086971c7867b829c23a26261e38d9d74e02139253b38aeb9df8b4b50a
+ size 11423705
tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|im_end|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 131072,
235
+ "pad_token": "<|endoftext|>",
236
+ "split_special_tokens": false,
237
+ "tokenizer_class": "Qwen2Tokenizer",
238
+ "unk_token": null
239
+ }
trainer_state.json ADDED
@@ -0,0 +1,2759 @@
1
+ {
2
+ "best_global_step": 1800,
3
+ "best_metric": 0.00229549,
4
+ "best_model_checkpoint": "/mnt/beegfs3/liying/zhangfanhao/output1125/v1-20251125-231025/checkpoint-1800",
5
+ "epoch": 3.0354280894137493,
6
+ "eval_steps": 100,
7
+ "global_step": 1800,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.001687051876845213,
14
+ "grad_norm": 0.5938383277366354,
15
+ "learning_rate": 5.999998316002012e-06,
16
+ "loss": 0.380859375,
17
+ "step": 1
18
+ },
19
+ {
20
+ "epoch": 0.008435259384226065,
21
+ "grad_norm": 0.432332139447319,
22
+ "learning_rate": 5.999957900144816e-06,
23
+ "loss": 0.3326416015625,
24
+ "step": 5
25
+ },
26
+ {
27
+ "epoch": 0.01687051876845213,
28
+ "grad_norm": 0.24245712798777588,
29
+ "learning_rate": 5.99983160176086e-06,
30
+ "loss": 0.2187744140625,
31
+ "step": 10
32
+ },
33
+ {
34
+ "epoch": 0.025305778152678194,
35
+ "grad_norm": 0.14967602144842607,
36
+ "learning_rate": 5.999621108392896e-06,
37
+ "loss": 0.1771240234375,
38
+ "step": 15
39
+ },
40
+ {
41
+ "epoch": 0.03374103753690426,
42
+ "grad_norm": 0.11965916268612647,
43
+ "learning_rate": 5.9993264259487505e-06,
44
+ "loss": 0.14423828125,
45
+ "step": 20
46
+ },
47
+ {
48
+ "epoch": 0.04217629692113033,
49
+ "grad_norm": 0.09039362542123534,
50
+ "learning_rate": 5.998947562699149e-06,
51
+ "loss": 0.1184326171875,
52
+ "step": 25
53
+ },
54
+ {
55
+ "epoch": 0.05061155630535639,
56
+ "grad_norm": 0.0761794885482189,
57
+ "learning_rate": 5.998484529277483e-06,
58
+ "loss": 0.108642578125,
59
+ "step": 30
60
+ },
61
+ {
62
+ "epoch": 0.059046815689582456,
63
+ "grad_norm": 0.07436752367684027,
64
+ "learning_rate": 5.997937338679513e-06,
65
+ "loss": 0.09638671875,
66
+ "step": 35
67
+ },
68
+ {
69
+ "epoch": 0.06748207507380852,
70
+ "grad_norm": 0.06054003854062884,
71
+ "learning_rate": 5.997306006263003e-06,
72
+ "loss": 0.1025146484375,
73
+ "step": 40
74
+ },
75
+ {
76
+ "epoch": 0.07591733445803459,
77
+ "grad_norm": 0.05715450839425674,
78
+ "learning_rate": 5.996590549747288e-06,
79
+ "loss": 0.0909912109375,
80
+ "step": 45
81
+ },
82
+ {
83
+ "epoch": 0.08435259384226065,
84
+ "grad_norm": 0.0562159873926997,
85
+ "learning_rate": 5.995790989212777e-06,
86
+ "loss": 0.0900390625,
87
+ "step": 50
88
+ },
89
+ {
90
+ "epoch": 0.09278785322648671,
91
+ "grad_norm": 0.054733644360014155,
92
+ "learning_rate": 5.994907347100393e-06,
93
+ "loss": 0.08599853515625,
94
+ "step": 55
95
+ },
96
+ {
97
+ "epoch": 0.10122311261071278,
98
+ "grad_norm": 0.04945430208391664,
99
+ "learning_rate": 5.99393964821094e-06,
100
+ "loss": 0.08861083984375,
101
+ "step": 60
102
+ },
103
+ {
104
+ "epoch": 0.10965837199493884,
105
+ "grad_norm": 0.060375343186170424,
106
+ "learning_rate": 5.992887919704406e-06,
107
+ "loss": 0.08037109375,
108
+ "step": 65
109
+ },
110
+ {
111
+ "epoch": 0.11809363137916491,
112
+ "grad_norm": 0.05113371142226039,
113
+ "learning_rate": 5.991752191099203e-06,
114
+ "loss": 0.07867431640625,
115
+ "step": 70
116
+ },
117
+ {
118
+ "epoch": 0.12652889076339097,
119
+ "grad_norm": 0.060073186423122656,
120
+ "learning_rate": 5.990532494271337e-06,
121
+ "loss": 0.07816162109375,
122
+ "step": 75
123
+ },
124
+ {
125
+ "epoch": 0.13496415014761703,
126
+ "grad_norm": 0.058832653609599356,
127
+ "learning_rate": 5.989228863453515e-06,
128
+ "loss": 0.08001708984375,
129
+ "step": 80
130
+ },
131
+ {
132
+ "epoch": 0.1433994095318431,
133
+ "grad_norm": 0.06587176624760811,
134
+ "learning_rate": 5.987841335234184e-06,
135
+ "loss": 0.074359130859375,
136
+ "step": 85
137
+ },
138
+ {
139
+ "epoch": 0.15183466891606917,
140
+ "grad_norm": 0.06491166432460505,
141
+ "learning_rate": 5.9863699485565e-06,
142
+ "loss": 0.0674072265625,
143
+ "step": 90
144
+ },
145
+ {
146
+ "epoch": 0.16026992830029524,
147
+ "grad_norm": 0.056198676389375694,
148
+ "learning_rate": 5.984814744717241e-06,
149
+ "loss": 0.0659912109375,
150
+ "step": 95
151
+ },
152
+ {
153
+ "epoch": 0.1687051876845213,
154
+ "grad_norm": 0.0673764252680421,
155
+ "learning_rate": 5.983175767365646e-06,
156
+ "loss": 0.063623046875,
157
+ "step": 100
158
+ },
159
+ {
160
+ "epoch": 0.1687051876845213,
161
+ "eval_loss": 0.0162808820605278,
162
+ "eval_margin": -0.020074697267714766,
163
+ "eval_mean_neg": 0.6548054814338684,
164
+ "eval_mean_pos": 0.8441178202629089,
165
+ "eval_runtime": 367.938,
166
+ "eval_samples_per_second": 21.713,
167
+ "eval_steps_per_second": 0.34,
168
+ "step": 100
169
+ },
170
+ {
171
+ "epoch": 0.17714044706874738,
172
+ "grad_norm": 0.06055978762872105,
173
+ "learning_rate": 5.981453062502185e-06,
174
+ "loss": 0.060498046875,
175
+ "step": 105
176
+ },
177
+ {
178
+ "epoch": 0.18557570645297342,
179
+ "grad_norm": 0.06213709235940642,
180
+ "learning_rate": 5.979646678477277e-06,
181
+ "loss": 0.056640625,
182
+ "step": 110
183
+ },
184
+ {
185
+ "epoch": 0.19401096583719948,
186
+ "grad_norm": 0.0659729457413995,
187
+ "learning_rate": 5.977756665989925e-06,
188
+ "loss": 0.05919189453125,
189
+ "step": 115
190
+ },
191
+ {
192
+ "epoch": 0.20244622522142555,
193
+ "grad_norm": 0.05897713608413389,
194
+ "learning_rate": 5.9757830780862985e-06,
195
+ "loss": 0.0628662109375,
196
+ "step": 120
197
+ },
198
+ {
199
+ "epoch": 0.21088148460565162,
200
+ "grad_norm": 0.05642517065149083,
201
+ "learning_rate": 5.973725970158239e-06,
202
+ "loss": 0.05245361328125,
203
+ "step": 125
204
+ },
205
+ {
206
+ "epoch": 0.2193167439898777,
207
+ "grad_norm": 0.06579611075607034,
208
+ "learning_rate": 5.9715853999417115e-06,
209
+ "loss": 0.05848388671875,
210
+ "step": 130
211
+ },
212
+ {
213
+ "epoch": 0.22775200337410376,
214
+ "grad_norm": 0.07448489445734133,
215
+ "learning_rate": 5.969361427515179e-06,
216
+ "loss": 0.0573974609375,
217
+ "step": 135
218
+ },
219
+ {
220
+ "epoch": 0.23618726275832982,
221
+ "grad_norm": 0.05975086799089143,
222
+ "learning_rate": 5.9670541152979215e-06,
223
+ "loss": 0.05091552734375,
224
+ "step": 140
225
+ },
226
+ {
227
+ "epoch": 0.2446225221425559,
228
+ "grad_norm": 0.06504159374670346,
229
+ "learning_rate": 5.964663528048276e-06,
230
+ "loss": 0.047943115234375,
231
+ "step": 145
232
+ },
233
+ {
234
+ "epoch": 0.25305778152678193,
235
+ "grad_norm": 0.060584307441235295,
236
+ "learning_rate": 5.96218973286183e-06,
237
+ "loss": 0.0493896484375,
238
+ "step": 150
239
+ },
240
+ {
241
+ "epoch": 0.261493040911008,
242
+ "grad_norm": 0.06234398910972033,
243
+ "learning_rate": 5.959632799169529e-06,
244
+ "loss": 0.04854736328125,
245
+ "step": 155
246
+ },
247
+ {
248
+ "epoch": 0.26992830029523407,
249
+ "grad_norm": 0.07326440644425879,
250
+ "learning_rate": 5.9569927987357305e-06,
251
+ "loss": 0.0443359375,
252
+ "step": 160
253
+ },
254
+ {
255
+ "epoch": 0.27836355967946014,
256
+ "grad_norm": 0.059873291821439245,
257
+ "learning_rate": 5.954269805656194e-06,
258
+ "loss": 0.04698486328125,
259
+ "step": 165
260
+ },
261
+ {
262
+ "epoch": 0.2867988190636862,
263
+ "grad_norm": 0.07214278446872342,
264
+ "learning_rate": 5.951463896355993e-06,
265
+ "loss": 0.0474639892578125,
266
+ "step": 170
267
+ },
268
+ {
269
+ "epoch": 0.2952340784479123,
270
+ "grad_norm": 0.06530184393433881,
271
+ "learning_rate": 5.94857514958738e-06,
272
+ "loss": 0.043914794921875,
273
+ "step": 175
274
+ },
275
+ {
276
+ "epoch": 0.30366933783213834,
277
+ "grad_norm": 0.06214586771199744,
278
+ "learning_rate": 5.945603646427567e-06,
279
+ "loss": 0.043475341796875,
280
+ "step": 180
281
+ },
282
+ {
283
+ "epoch": 0.3121045972163644,
284
+ "grad_norm": 0.06764874450241058,
285
+ "learning_rate": 5.9425494702764575e-06,
286
+ "loss": 0.04755859375,
287
+ "step": 185
288
+ },
289
+ {
290
+ "epoch": 0.3205398566005905,
291
+ "grad_norm": 0.06523200399348678,
292
+ "learning_rate": 5.939412706854299e-06,
293
+ "loss": 0.044635009765625,
294
+ "step": 190
295
+ },
296
+ {
297
+ "epoch": 0.32897511598481655,
298
+ "grad_norm": 0.060102318432770876,
299
+ "learning_rate": 5.9361934441992835e-06,
300
+ "loss": 0.042364501953125,
301
+ "step": 195
302
+ },
303
+ {
304
+ "epoch": 0.3374103753690426,
305
+ "grad_norm": 0.06678207500644712,
306
+ "learning_rate": 5.9328917726650706e-06,
307
+ "loss": 0.04183349609375,
308
+ "step": 200
309
+ },
310
+ {
311
+ "epoch": 0.3374103753690426,
312
+ "eval_loss": 0.010425936430692673,
313
+ "eval_margin": -0.016463442112229044,
314
+ "eval_mean_neg": 0.5932909250259399,
315
+ "eval_mean_pos": 0.8196097016334534,
316
+ "eval_runtime": 365.6666,
317
+ "eval_samples_per_second": 21.848,
318
+ "eval_steps_per_second": 0.342,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.3458456347532687,
323
+ "grad_norm": 0.05626492604909855,
324
+ "learning_rate": 5.929507784918257e-06,
325
+ "loss": 0.040447998046875,
326
+ "step": 205
327
+ },
328
+ {
329
+ "epoch": 0.35428089413749475,
330
+ "grad_norm": 0.054176681030320105,
331
+ "learning_rate": 5.926041575935772e-06,
332
+ "loss": 0.037396240234375,
333
+ "step": 210
334
+ },
335
+ {
336
+ "epoch": 0.3627161535217208,
337
+ "grad_norm": 0.06953999336709471,
338
+ "learning_rate": 5.922493243002212e-06,
339
+ "loss": 0.042828369140625,
340
+ "step": 215
341
+ },
342
+ {
343
+ "epoch": 0.37115141290594683,
344
+ "grad_norm": 0.05589129508252642,
345
+ "learning_rate": 5.918862885707113e-06,
346
+ "loss": 0.034979248046875,
347
+ "step": 220
348
+ },
349
+ {
350
+ "epoch": 0.3795866722901729,
351
+ "grad_norm": 0.07078214617147234,
352
+ "learning_rate": 5.915150605942153e-06,
353
+ "loss": 0.035723876953125,
354
+ "step": 225
355
+ },
356
+ {
357
+ "epoch": 0.38802193167439897,
358
+ "grad_norm": 0.06815732953530805,
359
+ "learning_rate": 5.911356507898291e-06,
360
+ "loss": 0.041973876953125,
361
+ "step": 230
362
+ },
363
+ {
364
+ "epoch": 0.39645719105862504,
365
+ "grad_norm": 0.062472935047014386,
366
+ "learning_rate": 5.907480698062848e-06,
367
+ "loss": 0.0356689453125,
368
+ "step": 235
369
+ },
370
+ {
371
+ "epoch": 0.4048924504428511,
372
+ "grad_norm": 0.06665756879409568,
373
+ "learning_rate": 5.90352328521651e-06,
374
+ "loss": 0.036456298828125,
375
+ "step": 240
376
+ },
377
+ {
378
+ "epoch": 0.41332770982707717,
379
+ "grad_norm": 0.06681598226193439,
380
+ "learning_rate": 5.899484380430284e-06,
381
+ "loss": 0.0343994140625,
382
+ "step": 245
383
+ },
384
+ {
385
+ "epoch": 0.42176296921130324,
386
+ "grad_norm": 0.060740413400477374,
387
+ "learning_rate": 5.895364097062374e-06,
388
+ "loss": 0.0318511962890625,
389
+ "step": 250
390
+ },
391
+ {
392
+ "epoch": 0.4301982285955293,
393
+ "grad_norm": 0.06545743307605277,
394
+ "learning_rate": 5.8911625507550015e-06,
395
+ "loss": 0.034765625,
396
+ "step": 255
397
+ },
398
+ {
399
+ "epoch": 0.4386334879797554,
400
+ "grad_norm": 0.0673664786591912,
401
+ "learning_rate": 5.88687985943116e-06,
402
+ "loss": 0.03580322265625,
403
+ "step": 260
404
+ },
405
+ {
406
+ "epoch": 0.44706874736398144,
407
+ "grad_norm": 0.06937993663032453,
408
+ "learning_rate": 5.882516143291308e-06,
409
+ "loss": 0.036236572265625,
410
+ "step": 265
411
+ },
412
+ {
413
+ "epoch": 0.4555040067482075,
414
+ "grad_norm": 0.0639250177544625,
415
+ "learning_rate": 5.878071524809988e-06,
416
+ "loss": 0.0317962646484375,
417
+ "step": 270
418
+ },
419
+ {
420
+ "epoch": 0.4639392661324336,
421
+ "grad_norm": 0.06037822600018219,
422
+ "learning_rate": 5.873546128732399e-06,
423
+ "loss": 0.0323699951171875,
424
+ "step": 275
425
+ },
426
+ {
427
+ "epoch": 0.47237452551665965,
428
+ "grad_norm": 0.060357976056049485,
429
+ "learning_rate": 5.868940082070885e-06,
430
+ "loss": 0.033660888671875,
431
+ "step": 280
432
+ },
433
+ {
434
+ "epoch": 0.4808097849008857,
435
+ "grad_norm": 0.061037172126093234,
436
+ "learning_rate": 5.8642535141013785e-06,
437
+ "loss": 0.0297515869140625,
438
+ "step": 285
439
+ },
440
+ {
441
+ "epoch": 0.4892450442851118,
442
+ "grad_norm": 0.0524126813526148,
443
+ "learning_rate": 5.859486556359768e-06,
444
+ "loss": 0.028472900390625,
445
+ "step": 290
446
+ },
447
+ {
448
+ "epoch": 0.49768030366933785,
449
+ "grad_norm": 0.062026009465912704,
450
+ "learning_rate": 5.854639342638208e-06,
451
+ "loss": 0.030718994140625,
452
+ "step": 295
453
+ },
454
+ {
455
+ "epoch": 0.5061155630535639,
456
+ "grad_norm": 0.05866098788599579,
457
+ "learning_rate": 5.849712008981361e-06,
458
+ "loss": 0.032916259765625,
459
+ "step": 300
460
+ },
461
+ {
462
+ "epoch": 0.5061155630535639,
463
+ "eval_loss": 0.007868120446801186,
464
+ "eval_margin": -0.013977996595654517,
465
+ "eval_mean_neg": 0.5548827648162842,
466
+ "eval_mean_pos": 0.793705403804779,
467
+ "eval_runtime": 364.6437,
468
+ "eval_samples_per_second": 21.909,
469
+ "eval_steps_per_second": 0.343,
470
+ "step": 300
471
+ },
472
+ {
473
+ "epoch": 0.5145508224377899,
474
+ "grad_norm": 0.05968132039231295,
475
+ "learning_rate": 5.844704693682583e-06,
476
+ "loss": 0.0292724609375,
477
+ "step": 305
478
+ },
479
+ {
480
+ "epoch": 0.522986081822016,
481
+ "grad_norm": 0.06038138238675174,
482
+ "learning_rate": 5.8396175372800405e-06,
483
+ "loss": 0.030743408203125,
484
+ "step": 310
485
+ },
486
+ {
487
+ "epoch": 0.5314213412062421,
488
+ "grad_norm": 0.06052295196543659,
489
+ "learning_rate": 5.834450682552765e-06,
490
+ "loss": 0.030194091796875,
491
+ "step": 315
492
+ },
493
+ {
494
+ "epoch": 0.5398566005904681,
495
+ "grad_norm": 0.05539528727202974,
496
+ "learning_rate": 5.829204274516648e-06,
497
+ "loss": 0.0312774658203125,
498
+ "step": 320
499
+ },
500
+ {
501
+ "epoch": 0.5482918599746942,
502
+ "grad_norm": 0.052508369724972796,
503
+ "learning_rate": 5.823878460420366e-06,
504
+ "loss": 0.0295318603515625,
505
+ "step": 325
506
+ },
507
+ {
508
+ "epoch": 0.5567271193589203,
509
+ "grad_norm": 0.05151880865825463,
510
+ "learning_rate": 5.8184733897412565e-06,
511
+ "loss": 0.028912353515625,
512
+ "step": 330
513
+ },
514
+ {
515
+ "epoch": 0.5651623787431463,
516
+ "grad_norm": 0.0624220665428448,
517
+ "learning_rate": 5.812989214181113e-06,
518
+ "loss": 0.027313232421875,
519
+ "step": 335
520
+ },
521
+ {
522
+ "epoch": 0.5735976381273724,
523
+ "grad_norm": 0.06481057308539884,
524
+ "learning_rate": 5.807426087661934e-06,
525
+ "loss": 0.02608642578125,
526
+ "step": 340
527
+ },
528
+ {
529
+ "epoch": 0.5820328975115985,
530
+ "grad_norm": 0.06109467057046473,
531
+ "learning_rate": 5.8017841663216e-06,
532
+ "loss": 0.0282989501953125,
533
+ "step": 345
534
+ },
535
+ {
536
+ "epoch": 0.5904681568958245,
537
+ "grad_norm": 0.062107444796084835,
538
+ "learning_rate": 5.796063608509493e-06,
539
+ "loss": 0.0277069091796875,
540
+ "step": 350
541
+ },
542
+ {
543
+ "epoch": 0.5989034162800506,
544
+ "grad_norm": 0.0552072139581444,
545
+ "learning_rate": 5.7902645747820485e-06,
546
+ "loss": 0.028399658203125,
547
+ "step": 355
548
+ },
549
+ {
550
+ "epoch": 0.6073386756642767,
551
+ "grad_norm": 0.06047980839414296,
552
+ "learning_rate": 5.784387227898254e-06,
553
+ "loss": 0.0281524658203125,
554
+ "step": 360
555
+ },
556
+ {
557
+ "epoch": 0.6157739350485028,
558
+ "grad_norm": 0.05336288606895412,
559
+ "learning_rate": 5.778431732815078e-06,
560
+ "loss": 0.02484130859375,
561
+ "step": 365
562
+ },
563
+ {
564
+ "epoch": 0.6242091944327288,
565
+ "grad_norm": 0.060745200996401724,
566
+ "learning_rate": 5.77239825668284e-06,
567
+ "loss": 0.02640380859375,
568
+ "step": 370
569
+ },
570
+ {
571
+ "epoch": 0.6326444538169549,
572
+ "grad_norm": 0.048268694566304324,
573
+ "learning_rate": 5.766286968840522e-06,
574
+ "loss": 0.0278717041015625,
575
+ "step": 375
576
+ },
577
+ {
578
+ "epoch": 0.641079713201181,
579
+ "grad_norm": 0.05424806603710711,
580
+ "learning_rate": 5.760098040811012e-06,
581
+ "loss": 0.0271453857421875,
582
+ "step": 380
583
+ },
584
+ {
585
+ "epoch": 0.649514972585407,
586
+ "grad_norm": 0.054535443289609395,
587
+ "learning_rate": 5.7538316462962935e-06,
588
+ "loss": 0.026611328125,
589
+ "step": 385
590
+ },
591
+ {
592
+ "epoch": 0.6579502319696331,
593
+ "grad_norm": 0.06967389025087475,
594
+ "learning_rate": 5.7474879611725655e-06,
595
+ "loss": 0.02589111328125,
596
+ "step": 390
597
+ },
598
+ {
599
+ "epoch": 0.6663854913538592,
600
+ "grad_norm": 0.06024092137696802,
601
+ "learning_rate": 5.741067163485314e-06,
602
+ "loss": 0.0193756103515625,
603
+ "step": 395
604
+ },
605
+ {
606
+ "epoch": 0.6748207507380852,
607
+ "grad_norm": 0.05981804001044263,
608
+ "learning_rate": 5.7345694334443066e-06,
609
+ "loss": 0.0205718994140625,
610
+ "step": 400
611
+ },
612
+ {
613
+ "epoch": 0.6748207507380852,
614
+ "eval_loss": 0.006414474919438362,
615
+ "eval_margin": -0.013447051244457402,
616
+ "eval_mean_neg": 0.5324161052703857,
617
+ "eval_mean_pos": 0.7909372448921204,
618
+ "eval_runtime": 365.0145,
619
+ "eval_samples_per_second": 21.887,
620
+ "eval_steps_per_second": 0.342,
621
+ "step": 400
622
+ },
623
+ {
624
+ "epoch": 0.6832560101223113,
625
+ "grad_norm": 0.06692561927901217,
626
+ "learning_rate": 5.727994953418538e-06,
627
+ "loss": 0.022021484375,
628
+ "step": 405
629
+ },
630
+ {
631
+ "epoch": 0.6916912695065374,
632
+ "grad_norm": 0.06609269963808409,
633
+ "learning_rate": 5.721343907931114e-06,
634
+ "loss": 0.02950592041015625,
635
+ "step": 410
636
+ },
637
+ {
638
+ "epoch": 0.7001265288907634,
639
+ "grad_norm": 0.052617111357424175,
640
+ "learning_rate": 5.71461648365407e-06,
641
+ "loss": 0.025189208984375,
642
+ "step": 415
643
+ },
644
+ {
645
+ "epoch": 0.7085617882749895,
646
+ "grad_norm": 0.04860971480260525,
647
+ "learning_rate": 5.707812869403128e-06,
648
+ "loss": 0.022052001953125,
649
+ "step": 420
650
+ },
651
+ {
652
+ "epoch": 0.7169970476592156,
653
+ "grad_norm": 0.06030454097987917,
654
+ "learning_rate": 5.7009332561324085e-06,
655
+ "loss": 0.0219390869140625,
656
+ "step": 425
657
+ },
658
+ {
659
+ "epoch": 0.7254323070434416,
660
+ "grad_norm": 0.06837586048390999,
661
+ "learning_rate": 5.693977836929057e-06,
662
+ "loss": 0.0270172119140625,
663
+ "step": 430
664
+ },
665
+ {
666
+ "epoch": 0.7338675664276677,
667
+ "grad_norm": 0.05197492190608033,
668
+ "learning_rate": 5.686946807007834e-06,
669
+ "loss": 0.02206878662109375,
670
+ "step": 435
671
+ },
672
+ {
673
+ "epoch": 0.7423028258118937,
674
+ "grad_norm": 0.06128713786873146,
675
+ "learning_rate": 5.679840363705637e-06,
676
+ "loss": 0.0244720458984375,
677
+ "step": 440
678
+ },
679
+ {
680
+ "epoch": 0.7507380851961197,
681
+ "grad_norm": 0.0629198604819534,
682
+ "learning_rate": 5.672658706475953e-06,
683
+ "loss": 0.0194488525390625,
684
+ "step": 445
685
+ },
686
+ {
687
+ "epoch": 0.7591733445803458,
688
+ "grad_norm": 0.05502172045134509,
689
+ "learning_rate": 5.665402036883267e-06,
690
+ "loss": 0.0225250244140625,
691
+ "step": 450
692
+ },
693
+ {
694
+ "epoch": 0.7676086039645719,
695
+ "grad_norm": 0.06119000768724386,
696
+ "learning_rate": 5.658070558597408e-06,
697
+ "loss": 0.01928558349609375,
698
+ "step": 455
699
+ },
700
+ {
701
+ "epoch": 0.7760438633487979,
702
+ "grad_norm": 0.058834092769235756,
703
+ "learning_rate": 5.650664477387824e-06,
704
+ "loss": 0.02149658203125,
705
+ "step": 460
706
+ },
707
+ {
708
+ "epoch": 0.784479122733024,
709
+ "grad_norm": 0.06942758384696321,
710
+ "learning_rate": 5.643184001117811e-06,
711
+ "loss": 0.0266326904296875,
712
+ "step": 465
713
+ },
714
+ {
715
+ "epoch": 0.7929143821172501,
716
+ "grad_norm": 0.05395397336586372,
717
+ "learning_rate": 5.6356293397386836e-06,
718
+ "loss": 0.0206085205078125,
719
+ "step": 470
720
+ },
721
+ {
722
+ "epoch": 0.8013496415014761,
723
+ "grad_norm": 0.057301086470950384,
724
+ "learning_rate": 5.628000705283873e-06,
725
+ "loss": 0.021770477294921875,
726
+ "step": 475
727
+ },
728
+ {
729
+ "epoch": 0.8097849008857022,
730
+ "grad_norm": 0.058618795566843934,
731
+ "learning_rate": 5.620298311862985e-06,
732
+ "loss": 0.0174072265625,
733
+ "step": 480
734
+ },
735
+ {
736
+ "epoch": 0.8182201602699283,
737
+ "grad_norm": 0.053997897902853975,
738
+ "learning_rate": 5.612522375655783e-06,
739
+ "loss": 0.0246124267578125,
740
+ "step": 485
741
+ },
742
+ {
743
+ "epoch": 0.8266554196541543,
744
+ "grad_norm": 0.058293384553658546,
745
+ "learning_rate": 5.604673114906126e-06,
746
+ "loss": 0.0239288330078125,
747
+ "step": 490
748
+ },
749
+ {
750
+ "epoch": 0.8350906790383804,
751
+ "grad_norm": 0.062099166751088966,
752
+ "learning_rate": 5.596750749915842e-06,
753
+ "loss": 0.023724365234375,
754
+ "step": 495
755
+ },
756
+ {
757
+ "epoch": 0.8435259384226065,
758
+ "grad_norm": 0.0518337334475497,
759
+ "learning_rate": 5.588755503038543e-06,
760
+ "loss": 0.01995849609375,
761
+ "step": 500
762
+ },
763
+ {
764
+ "epoch": 0.8435259384226065,
765
+ "eval_loss": 0.00542406877502799,
766
+ "eval_margin": -0.010786364688688228,
767
+ "eval_mean_neg": 0.5346763134002686,
768
+ "eval_mean_pos": 0.7906754016876221,
769
+ "eval_runtime": 367.5462,
770
+ "eval_samples_per_second": 21.736,
771
+ "eval_steps_per_second": 0.34,
772
+ "step": 500
773
+ },
774
+ {
775
+ "epoch": 0.8519611978068325,
776
+ "grad_norm": 0.0579368996460804,
777
+ "learning_rate": 5.580687598673387e-06,
778
+ "loss": 0.02121734619140625,
779
+ "step": 505
780
+ },
781
+ {
782
+ "epoch": 0.8603964571910586,
783
+ "grad_norm": 0.05214070956939639,
784
+ "learning_rate": 5.572547263258776e-06,
785
+ "loss": 0.0197113037109375,
786
+ "step": 510
787
+ },
788
+ {
789
+ "epoch": 0.8688317165752847,
790
+ "grad_norm": 0.0687906199565583,
791
+ "learning_rate": 5.564334725266006e-06,
792
+ "loss": 0.0217254638671875,
793
+ "step": 515
794
+ },
795
+ {
796
+ "epoch": 0.8772669759595108,
797
+ "grad_norm": 0.051621267659708626,
798
+ "learning_rate": 5.55605021519285e-06,
799
+ "loss": 0.019158935546875,
800
+ "step": 520
801
+ },
802
+ {
803
+ "epoch": 0.8857022353437368,
804
+ "grad_norm": 0.05599957001213385,
805
+ "learning_rate": 5.547693965557092e-06,
806
+ "loss": 0.0195770263671875,
807
+ "step": 525
808
+ },
809
+ {
810
+ "epoch": 0.8941374947279629,
811
+ "grad_norm": 0.06034671456944424,
812
+ "learning_rate": 5.539266210889997e-06,
813
+ "loss": 0.0231231689453125,
814
+ "step": 530
815
+ },
816
+ {
817
+ "epoch": 0.902572754112189,
818
+ "grad_norm": 0.04518349407201743,
819
+ "learning_rate": 5.5307671877297326e-06,
820
+ "loss": 0.0208709716796875,
821
+ "step": 535
822
+ },
823
+ {
824
+ "epoch": 0.911008013496415,
825
+ "grad_norm": 0.0503478793140038,
826
+ "learning_rate": 5.522197134614728e-06,
827
+ "loss": 0.0209930419921875,
828
+ "step": 540
829
+ },
830
+ {
831
+ "epoch": 0.9194432728806411,
832
+ "grad_norm": 0.046047217532892024,
833
+ "learning_rate": 5.513556292076981e-06,
834
+ "loss": 0.0175750732421875,
835
+ "step": 545
836
+ },
837
+ {
838
+ "epoch": 0.9278785322648672,
839
+ "grad_norm": 0.05575253759567789,
840
+ "learning_rate": 5.504844902635303e-06,
841
+ "loss": 0.0171112060546875,
842
+ "step": 550
843
+ },
844
+ {
845
+ "epoch": 0.9363137916490932,
846
+ "grad_norm": 0.04687503220455111,
847
+ "learning_rate": 5.496063210788519e-06,
848
+ "loss": 0.0167633056640625,
849
+ "step": 555
850
+ },
851
+ {
852
+ "epoch": 0.9447490510333193,
853
+ "grad_norm": 0.04891593875536363,
854
+ "learning_rate": 5.487211463008597e-06,
855
+ "loss": 0.019036865234375,
856
+ "step": 560
857
+ },
858
+ {
859
+ "epoch": 0.9531843104175454,
860
+ "grad_norm": 0.04841249311058062,
861
+ "learning_rate": 5.478289907733738e-06,
862
+ "loss": 0.01807098388671875,
863
+ "step": 565
864
+ },
865
+ {
866
+ "epoch": 0.9616195698017714,
867
+ "grad_norm": 0.060373651634708765,
868
+ "learning_rate": 5.469298795361397e-06,
869
+ "loss": 0.015673828125,
870
+ "step": 570
871
+ },
872
+ {
873
+ "epoch": 0.9700548291859975,
874
+ "grad_norm": 0.051868174671481436,
875
+ "learning_rate": 5.460238378241262e-06,
876
+ "loss": 0.01802978515625,
877
+ "step": 575
878
+ },
879
+ {
880
+ "epoch": 0.9784900885702236,
881
+ "grad_norm": 0.051146316151485995,
882
+ "learning_rate": 5.451108910668163e-06,
883
+ "loss": 0.01664581298828125,
884
+ "step": 580
885
+ },
886
+ {
887
+ "epoch": 0.9869253479544496,
888
+ "grad_norm": 0.04017649470362814,
889
+ "learning_rate": 5.441910648874945e-06,
890
+ "loss": 0.016483306884765625,
891
+ "step": 585
892
+ },
893
+ {
894
+ "epoch": 0.9953606073386757,
895
+ "grad_norm": 0.04457228909606784,
896
+ "learning_rate": 5.4326438510252655e-06,
897
+ "loss": 0.0192718505859375,
898
+ "step": 590
899
+ },
900
+ {
901
+ "epoch": 1.0033741037536905,
902
+ "grad_norm": 0.053005736672298354,
903
+ "learning_rate": 5.423308777206357e-06,
904
+ "loss": 0.015604400634765625,
905
+ "step": 595
906
+ },
907
+ {
908
+ "epoch": 1.0118093631379166,
909
+ "grad_norm": 0.05754347568157857,
910
+ "learning_rate": 5.413905689421722e-06,
911
+ "loss": 0.0159515380859375,
912
+ "step": 600
913
+ },
914
+ {
915
+ "epoch": 1.0118093631379166,
916
+ "eval_loss": 0.004593910649418831,
917
+ "eval_margin": -0.011103880922159842,
918
+ "eval_mean_neg": 0.5064941644668579,
919
+ "eval_mean_pos": 0.7831713557243347,
920
+ "eval_runtime": 364.5429,
921
+ "eval_samples_per_second": 21.915,
922
+ "eval_steps_per_second": 0.343,
923
+ "step": 600
924
+ },
925
+ {
926
+ "epoch": 1.0202446225221427,
927
+ "grad_norm": 0.04962686662442784,
928
+ "learning_rate": 5.404434851583785e-06,
929
+ "loss": 0.01360015869140625,
930
+ "step": 605
931
+ },
932
+ {
933
+ "epoch": 1.0286798819063687,
934
+ "grad_norm": 0.046926535788142015,
935
+ "learning_rate": 5.394896529506479e-06,
936
+ "loss": 0.01566925048828125,
937
+ "step": 610
938
+ },
939
+ {
940
+ "epoch": 1.0371151412905948,
941
+ "grad_norm": 0.043042108440633,
942
+ "learning_rate": 5.38529099089779e-06,
943
+ "loss": 0.0128326416015625,
944
+ "step": 615
945
+ },
946
+ {
947
+ "epoch": 1.0455504006748209,
948
+ "grad_norm": 0.049749099013614635,
949
+ "learning_rate": 5.375618505352241e-06,
950
+ "loss": 0.0136383056640625,
951
+ "step": 620
952
+ },
953
+ {
954
+ "epoch": 1.053985660059047,
955
+ "grad_norm": 0.04945151693616336,
956
+ "learning_rate": 5.365879344343326e-06,
957
+ "loss": 0.01544036865234375,
958
+ "step": 625
959
+ },
960
+ {
961
+ "epoch": 1.062420919443273,
962
+ "grad_norm": 0.04626935309793636,
963
+ "learning_rate": 5.35607378121589e-06,
964
+ "loss": 0.0143829345703125,
965
+ "step": 630
966
+ },
967
+ {
968
+ "epoch": 1.070856178827499,
969
+ "grad_norm": 0.04580735975264899,
970
+ "learning_rate": 5.346202091178459e-06,
971
+ "loss": 0.014122772216796874,
972
+ "step": 635
973
+ },
974
+ {
975
+ "epoch": 1.079291438211725,
976
+ "grad_norm": 0.046216725385350446,
977
+ "learning_rate": 5.336264551295512e-06,
978
+ "loss": 0.014672088623046874,
979
+ "step": 640
980
+ },
981
+ {
982
+ "epoch": 1.087726697595951,
983
+ "grad_norm": 0.04564303944680029,
984
+ "learning_rate": 5.326261440479709e-06,
985
+ "loss": 0.0136993408203125,
986
+ "step": 645
987
+ },
988
+ {
989
+ "epoch": 1.096161956980177,
990
+ "grad_norm": 0.05114495970312972,
991
+ "learning_rate": 5.316193039484063e-06,
992
+ "loss": 0.0147705078125,
993
+ "step": 650
994
+ },
995
+ {
996
+ "epoch": 1.1045972163644031,
997
+ "grad_norm": 0.044105955284847585,
998
+ "learning_rate": 5.306059630894056e-06,
999
+ "loss": 0.015480804443359374,
1000
+ "step": 655
1001
+ },
1002
+ {
1003
+ "epoch": 1.1130324757486292,
1004
+ "grad_norm": 0.045182200484827885,
1005
+ "learning_rate": 5.295861499119711e-06,
1006
+ "loss": 0.013404083251953126,
1007
+ "step": 660
1008
+ },
1009
+ {
1010
+ "epoch": 1.1214677351328552,
1011
+ "grad_norm": 0.04324759296793784,
1012
+ "learning_rate": 5.2855989303876065e-06,
1013
+ "loss": 0.01672821044921875,
1014
+ "step": 665
1015
+ },
1016
+ {
1017
+ "epoch": 1.1299029945170813,
1018
+ "grad_norm": 0.03328038907845692,
1019
+ "learning_rate": 5.275272212732849e-06,
1020
+ "loss": 0.01335906982421875,
1021
+ "step": 670
1022
+ },
1023
+ {
1024
+ "epoch": 1.1383382539013074,
1025
+ "grad_norm": 0.044225327184826406,
1026
+ "learning_rate": 5.264881635990984e-06,
1027
+ "loss": 0.012935638427734375,
1028
+ "step": 675
1029
+ },
1030
+ {
1031
+ "epoch": 1.1467735132855335,
1032
+ "grad_norm": 0.04645591264342837,
1033
+ "learning_rate": 5.2544274917898615e-06,
1034
+ "loss": 0.01385498046875,
1035
+ "step": 680
1036
+ },
1037
+ {
1038
+ "epoch": 1.1552087726697595,
1039
+ "grad_norm": 0.05453216622664439,
1040
+ "learning_rate": 5.243910073541454e-06,
1041
+ "loss": 0.016290283203125,
1042
+ "step": 685
1043
+ },
1044
+ {
1045
+ "epoch": 1.1636440320539856,
1046
+ "grad_norm": 0.057731965028177075,
1047
+ "learning_rate": 5.233329676433617e-06,
1048
+ "loss": 0.0145355224609375,
1049
+ "step": 690
1050
+ },
1051
+ {
1052
+ "epoch": 1.1720792914382117,
1053
+ "grad_norm": 0.05145183297720149,
1054
+ "learning_rate": 5.222686597421808e-06,
1055
+ "loss": 0.01390838623046875,
1056
+ "step": 695
1057
+ },
1058
+ {
1059
+ "epoch": 1.1805145508224377,
1060
+ "grad_norm": 0.04021056012812571,
1061
+ "learning_rate": 5.211981135220751e-06,
1062
+ "loss": 0.01344757080078125,
1063
+ "step": 700
1064
+ },
1065
+ {
1066
+ "epoch": 1.1805145508224377,
1067
+ "eval_loss": 0.004191060084849596,
1068
+ "eval_margin": -0.010819014589933137,
1069
+ "eval_mean_neg": 0.4851545989513397,
1070
+ "eval_mean_pos": 0.7733471989631653,
1071
+ "eval_runtime": 359.0481,
1072
+ "eval_samples_per_second": 22.251,
1073
+ "eval_steps_per_second": 0.348,
1074
+ "step": 700
1075
+ },
1076
+ {
1077
+ "epoch": 1.1889498102066638,
1078
+ "grad_norm": 0.0416204676277527,
1079
+ "learning_rate": 5.201213590296052e-06,
1080
+ "loss": 0.014748382568359374,
1081
+ "step": 705
1082
+ },
1083
+ {
1084
+ "epoch": 1.1973850695908899,
1085
+ "grad_norm": 0.05633713089091016,
1086
+ "learning_rate": 5.190384264855764e-06,
1087
+ "loss": 0.014013671875,
1088
+ "step": 710
1089
+ },
1090
+ {
1091
+ "epoch": 1.205820328975116,
1092
+ "grad_norm": 0.05143948467095745,
1093
+ "learning_rate": 5.1794934628419104e-06,
1094
+ "loss": 0.015460205078125,
1095
+ "step": 715
1096
+ },
1097
+ {
1098
+ "epoch": 1.214255588359342,
1099
+ "grad_norm": 0.05227911954680101,
1100
+ "learning_rate": 5.168541489921949e-06,
1101
+ "loss": 0.01507415771484375,
1102
+ "step": 720
1103
+ },
1104
+ {
1105
+ "epoch": 1.222690847743568,
1106
+ "grad_norm": 0.058608960783147375,
1107
+ "learning_rate": 5.1575286534801955e-06,
1108
+ "loss": 0.01417236328125,
1109
+ "step": 725
1110
+ },
1111
+ {
1112
+ "epoch": 1.2311261071277941,
1113
+ "grad_norm": 0.04818858161693878,
1114
+ "learning_rate": 5.146455262609197e-06,
1115
+ "loss": 0.013425445556640625,
1116
+ "step": 730
1117
+ },
1118
+ {
1119
+ "epoch": 1.2395613665120202,
1120
+ "grad_norm": 0.05406749848988645,
1121
+ "learning_rate": 5.1353216281010535e-06,
1122
+ "loss": 0.013022613525390626,
1123
+ "step": 735
1124
+ },
1125
+ {
1126
+ "epoch": 1.2479966258962463,
1127
+ "grad_norm": 0.044408669007062154,
1128
+ "learning_rate": 5.1241280624387e-06,
1129
+ "loss": 0.01393585205078125,
1130
+ "step": 740
1131
+ },
1132
+ {
1133
+ "epoch": 1.2564318852804723,
1134
+ "grad_norm": 0.04519048638967848,
1135
+ "learning_rate": 5.1128748797871314e-06,
1136
+ "loss": 0.013826751708984375,
1137
+ "step": 745
1138
+ },
1139
+ {
1140
+ "epoch": 1.2648671446646984,
1141
+ "grad_norm": 0.0491460974626283,
1142
+ "learning_rate": 5.101562395984587e-06,
1143
+ "loss": 0.01336212158203125,
1144
+ "step": 750
1145
+ },
1146
+ {
1147
+ "epoch": 1.2733024040489245,
1148
+ "grad_norm": 0.04356609182045035,
1149
+ "learning_rate": 5.090190928533689e-06,
1150
+ "loss": 0.01492156982421875,
1151
+ "step": 755
1152
+ },
1153
+ {
1154
+ "epoch": 1.2817376634331505,
1155
+ "grad_norm": 0.03556136795064142,
1156
+ "learning_rate": 5.078760796592524e-06,
1157
+ "loss": 0.0125732421875,
1158
+ "step": 760
1159
+ },
1160
+ {
1161
+ "epoch": 1.2901729228173766,
1162
+ "grad_norm": 0.04189977738590891,
1163
+ "learning_rate": 5.067272320965692e-06,
1164
+ "loss": 0.0149322509765625,
1165
+ "step": 765
1166
+ },
1167
+ {
1168
+ "epoch": 1.2986081822016027,
1169
+ "grad_norm": 0.05051201336701144,
1170
+ "learning_rate": 5.055725824095301e-06,
1171
+ "loss": 0.01419525146484375,
1172
+ "step": 770
1173
+ },
1174
+ {
1175
+ "epoch": 1.3070434415858287,
1176
+ "grad_norm": 0.0416942039130722,
1177
+ "learning_rate": 5.0441216300519126e-06,
1178
+ "loss": 0.01274261474609375,
1179
+ "step": 775
1180
+ },
1181
+ {
1182
+ "epoch": 1.3154787009700548,
1183
+ "grad_norm": 0.04629875001130603,
1184
+ "learning_rate": 5.032460064525455e-06,
1185
+ "loss": 0.01363525390625,
1186
+ "step": 780
1187
+ },
1188
+ {
1189
+ "epoch": 1.3239139603542809,
1190
+ "grad_norm": 0.03704688355237128,
1191
+ "learning_rate": 5.020741454816074e-06,
1192
+ "loss": 0.01301422119140625,
1193
+ "step": 785
1194
+ },
1195
+ {
1196
+ "epoch": 1.332349219738507,
1197
+ "grad_norm": 0.03742406408262459,
1198
+ "learning_rate": 5.00896612982495e-06,
1199
+ "loss": 0.01353302001953125,
1200
+ "step": 790
1201
+ },
1202
+ {
1203
+ "epoch": 1.340784479122733,
1204
+ "grad_norm": 0.050480726423335516,
1205
+ "learning_rate": 4.99713442004507e-06,
1206
+ "loss": 0.01196746826171875,
1207
+ "step": 795
1208
+ },
1209
+ {
1210
+ "epoch": 1.349219738506959,
1211
+ "grad_norm": 0.03808846024736694,
1212
+ "learning_rate": 4.985246657551943e-06,
1213
+ "loss": 0.0110015869140625,
1214
+ "step": 800
1215
+ },
1216
+ {
1217
+ "epoch": 1.349219738506959,
1218
+ "eval_loss": 0.003908403683453798,
1219
+ "eval_margin": -0.010123856463319352,
1220
+ "eval_mean_neg": 0.49688851833343506,
1221
+ "eval_mean_pos": 0.7784863114356995,
1222
+ "eval_runtime": 364.0137,
1223
+ "eval_samples_per_second": 21.947,
1224
+ "eval_steps_per_second": 0.343,
1225
+ "step": 800
1226
+ },
1227
+ {
1228
+ "epoch": 1.3576549978911852,
1229
+ "grad_norm": 0.04637758927467518,
1230
+ "learning_rate": 4.973303175994289e-06,
1231
+ "loss": 0.013458251953125,
1232
+ "step": 805
1233
+ },
1234
+ {
1235
+ "epoch": 1.3660902572754112,
1236
+ "grad_norm": 0.05066098296531039,
1237
+ "learning_rate": 4.961304310584674e-06,
1238
+ "loss": 0.01515960693359375,
1239
+ "step": 810
1240
+ },
1241
+ {
1242
+ "epoch": 1.3745255166596373,
1243
+ "grad_norm": 0.038530384714911596,
1244
+ "learning_rate": 4.949250398090092e-06,
1245
+ "loss": 0.011260223388671876,
1246
+ "step": 815
1247
+ },
1248
+ {
1249
+ "epoch": 1.3829607760438634,
1250
+ "grad_norm": 0.040188601844867354,
1251
+ "learning_rate": 4.937141776822525e-06,
1252
+ "loss": 0.0158447265625,
1253
+ "step": 820
1254
+ },
1255
+ {
1256
+ "epoch": 1.3913960354280894,
1257
+ "grad_norm": 0.03574613677300634,
1258
+ "learning_rate": 4.92497878662944e-06,
1259
+ "loss": 0.011143875122070313,
1260
+ "step": 825
1261
+ },
1262
+ {
1263
+ "epoch": 1.3998312948123155,
1264
+ "grad_norm": 0.05019423126073816,
1265
+ "learning_rate": 4.912761768884255e-06,
1266
+ "loss": 0.01179351806640625,
1267
+ "step": 830
1268
+ },
1269
+ {
1270
+ "epoch": 1.4082665541965416,
1271
+ "grad_norm": 0.04311116805857567,
1272
+ "learning_rate": 4.9004910664767545e-06,
1273
+ "loss": 0.01372833251953125,
1274
+ "step": 835
1275
+ },
1276
+ {
1277
+ "epoch": 1.4167018135807676,
1278
+ "grad_norm": 0.04928580588462512,
1279
+ "learning_rate": 4.888167023803468e-06,
1280
+ "loss": 0.01297607421875,
1281
+ "step": 840
1282
+ },
1283
+ {
1284
+ "epoch": 1.4251370729649937,
1285
+ "grad_norm": 0.054968450905918724,
1286
+ "learning_rate": 4.8757899867580046e-06,
1287
+ "loss": 0.014654541015625,
1288
+ "step": 845
1289
+ },
1290
+ {
1291
+ "epoch": 1.4335723323492198,
1292
+ "grad_norm": 0.050366347428194534,
1293
+ "learning_rate": 4.86336030272134e-06,
1294
+ "loss": 0.011295318603515625,
1295
+ "step": 850
1296
+ },
1297
+ {
1298
+ "epoch": 1.4420075917334458,
1299
+ "grad_norm": 0.05107215089989217,
1300
+ "learning_rate": 4.850878320552076e-06,
1301
+ "loss": 0.01334228515625,
1302
+ "step": 855
1303
+ },
1304
+ {
1305
+ "epoch": 1.450442851117672,
1306
+ "grad_norm": 0.0391963683003482,
1307
+ "learning_rate": 4.838344390576638e-06,
1308
+ "loss": 0.01104736328125,
1309
+ "step": 860
1310
+ },
1311
+ {
1312
+ "epoch": 1.458878110501898,
1313
+ "grad_norm": 0.03985676744245212,
1314
+ "learning_rate": 4.825758864579452e-06,
1315
+ "loss": 0.013307952880859375,
1316
+ "step": 865
1317
+ },
1318
+ {
1319
+ "epoch": 1.467313369886124,
1320
+ "grad_norm": 0.04852757651119817,
1321
+ "learning_rate": 4.813122095793066e-06,
1322
+ "loss": 0.014328384399414062,
1323
+ "step": 870
1324
+ },
1325
+ {
1326
+ "epoch": 1.47574862927035,
1327
+ "grad_norm": 0.0454254941425111,
1328
+ "learning_rate": 4.800434438888235e-06,
1329
+ "loss": 0.012960052490234375,
1330
+ "step": 875
1331
+ },
1332
+ {
1333
+ "epoch": 1.4841838886545762,
1334
+ "grad_norm": 0.03868230007157653,
1335
+ "learning_rate": 4.787696249963974e-06,
1336
+ "loss": 0.01402740478515625,
1337
+ "step": 880
1338
+ },
1339
+ {
1340
+ "epoch": 1.4926191480388022,
1341
+ "grad_norm": 0.05289135869423979,
1342
+ "learning_rate": 4.774907886537553e-06,
1343
+ "loss": 0.013831901550292968,
1344
+ "step": 885
1345
+ },
1346
+ {
1347
+ "epoch": 1.501054407423028,
1348
+ "grad_norm": 0.04594308680556284,
1349
+ "learning_rate": 4.7620697075344736e-06,
1350
+ "loss": 0.012446975708007813,
1351
+ "step": 890
1352
+ },
1353
+ {
1354
+ "epoch": 1.5094896668072542,
1355
+ "grad_norm": 0.048917845490978454,
1356
+ "learning_rate": 4.7491820732783866e-06,
1357
+ "loss": 0.011295318603515625,
1358
+ "step": 895
1359
+ },
1360
+ {
1361
+ "epoch": 1.5179249261914802,
1362
+ "grad_norm": 0.043266255463378436,
1363
+ "learning_rate": 4.73624534548098e-06,
1364
+ "loss": 0.01407012939453125,
1365
+ "step": 900
1366
+ },
1367
+ {
1368
+ "epoch": 1.5179249261914802,
1369
+ "eval_loss": 0.0036048581823706627,
1370
+ "eval_margin": -0.009617562525935711,
1371
+ "eval_mean_neg": 0.4904225468635559,
1372
+ "eval_mean_pos": 0.7793014049530029,
1373
+ "eval_runtime": 363.5397,
1374
+ "eval_samples_per_second": 21.976,
1375
+ "eval_steps_per_second": 0.344,
1376
+ "step": 900
1377
+ },
1378
+ {
1379
+ "epoch": 1.5263601855757063,
1380
+ "grad_norm": 0.04363576408467007,
1381
+ "learning_rate": 4.723259887231835e-06,
1382
+ "loss": 0.0138519287109375,
1383
+ "step": 905
1384
+ },
1385
+ {
1386
+ "epoch": 1.5347954449599324,
1387
+ "grad_norm": 0.04199459687850267,
1388
+ "learning_rate": 4.710226062988223e-06,
1389
+ "loss": 0.01312255859375,
1390
+ "step": 910
1391
+ },
1392
+ {
1393
+ "epoch": 1.5432307043441584,
1394
+ "grad_norm": 0.047436231412077354,
1395
+ "learning_rate": 4.697144238564889e-06,
1396
+ "loss": 0.01208648681640625,
1397
+ "step": 915
1398
+ },
1399
+ {
1400
+ "epoch": 1.5516659637283845,
1401
+ "grad_norm": 0.04092453404900873,
1402
+ "learning_rate": 4.684014781123775e-06,
1403
+ "loss": 0.012505340576171874,
1404
+ "step": 920
1405
+ },
1406
+ {
1407
+ "epoch": 1.5601012231126106,
1408
+ "grad_norm": 0.045645370405214956,
1409
+ "learning_rate": 4.6708380591637166e-06,
1410
+ "loss": 0.0120208740234375,
1411
+ "step": 925
1412
+ },
1413
+ {
1414
+ "epoch": 1.5685364824968366,
1415
+ "grad_norm": 0.04911154284719614,
1416
+ "learning_rate": 4.6576144425101076e-06,
1417
+ "loss": 0.013311767578125,
1418
+ "step": 930
1419
+ },
1420
+ {
1421
+ "epoch": 1.5769717418810627,
1422
+ "grad_norm": 0.045881762593597546,
1423
+ "learning_rate": 4.64434430230451e-06,
1424
+ "loss": 0.012969207763671876,
1425
+ "step": 935
1426
+ },
1427
+ {
1428
+ "epoch": 1.5854070012652888,
1429
+ "grad_norm": 0.04728445094523914,
1430
+ "learning_rate": 4.631028010994245e-06,
1431
+ "loss": 0.01099395751953125,
1432
+ "step": 940
1433
+ },
1434
+ {
1435
+ "epoch": 1.5938422606495148,
1436
+ "grad_norm": 0.03903116673162643,
1437
+ "learning_rate": 4.617665942321937e-06,
1438
+ "loss": 0.0129608154296875,
1439
+ "step": 945
1440
+ },
1441
+ {
1442
+ "epoch": 1.602277520033741,
1443
+ "grad_norm": 0.040499425484585065,
1444
+ "learning_rate": 4.6042584713150225e-06,
1445
+ "loss": 0.009827423095703124,
1446
+ "step": 950
1447
+ },
1448
+ {
1449
+ "epoch": 1.610712779417967,
1450
+ "grad_norm": 0.047017092872005554,
1451
+ "learning_rate": 4.590805974275228e-06,
1452
+ "loss": 0.01045989990234375,
1453
+ "step": 955
1454
+ },
1455
+ {
1456
+ "epoch": 1.619148038802193,
1457
+ "grad_norm": 0.03869016761931018,
1458
+ "learning_rate": 4.577308828768005e-06,
1459
+ "loss": 0.011346435546875,
1460
+ "step": 960
1461
+ },
1462
+ {
1463
+ "epoch": 1.6275832981864191,
1464
+ "grad_norm": 0.05726216064413269,
1465
+ "learning_rate": 4.563767413611932e-06,
1466
+ "loss": 0.01296844482421875,
1467
+ "step": 965
1468
+ },
1469
+ {
1470
+ "epoch": 1.6360185575706452,
1471
+ "grad_norm": 0.034971593802495975,
1472
+ "learning_rate": 4.550182108868089e-06,
1473
+ "loss": 0.01379852294921875,
1474
+ "step": 970
1475
+ },
1476
+ {
1477
+ "epoch": 1.6444538169548713,
1478
+ "grad_norm": 0.04877425067250454,
1479
+ "learning_rate": 4.536553295829384e-06,
1480
+ "loss": 0.012924957275390624,
1481
+ "step": 975
1482
+ },
1483
+ {
1484
+ "epoch": 1.6528890763390973,
1485
+ "grad_norm": 0.03927648322180213,
1486
+ "learning_rate": 4.522881357009853e-06,
1487
+ "loss": 0.01293792724609375,
1488
+ "step": 980
1489
+ },
1490
+ {
1491
+ "epoch": 1.6613243357233234,
1492
+ "grad_norm": 0.024976847462424127,
1493
+ "learning_rate": 4.5091666761339275e-06,
1494
+ "loss": 0.009877777099609375,
1495
+ "step": 985
1496
+ },
1497
+ {
1498
+ "epoch": 1.6697595951075495,
1499
+ "grad_norm": 0.03945379802090875,
1500
+ "learning_rate": 4.495409638125657e-06,
1501
+ "loss": 0.01130523681640625,
1502
+ "step": 990
1503
+ },
1504
+ {
1505
+ "epoch": 1.6781948544917755,
1506
+ "grad_norm": 0.03430320161614481,
1507
+ "learning_rate": 4.481610629097917e-06,
1508
+ "loss": 0.009923553466796875,
1509
+ "step": 995
1510
+ },
1511
+ {
1512
+ "epoch": 1.6866301138760016,
1513
+ "grad_norm": 0.03895065600017937,
1514
+ "learning_rate": 4.46777003634156e-06,
1515
+ "loss": 0.01330413818359375,
1516
+ "step": 1000
1517
+ },
1518
+ {
1519
+ "epoch": 1.6866301138760016,
1520
+ "eval_loss": 0.0034073551651090384,
1521
+ "eval_margin": -0.009528953300398444,
1522
+ "eval_mean_neg": 0.4926661550998688,
1523
+ "eval_mean_pos": 0.7842009663581848,
1524
+ "eval_runtime": 367.0219,
1525
+ "eval_samples_per_second": 21.767,
1526
+ "eval_steps_per_second": 0.341,
1527
+ "step": 1000
1528
+ },
1529
+ {
1530
+ "epoch": 1.6950653732602277,
1531
+ "grad_norm": 0.04302786223265218,
1532
+ "learning_rate": 4.453888248314553e-06,
1533
+ "loss": 0.01107330322265625,
1534
+ "step": 1005
1535
+ },
1536
+ {
1537
+ "epoch": 1.7035006326444537,
1538
+ "grad_norm": 0.04002206909489744,
1539
+ "learning_rate": 4.439965654631073e-06,
1540
+ "loss": 0.0105499267578125,
1541
+ "step": 1010
1542
+ },
1543
+ {
1544
+ "epoch": 1.7119358920286798,
1545
+ "grad_norm": 0.04439497813433074,
1546
+ "learning_rate": 4.426002646050574e-06,
1547
+ "loss": 0.010544586181640624,
1548
+ "step": 1015
1549
+ },
1550
+ {
1551
+ "epoch": 1.7203711514129059,
1552
+ "grad_norm": 0.043341839034531496,
1553
+ "learning_rate": 4.411999614466812e-06,
1554
+ "loss": 0.0125335693359375,
1555
+ "step": 1020
1556
+ },
1557
+ {
1558
+ "epoch": 1.728806410797132,
1559
+ "grad_norm": 0.03449321841295583,
1560
+ "learning_rate": 4.397956952896858e-06,
1561
+ "loss": 0.010623550415039063,
1562
+ "step": 1025
1563
+ },
1564
+ {
1565
+ "epoch": 1.737241670181358,
1566
+ "grad_norm": 0.041185961783139574,
1567
+ "learning_rate": 4.383875055470055e-06,
1568
+ "loss": 0.01031951904296875,
1569
+ "step": 1030
1570
+ },
1571
+ {
1572
+ "epoch": 1.745676929565584,
1573
+ "grad_norm": 0.04627446953615271,
1574
+ "learning_rate": 4.3697543174169675e-06,
1575
+ "loss": 0.01590385437011719,
1576
+ "step": 1035
1577
+ },
1578
+ {
1579
+ "epoch": 1.7541121889498101,
1580
+ "grad_norm": 0.04582345634360075,
1581
+ "learning_rate": 4.355595135058278e-06,
1582
+ "loss": 0.0119537353515625,
1583
+ "step": 1040
1584
+ },
1585
+ {
1586
+ "epoch": 1.7625474483340362,
1587
+ "grad_norm": 0.033580437424405536,
1588
+ "learning_rate": 4.3413979057936715e-06,
1589
+ "loss": 0.01235198974609375,
1590
+ "step": 1045
1591
+ },
1592
+ {
1593
+ "epoch": 1.7709827077182623,
1594
+ "grad_norm": 0.03545606353671419,
1595
+ "learning_rate": 4.32716302809068e-06,
1596
+ "loss": 0.012863922119140624,
1597
+ "step": 1050
1598
+ },
1599
+ {
1600
+ "epoch": 1.7794179671024883,
1601
+ "grad_norm": 0.03491571698794484,
1602
+ "learning_rate": 4.312890901473496e-06,
1603
+ "loss": 0.01035614013671875,
1604
+ "step": 1055
1605
+ },
1606
+ {
1607
+ "epoch": 1.7878532264867144,
1608
+ "grad_norm": 0.04391496148899165,
1609
+ "learning_rate": 4.29858192651176e-06,
1610
+ "loss": 0.011370468139648437,
1611
+ "step": 1060
1612
+ },
1613
+ {
1614
+ "epoch": 1.7962884858709405,
1615
+ "grad_norm": 0.049338016603549396,
1616
+ "learning_rate": 4.284236504809324e-06,
1617
+ "loss": 0.011846160888671875,
1618
+ "step": 1065
1619
+ },
1620
+ {
1621
+ "epoch": 1.8047237452551665,
1622
+ "grad_norm": 0.035387852478552806,
1623
+ "learning_rate": 4.269855038992971e-06,
1624
+ "loss": 0.011142349243164063,
1625
+ "step": 1070
1626
+ },
1627
+ {
1628
+ "epoch": 1.8131590046393926,
1629
+ "grad_norm": 0.043891210942711104,
1630
+ "learning_rate": 4.2554379327011196e-06,
1631
+ "loss": 0.011545944213867187,
1632
+ "step": 1075
1633
+ },
1634
+ {
1635
+ "epoch": 1.8215942640236187,
1636
+ "grad_norm": 0.040327331551499056,
1637
+ "learning_rate": 4.240985590572496e-06,
1638
+ "loss": 0.00897674560546875,
1639
+ "step": 1080
1640
+ },
1641
+ {
1642
+ "epoch": 1.8300295234078447,
1643
+ "grad_norm": 0.03274271686886844,
1644
+ "learning_rate": 4.226498418234771e-06,
1645
+ "loss": 0.01215667724609375,
1646
+ "step": 1085
1647
+ },
1648
+ {
1649
+ "epoch": 1.8384647827920708,
1650
+ "grad_norm": 0.04375742422856697,
1651
+ "learning_rate": 4.2119768222931865e-06,
1652
+ "loss": 0.0109588623046875,
1653
+ "step": 1090
1654
+ },
1655
+ {
1656
+ "epoch": 1.8469000421762969,
1657
+ "grad_norm": 0.036163256401816654,
1658
+ "learning_rate": 4.19742121031913e-06,
1659
+ "loss": 0.012054443359375,
1660
+ "step": 1095
1661
+ },
1662
+ {
1663
+ "epoch": 1.855335301560523,
1664
+ "grad_norm": 0.04078407955383746,
1665
+ "learning_rate": 4.182831990838709e-06,
1666
+ "loss": 0.0132843017578125,
1667
+ "step": 1100
1668
+ },
1669
+ {
1670
+ "epoch": 1.855335301560523,
1671
+ "eval_loss": 0.003225065069273114,
1672
+ "eval_margin": -0.008749207222623932,
1673
+ "eval_mean_neg": 0.49084940552711487,
1674
+ "eval_mean_pos": 0.7849159836769104,
1675
+ "eval_runtime": 366.687,
1676
+ "eval_samples_per_second": 21.787,
1677
+ "eval_steps_per_second": 0.341,
1678
+ "step": 1100
1679
+ },
1680
+ {
1681
+ "epoch": 1.863770560944749,
1682
+ "grad_norm": 0.047827239751426935,
1683
+ "learning_rate": 4.168209573321271e-06,
1684
+ "loss": 0.0133697509765625,
1685
+ "step": 1105
1686
+ },
1687
+ {
1688
+ "epoch": 1.872205820328975,
1689
+ "grad_norm": 0.0274823880547768,
1690
+ "learning_rate": 4.153554368167927e-06,
1691
+ "loss": 0.010877227783203125,
1692
+ "step": 1110
1693
+ },
1694
+ {
1695
+ "epoch": 1.8806410797132012,
1696
+ "grad_norm": 0.052787755841206804,
1697
+ "learning_rate": 4.138866786700016e-06,
1698
+ "loss": 0.0139434814453125,
1699
+ "step": 1115
1700
+ },
1701
+ {
1702
+ "epoch": 1.8890763390974272,
1703
+ "grad_norm": 0.029629846825489692,
1704
+ "learning_rate": 4.124147241147577e-06,
1705
+ "loss": 0.011189651489257813,
1706
+ "step": 1120
1707
+ },
1708
+ {
1709
+ "epoch": 1.8975115984816533,
1710
+ "grad_norm": 0.039855575258898726,
1711
+ "learning_rate": 4.109396144637764e-06,
1712
+ "loss": 0.010993194580078126,
1713
+ "step": 1125
1714
+ },
1715
+ {
1716
+ "epoch": 1.9059468578658794,
1717
+ "grad_norm": 0.03789188882991695,
1718
+ "learning_rate": 4.094613911183265e-06,
1719
+ "loss": 0.01313323974609375,
1720
+ "step": 1130
1721
+ },
1722
+ {
1723
+ "epoch": 1.9143821172501054,
1724
+ "grad_norm": 0.03482605825228896,
1725
+ "learning_rate": 4.0798009556706685e-06,
1726
+ "loss": 0.008492279052734374,
1727
+ "step": 1135
1728
+ },
1729
+ {
1730
+ "epoch": 1.9228173766343315,
1731
+ "grad_norm": 0.0395626147511318,
1732
+ "learning_rate": 4.064957693848831e-06,
1733
+ "loss": 0.011167144775390625,
1734
+ "step": 1140
1735
+ },
1736
+ {
1737
+ "epoch": 1.9312526360185576,
1738
+ "grad_norm": 0.026910728579180684,
1739
+ "learning_rate": 4.050084542317201e-06,
1740
+ "loss": 0.0124908447265625,
1741
+ "step": 1145
1742
+ },
1743
+ {
1744
+ "epoch": 1.9396878954027836,
1745
+ "grad_norm": 0.05111929237613795,
1746
+ "learning_rate": 4.0351819185141284e-06,
1747
+ "loss": 0.01279144287109375,
1748
+ "step": 1150
1749
+ },
1750
+ {
1751
+ "epoch": 1.9481231547870097,
1752
+ "grad_norm": 0.031631097839140386,
1753
+ "learning_rate": 4.02025024070515e-06,
1754
+ "loss": 0.010783004760742187,
1755
+ "step": 1155
1756
+ },
1757
+ {
1758
+ "epoch": 1.9565584141712358,
1759
+ "grad_norm": 0.03921591693735718,
1760
+ "learning_rate": 4.005289927971248e-06,
1761
+ "loss": 0.009867095947265625,
1762
+ "step": 1160
1763
+ },
1764
+ {
1765
+ "epoch": 1.9649936735554618,
1766
+ "grad_norm": 0.03786979993880419,
1767
+ "learning_rate": 3.990301400197088e-06,
1768
+ "loss": 0.010943603515625,
1769
+ "step": 1165
1770
+ },
1771
+ {
1772
+ "epoch": 1.973428932939688,
1773
+ "grad_norm": 0.033688024912648086,
1774
+ "learning_rate": 3.9752850780592366e-06,
1775
+ "loss": 0.010836410522460937,
1776
+ "step": 1170
1777
+ },
1778
+ {
1779
+ "epoch": 1.981864192323914,
1780
+ "grad_norm": 0.0473160707405277,
1781
+ "learning_rate": 3.960241383014353e-06,
1782
+ "loss": 0.011658477783203124,
1783
+ "step": 1175
1784
+ },
1785
+ {
1786
+ "epoch": 1.99029945170814,
1787
+ "grad_norm": 0.034470209590808834,
1788
+ "learning_rate": 3.945170737287356e-06,
1789
+ "loss": 0.0096588134765625,
1790
+ "step": 1180
1791
+ },
1792
+ {
1793
+ "epoch": 1.9987347110923661,
1794
+ "grad_norm": 0.04035006428036731,
1795
+ "learning_rate": 3.930073563859583e-06,
1796
+ "loss": 0.013312530517578126,
1797
+ "step": 1185
1798
+ },
1799
+ {
1800
+ "epoch": 2.006748207507381,
1801
+ "grad_norm": 0.03443773853658945,
1802
+ "learning_rate": 3.914950286456911e-06,
1803
+ "loss": 0.0104766845703125,
1804
+ "step": 1190
1805
+ },
1806
+ {
1807
+ "epoch": 2.015183466891607,
1808
+ "grad_norm": 0.02321269258461312,
1809
+ "learning_rate": 3.899801329537865e-06,
1810
+ "loss": 0.008111572265625,
1811
+ "step": 1195
1812
+ },
1813
+ {
1814
+ "epoch": 2.023618726275833,
1815
+ "grad_norm": 0.02427731911492366,
1816
+ "learning_rate": 3.884627118281706e-06,
1817
+ "loss": 0.009668731689453125,
1818
+ "step": 1200
1819
+ },
1820
+ {
1821
+ "epoch": 2.023618726275833,
1822
+ "eval_loss": 0.0028827113565057516,
1823
+ "eval_margin": -0.008073512017877111,
1824
+ "eval_mean_neg": 0.5066258907318115,
1825
+ "eval_mean_pos": 0.7934735417366028,
1826
+ "eval_runtime": 365.1842,
1827
+ "eval_samples_per_second": 21.877,
1828
+ "eval_steps_per_second": 0.342,
1829
+ "step": 1200
1830
+ },
1831
+ {
1832
+ "epoch": 2.0320539856600592,
1833
+ "grad_norm": 0.033776934236771874,
1834
+ "learning_rate": 3.869428078576498e-06,
1835
+ "loss": 0.00937347412109375,
1836
+ "step": 1205
1837
+ },
1838
+ {
1839
+ "epoch": 2.0404892450442853,
1840
+ "grad_norm": 0.05070270762284893,
1841
+ "learning_rate": 3.8542046370071575e-06,
1842
+ "loss": 0.008733367919921875,
1843
+ "step": 1210
1844
+ },
1845
+ {
1846
+ "epoch": 2.0489245044285114,
1847
+ "grad_norm": 0.028063560546546604,
1848
+ "learning_rate": 3.838957220843472e-06,
1849
+ "loss": 0.00914459228515625,
1850
+ "step": 1215
1851
+ },
1852
+ {
1853
+ "epoch": 2.0573597638127374,
1854
+ "grad_norm": 0.041287537117132886,
1855
+ "learning_rate": 3.8236862580281175e-06,
1856
+ "loss": 0.010516357421875,
1857
+ "step": 1220
1858
+ },
1859
+ {
1860
+ "epoch": 2.0657950231969635,
1861
+ "grad_norm": 0.03173632436563901,
1862
+ "learning_rate": 3.808392177164642e-06,
1863
+ "loss": 0.010186767578125,
1864
+ "step": 1225
1865
+ },
1866
+ {
1867
+ "epoch": 2.0742302825811896,
1868
+ "grad_norm": 0.03149301964970768,
1869
+ "learning_rate": 3.7930754075054406e-06,
1870
+ "loss": 0.010378265380859375,
1871
+ "step": 1230
1872
+ },
1873
+ {
1874
+ "epoch": 2.0826655419654156,
1875
+ "grad_norm": 0.03183747792195117,
1876
+ "learning_rate": 3.7777363789397004e-06,
1877
+ "loss": 0.009032630920410156,
1878
+ "step": 1235
1879
+ },
1880
+ {
1881
+ "epoch": 2.0911008013496417,
1882
+ "grad_norm": 0.0353065686803631,
1883
+ "learning_rate": 3.7623755219813442e-06,
1884
+ "loss": 0.0096771240234375,
1885
+ "step": 1240
1886
+ },
1887
+ {
1888
+ "epoch": 2.0995360607338673,
1889
+ "grad_norm": 0.040800577074973816,
1890
+ "learning_rate": 3.746993267756939e-06,
1891
+ "loss": 0.009685516357421875,
1892
+ "step": 1245
1893
+ },
1894
+ {
1895
+ "epoch": 2.107971320118094,
1896
+ "grad_norm": 0.04064182954953987,
1897
+ "learning_rate": 3.7315900479936044e-06,
1898
+ "loss": 0.010097503662109375,
1899
+ "step": 1250
1900
+ },
1901
+ {
1902
+ "epoch": 2.1164065795023195,
1903
+ "grad_norm": 0.04908593416403285,
1904
+ "learning_rate": 3.7161662950068846e-06,
1905
+ "loss": 0.009412384033203125,
1906
+ "step": 1255
1907
+ },
1908
+ {
1909
+ "epoch": 2.124841838886546,
1910
+ "grad_norm": 0.0284060145446946,
1911
+ "learning_rate": 3.7007224416886276e-06,
1912
+ "loss": 0.00821533203125,
1913
+ "step": 1260
1914
+ },
1915
+ {
1916
+ "epoch": 2.1332770982707716,
1917
+ "grad_norm": 0.030842726867602113,
1918
+ "learning_rate": 3.685258921494824e-06,
1919
+ "loss": 0.009014129638671875,
1920
+ "step": 1265
1921
+ },
1922
+ {
1923
+ "epoch": 2.141712357654998,
1924
+ "grad_norm": 0.036273158990138075,
1925
+ "learning_rate": 3.6697761684334466e-06,
1926
+ "loss": 0.010558700561523438,
1927
+ "step": 1270
1928
+ },
1929
+ {
1930
+ "epoch": 2.1501476170392237,
1931
+ "grad_norm": 0.03693819496482909,
1932
+ "learning_rate": 3.6542746170522717e-06,
1933
+ "loss": 0.010668182373046875,
1934
+ "step": 1275
1935
+ },
1936
+ {
1937
+ "epoch": 2.15858287642345,
1938
+ "grad_norm": 0.03797267942950567,
1939
+ "learning_rate": 3.638754702426678e-06,
1940
+ "loss": 0.008889389038085938,
1941
+ "step": 1280
1942
+ },
1943
+ {
1944
+ "epoch": 2.167018135807676,
1945
+ "grad_norm": 0.03341108305444907,
1946
+ "learning_rate": 3.6232168601474363e-06,
1947
+ "loss": 0.006923675537109375,
1948
+ "step": 1285
1949
+ },
1950
+ {
1951
+ "epoch": 2.175453395191902,
1952
+ "grad_norm": 0.03454779917085028,
1953
+ "learning_rate": 3.607661526308488e-06,
1954
+ "loss": 0.00969085693359375,
1955
+ "step": 1290
1956
+ },
1957
+ {
1958
+ "epoch": 2.183888654576128,
1959
+ "grad_norm": 0.035727285557249105,
1960
+ "learning_rate": 3.5920891374947005e-06,
1961
+ "loss": 0.00997161865234375,
1962
+ "step": 1295
1963
+ },
1964
+ {
1965
+ "epoch": 2.192323913960354,
1966
+ "grad_norm": 0.031320211315080816,
1967
+ "learning_rate": 3.5765001307696152e-06,
1968
+ "loss": 0.007769393920898438,
1969
+ "step": 1300
1970
+ },
1971
+ {
1972
+ "epoch": 2.192323913960354,
1973
+ "eval_loss": 0.0026897923089563847,
1974
+ "eval_margin": -0.0077478337221808975,
1975
+ "eval_mean_neg": 0.49285975098609924,
1976
+ "eval_mean_pos": 0.7862820625305176,
1977
+ "eval_runtime": 362.3777,
1978
+ "eval_samples_per_second": 22.046,
1979
+ "eval_steps_per_second": 0.345,
1980
+ "step": 1300
1981
+ },
1982
+ {
1983
+ "epoch": 2.20075917334458,
1984
+ "grad_norm": 0.023952498523963275,
1985
+ "learning_rate": 3.560894943663185e-06,
1986
+ "loss": 0.009902191162109376,
1987
+ "step": 1305
1988
+ },
1989
+ {
1990
+ "epoch": 2.2091944327288062,
1991
+ "grad_norm": 0.029616458459003896,
1992
+ "learning_rate": 3.545274014159486e-06,
1993
+ "loss": 0.008718109130859375,
1994
+ "step": 1310
1995
+ },
1996
+ {
1997
+ "epoch": 2.2176296921130323,
1998
+ "grad_norm": 0.026768679077660198,
1999
+ "learning_rate": 3.5296377806844334e-06,
2000
+ "loss": 0.006624603271484375,
2001
+ "step": 1315
2002
+ },
2003
+ {
2004
+ "epoch": 2.2260649514972584,
2005
+ "grad_norm": 0.03723135315427558,
2006
+ "learning_rate": 3.5139866820934687e-06,
2007
+ "loss": 0.010486793518066407,
2008
+ "step": 1320
2009
+ },
2010
+ {
2011
+ "epoch": 2.2345002108814844,
2012
+ "grad_norm": 0.030973900207479872,
2013
+ "learning_rate": 3.498321157659248e-06,
2014
+ "loss": 0.00841064453125,
2015
+ "step": 1325
2016
+ },
2017
+ {
2018
+ "epoch": 2.2429354702657105,
2019
+ "grad_norm": 0.042522927349784224,
2020
+ "learning_rate": 3.482641647059313e-06,
2021
+ "loss": 0.010484886169433594,
2022
+ "step": 1330
2023
+ },
2024
+ {
2025
+ "epoch": 2.2513707296499366,
2026
+ "grad_norm": 0.036298357689256384,
2027
+ "learning_rate": 3.4669485903637452e-06,
2028
+ "loss": 0.010845947265625,
2029
+ "step": 1335
2030
+ },
2031
+ {
2032
+ "epoch": 2.2598059890341626,
2033
+ "grad_norm": 0.04210885166855473,
2034
+ "learning_rate": 3.4512424280228227e-06,
2035
+ "loss": 0.009656906127929688,
2036
+ "step": 1340
2037
+ },
2038
+ {
2039
+ "epoch": 2.2682412484183887,
2040
+ "grad_norm": 0.037852259539673916,
2041
+ "learning_rate": 3.435523600854652e-06,
2042
+ "loss": 0.009561920166015625,
2043
+ "step": 1345
2044
+ },
2045
+ {
2046
+ "epoch": 2.2766765078026148,
2047
+ "grad_norm": 0.03972030283651443,
2048
+ "learning_rate": 3.4197925500327973e-06,
2049
+ "loss": 0.00974578857421875,
2050
+ "step": 1350
2051
+ },
2052
+ {
2053
+ "epoch": 2.285111767186841,
2054
+ "grad_norm": 0.03864567979018308,
2055
+ "learning_rate": 3.4040497170739e-06,
2056
+ "loss": 0.009082794189453125,
2057
+ "step": 1355
2058
+ },
2059
+ {
2060
+ "epoch": 2.293547026571067,
2061
+ "grad_norm": 0.03547766099076331,
2062
+ "learning_rate": 3.3882955438252852e-06,
2063
+ "loss": 0.008104705810546875,
2064
+ "step": 1360
2065
+ },
2066
+ {
2067
+ "epoch": 2.301982285955293,
2068
+ "grad_norm": 0.042069666240123815,
2069
+ "learning_rate": 3.372530472452561e-06,
2070
+ "loss": 0.010825538635253906,
2071
+ "step": 1365
2072
+ },
2073
+ {
2074
+ "epoch": 2.310417545339519,
2075
+ "grad_norm": 0.030187240942476403,
2076
+ "learning_rate": 3.356754945427209e-06,
2077
+ "loss": 0.010921478271484375,
2078
+ "step": 1370
2079
+ },
2080
+ {
2081
+ "epoch": 2.318852804723745,
2082
+ "grad_norm": 0.03775236120881388,
2083
+ "learning_rate": 3.3409694055141636e-06,
2084
+ "loss": 0.00971527099609375,
2085
+ "step": 1375
2086
+ },
2087
+ {
2088
+ "epoch": 2.327288064107971,
2089
+ "grad_norm": 0.04517333042895106,
2090
+ "learning_rate": 3.3251742957593896e-06,
2091
+ "loss": 0.010394287109375,
2092
+ "step": 1380
2093
+ },
2094
+ {
2095
+ "epoch": 2.3357233234921972,
2096
+ "grad_norm": 0.03441694727754078,
2097
+ "learning_rate": 3.3093700594774415e-06,
2098
+ "loss": 0.008525848388671875,
2099
+ "step": 1385
2100
+ },
2101
+ {
2102
+ "epoch": 2.3441585828764233,
2103
+ "grad_norm": 0.039855958024762626,
2104
+ "learning_rate": 3.2935571402390243e-06,
2105
+ "loss": 0.01035003662109375,
2106
+ "step": 1390
2107
+ },
2108
+ {
2109
+ "epoch": 2.3525938422606494,
2110
+ "grad_norm": 0.036912654679360425,
2111
+ "learning_rate": 3.2777359818585453e-06,
2112
+ "loss": 0.01036224365234375,
2113
+ "step": 1395
2114
+ },
2115
+ {
2116
+ "epoch": 2.3610291016448754,
2117
+ "grad_norm": 0.02819486898709386,
2118
+ "learning_rate": 3.2619070283816567e-06,
2119
+ "loss": 0.008788299560546876,
2120
+ "step": 1400
2121
+ },
2122
+ {
2123
+ "epoch": 2.3610291016448754,
2124
+ "eval_loss": 0.002835027640685439,
2125
+ "eval_margin": -0.006975951657119778,
2126
+ "eval_mean_neg": 0.4969240725040436,
2127
+ "eval_mean_pos": 0.7925288081169128,
2128
+ "eval_runtime": 364.0594,
2129
+ "eval_samples_per_second": 21.944,
2130
+ "eval_steps_per_second": 0.343,
2131
+ "step": 1400
2132
+ },
2133
+ {
2134
+ "epoch": 2.3694643610291015,
2135
+ "grad_norm": 0.0347736325148637,
2136
+ "learning_rate": 3.24607072407279e-06,
2137
+ "loss": 0.00931854248046875,
2138
+ "step": 1405
2139
+ },
2140
+ {
2141
+ "epoch": 2.3778996204133276,
2142
+ "grad_norm": 0.02843547221351205,
2143
+ "learning_rate": 3.2302275134026902e-06,
2144
+ "loss": 0.008514404296875,
2145
+ "step": 1410
2146
+ },
2147
+ {
2148
+ "epoch": 2.3863348797975537,
2149
+ "grad_norm": 0.035527939183407756,
2150
+ "learning_rate": 3.2143778410359414e-06,
2151
+ "loss": 0.009189605712890625,
2152
+ "step": 1415
2153
+ },
2154
+ {
2155
+ "epoch": 2.3947701391817797,
2156
+ "grad_norm": 0.02697400462877436,
2157
+ "learning_rate": 3.1985221518184845e-06,
2158
+ "loss": 0.008056259155273438,
2159
+ "step": 1420
2160
+ },
2161
+ {
2162
+ "epoch": 2.403205398566006,
2163
+ "grad_norm": 0.02974726363919492,
2164
+ "learning_rate": 3.1826608907651327e-06,
2165
+ "loss": 0.008675384521484374,
2166
+ "step": 1425
2167
+ },
2168
+ {
2169
+ "epoch": 2.411640657950232,
2170
+ "grad_norm": 0.04279127831329293,
2171
+ "learning_rate": 3.1667945030470815e-06,
2172
+ "loss": 0.009341812133789063,
2173
+ "step": 1430
2174
+ },
2175
+ {
2176
+ "epoch": 2.420075917334458,
2177
+ "grad_norm": 0.039837807919925805,
2178
+ "learning_rate": 3.1509234339794144e-06,
2179
+ "loss": 0.010208892822265624,
2180
+ "step": 1435
2181
+ },
2182
+ {
2183
+ "epoch": 2.428511176718684,
2184
+ "grad_norm": 0.03024657864136027,
2185
+ "learning_rate": 3.1350481290086038e-06,
2186
+ "loss": 0.008173370361328125,
2187
+ "step": 1440
2188
+ },
2189
+ {
2190
+ "epoch": 2.43694643610291,
2191
+ "grad_norm": 0.03564318900525913,
2192
+ "learning_rate": 3.119169033700011e-06,
2193
+ "loss": 0.00924224853515625,
2194
+ "step": 1445
2195
+ },
2196
+ {
2197
+ "epoch": 2.445381695487136,
2198
+ "grad_norm": 0.028913985964356455,
2199
+ "learning_rate": 3.103286593725377e-06,
2200
+ "loss": 0.008563995361328125,
2201
+ "step": 1450
2202
+ },
2203
+ {
2204
+ "epoch": 2.453816954871362,
2205
+ "grad_norm": 0.035875161756803144,
2206
+ "learning_rate": 3.0874012548503173e-06,
2207
+ "loss": 0.009112548828125,
2208
+ "step": 1455
2209
+ },
2210
+ {
2211
+ "epoch": 2.4622522142555883,
2212
+ "grad_norm": 0.03817913502015442,
2213
+ "learning_rate": 3.0715134629218095e-06,
2214
+ "loss": 0.007489013671875,
2215
+ "step": 1460
2216
+ },
2217
+ {
2218
+ "epoch": 2.4706874736398143,
2219
+ "grad_norm": 0.03470677728941542,
2220
+ "learning_rate": 3.0556236638556803e-06,
2221
+ "loss": 0.012370681762695313,
2222
+ "step": 1465
2223
+ },
2224
+ {
2225
+ "epoch": 2.4791227330240404,
2226
+ "grad_norm": 0.042966141209856486,
2227
+ "learning_rate": 3.0397323036240886e-06,
2228
+ "loss": 0.0088165283203125,
2229
+ "step": 1470
2230
+ },
2231
+ {
2232
+ "epoch": 2.4875579924082665,
2233
+ "grad_norm": 0.03434953324492014,
2234
+ "learning_rate": 3.023839828243012e-06,
2235
+ "loss": 0.008261871337890626,
2236
+ "step": 1475
2237
+ },
2238
+ {
2239
+ "epoch": 2.4959932517924925,
2240
+ "grad_norm": 0.03061507966476803,
2241
+ "learning_rate": 3.007946683759723e-06,
2242
+ "loss": 0.008873748779296874,
2243
+ "step": 1480
2244
+ },
2245
+ {
2246
+ "epoch": 2.5044285111767186,
2247
+ "grad_norm": 0.027732115444419583,
2248
+ "learning_rate": 2.9920533162402776e-06,
2249
+ "loss": 0.008371734619140625,
2250
+ "step": 1485
2251
+ },
2252
+ {
2253
+ "epoch": 2.5128637705609447,
2254
+ "grad_norm": 0.029951392389848317,
2255
+ "learning_rate": 2.9761601717569896e-06,
2256
+ "loss": 0.00865478515625,
2257
+ "step": 1490
2258
+ },
2259
+ {
2260
+ "epoch": 2.5212990299451707,
2261
+ "grad_norm": 0.033652436341082566,
2262
+ "learning_rate": 2.960267696375911e-06,
2263
+ "loss": 0.009691619873046875,
2264
+ "step": 1495
2265
+ },
2266
+ {
2267
+ "epoch": 2.529734289329397,
2268
+ "grad_norm": 0.024511774862390433,
2269
+ "learning_rate": 2.9443763361443203e-06,
2270
+ "loss": 0.010028076171875,
2271
+ "step": 1500
2272
+ },
2273
+ {
2274
+ "epoch": 2.529734289329397,
2275
+ "eval_loss": 0.002559108193963766,
2276
+ "eval_margin": -0.006972289358776423,
2277
+ "eval_mean_neg": 0.4843982458114624,
2278
+ "eval_mean_pos": 0.7881345152854919,
2279
+ "eval_runtime": 365.5243,
2280
+ "eval_samples_per_second": 21.856,
2281
+ "eval_steps_per_second": 0.342,
2282
+ "step": 1500
2283
+ },
2284
+ {
2285
+ "epoch": 2.538169548713623,
2286
+ "grad_norm": 0.034627287332041165,
2287
+ "learning_rate": 2.9284865370781906e-06,
2288
+ "loss": 0.00982513427734375,
2289
+ "step": 1505
2290
+ },
2291
+ {
2292
+ "epoch": 2.546604808097849,
2293
+ "grad_norm": 0.03482839500691478,
2294
+ "learning_rate": 2.9125987451496837e-06,
2295
+ "loss": 0.00842742919921875,
2296
+ "step": 1510
2297
+ },
2298
+ {
2299
+ "epoch": 2.555040067482075,
2300
+ "grad_norm": 0.034040276652772095,
2301
+ "learning_rate": 2.8967134062746236e-06,
2302
+ "loss": 0.008990859985351563,
2303
+ "step": 1515
2304
+ },
2305
+ {
2306
+ "epoch": 2.563475326866301,
2307
+ "grad_norm": 0.03868034786852329,
2308
+ "learning_rate": 2.8808309662999897e-06,
2309
+ "loss": 0.007648468017578125,
2310
+ "step": 1520
2311
+ },
2312
+ {
2313
+ "epoch": 2.571910586250527,
2314
+ "grad_norm": 0.0419385930164125,
2315
+ "learning_rate": 2.864951870991397e-06,
2316
+ "loss": 0.009268951416015626,
2317
+ "step": 1525
2318
+ },
2319
+ {
2320
+ "epoch": 2.580345845634753,
2321
+ "grad_norm": 0.03690945718603307,
2322
+ "learning_rate": 2.8490765660205857e-06,
2323
+ "loss": 0.00864715576171875,
2324
+ "step": 1530
2325
+ },
2326
+ {
2327
+ "epoch": 2.5887811050189793,
2328
+ "grad_norm": 0.02493335874585237,
2329
+ "learning_rate": 2.833205496952919e-06,
2330
+ "loss": 0.00865478515625,
2331
+ "step": 1535
2332
+ },
2333
+ {
2334
+ "epoch": 2.5972163644032054,
2335
+ "grad_norm": 0.0355467734297459,
2336
+ "learning_rate": 2.817339109234868e-06,
2337
+ "loss": 0.009038543701171875,
2338
+ "step": 1540
2339
+ },
2340
+ {
2341
+ "epoch": 2.6056516237874314,
2342
+ "grad_norm": 0.035706551369837684,
2343
+ "learning_rate": 2.801477848181517e-06,
2344
+ "loss": 0.008769607543945313,
2345
+ "step": 1545
2346
+ },
2347
+ {
2348
+ "epoch": 2.6140868831716575,
2349
+ "grad_norm": 0.030590948482880534,
2350
+ "learning_rate": 2.7856221589640584e-06,
2351
+ "loss": 0.010419464111328125,
2352
+ "step": 1550
2353
+ },
2354
+ {
2355
+ "epoch": 2.6225221425558836,
2356
+ "grad_norm": 0.031187166990055255,
2357
+ "learning_rate": 2.7697724865973103e-06,
2358
+ "loss": 0.008966064453125,
2359
+ "step": 1555
2360
+ },
2361
+ {
2362
+ "epoch": 2.6309574019401096,
2363
+ "grad_norm": 0.03195446103788609,
2364
+ "learning_rate": 2.753929275927211e-06,
2365
+ "loss": 0.00810089111328125,
2366
+ "step": 1560
2367
+ },
2368
+ {
2369
+ "epoch": 2.6393926613243357,
2370
+ "grad_norm": 0.03124766219549955,
2371
+ "learning_rate": 2.7380929716183448e-06,
2372
+ "loss": 0.00867919921875,
2373
+ "step": 1565
2374
+ },
2375
+ {
2376
+ "epoch": 2.6478279207085618,
2377
+ "grad_norm": 0.04158743972175772,
2378
+ "learning_rate": 2.722264018141455e-06,
2379
+ "loss": 0.008811187744140626,
2380
+ "step": 1570
2381
+ },
2382
+ {
2383
+ "epoch": 2.656263180092788,
2384
+ "grad_norm": 0.042358151513616535,
2385
+ "learning_rate": 2.706442859760976e-06,
2386
+ "loss": 0.008480644226074219,
2387
+ "step": 1575
2388
+ },
2389
+ {
2390
+ "epoch": 2.664698439477014,
2391
+ "grad_norm": 0.02876853915749735,
2392
+ "learning_rate": 2.6906299405225595e-06,
2393
+ "loss": 0.009603309631347656,
2394
+ "step": 1580
2395
+ },
2396
+ {
2397
+ "epoch": 2.67313369886124,
2398
+ "grad_norm": 0.031452133973887623,
2399
+ "learning_rate": 2.6748257042406114e-06,
2400
+ "loss": 0.008524322509765625,
2401
+ "step": 1585
2402
+ },
2403
+ {
2404
+ "epoch": 2.681568958245466,
2405
+ "grad_norm": 0.03502577600676223,
2406
+ "learning_rate": 2.659030594485836e-06,
2407
+ "loss": 0.007845306396484375,
2408
+ "step": 1590
2409
+ },
2410
+ {
2411
+ "epoch": 2.690004217629692,
2412
+ "grad_norm": 0.029358280910616305,
2413
+ "learning_rate": 2.6432450545727913e-06,
2414
+ "loss": 0.008304595947265625,
2415
+ "step": 1595
2416
+ },
2417
+ {
2418
+ "epoch": 2.698439477013918,
2419
+ "grad_norm": 0.037226468621806945,
2420
+ "learning_rate": 2.62746952754744e-06,
2421
+ "loss": 0.0089141845703125,
2422
+ "step": 1600
2423
+ },
2424
+ {
2425
+ "epoch": 2.698439477013918,
2426
+ "eval_loss": 0.002468662802129984,
2427
+ "eval_margin": -0.006652700444383006,
2428
+ "eval_mean_neg": 0.5055871605873108,
2429
+ "eval_mean_pos": 0.8004181981086731,
2430
+ "eval_runtime": 363.3432,
2431
+ "eval_samples_per_second": 21.987,
2432
+ "eval_steps_per_second": 0.344,
2433
+ "step": 1600
2434
+ },
2435
+ {
2436
+ "epoch": 2.7068747363981442,
2437
+ "grad_norm": 0.029979441347867175,
2438
+ "learning_rate": 2.6117044561747145e-06,
2439
+ "loss": 0.007899856567382813,
2440
+ "step": 1605
2441
+ },
2442
+ {
2443
+ "epoch": 2.7153099957823703,
2444
+ "grad_norm": 0.04117264280378634,
2445
+ "learning_rate": 2.5959502829261e-06,
2446
+ "loss": 0.009801483154296875,
2447
+ "step": 1610
2448
+ },
2449
+ {
2450
+ "epoch": 2.7237452551665964,
2451
+ "grad_norm": 0.02874139529420723,
2452
+ "learning_rate": 2.5802074499672033e-06,
2453
+ "loss": 0.007126617431640625,
2454
+ "step": 1615
2455
+ },
2456
+ {
2457
+ "epoch": 2.7321805145508224,
2458
+ "grad_norm": 0.032009387593884574,
2459
+ "learning_rate": 2.564476399145349e-06,
2460
+ "loss": 0.007319259643554688,
2461
+ "step": 1620
2462
+ },
2463
+ {
2464
+ "epoch": 2.7406157739350485,
2465
+ "grad_norm": 0.0343660828009257,
2466
+ "learning_rate": 2.5487575719771774e-06,
2467
+ "loss": 0.010648345947265625,
2468
+ "step": 1625
2469
+ },
2470
+ {
2471
+ "epoch": 2.7490510333192746,
2472
+ "grad_norm": 0.033859872264591424,
2473
+ "learning_rate": 2.533051409636255e-06,
2474
+ "loss": 0.007244110107421875,
2475
+ "step": 1630
2476
+ },
2477
+ {
2478
+ "epoch": 2.7574862927035007,
2479
+ "grad_norm": 0.032973506044290384,
2480
+ "learning_rate": 2.517358352940688e-06,
2481
+ "loss": 0.008284759521484376,
2482
+ "step": 1635
2483
+ },
2484
+ {
2485
+ "epoch": 2.7659215520877267,
2486
+ "grad_norm": 0.03481146191160576,
2487
+ "learning_rate": 2.501678842340753e-06,
2488
+ "loss": 0.00882110595703125,
2489
+ "step": 1640
2490
+ },
2491
+ {
2492
+ "epoch": 2.774356811471953,
2493
+ "grad_norm": 0.03862588539253724,
2494
+ "learning_rate": 2.4860133179065323e-06,
2495
+ "loss": 0.00964202880859375,
2496
+ "step": 1645
2497
+ },
2498
+ {
2499
+ "epoch": 2.782792070856179,
2500
+ "grad_norm": 0.02979780702601001,
2501
+ "learning_rate": 2.4703622193155676e-06,
2502
+ "loss": 0.009095001220703124,
2503
+ "step": 1650
2504
+ },
2505
+ {
2506
+ "epoch": 2.791227330240405,
2507
+ "grad_norm": 0.02658002258647219,
2508
+ "learning_rate": 2.4547259858405147e-06,
2509
+ "loss": 0.008580398559570313,
2510
+ "step": 1655
2511
+ },
2512
+ {
2513
+ "epoch": 2.799662589624631,
2514
+ "grad_norm": 0.03237100489547251,
2515
+ "learning_rate": 2.439105056336816e-06,
2516
+ "loss": 0.006137275695800781,
2517
+ "step": 1660
2518
+ },
2519
+ {
2520
+ "epoch": 2.808097849008857,
2521
+ "grad_norm": 0.035925961611001624,
2522
+ "learning_rate": 2.423499869230385e-06,
2523
+ "loss": 0.006979179382324219,
2524
+ "step": 1665
2525
+ },
2526
+ {
2527
+ "epoch": 2.816533108393083,
2528
+ "grad_norm": 0.028925897672990208,
2529
+ "learning_rate": 2.4079108625053e-06,
2530
+ "loss": 0.007439422607421875,
2531
+ "step": 1670
2532
+ },
2533
+ {
2534
+ "epoch": 2.824968367777309,
2535
+ "grad_norm": 0.02643424196739614,
2536
+ "learning_rate": 2.392338473691513e-06,
2537
+ "loss": 0.007563400268554688,
2538
+ "step": 1675
2539
+ },
2540
+ {
2541
+ "epoch": 2.8334036271615353,
2542
+ "grad_norm": 0.029469931037551172,
2543
+ "learning_rate": 2.376783139852564e-06,
2544
+ "loss": 0.00782928466796875,
2545
+ "step": 1680
2546
+ },
2547
+ {
2548
+ "epoch": 2.8418388865457613,
2549
+ "grad_norm": 0.03519097117769341,
2550
+ "learning_rate": 2.3612452975733225e-06,
2551
+ "loss": 0.0081695556640625,
2552
+ "step": 1685
2553
+ },
2554
+ {
2555
+ "epoch": 2.8502741459299874,
2556
+ "grad_norm": 0.041842720836538394,
2557
+ "learning_rate": 2.3457253829477284e-06,
2558
+ "loss": 0.00938720703125,
2559
+ "step": 1690
2560
+ },
2561
+ {
2562
+ "epoch": 2.8587094053142135,
2563
+ "grad_norm": 0.02803118980318521,
2564
+ "learning_rate": 2.3302238315665544e-06,
2565
+ "loss": 0.007602310180664063,
2566
+ "step": 1695
2567
+ },
2568
+ {
2569
+ "epoch": 2.8671446646984395,
2570
+ "grad_norm": 0.06643247372472408,
2571
+ "learning_rate": 2.314741078505177e-06,
2572
+ "loss": 0.009275436401367188,
2573
+ "step": 1700
2574
+ },
2575
+ {
2576
+ "epoch": 2.8671446646984395,
2577
+ "eval_loss": 0.002381447935476899,
2578
+ "eval_margin": -0.0063614378337778395,
2579
+ "eval_mean_neg": 0.4982295334339142,
2580
+ "eval_mean_pos": 0.7957465648651123,
2581
+ "eval_runtime": 364.3957,
2582
+ "eval_samples_per_second": 21.924,
2583
+ "eval_steps_per_second": 0.343,
2584
+ "step": 1700
2585
+ },
2586
+ {
2587
+ "epoch": 2.8755799240826656,
2588
+ "grad_norm": 0.04573493937998368,
2589
+ "learning_rate": 2.299277558311373e-06,
2590
+ "loss": 0.008275604248046875,
2591
+ "step": 1705
2592
+ },
2593
+ {
2594
+ "epoch": 2.8840151834668917,
2595
+ "grad_norm": 0.030855319414577996,
2596
+ "learning_rate": 2.283833704993116e-06,
2597
+ "loss": 0.008497047424316406,
2598
+ "step": 1710
2599
+ },
2600
+ {
2601
+ "epoch": 2.8924504428511177,
2602
+ "grad_norm": 0.03287831217925721,
2603
+ "learning_rate": 2.268409952006397e-06,
2604
+ "loss": 0.006939697265625,
2605
+ "step": 1715
2606
+ },
2607
+ {
2608
+ "epoch": 2.900885702235344,
2609
+ "grad_norm": 0.03738971418410914,
2610
+ "learning_rate": 2.253006732243061e-06,
2611
+ "loss": 0.00982208251953125,
2612
+ "step": 1720
2613
+ },
2614
+ {
2615
+ "epoch": 2.90932096161957,
2616
+ "grad_norm": 0.02295281003302144,
2617
+ "learning_rate": 2.237624478018656e-06,
2618
+ "loss": 0.00743560791015625,
2619
+ "step": 1725
2620
+ },
2621
+ {
2622
+ "epoch": 2.917756221003796,
2623
+ "grad_norm": 0.03960242549923526,
2624
+ "learning_rate": 2.2222636210603002e-06,
2625
+ "loss": 0.008847427368164063,
2626
+ "step": 1730
2627
+ },
2628
+ {
2629
+ "epoch": 2.926191480388022,
2630
+ "grad_norm": 0.04741800625952587,
2631
+ "learning_rate": 2.2069245924945604e-06,
2632
+ "loss": 0.009384918212890624,
2633
+ "step": 1735
2634
+ },
2635
+ {
2636
+ "epoch": 2.934626739772248,
2637
+ "grad_norm": 0.03409532340357435,
2638
+ "learning_rate": 2.191607822835357e-06,
2639
+ "loss": 0.0076019287109375,
2640
+ "step": 1740
2641
+ },
2642
+ {
2643
+ "epoch": 2.943061999156474,
2644
+ "grad_norm": 0.0239211291682541,
2645
+ "learning_rate": 2.1763137419718826e-06,
2646
+ "loss": 0.007954025268554687,
2647
+ "step": 1745
2648
+ },
2649
+ {
2650
+ "epoch": 2.9514972585407,
2651
+ "grad_norm": 0.0255275562880085,
2652
+ "learning_rate": 2.161042779156529e-06,
2653
+ "loss": 0.007129669189453125,
2654
+ "step": 1750
2655
+ },
2656
+ {
2657
+ "epoch": 2.9599325179249263,
2658
+ "grad_norm": 0.026100931475016395,
2659
+ "learning_rate": 2.1457953629928426e-06,
2660
+ "loss": 0.007111358642578125,
2661
+ "step": 1755
2662
+ },
2663
+ {
2664
+ "epoch": 2.9683677773091524,
2665
+ "grad_norm": 0.03040565516608014,
2666
+ "learning_rate": 2.1305719214235017e-06,
2667
+ "loss": 0.00856170654296875,
2668
+ "step": 1760
2669
+ },
2670
+ {
2671
+ "epoch": 2.9768030366933784,
2672
+ "grad_norm": 0.031725391054917944,
2673
+ "learning_rate": 2.115372881718295e-06,
2674
+ "loss": 0.00930938720703125,
2675
+ "step": 1765
2676
+ },
2677
+ {
2678
+ "epoch": 2.9852382960776045,
2679
+ "grad_norm": 0.025864373534585865,
2680
+ "learning_rate": 2.100198670462137e-06,
2681
+ "loss": 0.007320022583007813,
2682
+ "step": 1770
2683
+ },
2684
+ {
2685
+ "epoch": 2.9936735554618306,
2686
+ "grad_norm": 0.013680490985647303,
2687
+ "learning_rate": 2.0850497135430897e-06,
2688
+ "loss": 0.007777786254882813,
2689
+ "step": 1775
2690
+ },
2691
+ {
2692
+ "epoch": 3.001687051876845,
2693
+ "grad_norm": 0.03143671946142631,
2694
+ "learning_rate": 2.0699264361404174e-06,
2695
+ "loss": 0.008609771728515625,
2696
+ "step": 1780
2697
+ },
2698
+ {
2699
+ "epoch": 3.010122311261071,
2700
+ "grad_norm": 0.021237532660395856,
2701
+ "learning_rate": 2.054829262712645e-06,
2702
+ "loss": 0.007422637939453125,
2703
+ "step": 1785
2704
+ },
2705
+ {
2706
+ "epoch": 3.018557570645297,
2707
+ "grad_norm": 0.02970629169587053,
2708
+ "learning_rate": 2.0397586169856488e-06,
2709
+ "loss": 0.008047866821289062,
2710
+ "step": 1790
2711
+ },
2712
+ {
2713
+ "epoch": 3.0269928300295232,
2714
+ "grad_norm": 0.0280079357370666,
2715
+ "learning_rate": 2.024714921940763e-06,
2716
+ "loss": 0.008725738525390625,
2717
+ "step": 1795
2718
+ },
2719
+ {
2720
+ "epoch": 3.0354280894137493,
2721
+ "grad_norm": 0.05178206206651836,
2722
+ "learning_rate": 2.0096985998029124e-06,
2723
+ "loss": 0.007384490966796875,
2724
+ "step": 1800
2725
+ },
2726
+ {
2727
+ "epoch": 3.0354280894137493,
2728
+ "eval_loss": 0.0022954940795898438,
2729
+ "eval_margin": -0.005535545939159009,
2730
+ "eval_mean_neg": 0.49804064631462097,
2731
+ "eval_mean_pos": 0.7978142499923706,
2732
+ "eval_runtime": 362.5441,
2733
+ "eval_samples_per_second": 22.036,
2734
+ "eval_steps_per_second": 0.345,
2735
+ "step": 1800
2736
+ }
2737
+ ],
2738
+ "logging_steps": 5,
2739
+ "max_steps": 2965,
2740
+ "num_input_tokens_seen": 0,
2741
+ "num_train_epochs": 5,
2742
+ "save_steps": 100,
2743
+ "stateful_callbacks": {
2744
+ "TrainerControl": {
2745
+ "args": {
2746
+ "should_epoch_stop": false,
2747
+ "should_evaluate": false,
2748
+ "should_log": false,
2749
+ "should_save": true,
2750
+ "should_training_stop": false
2751
+ },
2752
+ "attributes": {}
2753
+ }
2754
+ },
2755
+ "total_flos": 2008625567629312.0,
2756
+ "train_batch_size": 4,
2757
+ "trial_name": null,
2758
+ "trial_params": null
2759
+ }
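
The JSON above follows the standard layout of a Hugging Face Trainer state file: "log_history" holds one entry per logging step (epoch, loss, grad_norm, learning_rate, step) plus periodic eval entries (eval_loss, eval_margin, eval_mean_pos, eval_mean_neg, runtime stats), and the trailing keys record the run configuration. As a quick way to read the curves back, here is a minimal sketch (not part of the upload; it assumes this file is the trainer_state.json written by the Trainer and sits in the working directory):

    import json

    # "log_history" mixes training logs and eval logs; split them by their keys
    with open("trainer_state.json") as f:
        state = json.load(f)

    train_logs = [e for e in state["log_history"] if "loss" in e]
    eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

    print("last train loss:", train_logs[-1]["loss"], "at step", train_logs[-1]["step"])
    print("last eval loss:", eval_logs[-1]["eval_loss"], "at step", eval_logs[-1]["step"])
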
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e67a98968389a98ad4e22d001eae6645ff2ad0cb75b9e2316afcc7dcb356ed91
3
+ size 8888
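
training_args.bin is stored through Git LFS, so the diff only shows its pointer (hash and size). The file is typically a pickled transformers.TrainingArguments object saved by the Trainer; a minimal sketch for inspecting it (an assumption about this particular file, and it requires the real file pulled from LFS plus torch and transformers installed):

    import torch

    # a pickled TrainingArguments instance, so weights_only must stay False
    args = torch.load("training_args.bin", weights_only=False)
    print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
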
vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import gc
25
+ import json
26
+ import numpy as np
27
+ from tqdm import tqdm
28
+ from collections import OrderedDict
29
+ from dataclasses import dataclass
30
+
31
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
32
+ # DeepSpeed data structures it has to be available in the current python environment.
33
+ from deepspeed.utils import logger
34
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
+
38
+
39
+ @dataclass
40
+ class zero_model_state:
41
+ buffers: dict()
42
+ param_shapes: dict()
43
+ shared_params: list
44
+ ds_version: int
45
+ frozen_param_shapes: dict()
46
+ frozen_param_fragments: dict()
47
+
48
+
49
+ debug = 0
50
+
51
+ # load to cpu
52
+ device = torch.device('cpu')
53
+
54
+
55
+ def atoi(text):
56
+ return int(text) if text.isdigit() else text
57
+
58
+
59
+ def natural_keys(text):
60
+ '''
61
+ alist.sort(key=natural_keys) sorts in human order
62
+ http://nedbatchelder.com/blog/200712/human_sorting.html
63
+ (See Toothy's implementation in the comments)
64
+ '''
65
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
66
+
67
+
68
+ def get_model_state_file(checkpoint_dir, zero_stage):
69
+ if not os.path.isdir(checkpoint_dir):
70
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
+
72
+ # there should be only one file
73
+ if zero_stage <= 2:
74
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
+ elif zero_stage == 3:
76
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
+
78
+ if not os.path.exists(file):
79
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
80
+
81
+ return file
82
+
83
+
84
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
+ # XXX: need to test that this simple glob rule works for multi-node setup too
86
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
+
88
+ if len(ckpt_files) == 0:
89
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
+
91
+ return ckpt_files
92
+
93
+
94
+ def get_optim_files(checkpoint_dir):
95
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
+
97
+
98
+ def get_model_state_files(checkpoint_dir):
99
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
+
101
+
102
+ def parse_model_states(files):
103
+ zero_model_states = []
104
+ for file in files:
105
+ state_dict = torch.load(file, map_location=device, weights_only=False)
106
+
107
+ if BUFFER_NAMES not in state_dict:
108
+ raise ValueError(f"{file} is not a model state checkpoint")
109
+ buffer_names = state_dict[BUFFER_NAMES]
110
+ if debug:
111
+ print("Found buffers:", buffer_names)
112
+
113
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
+ param_shapes = state_dict[PARAM_SHAPES]
116
+
117
+ # collect parameters that are included in param_shapes
118
+ param_names = []
119
+ for s in param_shapes:
120
+ for name in s.keys():
121
+ param_names.append(name)
122
+
123
+ # update with frozen parameters
124
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
+ if frozen_param_shapes is not None:
126
+ if debug:
127
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
+ param_names += list(frozen_param_shapes.keys())
129
+
130
+ # handle shared params
131
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
+
133
+ ds_version = state_dict.get(DS_VERSION, None)
134
+
135
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
+
137
+ z_model_state = zero_model_state(buffers=buffers,
138
+ param_shapes=param_shapes,
139
+ shared_params=shared_params,
140
+ ds_version=ds_version,
141
+ frozen_param_shapes=frozen_param_shapes,
142
+ frozen_param_fragments=frozen_param_fragments)
143
+ zero_model_states.append(z_model_state)
144
+
145
+ return zero_model_states
146
+
147
+
148
+ def parse_optim_states(files, ds_checkpoint_dir):
149
+ total_files = len(files)
150
+ state_dicts = []
151
+ for f in tqdm(files, desc='Loading checkpoint shards'):
152
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights
154
+ # and also handle the case where it was already removed by another helper script
155
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
+ state_dicts.append(state_dict)
157
+
158
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
160
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
+
163
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
165
+ # use the max of the partition_count to get the dp world_size.
166
+
167
+ if type(world_size) is list:
168
+ world_size = max(world_size)
169
+
170
+ if world_size != total_files:
171
+ raise ValueError(
172
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
173
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
174
+ )
175
+
176
+ # the groups are named differently in each stage
177
+ if zero_stage <= 2:
178
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
+ elif zero_stage == 3:
180
+ fp32_groups_key = FP32_FLAT_GROUPS
181
+ else:
182
+ raise ValueError(f"unknown zero stage {zero_stage}")
183
+
184
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
+ return zero_stage, world_size, fp32_flat_groups
186
+
187
+
188
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
+ """
190
+ Returns fp32 state_dict reconstructed from ds checkpoint
191
+
192
+ Args:
193
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
+
195
+ """
196
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
+
198
+ optim_files = get_optim_files(ds_checkpoint_dir)
199
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
+
202
+ model_files = get_model_state_files(ds_checkpoint_dir)
203
+
204
+ zero_model_states = parse_model_states(model_files)
205
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
+
207
+ if zero_stage <= 2:
208
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
+ exclude_frozen_parameters)
210
+ elif zero_stage == 3:
211
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
+ exclude_frozen_parameters)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _has_callable(obj, fn):
248
+ attr = getattr(obj, fn, None)
249
+ return callable(attr)
250
+
251
+
252
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
+ param_shapes = zero_model_states[0].param_shapes
254
+
255
+ # Reconstruction protocol:
256
+ #
257
+ # XXX: document this
258
+
259
+ if debug:
260
+ for i in range(world_size):
261
+ for j in range(len(fp32_flat_groups[0])):
262
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
+
264
+ # XXX: memory usage doubles here (zero2)
265
+ num_param_groups = len(fp32_flat_groups[0])
266
+ merged_single_partition_of_fp32_groups = []
267
+ for i in range(num_param_groups):
268
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
+ avail_numel = sum(
272
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
+
274
+ if debug:
275
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
276
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
+ # not asserting if there is a mismatch due to possible padding
278
+ print(f"Have {avail_numel} numels to process.")
279
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
+
281
+ # params
282
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
283
+ # out-of-core computing solution
284
+ total_numel = 0
285
+ total_params = 0
286
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
+ offset = 0
288
+ avail_numel = full_single_fp32_vector.numel()
289
+ for name, shape in shapes.items():
290
+
291
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
+ total_numel += unpartitioned_numel
293
+ total_params += 1
294
+
295
+ if debug:
296
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
+ offset += unpartitioned_numel
299
+
300
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
+ # live optimizer object, so we are checking that the numbers are within the right range
304
+ align_to = 2 * world_size
305
+
306
+ def zero2_align(x):
307
+ return align_to * math.ceil(x / align_to)
308
+
309
+ if debug:
310
+ print(f"original offset={offset}, avail_numel={avail_numel}")
311
+
312
+ offset = zero2_align(offset)
313
+ avail_numel = zero2_align(avail_numel)
314
+
315
+ if debug:
316
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
+
318
+ # Sanity check
319
+ if offset != avail_numel:
320
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
+
322
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
+
324
+
325
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
+ exclude_frozen_parameters):
327
+ state_dict = OrderedDict()
328
+
329
+ # buffers
330
+ buffers = zero_model_states[0].buffers
331
+ state_dict.update(buffers)
332
+ if debug:
333
+ print(f"added {len(buffers)} buffers")
334
+
335
+ if not exclude_frozen_parameters:
336
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
337
+
338
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
+
340
+ # recover shared parameters
341
+ for pair in zero_model_states[0].shared_params:
342
+ if pair[1] in state_dict:
343
+ state_dict[pair[0]] = state_dict[pair[1]]
344
+
345
+ return state_dict
346
+
347
+
348
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
+ remainder = unpartitioned_numel % world_size
350
+ padding_numel = (world_size - remainder) if remainder else 0
351
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
+ return partitioned_numel, padding_numel
353
+
354
+
355
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
+ return
358
+
359
+ if debug:
360
+ for i in range(world_size):
361
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
+
364
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
+ wanted_params = len(frozen_param_shapes)
366
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
+ print(f'Frozen params: Have {avail_numel} numels to process.')
369
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
+
371
+ total_params = 0
372
+ total_numel = 0
373
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
+ total_params += 1
375
+ unpartitioned_numel = shape.numel()
376
+ total_numel += unpartitioned_numel
377
+
378
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
+
381
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
+
383
+ if debug:
384
+ print(
385
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
+ )
387
+
388
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
+
390
+
391
+ class GatheredTensor:
392
+ """
393
+ A pseudo tensor that collects partitioned weights.
394
+ It is more memory efficient when there are multiple groups.
395
+ """
396
+
397
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
398
+ self.flat_groups = flat_groups
399
+ self.flat_groups_offset = flat_groups_offset
400
+ self.offset = offset
401
+ self.partitioned_numel = partitioned_numel
402
+ self.shape = shape
403
+ self.dtype = self.flat_groups[0][0].dtype
404
+
405
+ def contiguous(self):
406
+ """
407
+ Merge partitioned weights from flat_groups into a single tensor.
408
+ """
409
+ end_idx = self.offset + self.partitioned_numel
410
+ world_size = len(self.flat_groups)
411
+ pad_flat_param_chunks = []
412
+
413
+ for rank_i in range(world_size):
414
+ # for each rank, we need to collect weights from related group/groups
415
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
416
+ start_group_id = None
417
+ end_group_id = None
418
+ for group_id in range(len(self.flat_groups_offset)):
419
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
420
+ start_group_id = group_id
421
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
422
+ end_group_id = group_id
423
+ break
424
+ # collect weights from related group/groups
425
+ for group_id in range(start_group_id, end_group_id + 1):
426
+ flat_tensor = flat_groups_at_rank_i[group_id]
427
+ start_offset = self.offset - self.flat_groups_offset[group_id]
428
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
429
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
430
+
431
+ # collect weights from all ranks
432
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
433
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
434
+ return param
435
+
436
+
437
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
438
+ param_shapes = zero_model_states[0].param_shapes
439
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
440
+
441
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
442
+ # param, re-consolidating each param, while dealing with padding if any
443
+
444
+ # merge list of dicts, preserving order
445
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
446
+
447
+ if debug:
448
+ for i in range(world_size):
449
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
450
+
451
+ wanted_params = len(param_shapes)
452
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
453
+ # not asserting if there is a mismatch due to possible padding
454
+ avail_numel = fp32_flat_groups[0].numel() * world_size
455
+ print(f"Trainable params: Have {avail_numel} numels to process.")
456
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
457
+
458
+ # params
459
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
460
+ # out-of-core computing solution
461
+ offset = 0
462
+ total_numel = 0
463
+ total_params = 0
464
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
465
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
466
+ unpartitioned_numel = shape.numel()
467
+ total_numel += unpartitioned_numel
468
+ total_params += 1
469
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
470
+
471
+ if debug:
472
+ print(
473
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
474
+ )
475
+
476
+ # memory efficient tensor
477
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
478
+ state_dict[name] = tensor
479
+ offset += partitioned_numel
480
+
481
+ offset *= world_size
482
+
483
+ # Sanity check
484
+ if offset != avail_numel:
485
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
486
+
487
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
488
+
489
+
490
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
491
+ exclude_frozen_parameters):
492
+ state_dict = OrderedDict()
493
+
494
+ # buffers
495
+ buffers = zero_model_states[0].buffers
496
+ state_dict.update(buffers)
497
+ if debug:
498
+ print(f"added {len(buffers)} buffers")
499
+
500
+ if not exclude_frozen_parameters:
501
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
502
+
503
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
504
+
505
+ # recover shared parameters
506
+ for pair in zero_model_states[0].shared_params:
507
+ if pair[1] in state_dict:
508
+ state_dict[pair[0]] = state_dict[pair[1]]
509
+
510
+ return state_dict
511
+
512
+
513
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
514
+ """
515
+ Convert state_dict of GatheredTensor to torch tensor
516
+ """
517
+ torch_state_dict = {}
518
+ converted_tensors = {}
519
+ for name, tensor in state_dict.items():
520
+ tensor_id = id(tensor)
521
+ if tensor_id in converted_tensors: # shared tensors
522
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
523
+ torch_state_dict[name] = shared_tensor
524
+ else:
525
+ converted_tensors[tensor_id] = name
526
+ if return_empty_tensor:
527
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
528
+ else:
529
+ torch_state_dict[name] = tensor.contiguous()
530
+ return torch_state_dict
531
+
532
+
533
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
534
+ tag=None,
535
+ exclude_frozen_parameters=False,
536
+ lazy_mode=False):
537
+ """
538
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
539
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
540
+ via a model hub.
541
+
542
+ Args:
543
+ - ``checkpoint_dir``: path to the desired checkpoint folder
544
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
545
+ - ``exclude_frozen_parameters``: exclude frozen parameters
546
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
547
+ Convert a pseudo tensor to a torch tensor by calling ``.contiguous()``
548
+
549
+ Returns:
550
+ - pytorch ``state_dict``
551
+
552
+ A typical usage might be ::
553
+
554
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
555
+ # do the training and checkpoint saving
556
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
557
+ model = model.cpu() # move to cpu
558
+ model.load_state_dict(state_dict)
559
+ # submit to model hub or save the model to share with others
560
+
561
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
562
+ application. i.e. you will need to re-initialize the deepspeed engine, since
563
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
564
+
565
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
566
+
567
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
568
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
569
+ the checkpoint. Or you can load state_dict in lazy mode ::
570
+
571
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
572
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
573
+ for name, lazy_tensor in state_dict.items():
574
+ tensor = lazy_tensor.contiguous() # to cpu
575
+ print(name, tensor)
576
+ # del tensor to release memory if it is no longer in use
577
+ """
578
+ if tag is None:
579
+ latest_path = os.path.join(checkpoint_dir, 'latest')
580
+ if os.path.isfile(latest_path):
581
+ with open(latest_path, 'r') as fd:
582
+ tag = fd.read().strip()
583
+ else:
584
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
585
+
586
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
587
+
588
+ if not os.path.isdir(ds_checkpoint_dir):
589
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
590
+
591
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
592
+ if lazy_mode:
593
+ return state_dict
594
+ else:
595
+ return to_torch_tensor(state_dict)
596
+
597
+
598
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
599
+ output_dir,
600
+ max_shard_size="5GB",
601
+ safe_serialization=False,
602
+ tag=None,
603
+ exclude_frozen_parameters=False):
604
+ """
605
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
606
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
607
+
608
+ Args:
609
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
610
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
611
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
612
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
613
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
614
+ - ``exclude_frozen_parameters``: exclude frozen parameters
615
+ """
616
+
617
+ # Dependency pre-check
618
+ if safe_serialization:
619
+ try:
620
+ from safetensors.torch import save_file
621
+ except ImportError:
622
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
623
+ raise
624
+ if max_shard_size is not None:
625
+ try:
626
+ from huggingface_hub import split_torch_state_dict_into_shards
627
+ except ImportError:
628
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
629
+ raise
630
+
631
+ # Convert zero checkpoint to state_dict
632
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
633
+ tag,
634
+ exclude_frozen_parameters,
635
+ lazy_mode=True)
636
+
637
+ # Shard the model if it is too big.
638
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
639
+ if max_shard_size is not None:
640
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
641
+ # a memory-efficient approach for sharding
642
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
643
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
644
+ filename_pattern=filename_pattern,
645
+ max_shard_size=max_shard_size)
646
+ else:
647
+ from collections import namedtuple
648
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
649
+ state_dict_split = StateDictSplit(is_sharded=False,
650
+ filename_to_tensors={weights_name: list(state_dict.keys())})
651
+
652
+ # Save the model by shard
653
+ os.makedirs(output_dir, exist_ok=True)
654
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
655
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
656
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
657
+ shard_state_dict = to_torch_tensor(shard_state_dict)
658
+ output_path = os.path.join(output_dir, shard_file)
659
+ if safe_serialization:
660
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
661
+ else:
662
+ torch.save(shard_state_dict, output_path)
663
+ # release the memory of current shard
664
+ for tensor_name in list(shard_state_dict.keys()):
665
+ del state_dict[tensor_name]
666
+ del shard_state_dict[tensor_name]
667
+ del shard_state_dict
668
+ gc.collect()
669
+
670
+ # Save index if sharded
671
+ if state_dict_split.is_sharded:
672
+ index = {
673
+ "metadata": state_dict_split.metadata,
674
+ "weight_map": state_dict_split.tensor_to_filename,
675
+ }
676
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
677
+ save_index_file = os.path.join(output_dir, save_index_file)
678
+ with open(save_index_file, "w", encoding="utf-8") as f:
679
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
680
+ f.write(content)
681
+
682
+
683
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
684
+ """
685
+ 1. Put the provided model to cpu
686
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
687
+ 3. Load it into the provided model
688
+
689
+ Args:
690
+ - ``model``: the model object to update
691
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
692
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
693
+
694
+ Returns:
695
+ - ``model``: modified model
696
+
697
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
698
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
699
+ conveniently placed for you in the checkpoint folder.
700
+
701
+ A typical usage might be ::
702
+
703
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
704
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
705
+ # submit to model hub or save the model to share with others
706
+
707
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
708
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
709
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
710
+
711
+ """
712
+ logger.info("Extracting fp32 weights")
713
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
714
+
715
+ logger.info("Overwriting model with fp32 weights")
716
+ model = model.cpu()
717
+ model.load_state_dict(state_dict, strict=False)
718
+
719
+ return model
720
+
721
+
722
+ if __name__ == "__main__":
723
+ parser = argparse.ArgumentParser()
724
+ parser.add_argument("checkpoint_dir",
725
+ type=str,
726
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
727
+ parser.add_argument("output_dir",
728
+ type=str,
729
+ help="directory to the pytorch fp32 state_dict output files"
730
+ "(e.g. path/checkpoint-12-output/)")
731
+ parser.add_argument(
732
+ "--max_shard_size",
733
+ type=str,
734
+ default="5GB",
735
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
736
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
737
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
738
+ "without CPU OOM issues.")
739
+ parser.add_argument(
740
+ "--safe_serialization",
741
+ default=False,
742
+ action='store_true',
743
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
744
+ parser.add_argument("-t",
745
+ "--tag",
746
+ type=str,
747
+ default=None,
748
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
749
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
750
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
751
+ args = parser.parse_args()
752
+
753
+ debug = args.debug
754
+
755
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
756
+ args.output_dir,
757
+ max_shard_size=args.max_shard_size,
758
+ safe_serialization=args.safe_serialization,
759
+ tag=args.tag,
760
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
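
The script's header shows the command-line usage (python zero_to_fp32.py . output_dir/); the same conversion can also be driven from Python through the function defined above. A minimal sketch (the paths are hypothetical, and, as the script's own comments note, deepspeed must be importable, plus safetensors/huggingface_hub for the options used here):

    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    # gathers the ZeRO-partitioned fp32 master weights and writes sharded
    # .safetensors files plus an index file into the output directory
    convert_zero_checkpoint_to_fp32_state_dict(
        "path/to/checkpoint",        # hypothetical folder containing the 'latest' tag file
        "path/to/checkpoint-fp32",   # hypothetical output directory
        safe_serialization=True,
    )
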