Training in progress, step 428, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:5edf0f88f8f2e77ac94d05aceec9aba273d720dcd1481e1016e8ee91c4f65a86
 size 201880976
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:cb5bc5310f6e8984854e8b0a8f074e20d6f57e5be6750d3e330f559c6f2e7d52
 size 102771659
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e61e21d7a01dc7c642e9701af3fb914f9e6ca6f2866bd5182c14f9860c3646dc
 size 14645
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:13916fdf7ba8313e9fab314d766f1e35afde90bbe7316098fdd56f9cc606892b
 size 1465
last-checkpoint/trainer_state.json CHANGED
@@ -2,9 +2,9 @@
   "best_global_step": 400,
   "best_metric": 0.754337888795046,
   "best_model_checkpoint": "./qwen2.5-7b-sft-qlora/checkpoint-400",
-  "epoch":
+  "epoch": 2.0,
   "eval_steps": 50,
-  "global_step":
+  "global_step": 428,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -512,6 +512,26 @@
       "eval_samples_per_second": 27.224,
       "eval_steps_per_second": 1.703,
       "step": 400
+    },
+    {
+      "entropy": 0.5425351396203041,
+      "epoch": 1.9169590643274854,
+      "grad_norm": 0.2328052669763565,
+      "learning_rate": 1.1994589235353681e-06,
+      "loss": 0.5376,
+      "mean_token_accuracy": 0.8435128018260002,
+      "num_tokens": 9853669.0,
+      "step": 410
+    },
+    {
+      "entropy": 0.533241743594408,
+      "epoch": 1.9637426900584796,
+      "grad_norm": 0.22453628480434418,
+      "learning_rate": 2.695492370149988e-07,
+      "loss": 0.5238,
+      "mean_token_accuracy": 0.8451769664883614,
+      "num_tokens": 10097512.0,
+      "step": 420
     }
   ],
   "logging_steps": 10,
@@ -526,12 +546,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop":
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 8.
+  "total_flos": 8.941697521363354e+17,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null