lesso07 committed (verified)
Commit e184e52 · 1 Parent(s): 56f026f

Training in progress, step 75, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b57fd0cff793537098323bd889b5020738b1e5e3873ae4c3af72cdc2de9247e4
+oid sha256:24a563f5a4ddd822e5bcf1fe4d9864c503a827a33f3eca3615cf428388d2b6b3
 size 35237104
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:edf79ebb668d9aa689be333cfefdfbb14459870e45c29d0406fdaa3599bbecf1
+oid sha256:4358faaad86c1bb59c4f36737764daa2f32c856743a4b1b243e2e92cb9cba37d
 size 70667778
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:12268954978dab25b5aaff68eb836df3d54e4f583ead17bf0e2167e105e8bf94
+oid sha256:a1ac9e9984393f21feee4c2b4ab3c9ba671bf2d1c3b0d832bd5e9f9194775eae
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a89ffc445067fef9d6d02bb3ff9e61d5e3209e6fa67c7259b3b364b90dbaa2cd
+oid sha256:f23e2214bcafb439ebc7528dcc283ef6218d509a276c0baff0743503ecbe3d92
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.041736227045075125,
+  "epoch": 0.06260434056761269,
   "eval_steps": 9,
-  "global_step": 50,
+  "global_step": 75,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -405,6 +405,205 @@
       "learning_rate": 5.868240888334653e-05,
       "loss": 2.5819,
       "step": 50
+    },
+    {
+      "epoch": 0.04257095158597663,
+      "grad_norm": 1.1659084558486938,
+      "learning_rate": 5.695865504800327e-05,
+      "loss": 3.0356,
+      "step": 51
+    },
+    {
+      "epoch": 0.04340567612687813,
+      "grad_norm": 1.0479695796966553,
+      "learning_rate": 5.522642316338268e-05,
+      "loss": 2.9132,
+      "step": 52
+    },
+    {
+      "epoch": 0.04424040066777963,
+      "grad_norm": 1.0335667133331299,
+      "learning_rate": 5.348782368720626e-05,
+      "loss": 2.8807,
+      "step": 53
+    },
+    {
+      "epoch": 0.045075125208681135,
+      "grad_norm": 0.9959902763366699,
+      "learning_rate": 5.174497483512506e-05,
+      "loss": 2.5507,
+      "step": 54
+    },
+    {
+      "epoch": 0.045075125208681135,
+      "eval_loss": 2.8698601722717285,
+      "eval_runtime": 32.0898,
+      "eval_samples_per_second": 31.443,
+      "eval_steps_per_second": 3.958,
+      "step": 54
+    },
+    {
+      "epoch": 0.045909849749582635,
+      "grad_norm": 1.5530030727386475,
+      "learning_rate": 5e-05,
+      "loss": 2.9575,
+      "step": 55
+    },
+    {
+      "epoch": 0.04674457429048414,
+      "grad_norm": 1.4625574350357056,
+      "learning_rate": 4.825502516487497e-05,
+      "loss": 2.9145,
+      "step": 56
+    },
+    {
+      "epoch": 0.04757929883138564,
+      "grad_norm": 1.3883436918258667,
+      "learning_rate": 4.6512176312793736e-05,
+      "loss": 3.0878,
+      "step": 57
+    },
+    {
+      "epoch": 0.048414023372287146,
+      "grad_norm": 1.4030698537826538,
+      "learning_rate": 4.477357683661734e-05,
+      "loss": 2.7505,
+      "step": 58
+    },
+    {
+      "epoch": 0.049248747913188645,
+      "grad_norm": 1.3059231042861938,
+      "learning_rate": 4.3041344951996746e-05,
+      "loss": 2.8434,
+      "step": 59
+    },
+    {
+      "epoch": 0.05008347245409015,
+      "grad_norm": 0.8829728960990906,
+      "learning_rate": 4.131759111665349e-05,
+      "loss": 2.1365,
+      "step": 60
+    },
+    {
+      "epoch": 0.05091819699499165,
+      "grad_norm": 1.467118501663208,
+      "learning_rate": 3.960441545911204e-05,
+      "loss": 3.1508,
+      "step": 61
+    },
+    {
+      "epoch": 0.05175292153589316,
+      "grad_norm": 1.2122942209243774,
+      "learning_rate": 3.790390522001662e-05,
+      "loss": 2.8352,
+      "step": 62
+    },
+    {
+      "epoch": 0.052587646076794656,
+      "grad_norm": 0.994874119758606,
+      "learning_rate": 3.6218132209150045e-05,
+      "loss": 2.911,
+      "step": 63
+    },
+    {
+      "epoch": 0.052587646076794656,
+      "eval_loss": 2.8601207733154297,
+      "eval_runtime": 32.4405,
+      "eval_samples_per_second": 31.103,
+      "eval_steps_per_second": 3.915,
+      "step": 63
+    },
+    {
+      "epoch": 0.05342237061769616,
+      "grad_norm": 1.038936734199524,
+      "learning_rate": 3.4549150281252636e-05,
+      "loss": 2.8027,
+      "step": 64
+    },
+    {
+      "epoch": 0.05425709515859766,
+      "grad_norm": 0.9290437698364258,
+      "learning_rate": 3.289899283371657e-05,
+      "loss": 2.8319,
+      "step": 65
+    },
+    {
+      "epoch": 0.05509181969949917,
+      "grad_norm": 1.4522688388824463,
+      "learning_rate": 3.12696703292044e-05,
+      "loss": 2.9978,
+      "step": 66
+    },
+    {
+      "epoch": 0.055926544240400666,
+      "grad_norm": 1.5390747785568237,
+      "learning_rate": 2.9663167846209998e-05,
+      "loss": 2.7995,
+      "step": 67
+    },
+    {
+      "epoch": 0.05676126878130217,
+      "grad_norm": 1.095628261566162,
+      "learning_rate": 2.8081442660546125e-05,
+      "loss": 2.832,
+      "step": 68
+    },
+    {
+      "epoch": 0.05759599332220367,
+      "grad_norm": 1.1453615427017212,
+      "learning_rate": 2.6526421860705473e-05,
+      "loss": 2.7923,
+      "step": 69
+    },
+    {
+      "epoch": 0.05843071786310518,
+      "grad_norm": 1.3366105556488037,
+      "learning_rate": 2.500000000000001e-05,
+      "loss": 2.8731,
+      "step": 70
+    },
+    {
+      "epoch": 0.05926544240400668,
+      "grad_norm": 1.453717827796936,
+      "learning_rate": 2.350403678833976e-05,
+      "loss": 2.8363,
+      "step": 71
+    },
+    {
+      "epoch": 0.06010016694490818,
+      "grad_norm": 1.1732043027877808,
+      "learning_rate": 2.2040354826462668e-05,
+      "loss": 2.8937,
+      "step": 72
+    },
+    {
+      "epoch": 0.06010016694490818,
+      "eval_loss": 2.853658437728882,
+      "eval_runtime": 32.1886,
+      "eval_samples_per_second": 31.347,
+      "eval_steps_per_second": 3.945,
+      "step": 72
+    },
+    {
+      "epoch": 0.06093489148580968,
+      "grad_norm": 0.9332495331764221,
+      "learning_rate": 2.061073738537635e-05,
+      "loss": 2.8937,
+      "step": 73
+    },
+    {
+      "epoch": 0.06176961602671119,
+      "grad_norm": 1.1398509740829468,
+      "learning_rate": 1.9216926233717085e-05,
+      "loss": 2.8762,
+      "step": 74
+    },
+    {
+      "epoch": 0.06260434056761269,
+      "grad_norm": 0.7939409613609314,
+      "learning_rate": 1.7860619515673033e-05,
+      "loss": 2.9231,
+      "step": 75
     }
   ],
   "logging_steps": 1,
@@ -424,7 +623,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1964600469749760.0,
+  "total_flos": 2991960348426240.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null