beamaia committed (verified)
Commit bc187a2 · Parent(s): 6c4526d

Upload folder using huggingface_hub

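For orientation, a minimal sketch of the kind of call that produces an "Upload folder using huggingface_hub" commit like this one. The repo_id and folder_path values below are placeholders, not taken from this commit; only the commit message matches.

# Sketch: push a local training output folder to the Hub with huggingface_hub.
from huggingface_hub import HfApi

api = HfApi()  # assumes prior authentication via `huggingface-cli login` or HF_TOKEN
api.upload_folder(
    folder_path="./training_output",   # placeholder local directory containing the checkpoints
    repo_id="user/repo",               # placeholder target repository
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)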
checkpoint-30/adapter_config.json CHANGED
@@ -20,9 +20,9 @@
   "revision": null,
   "target_modules": [
     "q_proj",
+    "k_proj",
     "v_proj",
-    "o_proj",
-    "k_proj"
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
checkpoint-30/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:78ab022ca343660d1f280ec34a80faffada6b025a2a7879119d1810181a75394
+oid sha256:24867322dfb4671521880b7952dfecf869c9ddf50ba8c8d0ab60442d46c3b6e7
 size 436242776
checkpoint-30/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:19650f1cc2280227bbfe7e965edd69b2ee9cacbd13b3e7bb3742aba3a3e324d5
+oid sha256:6a095acb983c64222b3980c352b38e58ba4bd6e2f0b8e559eddea78a65277790
 size 872568314
checkpoint-30/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8dadf22c8d1bce4ad601a4f73bbe39dc8987d3b42389b4025267f79988efccce
-size 15024
+oid sha256:2c2a616f0c7a81cc42f0138e0036f3bc925663524713430ec02128feb7cc22f7
+size 14512
checkpoint-30/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:528debf43ac8e68f3972cc6eedc188d70690e0a9bfc2feeeff97e757be99c6b8
-size 15024
+oid sha256:3547aee58cf2280e2d966f891fba83ef181a792754cf251e62b59ae644dd2253
+size 14512
checkpoint-30/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8783ce7e95711682ad7c1657af7a6be242f3bc4b6aac1f9aa4521dd59b1618c4
+oid sha256:35079c4c5c54e3e2a4600c729d1fa32b43362d22b0baf443894af8a786492df1
 size 1000
checkpoint-30/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.434685617685318,
+  "best_metric": 0.44581571221351624,
   "best_model_checkpoint": "./mistral/01-03-24-Weni-ZeroShot-3.3.18-Mistral-7b-Multilanguage-3.2.0_Zeroshot-2_max_steps-100_batch_16_2024-03-01_ppid_7/checkpoint-30",
-  "epoch": 0.297029702970297,
+  "epoch": 0.14869888475836432,
   "eval_steps": 10,
   "global_step": 30,
   "is_hyper_param_search": false,
@@ -9,34 +9,34 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.1,
-      "eval_loss": 0.6981944441795349,
-      "eval_runtime": 94.6462,
-      "eval_samples_per_second": 30.292,
-      "eval_steps_per_second": 0.951,
+      "epoch": 0.05,
+      "eval_loss": 0.6367942690849304,
+      "eval_runtime": 180.8957,
+      "eval_samples_per_second": 15.849,
+      "eval_steps_per_second": 0.995,
       "step": 10
     },
     {
-      "epoch": 0.2,
-      "grad_norm": 0.2654201090335846,
-      "learning_rate": 0.0001961261695938319,
-      "loss": 1.0016,
+      "epoch": 0.1,
+      "grad_norm": 0.34346863627433777,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 0.9774,
       "step": 20
     },
     {
-      "epoch": 0.2,
-      "eval_loss": 0.467332661151886,
-      "eval_runtime": 94.5373,
-      "eval_samples_per_second": 30.327,
-      "eval_steps_per_second": 0.952,
+      "epoch": 0.1,
+      "eval_loss": 0.48089343309402466,
+      "eval_runtime": 180.8882,
+      "eval_samples_per_second": 15.85,
+      "eval_steps_per_second": 0.995,
       "step": 20
     },
     {
-      "epoch": 0.3,
-      "eval_loss": 0.434685617685318,
-      "eval_runtime": 94.5899,
-      "eval_samples_per_second": 30.31,
-      "eval_steps_per_second": 0.951,
+      "epoch": 0.15,
+      "eval_loss": 0.44581571221351624,
+      "eval_runtime": 180.8305,
+      "eval_samples_per_second": 15.855,
+      "eval_steps_per_second": 0.995,
       "step": 30
     }
   ],
@@ -45,7 +45,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 10,
-  "total_flos": 2.6847278785573683e+17,
+  "total_flos": 1.3497349192024064e+17,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
checkpoint-30/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66ceb8ab0424702f162d3dcd459022d693533c009fa75ecbe9af10b7fcf8a54d
+oid sha256:397ed785970a10324cddec1fc7a5b8a987e32e5f4a937dc38def4b1cb481ef1b
 size 5176
checkpoint-40/adapter_config.json CHANGED
@@ -20,9 +20,9 @@
   "revision": null,
   "target_modules": [
     "q_proj",
+    "k_proj",
     "v_proj",
-    "o_proj",
-    "k_proj"
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
checkpoint-40/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:77f4591fb4db57788c1d5980e4f03bad1b5d20469e935858f70b9470b0272c71
+oid sha256:7b041c9a2283489464450c8f036707f48c1437ef863952d49130a0f6ffff827a
 size 436242776
checkpoint-40/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a08fe71eb4b0f6212455ce0cb2b747581415bf7441f0367bc16d14b31234dea6
+oid sha256:bbf571fc43c326ce5b7f8ab1133043927c1104c6b8d07f4365744906cc3954f2
 size 872568314
checkpoint-40/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:afa79b6205f47f6bc3f42fc7af51c14822e2a566a3aaf2dc09246fbe343751e3
-size 15024
+oid sha256:562c6644f18ed45bd23812c0c9184280a67775561c0f90429c64bbb0b4cfdd52
+size 14512
checkpoint-40/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:34467bd4ec00665d227cf44d137c4ef3d307f23ed69db18dbcedb53da88a6bb8
-size 15024
+oid sha256:6e449608c637ff3eb44facd6dbe70487e4bba04dec462d49ef0b0b376478b590
+size 14512
checkpoint-40/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5849ed9499535f9ffc23dafcb42ec8d336c161ca18201ff7d098f2b17aac424f
+oid sha256:9f3f71a5b5e00e23a44af22895931ea5920a9f7ce8cbf52213ba10a1597ee79b
 size 1000
checkpoint-40/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.4082697033882141,
+  "best_metric": 0.4221758544445038,
   "best_model_checkpoint": "./mistral/01-03-24-Weni-ZeroShot-3.3.18-Mistral-7b-Multilanguage-3.2.0_Zeroshot-2_max_steps-100_batch_16_2024-03-01_ppid_7/checkpoint-40",
-  "epoch": 0.39603960396039606,
+  "epoch": 0.1982651796778191,
   "eval_steps": 10,
   "global_step": 40,
   "is_hyper_param_search": false,
@@ -9,49 +9,49 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.1,
-      "eval_loss": 0.6981944441795349,
-      "eval_runtime": 94.6462,
-      "eval_samples_per_second": 30.292,
-      "eval_steps_per_second": 0.951,
+      "epoch": 0.05,
+      "eval_loss": 0.6367942690849304,
+      "eval_runtime": 180.8957,
+      "eval_samples_per_second": 15.849,
+      "eval_steps_per_second": 0.995,
       "step": 10
     },
     {
-      "epoch": 0.2,
-      "grad_norm": 0.2654201090335846,
-      "learning_rate": 0.0001961261695938319,
-      "loss": 1.0016,
+      "epoch": 0.1,
+      "grad_norm": 0.34346863627433777,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 0.9774,
       "step": 20
     },
     {
-      "epoch": 0.2,
-      "eval_loss": 0.467332661151886,
-      "eval_runtime": 94.5373,
-      "eval_samples_per_second": 30.327,
-      "eval_steps_per_second": 0.952,
+      "epoch": 0.1,
+      "eval_loss": 0.48089343309402466,
+      "eval_runtime": 180.8882,
+      "eval_samples_per_second": 15.85,
+      "eval_steps_per_second": 0.995,
       "step": 20
     },
     {
-      "epoch": 0.3,
-      "eval_loss": 0.434685617685318,
-      "eval_runtime": 94.5899,
-      "eval_samples_per_second": 30.31,
-      "eval_steps_per_second": 0.951,
+      "epoch": 0.15,
+      "eval_loss": 0.44581571221351624,
+      "eval_runtime": 180.8305,
+      "eval_samples_per_second": 15.855,
+      "eval_steps_per_second": 0.995,
       "step": 30
     },
     {
-      "epoch": 0.4,
-      "grad_norm": 1.3763986825942993,
-      "learning_rate": 0.0001559192903470747,
-      "loss": 0.4357,
+      "epoch": 0.2,
+      "grad_norm": 0.22729116678237915,
+      "learning_rate": 0.0001529919264233205,
+      "loss": 0.4459,
       "step": 40
     },
     {
-      "epoch": 0.4,
-      "eval_loss": 0.4082697033882141,
-      "eval_runtime": 94.551,
-      "eval_samples_per_second": 30.322,
-      "eval_steps_per_second": 0.952,
+      "epoch": 0.2,
+      "eval_loss": 0.4221758544445038,
+      "eval_runtime": 180.8406,
+      "eval_samples_per_second": 15.854,
+      "eval_steps_per_second": 0.995,
       "step": 40
     }
   ],
@@ -60,7 +60,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 10,
-  "total_flos": 3.5844587589625446e+17,
+  "total_flos": 1.8087006649607782e+17,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
checkpoint-40/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66ceb8ab0424702f162d3dcd459022d693533c009fa75ecbe9af10b7fcf8a54d
+oid sha256:397ed785970a10324cddec1fc7a5b8a987e32e5f4a937dc38def4b1cb481ef1b
 size 5176
checkpoint-50/adapter_config.json CHANGED
@@ -20,9 +20,9 @@
   "revision": null,
   "target_modules": [
     "q_proj",
+    "k_proj",
     "v_proj",
-    "o_proj",
-    "k_proj"
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
checkpoint-50/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e638a9f9216b03397158412851fd7d42949246bfa2ad7c3058437c76160f7d09
+oid sha256:792bf8f744bca5db30a5b2fe80e0610d871d2e0e9c0a4f386c15921efdc2cada
 size 436242776
checkpoint-50/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66dd45f2150a0a6d4c60db415e39f4e3b5a2c0f9141ca87580540f33e9090d61
+oid sha256:4bfe60b7f0163e31d4d317b854f7aa8b441fdda4591c40ed389e383d3a3f89cf
 size 872568314
checkpoint-50/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1acbc70db12510c2b2cee4658b6acfca9e76743915e6d5251c5387fc60850037
-size 15024
+oid sha256:155af0328dced3a7822123bfbbfc0a98082468058997d134358c43dd61128a48
+size 14512
checkpoint-50/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29aac9ff927aa57aaa01a137c1fa61f3cbd6758e0fdcb77bcf8f5a4ddaebeb8f
-size 15024
+oid sha256:a4b7ab3a551c96c4ceca4cd3993bf33763b09c0ba81bc2a77fec0f7faa8f7890
+size 14512
checkpoint-50/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a711e3dc499e314bc1a87cadf8f61ca720790ae9992957172045f5ab8655fd0
+oid sha256:17f23ea92413fa32eaeae1119269ac938b5f442880c1543708aed43058dfbadd
 size 1000
checkpoint-50/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.40642818808555603,
+  "best_metric": 0.40922558307647705,
   "best_model_checkpoint": "./mistral/01-03-24-Weni-ZeroShot-3.3.18-Mistral-7b-Multilanguage-3.2.0_Zeroshot-2_max_steps-100_batch_16_2024-03-01_ppid_7/checkpoint-50",
-  "epoch": 0.49504950495049505,
+  "epoch": 0.24783147459727387,
   "eval_steps": 10,
   "global_step": 50,
   "is_hyper_param_search": false,
@@ -9,57 +9,57 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.1,
-      "eval_loss": 0.6981944441795349,
-      "eval_runtime": 94.6462,
-      "eval_samples_per_second": 30.292,
-      "eval_steps_per_second": 0.951,
+      "epoch": 0.05,
+      "eval_loss": 0.6367942690849304,
+      "eval_runtime": 180.8957,
+      "eval_samples_per_second": 15.849,
+      "eval_steps_per_second": 0.995,
       "step": 10
     },
     {
-      "epoch": 0.2,
-      "grad_norm": 0.2654201090335846,
-      "learning_rate": 0.0001961261695938319,
-      "loss": 1.0016,
+      "epoch": 0.1,
+      "grad_norm": 0.34346863627433777,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 0.9774,
       "step": 20
     },
     {
-      "epoch": 0.2,
-      "eval_loss": 0.467332661151886,
-      "eval_runtime": 94.5373,
-      "eval_samples_per_second": 30.327,
-      "eval_steps_per_second": 0.952,
+      "epoch": 0.1,
+      "eval_loss": 0.48089343309402466,
+      "eval_runtime": 180.8882,
+      "eval_samples_per_second": 15.85,
+      "eval_steps_per_second": 0.995,
       "step": 20
     },
     {
-      "epoch": 0.3,
-      "eval_loss": 0.434685617685318,
-      "eval_runtime": 94.5899,
-      "eval_samples_per_second": 30.31,
-      "eval_steps_per_second": 0.951,
+      "epoch": 0.15,
+      "eval_loss": 0.44581571221351624,
+      "eval_runtime": 180.8305,
+      "eval_samples_per_second": 15.855,
+      "eval_steps_per_second": 0.995,
       "step": 30
     },
     {
-      "epoch": 0.4,
-      "grad_norm": 1.3763986825942993,
-      "learning_rate": 0.0001559192903470747,
-      "loss": 0.4357,
+      "epoch": 0.2,
+      "grad_norm": 0.22729116678237915,
+      "learning_rate": 0.0001529919264233205,
+      "loss": 0.4459,
       "step": 40
     },
     {
-      "epoch": 0.4,
-      "eval_loss": 0.4082697033882141,
-      "eval_runtime": 94.551,
-      "eval_samples_per_second": 30.322,
-      "eval_steps_per_second": 0.952,
+      "epoch": 0.2,
+      "eval_loss": 0.4221758544445038,
+      "eval_runtime": 180.8406,
+      "eval_samples_per_second": 15.854,
+      "eval_steps_per_second": 0.995,
       "step": 40
     },
     {
-      "epoch": 0.5,
-      "eval_loss": 0.40642818808555603,
-      "eval_runtime": 94.5831,
-      "eval_samples_per_second": 30.312,
-      "eval_steps_per_second": 0.952,
+      "epoch": 0.25,
+      "eval_loss": 0.40922558307647705,
+      "eval_runtime": 181.1791,
+      "eval_samples_per_second": 15.824,
+      "eval_steps_per_second": 0.993,
       "step": 50
     }
   ],
@@ -68,7 +68,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 10,
-  "total_flos": 4.502771471776481e+17,
+  "total_flos": 2.2680060137832448e+17,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
checkpoint-50/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66ceb8ab0424702f162d3dcd459022d693533c009fa75ecbe9af10b7fcf8a54d
+oid sha256:397ed785970a10324cddec1fc7a5b8a987e32e5f4a937dc38def4b1cb481ef1b
 size 5176