csikasote committed (verified)
Commit de0b9eb · 1 Parent(s): 11f85bc

End of training

README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
  license: apache-2.0
  base_model: facebook/wav2vec2-xls-r-300m
  tags:
+ - automatic-speech-recognition
+ - bemgen
  - generated_from_trainer
  metrics:
  - wer
@@ -16,10 +18,10 @@ should probably proofread and complete it, then remove this comment. -->

  # xls-r-300m-bemgen-combined-hp-tuning-test-model

- This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on an unknown dataset.
+ This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the BEMGEN - NA dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.3925
- - Wer: 0.4228
+ - Wer: 0.4224

  ## Model description

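The updated README tags this checkpoint for automatic-speech-recognition. A minimal inference sketch is shown below; the repo id `csikasote/xls-r-300m-bemgen-combined-hp-tuning-test-model` is inferred from the committer and model names (not stated in this commit), and the audio path is a placeholder.

```python
# Minimal ASR inference sketch; repo id and audio path are assumptions.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="csikasote/xls-r-300m-bemgen-combined-hp-tuning-test-model",  # assumed repo id
)

# Wav2Vec2-CTC checkpoints expect 16 kHz audio; the pipeline resamples file input.
print(asr("example_bemba_clip.wav")["text"])  # hypothetical local file
```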
all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+     "epoch": 22.228163992869874,
+     "eval_loss": 0.3924838602542877,
+     "eval_runtime": 40.0406,
+     "eval_samples": 541,
+     "eval_samples_per_second": 13.511,
+     "eval_steps_per_second": 3.397,
+     "eval_wer": 0.42236746550472043,
+     "total_flos": 3.4912812658192863e+19,
+     "train_loss": 20.112410697937012,
+     "train_runtime": 9256.0708,
+     "train_samples": 4482,
+     "train_samples_per_second": 14.527,
+     "train_steps_per_second": 0.113
+ }
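all_results.json combines the training and evaluation metrics that also appear separately in train_results.json and eval_results.json below. A small, self-contained sketch for inspecting it after cloning the repository (file name as added in this commit):

```python
# Load the combined metrics file added in this commit and print the headline numbers.
import json

with open("all_results.json") as f:
    metrics = json.load(f)

print(f"eval_loss:  {metrics['eval_loss']:.4f}")   # 0.3925, as reported in the README
print(f"eval_wer:   {metrics['eval_wer']:.4f}")    # 0.4224, as reported in the README
print(f"train_loss: {metrics['train_loss']:.4f}")
```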
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 22.228163992869874,
+     "eval_loss": 0.3924838602542877,
+     "eval_runtime": 40.0406,
+     "eval_samples": 541,
+     "eval_samples_per_second": 13.511,
+     "eval_steps_per_second": 3.397,
+     "eval_wer": 0.42236746550472043
+ }
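The `eval_wer` above is a word error rate over the 541-sample evaluation set. A sketch of how such a score can be computed with the `evaluate` library follows; whether this exact library was used is an assumption, and the transcripts are placeholders.

```python
# WER computation sketch using the `evaluate` library (assumed, not confirmed by
# this commit); the strings below are placeholders, not BEMGEN data.
import evaluate

wer_metric = evaluate.load("wer")
predictions = ["the model wrote this sentence"]   # hypothetical hypotheses
references = ["the speaker said this sentence"]   # hypothetical references

print(wer_metric.compute(predictions=predictions, references=references))
# eval_results.json reports eval_wer ≈ 0.4224 for this checkpoint
```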
runs/Dec24_19-41-58_srvrocgpu011.uct.ac.za/events.out.tfevents.1735144860.srvrocgpu011.uct.ac.za ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1296dc71e152b2e0fc62022a6e52b4725eab6ef101943ae660a27612252d715e
+ size 358
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 22.228163992869874,
+     "total_flos": 3.4912812658192863e+19,
+     "train_loss": 20.112410697937012,
+     "train_runtime": 9256.0708,
+     "train_samples": 4482,
+     "train_samples_per_second": 14.527,
+     "train_steps_per_second": 0.113
+ }
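The throughput figures are consistent with the Trainer computing speed metrics against the planned training length (30 epochs) rather than the 22.2 epochs actually completed before early stopping; this is an inference from the numbers, not something stated in the commit. A quick cross-check:

```python
# Cross-check of train_samples_per_second from the values in train_results.json.
train_samples = 4482
planned_epochs = 30          # num_train_epochs from trainer_state.json
train_runtime_s = 9256.0708

print(round(train_samples * planned_epochs / train_runtime_s, 3))  # 14.527
```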
trainer_state.json ADDED
@@ -0,0 +1,130 @@
+ {
+   "best_metric": 0.3383650779724121,
+   "best_model_checkpoint": "/scratch/skscla001/speech/results/xls-r-300m-bemgen-combined-hp-tuning-test-model/checkpoint-500",
+   "epoch": 22.228163992869874,
+   "eval_steps": 100,
+   "global_step": 800,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 2.7985739750445635,
+       "eval_loss": 2.9111883640289307,
+       "eval_runtime": 39.716,
+       "eval_samples_per_second": 13.622,
+       "eval_steps_per_second": 3.424,
+       "eval_wer": 1.0,
+       "step": 100
+     },
+     {
+       "epoch": 5.570409982174688,
+       "eval_loss": 1.6968202590942383,
+       "eval_runtime": 39.8919,
+       "eval_samples_per_second": 13.562,
+       "eval_steps_per_second": 3.409,
+       "eval_wer": 0.9970951343500363,
+       "step": 200
+     },
+     {
+       "epoch": 8.342245989304812,
+       "eval_loss": 0.3893207609653473,
+       "eval_runtime": 40.2019,
+       "eval_samples_per_second": 13.457,
+       "eval_steps_per_second": 3.383,
+       "eval_wer": 0.615831517792302,
+       "step": 300
+     },
+     {
+       "epoch": 11.114081996434937,
+       "eval_loss": 0.34530752897262573,
+       "eval_runtime": 40.0609,
+       "eval_samples_per_second": 13.504,
+       "eval_steps_per_second": 3.395,
+       "eval_wer": 0.5106753812636166,
+       "step": 400
+     },
+     {
+       "epoch": 13.9126559714795,
+       "grad_norm": 7.798405170440674,
+       "learning_rate": 0.0005819322340474784,
+       "loss": 31.2339,
+       "step": 500
+     },
+     {
+       "epoch": 13.9126559714795,
+       "eval_loss": 0.3383650779724121,
+       "eval_runtime": 39.984,
+       "eval_samples_per_second": 13.53,
+       "eval_steps_per_second": 3.401,
+       "eval_wer": 0.49092229484386346,
+       "step": 500
+     },
+     {
+       "epoch": 16.684491978609625,
+       "eval_loss": 0.3592090606689453,
+       "eval_runtime": 39.9246,
+       "eval_samples_per_second": 13.551,
+       "eval_steps_per_second": 3.406,
+       "eval_wer": 0.46187363834422657,
+       "step": 600
+     },
+     {
+       "epoch": 19.45632798573975,
+       "eval_loss": 0.367597371339798,
+       "eval_runtime": 39.9675,
+       "eval_samples_per_second": 13.536,
+       "eval_steps_per_second": 3.403,
+       "eval_wer": 0.4335511982570806,
+       "step": 700
+     },
+     {
+       "epoch": 22.228163992869874,
+       "eval_loss": 0.3925014138221741,
+       "eval_runtime": 39.8278,
+       "eval_samples_per_second": 13.583,
+       "eval_steps_per_second": 3.415,
+       "eval_wer": 0.42280319535221494,
+       "step": 800
+     },
+     {
+       "epoch": 22.228163992869874,
+       "step": 800,
+       "total_flos": 3.4912812658192863e+19,
+       "train_loss": 20.112410697937012,
+       "train_runtime": 9256.0708,
+       "train_samples_per_second": 14.527,
+       "train_steps_per_second": 0.113
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 1050,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 30,
+   "save_steps": 400,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 3,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 3
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 3.4912812658192863e+19,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
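The trainer state records why the run ended at step 800 of a planned 1050: the best evaluation loss (0.3384 at step 500) was not improved upon for three consecutive evaluations, which exhausted the early-stopping patience. Below is a sketch of the corresponding `TrainingArguments`/`EarlyStoppingCallback` setup; it reconstructs only the settings visible in the state file, and everything else about the actual training script (model head, optimizer schedule, data pipeline) is unknown.

```python
# Sketch reconstructing only the settings visible in trainer_state.json; nothing
# else about the actual training script is recorded in this commit.
from transformers import TrainingArguments, EarlyStoppingCallback

training_args = TrainingArguments(
    output_dir="xls-r-300m-bemgen-combined-hp-tuning-test-model",
    per_device_train_batch_size=8,   # "train_batch_size": 8 (single-process value)
    num_train_epochs=30,             # "num_train_epochs": 30
    eval_strategy="steps",           # older transformers versions: evaluation_strategy
    eval_steps=100,                  # "eval_steps": 100
    save_steps=400,                  # "save_steps": 400
    logging_steps=500,               # "logging_steps": 500
    load_best_model_at_end=True,     # inferred from "best_model_checkpoint"
    metric_for_best_model="loss",    # best_metric equals the step-500 eval_loss
    greater_is_better=False,
)
# NOTE: max_steps=1050 over 4482 samples implies an effective batch of roughly 128
# (e.g. gradient accumulation), which the state file does not record.

# Patience 3, threshold 0.0, exactly as stored under stateful_callbacks.
early_stopping = EarlyStoppingCallback(
    early_stopping_patience=3,
    early_stopping_threshold=0.0,
)
```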