Upload lm-boosted decoder
- alphabet.json +1 -0
- language_model/5gram.bin +3 -0
- language_model/attrs.json +1 -0
- language_model/unigrams.txt +0 -0
- preprocessor_config.json +1 -1
- special_tokens_map.json +28 -4
- tokenizer_config.json +1 -0
alphabet.json
ADDED
@@ -0,0 +1 @@
+{"labels": [" ", "'", "0", "1", "2", "3", "4", "5", "6", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "\u00e1", "\u00e3", "\u00e7", "\u00e9", "\u00ed", "\u00f1", "\u00f3", "\u00f5", "\u00fa", "\u00fc", "\u014b", "\u0161", "\u03b7", "\u0572", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
language_model/5gram.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c71aa1b56786d75560dfc90f2e8573d2a7df7d03b3d2e082f7aea332a27a4d9
+size 342794117
language_model/attrs.json
ADDED
@@ -0,0 +1 @@
+{"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
language_model/unigrams.txt
ADDED
(diff too large to render)
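Together, these four artifacts are what pyctcdecode needs to rebuild the beam-search decoder: the CTC vocabulary from alphabet.json, the KenLM 5-gram binary, the unigram list, and the decoder weights from attrs.json. A minimal sketch of how they fit together, assuming a local clone of this repo (the glue code is illustrative, not part of this commit):

```python
# Sketch: rebuild the decoder by hand from the files in this commit.
# pyctcdecode's build_ctcdecoder is the real API; paths assume a local clone.
import json
from pyctcdecode import build_ctcdecoder

with open("alphabet.json") as f:
    labels = json.load(f)["labels"]          # CTC vocabulary, in logit order

with open("language_model/attrs.json") as f:
    attrs = json.load(f)                     # {"alpha": 0.5, "beta": 1.5, ...}

with open("language_model/unigrams.txt") as f:
    unigrams = [line.strip() for line in f]  # word list backing the LM

decoder = build_ctcdecoder(
    labels,
    kenlm_model_path="language_model/5gram.bin",  # ~343 MB KenLM binary via LFS
    unigrams=unigrams,
    alpha=attrs["alpha"],                    # language-model weight
    beta=attrs["beta"],                      # word-insertion bonus
    unk_score_offset=attrs["unk_score_offset"],
    lm_score_boundary=attrs["score_boundary"],
)
# decoder.decode(logits) then maps acoustic-model logits to text.
```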
preprocessor_config.json
CHANGED
@@ -4,7 +4,7 @@
   "num_mel_bins": 80,
   "padding_side": "right",
   "padding_value": 1,
-  "processor_class": "
+  "processor_class": "Wav2Vec2ProcessorWithLM",
   "return_attention_mask": true,
   "sampling_rate": 16000,
   "stride": 2
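Setting processor_class here (and in tokenizer_config.json below) is what routes AutoProcessor to Wav2Vec2ProcessorWithLM, which bundles the feature extractor, the tokenizer, and the beam-search decoder. A hedged sketch, with the repo id as a placeholder:

```python
# Sketch: the processor_class field makes AutoProcessor return the LM-aware class.
# "user/model-with-lm" is a placeholder repo id, not this repository's actual id.
from transformers import AutoProcessor, Wav2Vec2ProcessorWithLM

processor = AutoProcessor.from_pretrained("user/model-with-lm")
assert isinstance(processor, Wav2Vec2ProcessorWithLM)  # resolved via processor_class
```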
special_tokens_map.json
CHANGED
@@ -1,6 +1,30 @@
 {
-  "bos_token": "<s>",
-  "eos_token": "</s>",
-  "pad_token": "[PAD]",
-  "unk_token": "[UNK]"
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": true,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": true,
+    "single_word": false
+  }
 }
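The string-valued special tokens are expanded into full token specs; with lstrip/rstrip set to true, [PAD] and [UNK] absorb adjacent whitespace when matched. In transformers terms, each dict is just the serialized form of an AddedToken (a sketch, not code from this commit):

```python
# Sketch: the dict entries above deserialize to AddedToken objects.
from tokenizers import AddedToken

pad = AddedToken("[PAD]", lstrip=True, rstrip=True, normalized=False, single_word=False)
```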
tokenizer_config.json
CHANGED
@@ -39,6 +39,7 @@
   "eos_token": "</s>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
+  "processor_class": "Wav2Vec2ProcessorWithLM",
   "replace_word_delimiter_char": " ",
   "target_lang": null,
   "tokenizer_class": "Wav2Vec2CTCTokenizer",
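With the commit applied, LM-boosted transcription reduces to the usual CTC pipeline plus processor.batch_decode on the raw logits. An end-to-end sketch, assuming the checkpoint loads via AutoModelForCTC (the repo id and audio array are placeholders):

```python
# Sketch: end-to-end LM-boosted decoding. Repo id and audio are placeholders;
# AutoModelForCTC and Wav2Vec2ProcessorWithLM.batch_decode are real transformers APIs.
import torch
from transformers import AutoModelForCTC, AutoProcessor

repo = "user/model-with-lm"                 # placeholder
processor = AutoProcessor.from_pretrained(repo)
model = AutoModelForCTC.from_pretrained(repo)

speech = torch.zeros(16000).numpy()         # stand-in for 1 s of 16 kHz audio
inputs = processor(speech, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits         # (batch, time, vocab)

# batch_decode runs pyctcdecode beam search against the shipped 5-gram LM
text = processor.batch_decode(logits.numpy()).text[0]
print(text)
```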