cwkeam committed
Commit 12bf1ca
1 Parent(s): b1b00a9

uploading dummy files

config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "architectures": [
+     "MCTCModel"
+   ],
+   "attention_head_dim": 384,
+   "attention_probs_dropout_prob": 0.3,
+   "bos_token_id": 0,
+   "conv_channels": null,
+   "conv_dropout": 0.3,
+   "conv_glu_dim": 2,
+   "conv_kernel": [
+     7
+   ],
+   "conv_stride": [
+     3
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "eos_token_id": 2,
+   "hidden_act": "relu",
+   "hidden_dropout_prob": 0.3,
+   "hidden_size": 1536,
+   "initializer_range": 0.02,
+   "input_channels": 1,
+   "input_feat_per_channel": 80,
+   "intermediate_size": 6144,
+   "layer_norm_eps": 1e-12,
+   "layerdrop": 0.3,
+   "model_type": "mctc",
+   "num_attention_heads": 4,
+   "num_conv_layers": 1,
+   "num_hidden_layers": 36,
+   "pad_token_id": 1,
+   "torch_dtype": "float32",
+   "transformers_version": "4.18.0.dev0",
+   "use_cache": true,
+   "vocab_size": 8065
+ }
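config.json above pins the architecture: 36 encoder layers of width 1536 (4 attention heads of 384 dimensions each, so attention_head_dim * num_attention_heads = hidden_size), a single convolutional subsampling layer (kernel 7, stride 3) over 80 input features per channel, and an 8065-entry CTC vocabulary. A minimal sketch of inspecting it, assuming transformers is installed; PretrainedConfig.from_json_file reads the raw JSON without requiring the "mctc" model type to be registered (the file was written by 4.18.0.dev0):

# Minimal sketch: read the committed config.json directly. Using
# PretrainedConfig.from_json_file avoids needing the "mctc" model type to be
# registered in the installed transformers version.
from transformers import PretrainedConfig

config = PretrainedConfig.from_json_file("config.json")
# Per-head width times head count matches the model width: 384 * 4 == 1536.
assert config.attention_head_dim * config.num_attention_heads == config.hidden_size
print(config.num_hidden_layers, config.intermediate_size, config.vocab_size)  # 36, 6144, 8065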
preprocessor_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "do_normalize": true,
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": false,
+   "sampling_rate": 16000
+ }
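preprocessor_config.json holds the standard feature-extractor arguments: 16 kHz input audio, zero-valued right padding, per-utterance normalization, and no attention mask returned. A minimal sketch of loading it, assuming a transformers build whose AutoFeatureExtractor can resolve this model's extractor class (the file itself does not record a feature_extractor_type); "./" stands in for a local clone of the repo:

# Minimal sketch: load the committed feature-extractor settings. Assumes
# AutoFeatureExtractor can resolve the extractor class for this model; the
# "./" path is a placeholder for the directory containing these files.
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("./")
print(extractor.sampling_rate)   # 16000 -> resample raw audio to 16 kHz first
print(extractor.do_normalize)    # True  -> features are normalized before the encoder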
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38147e5a3277e391f5a2398cb24d10d2e05cd0549a174766410a2b68813fdf6b
+ size 4083428601
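pytorch_model.bin is committed as a Git LFS pointer only: the oid and size lines indicate the real checkpoint is a single ~4.08 GB object kept in LFS storage. A minimal sketch of inspecting it once the object has been fetched (e.g. via git lfs pull), assuming torch is installed:

# Minimal sketch: open the checkpoint after the LFS object has been fetched
# (the three lines above are only the pointer, not the weights).
import torch

state_dict = torch.load("pytorch_model.bin", map_location="cpu")
print(len(state_dict), "tensors in the checkpoint")
print(sum(t.numel() for t in state_dict.values()), "total parameter/buffer elements")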
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "return_attention_mask": false, "do_normalize": true}
vocab.json ADDED
The diff for this file is too large to render. See raw diff
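special_tokens_map.json, tokenizer_config.json, and vocab.json together define the CTC output vocabulary (expected to match vocab_size = 8065 in config.json) plus the <s>/</s>/<pad>/<unk> special tokens. A minimal sketch of loading them, assuming AutoTokenizer can resolve the tokenizer class for this model (tokenizer_config.json does not name one); "./" again stands in for a local clone:

# Minimal sketch: load the tokenizer files committed above. Assumes
# AutoTokenizer can resolve the tokenizer class; "./" is a placeholder path.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./")
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token, tokenizer.unk_token)
print(len(tokenizer))  # expected to match config.json's vocab_size of 8065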