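# Punctuation model configuration: a CTTransformer (presumably the Controllable
# Time-delay Transformer for punctuation prediction) with a SANM encoder and a
# character-level tokenizer.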
model: CTTransformer
model_conf:
  ignore_id: 0
  embed_unit: 516
  att_unit: 516
  dropout_rate: 0.1
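  # Output label set; "_" presumably means "no punctuation after this token".
  # punc_weight assigns a per-class loss weight to each punc_list entry (all 1.0 here, i.e. unweighted).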
  punc_list:
  - <unk>
  - _
  - ,
  - 。
  - ?
  - 、
  punc_weight:
  - 1.0
  - 1.0
  - 1.0
  - 1.0
  - 1.0
  - 1.0
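  # Presumably the 0-based index into punc_list of the sentence-final mark ("。" at index 3).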
  sentence_end_id: 3
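# SANM self-attention encoder: 12 blocks, 12 attention heads, 516-dim model width,
# 2048-dim feed-forward layers, sinusoidal positional encoding.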
encoder: SANMEncoder
encoder_conf:
  input_size: 516
  output_size: 516
  attention_heads: 12
  linear_units: 2048
  num_blocks: 12
  dropout_rate: 0.1
  positional_dropout_rate: 0.1
  attention_dropout_rate: 0.0
  input_layer: pe
  pos_enc_class: SinusoidalPositionEncoder
  normalize_before: true
  kernel_size: 11
  sanm_shfit: 0
  selfattention_layer_type: sanm
  padding_idx: 0
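# Character-level tokenizer; tokens outside the vocabulary map to the <unk> symbol below.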
tokenizer: CharTokenizer
tokenizer_conf:
  unk_symbol: <unk>