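# Training and inference configuration for a CNN+RNN speech-to-text model
# on the Multilingual LibriSpeech (MLS) German Opus dataset.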
model:
  n_cnn_layers: 3
  n_rnn_layers: 5
  rnn_dim: 512
  n_feats: 128 # number of mel features
  stride: 2
  dropout: 0.3 # ~0.4 recommended for smaller datasets, ~0.1 for very large datasets
training:
  learning_rate: 5.0e-4 # written with a decimal point so YAML 1.1 loaders (e.g. PyYAML) parse it as a float, not a string
  batch_size: 8 # recommended: the largest batch that fits on the GPU (a batch size of 32 fits on a 12 GB GPU)
  epochs: 3
  eval_every_n: 3 # evaluate every n epochs
  num_workers: 8 # number of workers for the dataloader
dataset:
  download: True
  dataset_root_path: "YOUR/PATH" # files will be downloaded into this dir
  language_name: "mls_german_opus"
  limited_supervision: False # set to True to train on the limited-supervision subset
  dataset_percentage: 1.0 # fraction of the dataset to use (1.0 = 100%)
  shuffle: True
tokenizer:
  tokenizer_path: "data/tokenizers/char_tokenizer_german.yaml"
checkpoints:
  model_load_path: "YOUR/PATH" # path to load model from
  model_save_path: "YOUR/PATH" # path to save model to
inference:
  model_load_path: "YOUR/PATH" # path to load model from
  beam_width: 10 # beam width for beam search
  device: "cuda" # device for inference; falls back to "cpu" automatically if no GPU is available