Diffstat (limited to 'config.cluster.yaml')
-rw-r--r--  config.cluster.yaml  34
1 file changed, 34 insertions, 0 deletions
diff --git a/config.cluster.yaml b/config.cluster.yaml
new file mode 100644
index 0000000..a3def0e
--- /dev/null
+++ b/config.cluster.yaml
@@ -0,0 +1,34 @@
+model:
+ n_cnn_layers: 3
+ n_rnn_layers: 5
+ rnn_dim: 512
+ n_feats: 128 # number of mel features
+ stride: 2
+ dropout: 0.25 # recommended range: 0.4-0.6 for smaller datasets, ~0.1 for very large datasets
+
+training:
+ learning_rate: 0.0005
+ batch_size: 64 # recommended: the maximum batch size that fits on the GPU (a batch size of 32 fits on a 12GB GPU)
+ epochs: 150
+ eval_every_n: 5 # evaluate every n epochs
+ num_workers: 8 # number of workers for dataloader
+ device: "cuda" # device to run inference on if gpu is available, else "cpu" will be set automatically
+
+dataset:
+ download: True
+ dataset_root_path: "/mnt/lustre/mladm/mfa252/data" # files will be downloaded into this dir
+ language_name: "mls_german_opus"
+ limited_supervision: False # set to True if you want to use limited supervision
+ dataset_percentage: 1.0 # percentage of dataset to use (1.0 = 100%)
+ shuffle: True
+
+tokenizer:
+ tokenizer_path: "data/tokenizers/char_tokenizer_german.json"
+
+checkpoints:
+ model_load_path: "data/runs/epoch31" # path to load model from
+ model_save_path: "data/runs/epoch" # path to save model to
+
+inference:
+ model_load_path: ~ # path to load model from (~ = not set)
+ device: "cuda" # device to run inference on if gpu is available, else "cpu" will be set automatically \ No newline at end of file