author    Marvin Borner    2023-09-16 15:57:54 +0200
committer JoJoBarthold2    2023-09-18 12:29:46 +0200
commit    c56acdd7702ff65aac9cff4ea7fe3522c2b4d0f9 (patch)
tree      faa8898e71bdac0b89064a4a54867b467eb26648 /config.cluster.yaml
parent    0d38af95f0058875d42dd261a287856ba84d3ce6 (diff)
Synced cluster code
Diffstat (limited to 'config.cluster.yaml')
-rw-r--r--  config.cluster.yaml  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/config.cluster.yaml b/config.cluster.yaml
index a3def0e..7af0aca 100644
--- a/config.cluster.yaml
+++ b/config.cluster.yaml
@@ -4,18 +4,18 @@ model:
rnn_dim: 512
n_feats: 128 # number of mel features
stride: 2
- dropout: 0.25 # recommended to be around 0.4-0.6 for smaller datasets, 0.1 for really large datasets
+ dropout: 0.2 # recommended to be around 0.4-0.6 for smaller datasets, 0.1 for really large datasets
training:
learning_rate: 0.0005
- batch_size: 64 # recommended to set to the maximum number that fits on the GPU (a batch size of 32 fits on a 12GB GPU)
+ batch_size: 400 # recommended to set to the maximum number that fits on the GPU (a batch size of 32 fits on a 12GB GPU)
epochs: 150
eval_every_n: 5 # evaluate every n epochs
- num_workers: 8 # number of workers for dataloader
+ num_workers: 12 # number of workers for dataloader
device: "cuda" # device to run inference on if gpu is available, else "cpu" will be set automatically
dataset:
- download: True
+ download: False
dataset_root_path: "/mnt/lustre/mladm/mfa252/data" # files will be downloaded into this dir
language_name: "mls_german_opus"
limited_supervision: False # set to True if you want to use limited supervision
@@ -26,9 +26,9 @@ tokenizer:
tokenizer_path: "data/tokenizers/char_tokenizer_german.json"
checkpoints:
- model_load_path: "data/runs/epoch31" # path to load model from
+ model_load_path: "data/runs/epoch50" # path to load model from
model_save_path: "data/runs/epoch" # path to save model to
inference:
model_load_path: ~ # path to load model from
- device: "cuda" # device to run inference on if gpu is available, else "cpu" will be set automatically \ No newline at end of file
+ device: "cuda" # device to run inference on if gpu is available, else "cpu" will be set automatically