author     Pherkel 2023-09-18 12:44:40 +0200
committer  Pherkel 2023-09-18 12:44:40 +0200
commit     d5689047fa7062b284d13271bda39013dcf6150f (patch)
tree       bd1a843abda1929b826d9441df3ddc3db5cbec29 /config.cluster.yaml
parent     9475900a1085b8277808b0a0b1555c59f7eb6d36 (diff)
parent     e06227289ad9d2fa45c736c771d859e9911b9a11 (diff)
Merge branch 'decoder' of github.com:Algo-Boys/SWR2-cool-projekt into decoder
Diffstat (limited to 'config.cluster.yaml')
-rw-r--r--  config.cluster.yaml  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/config.cluster.yaml b/config.cluster.yaml
index a3def0e..7af0aca 100644
--- a/config.cluster.yaml
+++ b/config.cluster.yaml
@@ -4,18 +4,18 @@ model:
   rnn_dim: 512
   n_feats: 128 # number of mel features
   stride: 2
-  dropout: 0.25 # recommended to be around 0.4-0.6 for smaller datasets, 0.1 for really large datasets
+  dropout: 0.2 # recommended to be around 0.4-0.6 for smaller datasets, 0.1 for really large datasets
 
 training:
   learning_rate: 0.0005
-  batch_size: 64 # recommended to maximum number that fits on the GPU (batch size of 32 fits on a 12GB GPU)
+  batch_size: 400 # recommended to maximum number that fits on the GPU (batch size of 32 fits on a 12GB GPU)
   epochs: 150
   eval_every_n: 5 # evaluate every n epochs
-  num_workers: 8 # number of workers for dataloader
+  num_workers: 12 # number of workers for dataloader
   device: "cuda" # device to run inference on if gpu is available, else "cpu" will be set automatically
 
 dataset:
-  download: True
+  download: False
   dataset_root_path: "/mnt/lustre/mladm/mfa252/data" # files will be downloaded into this dir
   language_name: "mls_german_opus"
   limited_supervision: False # set to True if you want to use limited supervision
@@ -26,9 +26,9 @@ tokenizer:
   tokenizer_path: "data/tokenizers/char_tokenizer_german.json"
 
 checkpoints:
-  model_load_path: "data/runs/epoch31" # path to load model from
+  model_load_path: "data/runs/epoch50" # path to load model from
   model_save_path: "data/runs/epoch" # path to save model to
 
 inference:
   model_load_path: ~ # path to load model from
-  device: "cuda" # device to run inference on if gpu is available, else "cpu" will be set automatically
\ No newline at end of file
+  device: "cuda" # device to run inference on if gpu is available, else "cpu" will be set automatically
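
For context, a minimal sketch of how a training script might consume the values changed in this commit, assuming the project reads the file with PyYAML and passes batch_size/num_workers to a PyTorch DataLoader. The dataset below is a stand-in; the real project loads mls_german_opus. None of these names are confirmed by the repository itself.

    import yaml
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    # Load the cluster config (path assumed; adjust to your checkout).
    with open("config.cluster.yaml") as f:
        config = yaml.safe_load(f)

    train_cfg = config["training"]

    # Fall back to CPU when no GPU is present, matching the comment in the config.
    device = train_cfg["device"] if torch.cuda.is_available() else "cpu"

    # Placeholder data shaped by the configured mel feature count (n_feats: 128).
    dataset = TensorDataset(torch.randn(1024, config["model"]["n_feats"]))

    loader = DataLoader(
        dataset,
        batch_size=train_cfg["batch_size"],    # 400 after this commit
        num_workers=train_cfg["num_workers"],  # 12 after this commit
        shuffle=True,
    )

Raising batch_size from 64 to 400 and num_workers from 8 to 12 only makes sense on the cluster GPUs this config targets; the in-file comment's "batch size of 32 fits on a 12GB GPU" guidance still applies to smaller cards.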