aboutsummaryrefslogtreecommitdiff
path: root/cfg
diff options
context:
space:
mode:
authorfredeee2023-11-02 10:47:21 +0100
committerfredeee2023-11-02 10:47:21 +0100
commitf8302ee886ef9b631f11a52900dac964a61350e1 (patch)
tree87288be6f851ab69405e524b81940c501c52789a /cfg
parentf16fef1ab9371e1c81a2e0b2fbea59dee285a9f8 (diff)
initial commit
Diffstat (limited to 'cfg')
-rw-r--r--cfg/adept/adept-level1-run1.json81
-rw-r--r--cfg/adept/adept-level1-run2.json81
-rw-r--r--cfg/adept/adept-level1-run3.json81
-rw-r--r--cfg/adept/adept-level1-run4.json81
-rw-r--r--cfg/clevrer/clevrer-level1-run1.json81
-rw-r--r--cfg/clevrer/clevrer-level1-run2.json81
-rw-r--r--cfg/clevrer/clevrer-level1-run3.json81
7 files changed, 567 insertions, 0 deletions
diff --git a/cfg/adept/adept-level1-run1.json b/cfg/adept/adept-level1-run1.json
new file mode 100644
index 0000000..22f6abb
--- /dev/null
+++ b/cfg/adept/adept-level1-run1.json
@@ -0,0 +1,81 @@
+{
+ "model_path": "adept_level1",
+ "datatype": "adept",
+ "dataset": "ADEPT",
+ "learning_rate": 0.0001,
+ "num_updates": 0,
+ "max_epochs": 1000,
+ "max_updates": 400000,
+ "phases": {
+ "start_inner_loop": 60000,
+ "shufleslots_end": 30000,
+ "entity_pretraining_phase2_end": 50000,
+ "entity_pretraining_phase1_end": 30000,
+ "background_pretraining_end": 0
+ },
+ "defaults": {
+ "num_workers": 2,
+ "prefetch_factor": 2,
+ "statistics_offset": 10,
+ "load_optimizers": false,
+ "teacher_forcing": 10,
+ "skip_frames": 3,
+ "error_dropout": 0.1
+ },
+ "bptt": {
+ "bptt_start_timestep": 0,
+ "bptt_steps": 1,
+ "bptt_steps_max": 2,
+ "increase_bptt_steps_every": 200000
+ },
+ "model": {
+ "level": 1,
+ "batch_size": 16,
+ "num_objects": 7,
+ "img_channels": 3,
+ "input_size": [
+ 320,
+ 480
+ ],
+ "latent_size": [
+ 20,
+ 30
+ ],
+ "gestalt_size": 96,
+ "bottleneck": "binar",
+ "position_regularizer": 0.01,
+ "time_regularizer": 0.1,
+ "encoder_regularizer": 0.333333,
+ "inner_loop_enabled": false,
+ "encoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 3,
+ "reg_lambda": 1e-10
+ },
+ "predictor": {
+ "heads": 2,
+ "layers": 2,
+ "channels_multiplier": 2,
+ "reg_lambda": 1e-10,
+ "transformer_type": "standard"
+ },
+ "decoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 5
+ },
+ "background": {
+ "learning_rate": 0.0001,
+ "learning_rate_old": 0.0001,
+ "num_layers": 1,
+ "latent_channels": 48,
+ "level1_channels": 24,
+ "gestalt_size": 8,
+ "flow": false
+ },
+ "update_module": {
+ "reg_lambda": 5e-06
+ }
+ }
+ } \ No newline at end of file
diff --git a/cfg/adept/adept-level1-run2.json b/cfg/adept/adept-level1-run2.json
new file mode 100644
index 0000000..7263eff
--- /dev/null
+++ b/cfg/adept/adept-level1-run2.json
@@ -0,0 +1,81 @@
+{
+ "model_path": "adept_level1",
+ "datatype": "adept",
+ "dataset": "ADEPT",
+ "learning_rate": 3.333e-05,
+ "num_updates": 400000,
+ "max_epochs": 1000,
+ "max_updates": 600000,
+ "phases": {
+ "start_inner_loop": 60000,
+ "shufleslots_end": 30000,
+ "entity_pretraining_phase2_end": 50000,
+ "entity_pretraining_phase1_end": 30000,
+ "background_pretraining_end": 0
+ },
+ "defaults": {
+ "num_workers": 2,
+ "prefetch_factor": 2,
+ "statistics_offset": 10,
+ "load_optimizers": false,
+ "teacher_forcing": 10,
+ "skip_frames": 3,
+ "error_dropout": 0.1
+ },
+ "bptt": {
+ "bptt_start_timestep": 0,
+ "bptt_steps": 2,
+ "bptt_steps_max": 2,
+ "increase_bptt_steps_every": 200000
+ },
+ "model": {
+ "level": 1,
+ "batch_size": 16,
+ "num_objects": 7,
+ "img_channels": 3,
+ "input_size": [
+ 320,
+ 480
+ ],
+ "latent_size": [
+ 20,
+ 30
+ ],
+ "gestalt_size": 96,
+ "bottleneck": "binar",
+ "position_regularizer": 0.01,
+ "time_regularizer": 0.1,
+ "encoder_regularizer": 0.333333,
+ "inner_loop_enabled": true,
+ "encoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 3,
+ "reg_lambda": 1e-10
+ },
+ "predictor": {
+ "heads": 2,
+ "layers": 2,
+ "channels_multiplier": 2,
+ "reg_lambda": 1e-10,
+ "transformer_type": "standard"
+ },
+ "decoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 5
+ },
+ "background": {
+ "learning_rate": 0.0001,
+ "learning_rate_old": 0.0001,
+ "num_layers": 1,
+ "latent_channels": 48,
+ "level1_channels": 24,
+ "gestalt_size": 8,
+ "flow": false
+ },
+ "update_module": {
+ "reg_lambda": 5e-06
+ }
+ }
+ } \ No newline at end of file
diff --git a/cfg/adept/adept-level1-run3.json b/cfg/adept/adept-level1-run3.json
new file mode 100644
index 0000000..c715e97
--- /dev/null
+++ b/cfg/adept/adept-level1-run3.json
@@ -0,0 +1,81 @@
+{
+ "model_path": "adept_level1",
+ "datatype": "adept",
+ "dataset": "ADEPT",
+ "learning_rate": 3.333e-05,
+ "num_updates": 600000,
+ "max_epochs": 1000,
+ "max_updates": 800000,
+ "phases": {
+ "start_inner_loop": 60000,
+ "shufleslots_end": 30000,
+ "entity_pretraining_phase2_end": 50000,
+ "entity_pretraining_phase1_end": 30000,
+ "background_pretraining_end": 0
+ },
+ "defaults": {
+ "num_workers": 2,
+ "prefetch_factor": 2,
+ "statistics_offset": 10,
+ "load_optimizers": false,
+ "teacher_forcing": 10,
+ "skip_frames": 3,
+ "error_dropout": 0.1
+ },
+ "bptt": {
+ "bptt_start_timestep": 0,
+ "bptt_steps": 2,
+ "bptt_steps_max": 2,
+ "increase_bptt_steps_every": 200000
+ },
+ "model": {
+ "level": 1,
+ "batch_size": 16,
+ "num_objects": 7,
+ "img_channels": 3,
+ "input_size": [
+ 320,
+ 480
+ ],
+ "latent_size": [
+ 20,
+ 30
+ ],
+ "gestalt_size": 96,
+ "bottleneck": "binar",
+ "position_regularizer": 0.01,
+ "time_regularizer": 0.1,
+ "encoder_regularizer": 0.333333,
+ "inner_loop_enabled": true,
+ "encoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 3,
+ "reg_lambda": 1e-10
+ },
+ "predictor": {
+ "heads": 2,
+ "layers": 2,
+ "channels_multiplier": 2,
+ "reg_lambda": 1e-10,
+ "transformer_type": "standard"
+ },
+ "decoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 5
+ },
+ "background": {
+ "learning_rate": 0.0001,
+ "learning_rate_old": 0.0001,
+ "num_layers": 1,
+ "latent_channels": 48,
+ "level1_channels": 24,
+ "gestalt_size": 8,
+ "flow": false
+ },
+ "update_module": {
+ "reg_lambda": 5e-06
+ }
+ }
+ } \ No newline at end of file
diff --git a/cfg/adept/adept-level1-run4.json b/cfg/adept/adept-level1-run4.json
new file mode 100644
index 0000000..c750f0d
--- /dev/null
+++ b/cfg/adept/adept-level1-run4.json
@@ -0,0 +1,81 @@
+{
+ "model_path": "adept_level2",
+ "datatype": "adept",
+ "dataset": "ADEPT",
+ "learning_rate": 3.333e-05,
+ "num_updates": 800000,
+ "max_epochs": 1000,
+ "max_updates": 1150000,
+ "phases": {
+ "start_inner_loop": 60000,
+ "shufleslots_end": 30000,
+ "entity_pretraining_phase2_end": 50000,
+ "entity_pretraining_phase1_end": 30000,
+ "background_pretraining_end": 0
+ },
+ "defaults": {
+ "num_workers": 2,
+ "prefetch_factor": 2,
+ "statistics_offset": 10,
+ "load_optimizers": false,
+ "teacher_forcing": 10,
+ "skip_frames": 3,
+ "error_dropout": 0.1
+ },
+ "bptt": {
+ "bptt_start_timestep": 0,
+ "bptt_steps": 2,
+ "bptt_steps_max": 2,
+ "increase_bptt_steps_every": 200000
+ },
+ "model": {
+ "level": 2,
+ "batch_size": 16,
+ "num_objects": 7,
+ "img_channels": 3,
+ "input_size": [
+ 320,
+ 480
+ ],
+ "latent_size": [
+ 20,
+ 30
+ ],
+ "gestalt_size": 96,
+ "bottleneck": "binar",
+ "position_regularizer": 0.01,
+ "time_regularizer": 0.1,
+ "encoder_regularizer": 0.333333,
+ "inner_loop_enabled": true,
+ "encoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 3,
+ "reg_lambda": 1e-10
+ },
+ "predictor": {
+ "heads": 2,
+ "layers": 2,
+ "channels_multiplier": 2,
+ "reg_lambda": 1e-10,
+ "transformer_type": "standard"
+ },
+ "decoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 5
+ },
+ "background": {
+ "learning_rate": 0.0001,
+ "learning_rate_old": 0.0001,
+ "num_layers": 1,
+ "latent_channels": 48,
+ "level1_channels": 24,
+ "gestalt_size": 8,
+ "flow": false
+ },
+ "update_module": {
+ "reg_lambda": 5e-06
+ }
+ }
+ } \ No newline at end of file
diff --git a/cfg/clevrer/clevrer-level1-run1.json b/cfg/clevrer/clevrer-level1-run1.json
new file mode 100644
index 0000000..da64d01
--- /dev/null
+++ b/cfg/clevrer/clevrer-level1-run1.json
@@ -0,0 +1,81 @@
+{
+ "model_path": "clevrer_level1",
+ "datatype": "clevrer",
+ "dataset": "CLEVRER",
+ "learning_rate": 0.0001,
+ "num_updates": 0,
+ "max_epochs": 1000,
+ "max_updates": 200000,
+ "phases": {
+ "start_inner_loop": 100000,
+ "shufleslots_end": 30000,
+ "entity_pretraining_phase2_end": 50000,
+ "entity_pretraining_phase1_end": 30000,
+ "background_pretraining_end": 0
+ },
+ "defaults": {
+ "num_workers": 2,
+ "prefetch_factor": 2,
+ "statistics_offset": 10,
+ "load_optimizers": false,
+ "teacher_forcing": 10,
+ "skip_frames": 2,
+ "error_dropout": 0.1
+ },
+ "bptt": {
+ "bptt_start_timestep": 0,
+ "bptt_steps": 1,
+ "bptt_steps_max": 2,
+ "increase_bptt_steps_every": 200000
+ },
+ "model": {
+ "level": 1,
+ "batch_size": 32,
+ "num_objects": 6,
+ "img_channels": 3,
+ "input_size": [
+ 320,
+ 480
+ ],
+ "latent_size": [
+ 20,
+ 30
+ ],
+ "gestalt_size": 96,
+ "bottleneck": "binar",
+ "position_regularizer": 0.01,
+ "time_regularizer": 0.1,
+ "encoder_regularizer": 0.333333,
+ "inner_loop_enabled": false,
+ "encoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 3,
+ "reg_lambda": 1e-10
+ },
+ "predictor": {
+ "heads": 2,
+ "layers": 2,
+ "channels_multiplier": 2,
+ "reg_lambda": 1e-10,
+ "transformer_type": "shared"
+ },
+ "decoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 3
+ },
+ "background": {
+ "learning_rate": 0.0001,
+ "learning_rate_old": 0.0001,
+ "num_layers": 1,
+ "latent_channels": 48,
+ "level1_channels": 24,
+ "gestalt_size": 8,
+ "flow": false
+ },
+ "update_module": {
+ "reg_lambda": 5e-08
+ }
+ }
+ } \ No newline at end of file
diff --git a/cfg/clevrer/clevrer-level1-run2.json b/cfg/clevrer/clevrer-level1-run2.json
new file mode 100644
index 0000000..0d2d23e
--- /dev/null
+++ b/cfg/clevrer/clevrer-level1-run2.json
@@ -0,0 +1,81 @@
+{
+ "model_path": "clevrer_level1",
+ "datatype": "clevrer",
+ "dataset": "CLEVRER",
+ "learning_rate": 3.3333e-05,
+ "num_updates": 200000,
+ "max_epochs": 1000,
+ "max_updates": 600000,
+ "phases": {
+ "start_inner_loop": 100000,
+ "shufleslots_end": 30000,
+ "entity_pretraining_phase2_end": 50000,
+ "entity_pretraining_phase1_end": 30000,
+ "background_pretraining_end": 0
+ },
+ "defaults": {
+ "num_workers": 2,
+ "prefetch_factor": 2,
+ "statistics_offset": 10,
+ "load_optimizers": false,
+ "teacher_forcing": 10,
+ "skip_frames": 2,
+ "error_dropout": 0.1
+ },
+ "bptt": {
+ "bptt_start_timestep": 0,
+ "bptt_steps": 3,
+ "bptt_steps_max": 3,
+ "increase_bptt_steps_every": 200000
+ },
+ "model": {
+ "level": 1,
+ "batch_size": 32,
+ "num_objects": 6,
+ "img_channels": 3,
+ "input_size": [
+ 320,
+ 480
+ ],
+ "latent_size": [
+ 20,
+ 30
+ ],
+ "gestalt_size": 96,
+ "bottleneck": "binar",
+ "position_regularizer": 0.01,
+ "time_regularizer": 0.1,
+ "encoder_regularizer": 0.333333,
+ "inner_loop_enabled": true,
+ "encoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 3,
+ "reg_lambda": 1e-10
+ },
+ "predictor": {
+ "heads": 2,
+ "layers": 2,
+ "channels_multiplier": 2,
+ "reg_lambda": 1e-10,
+ "transformer_type": "shared"
+ },
+ "decoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 3
+ },
+ "background": {
+ "learning_rate": 0.0001,
+ "learning_rate_old": 0.0001,
+ "num_layers": 1,
+ "latent_channels": 48,
+ "level1_channels": 24,
+ "gestalt_size": 8,
+ "flow": false
+ },
+ "update_module": {
+ "reg_lambda": 5e-08
+ }
+ }
+ } \ No newline at end of file
diff --git a/cfg/clevrer/clevrer-level1-run3.json b/cfg/clevrer/clevrer-level1-run3.json
new file mode 100644
index 0000000..df31316
--- /dev/null
+++ b/cfg/clevrer/clevrer-level1-run3.json
@@ -0,0 +1,81 @@
+{
+ "model_path": "clevrer_level1",
+ "datatype": "clevrer",
+ "dataset": "CLEVRER",
+ "learning_rate": 3.3333e-05,
+ "num_updates": 600000,
+ "max_epochs": 1000,
+ "max_updates": 800000,
+ "phases": {
+ "start_inner_loop": 100000,
+ "shufleslots_end": 30000,
+ "entity_pretraining_phase2_end": 50000,
+ "entity_pretraining_phase1_end": 30000,
+ "background_pretraining_end": 0
+ },
+ "defaults": {
+ "num_workers": 2,
+ "prefetch_factor": 2,
+ "statistics_offset": 10,
+ "load_optimizers": false,
+ "teacher_forcing": 10,
+ "skip_frames": 2,
+ "error_dropout": 0.1
+ },
+ "bptt": {
+ "bptt_start_timestep": 0,
+ "bptt_steps": 4,
+ "bptt_steps_max": 4,
+ "increase_bptt_steps_every": 200000
+ },
+ "model": {
+ "level": 1,
+ "batch_size": 32,
+ "num_objects": 6,
+ "img_channels": 3,
+ "input_size": [
+ 320,
+ 480
+ ],
+ "latent_size": [
+ 20,
+ 30
+ ],
+ "gestalt_size": 96,
+ "bottleneck": "binar",
+ "position_regularizer": 0.01,
+ "time_regularizer": 0.1,
+ "encoder_regularizer": 0.333333,
+ "inner_loop_enabled": true,
+ "encoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 3,
+ "reg_lambda": 1e-10
+ },
+ "predictor": {
+ "heads": 2,
+ "layers": 2,
+ "channels_multiplier": 2,
+ "reg_lambda": 1e-10,
+ "transformer_type": "shared"
+ },
+ "decoder": {
+ "channels": 48,
+ "level1_channels": 24,
+ "num_layers": 3
+ },
+ "background": {
+ "learning_rate": 0.0001,
+ "learning_rate_old": 0.0001,
+ "num_layers": 1,
+ "latent_channels": 48,
+ "level1_channels": 24,
+ "gestalt_size": 8,
+ "flow": false
+ },
+ "update_module": {
+ "reg_lambda": 5e-08
+ }
+ }
+ } \ No newline at end of file