author | Alex Auvolat <alex.auvolat@ens.fr> | 2015-07-28 18:00:18 -0400 |
---|---|---|
committer | Alex Auvolat <alex.auvolat@ens.fr> | 2015-07-28 18:00:18 -0400 |
commit | ca40e5c81d385e1422cebe40e009d7e93b95bfbb (patch) | |
tree | 3e7e1f24089c6976b28917ccd31fb8873d22b652 | |
parent | 3acb6f5ec0e3c0d2c6ceef6eacfea731ef18beb1 (diff) | |
download | taxi-ca40e5c81d385e1422cebe40e009d7e93b95bfbb.tar.gz, taxi-ca40e5c81d385e1422cebe40e009d7e93b95bfbb.zip | |
Memory networks
-rw-r--r-- | config/memory_network_mlp_3_momentum_normalization.py | 55 |
-rw-r--r-- | config/memory_network_mlp_4_momentum.py | 2 |
-rw-r--r-- | config/memory_network_mlp_5_momentum.py | 57 |
3 files changed, 113 insertions(+), 1 deletion(-)
```diff
diff --git a/config/memory_network_mlp_3_momentum_normalization.py b/config/memory_network_mlp_3_momentum_normalization.py
new file mode 100644
index 0000000..aeae401
--- /dev/null
+++ b/config/memory_network_mlp_3_momentum_normalization.py
@@ -0,0 +1,55 @@
+from blocks.initialization import IsotropicGaussian, Constant
+from blocks.algorithms import Momentum
+
+from blocks.bricks import Tanh
+
+import data
+from model.memory_network_mlp import Model, Stream
+
+n_begin_end_pts = 5
+
+dim_embeddings = [
+    ('origin_call', data.origin_call_train_size, 10),
+    ('origin_stand', data.stands_size, 10),
+    ('week_of_year', 52, 10),
+    ('day_of_week', 7, 10),
+    ('qhour_of_day', 24 * 4, 10),
+    ('day_type', 3, 10),
+]
+
+embed_weights_init = IsotropicGaussian(0.001)
+
+class MLPConfig(object):
+    __slots__ = ('dim_input', 'dim_hidden', 'dim_output', 'weights_init', 'biases_init', 'embed_weights_init', 'dim_embeddings')
+
+prefix_encoder = MLPConfig()
+prefix_encoder.dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
+prefix_encoder.dim_hidden = [500]
+prefix_encoder.weights_init = IsotropicGaussian(0.01)
+prefix_encoder.biases_init = Constant(0.001)
+prefix_encoder.embed_weights_init = embed_weights_init
+prefix_encoder.dim_embeddings = dim_embeddings
+
+candidate_encoder = MLPConfig()
+candidate_encoder.dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
+candidate_encoder.dim_hidden = [500]
+candidate_encoder.weights_init = IsotropicGaussian(0.01)
+candidate_encoder.biases_init = Constant(0.001)
+candidate_encoder.embed_weights_init = embed_weights_init
+candidate_encoder.dim_embeddings = dim_embeddings
+
+representation_size = 500
+representation_activation = Tanh
+
+normalize_representation = True
+
+step_rule = Momentum(learning_rate=0.001, momentum=0.9)
+
+batch_size = 5000
+# batch_sort_size = 20
+
+max_splits = 200
+
+train_candidate_size = 10000
+valid_candidate_size = 10000
+test_candidate_size = 10000
diff --git a/config/memory_network_mlp_4_momentum.py b/config/memory_network_mlp_4_momentum.py
index 3590732..9b17137 100644
--- a/config/memory_network_mlp_4_momentum.py
+++ b/config/memory_network_mlp_4_momentum.py
@@ -48,7 +48,7 @@ step_rule = Momentum(learning_rate=0.1, momentum=0.9)
 batch_size = 10000
 # batch_sort_size = 20
 
-# monitor_freq = 2
+monitor_freq = 1000
 
 max_splits = 200
 
diff --git a/config/memory_network_mlp_5_momentum.py b/config/memory_network_mlp_5_momentum.py
new file mode 100644
index 0000000..af0bb40
--- /dev/null
+++ b/config/memory_network_mlp_5_momentum.py
@@ -0,0 +1,57 @@
+from blocks.initialization import IsotropicGaussian, Constant
+from blocks.algorithms import Momentum
+
+from blocks.bricks import Tanh
+
+import data
+from model.memory_network_mlp import Model, Stream
+
+n_begin_end_pts = 5
+
+dim_embeddings = [
+    ('origin_call', data.origin_call_train_size, 10),
+    ('origin_stand', data.stands_size, 10),
+    ('week_of_year', 52, 10),
+    ('day_of_week', 7, 10),
+    ('qhour_of_day', 24 * 4, 10),
+    ('day_type', 3, 10),
+]
+
+embed_weights_init = IsotropicGaussian(0.001)
+
+class MLPConfig(object):
+    __slots__ = ('dim_input', 'dim_hidden', 'dim_output', 'weights_init', 'biases_init', 'embed_weights_init', 'dim_embeddings')
+
+prefix_encoder = MLPConfig()
+prefix_encoder.dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
+prefix_encoder.dim_hidden = [100]
+prefix_encoder.weights_init = IsotropicGaussian(0.01)
+prefix_encoder.biases_init = Constant(0.001)
+prefix_encoder.embed_weights_init = embed_weights_init
+prefix_encoder.dim_embeddings = dim_embeddings
+
+candidate_encoder = MLPConfig()
+candidate_encoder.dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
+candidate_encoder.dim_hidden = [100]
+candidate_encoder.weights_init = IsotropicGaussian(0.01)
+candidate_encoder.biases_init = Constant(0.001)
+candidate_encoder.embed_weights_init = embed_weights_init
+candidate_encoder.dim_embeddings = dim_embeddings
+
+representation_size = 100
+representation_activation = Tanh
+
+normalize_representation = False
+
+step_rule = Momentum(learning_rate=0.1, momentum=0.9)
+
+batch_size = 10000
+# batch_sort_size = 20
+
+monitor_freq = 1000
+
+max_splits = 200
+
+train_candidate_size = 20000
+valid_candidate_size = 20000
+test_candidate_size = 20000
```
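For reference, here is a minimal sketch of the input-width arithmetic both new configs rely on, plus a hypothetical scoring step for what `normalize_representation` might control (L2-normalised, i.e. cosine-similarity, scoring versus a raw dot product). The scoring function is an assumption about `model.memory_network_mlp`, not something shown in this diff, and the vocabulary sizes (`data.origin_call_train_size`, `data.stands_size`) are omitted since they do not enter the input width.

```python
import numpy as np

# Embedding widths as declared in both new configs (one 10-dim embedding per
# metadata field: origin_call, origin_stand, week_of_year, day_of_week,
# qhour_of_day, day_type).
embedding_dims = [10, 10, 10, 10, 10, 10]
n_begin_end_pts = 5

# Each encoder MLP presumably sees the first and last n_begin_end_pts GPS
# points, each a (latitude, longitude) pair -- hence the 2 * 2 factor --
# concatenated with the metadata embeddings.
dim_input = n_begin_end_pts * 2 * 2 + sum(embedding_dims)
assert dim_input == 80  # 20 coordinate values + 60 embedding dimensions

# Hypothetical scoring step: compare one prefix representation against many
# candidate-trip representations of size representation_size.
# normalize_representation = True (config 3) is assumed to mean L2-normalising
# both sides, turning the dot product into a cosine similarity.
def score(prefix_repr, candidate_reprs, normalize_representation):
    if normalize_representation:
        prefix_repr = prefix_repr / np.linalg.norm(prefix_repr)
        candidate_reprs = candidate_reprs / np.linalg.norm(
            candidate_reprs, axis=1, keepdims=True)
    return candidate_reprs @ prefix_repr

rng = np.random.default_rng(0)
prefix = rng.normal(size=500)               # representation_size = 500 (config 3)
candidates = rng.normal(size=(10000, 500))  # train_candidate_size = 10000 (config 3)
weights = score(prefix, candidates, normalize_representation=True)
print(weights.shape)  # (10000,)
```

Note that the config that turns normalization on (memory_network_mlp_3_momentum_normalization.py) also lowers the learning rate from 0.1 to 0.001 and the batch size from 10000 to 5000, while memory_network_mlp_5_momentum.py keeps normalization off with narrower hidden layers (100 instead of 500) and larger candidate sets (20000).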