path: root/model/joint_simple_mlp_tgtcls.py
author    Étienne Simon <esimon@esimon.eu>  2015-07-02 12:59:15 -0400
committer Étienne Simon <esimon@esimon.eu>  2015-07-02 12:59:15 -0400
commit    98139f573eb179c8f5a06ba6c8d8883376814ccf (patch)
tree      f27270d80cb91c19639227c921549f762eda2f72 /model/joint_simple_mlp_tgtcls.py
parent    a4b190516d00428b1d8a81686a3291e5fa5f9865 (diff)
download  taxi-98139f573eb179c8f5a06ba6c8d8883376814ccf.tar.gz
          taxi-98139f573eb179c8f5a06ba6c8d8883376814ccf.zip
Remove _simple
Diffstat (limited to 'model/joint_simple_mlp_tgtcls.py')
-rw-r--r--  model/joint_simple_mlp_tgtcls.py  71
1 file changed, 0 insertions, 71 deletions
diff --git a/model/joint_simple_mlp_tgtcls.py b/model/joint_simple_mlp_tgtcls.py
deleted file mode 100644
index d6d4e49..0000000
--- a/model/joint_simple_mlp_tgtcls.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import numpy
-import theano
-from theano import tensor
-from blocks import roles
-from blocks.bricks import application, MLP, Rectifier, Softmax
-
-import error
-from model.mlp import FFMLP, Stream
-
-
-class Model(FFMLP):
- def __init__(self, config, **kwargs):
- super(Model, self).__init__(config, **kwargs)
-
- self.dest_mlp = MLP(activations=[Rectifier() for _ in config.dim_hidden_dest] + [Softmax()],
- dims=[config.dim_hidden[-1]] + config.dim_hidden_dest + [config.dim_output_dest],
- name='dest_mlp')
- self.time_mlp = MLP(activations=[Rectifier() for _ in config.dim_hidden_time] + [Softmax()],
- dims=[config.dim_hidden[-1]] + config.dim_hidden_time + [config.dim_output_time],
- name='time_mlp')
-
- self.dest_classes = theano.shared(numpy.array(config.dest_tgtcls, dtype=theano.config.floatX), name='dest_classes')
- self.time_classes = theano.shared(numpy.array(config.time_tgtcls, dtype=theano.config.floatX), name='time_classes')
-
- self.inputs.append('input_time')
- self.children.extend([self.dest_mlp, self.time_mlp])
-
- def _push_initialization_config(self):
- super(Model, self)._push_initialization_config()
- for mlp in [self.dest_mlp, self.time_mlp]:
- mlp.weights_init = self.config.mlp_weights_init
- mlp.biases_init = self.config.mlp_biases_init
-
- @application(outputs=['destination', 'duration'])
- def predict(self, **kwargs):
- hidden = super(Model, self).predict(**kwargs)
-
- dest_cls_probas = self.dest_mlp.apply(hidden)
- dest_outputs = tensor.dot(dest_cls_probas, self.dest_classes)
-
- time_cls_probas = self.time_mlp.apply(hidden)
- time_outputs = kwargs['input_time'] + tensor.dot(time_cls_probas, self.time_classes)
-
- self.add_auxiliary_variable(dest_cls_probas, name='destination classes ponderations')
- self.add_auxiliary_variable(time_cls_probas, name='time classes ponderations')
-
- return (dest_outputs, time_outputs)
-
- @predict.property('inputs')
- def predict_inputs(self):
- return self.inputs
-
- @application(outputs=['cost'])
- def cost(self, **kwargs):
- (destination_hat, time_hat) = self.predict(**kwargs)
-
- destination = tensor.concatenate((kwargs['destination_latitude'][:, None],
- kwargs['destination_longitude'][:, None]), axis=1)
- time = kwargs['travel_time']
-
- destination_cost = error.erdist(destination_hat, destination).mean()
- time_cost = error.rmsle(time_hat.flatten(), time.flatten())
-
- self.add_auxiliary_variable(destination_cost, [roles.COST], 'destination_cost')
- self.add_auxiliary_variable(time_cost, [roles.COST], 'time_cost')
-
- return destination_cost + self.config.time_cost_factor * time_cost
-
- @cost.property('inputs')
- def cost_inputs(self):
- return self.inputs + ['destination_latitude', 'destination_longitude', 'travel_time']
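Note on the removed model: the "tgtcls" heads predict by softmax-weighting a fixed set of target classes, i.e. tensor.dot(dest_cls_probas, self.dest_classes) is a probability-weighted average of candidate destination points, and the time head adds its weighted class value to input_time. A minimal standalone sketch of that weighted-centroid step (the class coordinates and probabilities below are hypothetical, only the shapes matter):

    import numpy

    # Hypothetical candidate destinations (e.g. cluster centroids, lat/lon pairs).
    dest_classes = numpy.array([[41.15, -8.61],
                                [41.16, -8.63],
                                [41.14, -8.60]])      # shape (n_classes, 2)
    dest_cls_probas = numpy.array([0.2, 0.5, 0.3])    # softmax output, sums to 1

    # Same weighted sum the model computes with tensor.dot(probas, classes):
    predicted_destination = dest_cls_probas.dot(dest_classes)
    print(predicted_destination)  # a point between the candidate centroids

Because the output is a convex combination of the class coordinates, the prediction always stays inside the convex hull of the target classes.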