author | Alex Auvolat <alex.auvolat@ens.fr> | 2015-05-08 14:59:44 -0400 |
---|---|---|
committer | Alex Auvolat <alex.auvolat@ens.fr> | 2015-05-08 15:00:50 -0400 |
commit | 20a1a01cef9d61ce9dd09995f2c811ab5aca2a9d (patch) | |
tree | c2638b5607820e596b8d7cd46e5137b41b25c61f | |
parent | 0ecac7973fd02f44af9c8bc5765f7c159c94b23a (diff) | |
download | taxi-20a1a01cef9d61ce9dd09995f2c811ab5aca2a9d.tar.gz taxi-20a1a01cef9d61ce9dd09995f2c811ab5aca2a9d.zip |
Add model for a network that predicts both time and destination.
-rw-r--r-- | config/joint_simple_mlp_tgtcls_1_cswdtx.py | 52 |
-rw-r--r-- | model/joint_simple_mlp_tgtcls.py | 90 |
-rwxr-xr-x | train.py | 40 |
3 files changed, 165 insertions, 17 deletions
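Both new files implement the "target classes" (tgtcls) idea: each prediction head ends in a Softmax over a fixed set of classes (the pre-computed arrival clusters for the destination, a grid of 24 Fibonacci-spaced durations for the travel time), and the prediction is the probability-weighted mean of those class values, with the trip's `input_time` added back on the time side. With the settings in this config, the shared input is 5 first + 5 last GPS points (20 coordinates) plus seven 10-dimensional embeddings, i.e. `dim_input = 5 * 2 * 2 + 7 * 10 = 90`, feeding a single 500-unit hidden layer. The numpy sketch below only illustrates the weighted-average output step of the model in the diff below; the function name and the toy data are illustrative, not taken from the repository.

```python
import numpy

def tgtcls_output(logits, classes):
    """Probability-weighted mean of `classes` under the softmax of `logits`.

    logits : (batch, n_classes) unnormalized scores from the last layer
    classes: (n_classes, d) fixed class values, e.g. cluster centroids,
             or a 1-d grid of durations reshaped to (n_classes, 1)
    """
    e = numpy.exp(logits - logits.max(axis=1, keepdims=True))  # numerically stable softmax
    probas = e / e.sum(axis=1, keepdims=True)
    return probas.dot(classes)                                 # shape (batch, d)

# Destination head: classes are (lat, lon) centroids of the arrival clusters.
centroids = numpy.array([[41.15, -8.61], [41.16, -8.63], [41.18, -8.60]])
print(tgtcls_output(numpy.array([[2.0, 0.5, -1.0]]), centroids))

# Time head: classes are Fibonacci-spaced durations; the model in the diff
# adds the trip's input_time on top of the weighted mean (cf. time_outputs).
time_classes = numpy.array([[1.0], [2.0], [3.0], [5.0], [8.0]])
print(tgtcls_output(numpy.array([[0.1, 0.3, 1.2, 0.2, -0.5]]), time_classes) + 300)
```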
```diff
diff --git a/config/joint_simple_mlp_tgtcls_1_cswdtx.py b/config/joint_simple_mlp_tgtcls_1_cswdtx.py
new file mode 100644
index 0000000..f3de40b
--- /dev/null
+++ b/config/joint_simple_mlp_tgtcls_1_cswdtx.py
@@ -0,0 +1,52 @@
+import cPickle
+
+import model.joint_simple_mlp_tgtcls as model
+
+from blocks.initialization import IsotropicGaussian, Constant
+
+import data
+
+n_begin_end_pts = 5  # how many points we consider at the beginning and end of the known trajectory
+n_end_pts = 5
+
+n_valid = 1000
+
+with open("%s/arrival-clusters.pkl" % data.path) as f:
+    dest_tgtcls = cPickle.load(f)
+
+# generate target classes for time prediction as a Fibonacci sequence
+time_tgtcls = [1, 2]
+for i in range(22):
+    time_tgtcls.append(time_tgtcls[-1] + time_tgtcls[-2])
+
+dim_embeddings = [
+    ('origin_call', data.origin_call_size+1, 10),
+    ('origin_stand', data.stands_size+1, 10),
+    ('week_of_year', 52, 10),
+    ('day_of_week', 7, 10),
+    ('qhour_of_day', 24 * 4, 10),
+    ('day_type', 3, 10),
+    ('taxi_id', 448, 10),
+]
+
+# Common network part
+dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
+dim_hidden = [500]
+
+# Destination prediction part
+dim_hidden_dest = []
+dim_output_dest = len(dest_tgtcls)
+
+# Time prediction part
+dim_hidden_time = []
+dim_output_time = len(time_tgtcls)
+
+embed_weights_init = IsotropicGaussian(0.001)
+mlp_weights_init = IsotropicGaussian(0.01)
+mlp_biases_init = Constant(0.001)
+
+learning_rate = 0.0001
+momentum = 0.99
+batch_size = 200
+
+valid_set = 'cuts/test_times_0'
diff --git a/model/joint_simple_mlp_tgtcls.py b/model/joint_simple_mlp_tgtcls.py
new file mode 100644
index 0000000..0a38e06
--- /dev/null
+++ b/model/joint_simple_mlp_tgtcls.py
@@ -0,0 +1,90 @@
+from blocks.bricks import MLP, Rectifier, Linear, Sigmoid, Identity, Softmax
+from blocks.bricks.lookup import LookupTable
+
+import numpy
+import theano
+from theano import tensor
+
+import data
+import error
+
+class Model(object):
+    def __init__(self, config):
+        # The input and the targets
+        x_firstk_latitude = (tensor.matrix('first_k_latitude') - data.train_gps_mean[0]) / data.train_gps_std[0]
+        x_firstk_longitude = (tensor.matrix('first_k_longitude') - data.train_gps_mean[1]) / data.train_gps_std[1]
+
+        x_lastk_latitude = (tensor.matrix('last_k_latitude') - data.train_gps_mean[0]) / data.train_gps_std[0]
+        x_lastk_longitude = (tensor.matrix('last_k_longitude') - data.train_gps_mean[1]) / data.train_gps_std[1]
+
+        x_input_time = tensor.lvector('input_time')
+
+        input_list = [x_firstk_latitude, x_firstk_longitude, x_lastk_latitude, x_lastk_longitude]
+        embed_tables = []
+
+        self.require_inputs = ['first_k_latitude', 'first_k_longitude', 'last_k_latitude', 'last_k_longitude', 'input_time']
+
+        for (varname, num, dim) in config.dim_embeddings:
+            self.require_inputs.append(varname)
+            vardata = tensor.lvector(varname)
+            tbl = LookupTable(length=num, dim=dim, name='%s_lookup'%varname)
+            embed_tables.append(tbl)
+            input_list.append(tbl.apply(vardata))
+
+        y_dest = tensor.concatenate((tensor.vector('destination_latitude')[:, None],
+                                     tensor.vector('destination_longitude')[:, None]), axis=1)
+        y_time = tensor.lvector('travel_time')
+
+        # Define the model
+        common_mlp = MLP(activations=[Rectifier() for _ in config.dim_hidden],
+                         dims=[config.dim_input] + config.dim_hidden)
+
+        dest_mlp = MLP(activations=[Rectifier() for _ in config.dim_hidden_dest] + [Softmax()],
+                       dims=[config.dim_hidden[-1]] + config.dim_hidden_dest + [config.dim_output_dest],
+                       name='dest_mlp')
+        dest_classes = theano.shared(numpy.array(config.dest_tgtcls, dtype=theano.config.floatX), name='dest_classes')
+
+        time_mlp = MLP(activations=[Rectifier() for _ in config.dim_hidden_time] + [Softmax()],
+                       dims=[config.dim_hidden[-1]] + config.dim_hidden_time + [config.dim_output_time],
+                       name='time_mlp')
+        time_classes = theano.shared(numpy.array(config.time_tgtcls, dtype=theano.config.floatX), name='time_classes')
+
+        # Create the Theano variables
+        inputs = tensor.concatenate(input_list, axis=1)
+        # inputs = theano.printing.Print("inputs")(inputs)
+        hidden = common_mlp.apply(inputs)
+
+        dest_cls_probas = dest_mlp.apply(hidden)
+        dest_outputs = tensor.dot(dest_cls_probas, dest_classes)
+        dest_outputs.name = 'dest_outputs'
+
+        time_cls_probas = time_mlp.apply(hidden)
+        time_outputs = tensor.dot(time_cls_probas, time_classes) + x_input_time
+        time_outputs.name = 'time_outputs'
+
+        # Calculate the cost
+        dest_cost = error.erdist(dest_outputs, y_dest).mean()
+        dest_cost.name = 'dest_cost'
+        dest_hcost = error.hdist(dest_outputs, y_dest).mean()
+        dest_hcost.name = 'dest_hcost'
+        time_cost = error.rmsle(time_outputs.flatten(), y_time.flatten())
+        time_cost.name = 'time_cost'
+        cost = dest_cost + time_cost
+        cost.name = 'cost'
+
+        # Initialization
+        for tbl in embed_tables:
+            tbl.weights_init = config.embed_weights_init
+            tbl.initialize()
+
+        for mlp in [common_mlp, dest_mlp, time_mlp]:
+            mlp.weights_init = config.mlp_weights_init
+            mlp.biases_init = config.mlp_biases_init
+            mlp.initialize()
+
+        self.cost = cost
+        self.monitor = [cost, dest_cost, dest_hcost, time_cost]
+        self.outputs = tensor.concatenate([dest_outputs, time_outputs[:, None]], axis=1)
+        self.outputs.name = 'outputs'
+        self.pred_vars = ['destination_longitude', 'destination_latitude', 'travel_time']
+
diff --git a/train.py b/train.py
--- a/train.py
+++ b/train.py
@@ -67,7 +67,6 @@ def setup_test_stream(req_vars):
 
     test = transformers.TaxiAddDateTime(test)
    test = transformers.TaxiAddFirstLastLen(config.n_begin_end_pts, test)
-    test = transformers.TaxiAddLast(config.n_begin_end_pts, test)
     test = transformers.Select(test, tuple(req_vars))
 
     test_stream = Batch(test, iteration_scheme=ConstantScheme(1000))
@@ -96,13 +95,17 @@ def main():
                             # step_rule=AdaDelta(decay_rate=0.5),
                             step_rule=Momentum(learning_rate=config.learning_rate, momentum=config.momentum),
                             params=params)
+
+    plot_vars = [['valid_' + x.name for x in model.monitor]]
+    # plot_vars = ['valid_cost']
+    print "Plot: ", plot_vars
 
     extensions=[TrainingDataMonitoring(model.monitor, prefix='train', every_n_batches=1000),
                 DataStreamMonitoring(model.monitor, valid_stream, prefix='valid', every_n_batches=1000),
                 Printing(every_n_batches=1000),
-                Plot(model_name, channels=[['valid_cost']], every_n_batches=1000),
+                Plot(model_name, channels=plot_vars, every_n_batches=1000),
 
                 # Checkpoint('model.pkl', every_n_batches=100),
                 Dump('model_data/' + model_name, every_n_batches=1000),
                 LoadFromDump('model_data/' + model_name),
@@ -120,21 +123,24 @@ def main():
 
     # Produce an output on the test data
     test_stream = setup_test_stream(req_vars_test)
 
-    outfile = open("output/test-output-%s.csv" % model_name, "w")
-    outcsv = csv.writer(outfile)
-    if model.pred_vars == ['travel_time']:
-        outcsv.writerow(["TRIP_ID", "TRAVEL_TIME"])
-        for out in apply_model.Apply(outputs=outputs, stream=test_stream, return_vars=['trip_id', 'outputs']):
-            time = out['outputs']
-            for i, trip in enumerate(out['trip_id']):
-                outcsv.writerow([trip, int(time[i])])
-    else:
-        outcsv.writerow(["TRIP_ID", "LATITUDE", "LONGITUDE"])
-        for out in apply_model.Apply(outputs=outputs, stream=test_stream, return_vars=['trip_id', 'outputs']):
-            dest = out['outputs']
-            for i, trip in enumerate(out['trip_id']):
-                outcsv.writerow([trip, repr(dest[i, 0]), repr(dest[i, 1])])
-    outfile.close()
+    if 'destination_longitude' in model.pred_vars:
+        dest_outfile = open("output/test-dest-output-%s.csv" % model_name, "w")
+        dest_outcsv = csv.writer(dest_outfile)
+        dest_outcsv.writerow(["TRIP_ID", "LATITUDE", "LONGITUDE"])
+    if 'travel_time' in model.pred_vars:
+        time_outfile = open("output/test-time-output-%s.csv" % model_name, "w")
+        time_outcsv = csv.writer(time_outfile)
+        time_outcsv.writerow(["TRIP_ID", "TRAVEL_TIME"])
+
+    for out in apply_model.Apply(outputs=outputs, stream=test_stream, return_vars=['trip_id', 'outputs']):
+        outputs = out['outputs']
+        for i, trip in enumerate(out['trip_id']):
+            if model.pred_vars == ['travel_time']:
+                time_outcsv.writerow([trip, int(outputs[i])])
+            else:
+                dest_outcsv.writerow([trip, repr(outputs[i, 0]), repr(outputs[i, 1])])
+                if 'travel_time' in model.pred_vars:
+                    time_outcsv.writerow([trip, int(outputs[i, 2])])
 
 if __name__ == "__main__":
```
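The combined training cost in the new model is `cost = dest_cost + time_cost`, where the destination error uses `error.erdist` (with `error.hdist` monitored alongside) and the time error uses `error.rmsle`. The `error` module itself is not part of this commit; the sketch below only spells out, in plain numpy, the formulas those names conventionally stand for (equirectangular distance, haversine distance, root mean squared logarithmic error) and should be read as an assumption about their meaning, not as the repository's implementation.

```python
import numpy

R_EARTH = 6371.0  # mean Earth radius in km

def hdist(a, b):
    """Haversine distance between (lat, lon) pairs in degrees, shape (n, 2)."""
    lat1, lon1 = numpy.radians(a[:, 0]), numpy.radians(a[:, 1])
    lat2, lon2 = numpy.radians(b[:, 0]), numpy.radians(b[:, 1])
    dlat, dlon = lat2 - lat1, lon2 - lon1
    h = numpy.sin(dlat / 2) ** 2 + numpy.cos(lat1) * numpy.cos(lat2) * numpy.sin(dlon / 2) ** 2
    return 2 * R_EARTH * numpy.arcsin(numpy.sqrt(h))

def erdist(a, b):
    """Equirectangular approximation of hdist: cheaper and smooth, reasonable at city scale."""
    lat1, lon1 = numpy.radians(a[:, 0]), numpy.radians(a[:, 1])
    lat2, lon2 = numpy.radians(b[:, 0]), numpy.radians(b[:, 1])
    x = (lon2 - lon1) * numpy.cos((lat1 + lat2) / 2)
    y = lat2 - lat1
    return R_EARTH * numpy.sqrt(x ** 2 + y ** 2)

def rmsle(pred, actual):
    """Root mean squared logarithmic error between predicted and actual durations."""
    return numpy.sqrt(numpy.mean((numpy.log(pred + 1) - numpy.log(actual + 1)) ** 2))

# Toy check: two nearby points in Porto, and a travel-time prediction vs. target.
a = numpy.array([[41.15, -8.61]])
b = numpy.array([[41.18, -8.60]])
print(erdist(a, b), hdist(a, b))   # both a few km; the approximations agree closely here
print(rmsle(numpy.array([700.0]), numpy.array([660.0])))
```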