from theano import tensor
from fuel.transformers import Batch, MultiProcessing, Merge
from fuel.streams import DataStream
from fuel.schemes import ConstantScheme, ShuffledExampleScheme, SequentialExampleScheme
from blocks.bricks import application, MLP, Rectifier, Initializable, Softmax
import data
from data import transformers
from data.cut import TaxiTimeCutScheme
from data.hdf5 import TaxiDataset, TaxiStream
import error
from model import ContextEmbedder
from memory_network import StreamSimple as Stream
from memory_network import MemoryNetworkBase
class MLPEncoder(Initializable):
    """Encode a trip's context and GPS extremities into a fixed-size vector.

    Embeds the categorical context variables with a :class:`ContextEmbedder`,
    standardizes the first/last latitude-longitude inputs with the training-set
    mean/std taken from the ``data`` module, concatenates everything, and runs
    the result through an MLP (Rectifier hidden layers, configurable output
    activation).
    """

    def __init__(self, config, output_dim, activation, **kwargs):
        """Build the embedder and the encoding MLP.

        :param config: configuration object providing ``dim_input`` (int),
            ``dim_hidden`` (list of ints), ``weights_init``, ``biases_init``,
            and the ContextEmbedder settings.
        :param output_dim: size of the produced representation.
        :param activation: activation *class* for the output layer
            (instantiated here).
        """
        super(MLPEncoder, self).__init__(**kwargs)
        self.config = config

        self.context_embedder = ContextEmbedder(self.config)
        self.encoder_mlp = MLP(
            activations=[Rectifier() for _ in config.dim_hidden] + [activation()],
            dims=[config.dim_input] + config.dim_hidden + [output_dim],
            name='encoder')

        # Maps input source name -> GPS axis (0 = latitude, 1 = longitude),
        # e.g. 'first_k_latitude' -> 0.
        self.extremities = {'%s_k_%s' % (side, ['latitude', 'longitude'][axis]): axis
                            for side in ['first', 'last'] for axis in [0, 1]}

        self.children = [self.context_embedder,
                         self.encoder_mlp]

    def _push_initialization_config(self):
        # Propagate the configured initialization schemes to both children.
        for brick in [self.context_embedder, self.encoder_mlp]:
            brick.weights_init = self.config.weights_init
            brick.biases_init = self.config.biases_init

    @application
    def apply(self, **kwargs):
        """Return the symbolic representation for the named input variables."""
        embeddings = tuple(self.context_embedder.apply(
            **{k: kwargs[k] for k in self.context_embedder.inputs}))
        # Standardize coordinates with the training-set statistics for the
        # corresponding axis.
        extremities = tuple((kwargs[k] - data.train_gps_mean[v]) / data.train_gps_std[v]
                            for k, v in self.extremities.items())

        inputs = tensor.concatenate(extremities + embeddings, axis=1)
        return self.encoder_mlp.apply(inputs)

    @apply.property('inputs')
    def apply_inputs(self):
        # list(...) keeps this correct on Python 3, where dict.keys() returns
        # a view that cannot be concatenated to a list with ``+``.
        return self.context_embedder.inputs + list(self.extremities.keys())
class Model(MemoryNetworkBase):
    """Memory network pairing an MLP prefix encoder with an MLP candidate encoder."""

    def __init__(self, config, **kwargs):
        # Both encoders produce representations of the same size with the
        # same output activation; each gets its own sub-configuration.
        shared_args = (config.representation_size,
                       config.representation_activation)
        prefix_encoder = MLPEncoder(config.prefix_encoder, *shared_args,
                                    name='prefix_encoder')
        candidate_encoder = MLPEncoder(config.candidate_encoder, *shared_args,
                                       name='candidate_encoder')
        super(Model, self).__init__(config, prefix_encoder, candidate_encoder,
                                    **kwargs)