Diffstat (limited to 'model')
-rw-r--r--  model/__init__.py    0
-rw-r--r--  model/cchlstm.py   248
-rw-r--r--  model/dgsrnn.py    205
-rw-r--r--  model/gfgru.py     294
-rw-r--r--  model/hpc_lstm.py  115
-rw-r--r--  model/lstm.py      119
6 files changed, 981 insertions, 0 deletions
diff --git a/model/__init__.py b/model/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/model/__init__.py
diff --git a/model/cchlstm.py b/model/cchlstm.py
new file mode 100644
index 0000000..78c9a1f
--- /dev/null
+++ b/model/cchlstm.py
@@ -0,0 +1,248 @@
+import theano
+from theano import tensor
+import numpy
+
+from theano.tensor.shared_randomstreams import RandomStreams
+
+from blocks.algorithms import Momentum, AdaDelta, RMSProp
+from blocks.bricks import Tanh, Softmax, Linear, MLP, Initializable
+from blocks.bricks.lookup import LookupTable
+from blocks.bricks.recurrent import LSTM, BaseRecurrent, recurrent
+from blocks.initialization import IsotropicGaussian, Constant
+
+from blocks.filter import VariableFilter
+from blocks.roles import WEIGHT
+from blocks.graph import ComputationGraph, apply_noise, apply_dropout
+
+rng = RandomStreams()
+
+# An epoch will be composed of 'num_seqs' sequences of length 'seq_len',
+# divided into chunks of length 'seq_div_size'
+num_seqs = 50
+seq_len = 2000
+seq_div_size = 100
+
+io_dim = 256
+
+# Model structure
+hidden_dims = [512, 512, 512, 512, 512]
+activation_function = Tanh()
+
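+# cond_cert[i] is the confidence threshold for layer i+1: that layer only runs
+# when the max softmax probability of the prediction so far is <= cond_cert[i].
+# block_prob[i] is the probability of randomly blocking layer i+1 at a given
+# timestep (via the 'pass%d' binomial masks built in Model below).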
+cond_cert = [0.5, 0.5, 0.5, 0.5]
+block_prob = [0.1, 0.1, 0.1, 0.1]
+
+# Regularization
+w_noise_std = 0.02
+
+# Step rule
+step_rule = 'adadelta'
+learning_rate = 0.1
+momentum = 0.9
+
+
+param_desc = '%s(x%sp%s)-n%s-%dx%d(%d)-%s' % (
+ repr(hidden_dims), repr(cond_cert), repr(block_prob),
+ repr(w_noise_std),
+ num_seqs, seq_len, seq_div_size,
+ step_rule
+ )
+
+save_freq = 5
+on_irc = False
+
+# parameters for sample generation
+sample_len = 200
+sample_temperature = 0.7 #0.5
+sample_freq = 1
+
+if step_rule == 'rmsprop':
+ step_rule = RMSProp()
+elif step_rule == 'adadelta':
+ step_rule = AdaDelta()
+elif step_rule == 'momentum':
+ step_rule = Momentum(learning_rate=learning_rate, momentum=momentum)
+else:
+ assert(False)
+
+class CCHLSTM(BaseRecurrent, Initializable):
+ def __init__(self, io_dim, hidden_dims, cond_cert, activation=None, **kwargs):
+ super(CCHLSTM, self).__init__(**kwargs)
+
+ self.cond_cert = cond_cert
+
+ self.io_dim = io_dim
+ self.hidden_dims = hidden_dims
+
+ self.children = []
+ self.layers = []
+
+ self.softmax = Softmax()
+ self.children.append(self.softmax)
+
+ for i, d in enumerate(hidden_dims):
+ i0 = LookupTable(length=io_dim,
+ dim=4*d,
+ name='i0-%d'%i)
+ self.children.append(i0)
+
+ if i > 0:
+ i1 = Linear(input_dim=hidden_dims[i-1],
+ output_dim=4*d,
+ name='i1-%d'%i)
+ self.children.append(i1)
+ else:
+ i1 = None
+
+ lstm = LSTM(dim=d, activation=activation,
+ name='LSTM-%d'%i)
+ self.children.append(lstm)
+
+ o = Linear(input_dim=d,
+ output_dim=io_dim,
+ name='o-%d'%i)
+ self.children.append(o)
+
+ self.layers.append((i0, i1, lstm, o))
+
+
+ @recurrent(contexts=[])
+ def apply(self, inputs, **kwargs):
+
+ l0i, _, l0l, l0o = self.layers[0]
+ l0iv = l0i.apply(inputs)
+ new_states0, new_cells0 = l0l.apply(states=kwargs['states0'],
+ cells=kwargs['cells0'],
+ inputs=l0iv,
+ iterate=False)
+ l0ov = l0o.apply(new_states0)
+
+ pos = l0ov
+ ps = new_states0
+
+ passnext = tensor.ones((inputs.shape[0],))
+ out_sc = [new_states0, new_cells0, passnext]
+
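+        # Deeper layers run conditionally: a layer is active only if the max
+        # softmax probability of the current output is below its threshold,
+        # its random 'pass' mask is on, and every shallower layer was active.
+        # Inactive layers keep their previous states and cells unchanged.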
+ for i, (cch, (i0, i1, l, o)) in enumerate(zip(self.cond_cert, self.layers[1:])):
+ pop = self.softmax.apply(pos)
+ best = pop.max(axis=1)
+ passnext = passnext * tensor.le(best, cch) * kwargs['pass%d'%i]
+
+ i0v = i0.apply(inputs)
+ i1v = i1.apply(ps)
+
+ prev_states = kwargs['states%d'%i]
+ prev_cells = kwargs['cells%d'%i]
+ new_states, new_cells = l.apply(inputs=i0v + i1v,
+ states=prev_states,
+ cells=prev_cells,
+ iterate=False)
+ new_states = tensor.switch(passnext[:, None], new_states, prev_states)
+ new_cells = tensor.switch(passnext[:, None], new_cells, prev_cells)
+ out_sc += [new_states, new_cells, passnext]
+
+ ov = o.apply(new_states)
+ pos = tensor.switch(passnext[:, None], pos + ov, pos)
+ ps = new_states
+
+ return [pos] + out_sc
+
+ def get_dim(self, name):
+ dims = {'pred': self.io_dim}
+ for i, d in enumerate(self.hidden_dims):
+ dims['states%d'%i] = dims['cells%d'%i] = d
+ if name in dims:
+ return dims[name]
+ return super(CCHLSTM, self).get_dim(name)
+
+ @apply.property('sequences')
+ def apply_sequences(self):
+ return ['inputs'] + ['pass%d'%i for i in range(len(self.hidden_dims)-1)]
+
+ @apply.property('states')
+ def apply_states(self):
+ ret = []
+ for i in range(len(self.hidden_dims)):
+ ret += ['states%d'%i, 'cells%d'%i]
+ return ret
+
+ @apply.property('outputs')
+ def apply_outputs(self):
+ ret = ['pred']
+ for i in range(len(self.hidden_dims)):
+ ret += ['states%d'%i, 'cells%d'%i, 'active%d'%i]
+ return ret
+
+
+class Model():
+ def __init__(self):
+ inp = tensor.lmatrix('bytes')
+
+ # Make state vars
+ state_vars = {}
+ for i, d in enumerate(hidden_dims):
+ state_vars['states%d'%i] = theano.shared(numpy.zeros((num_seqs, d))
+ .astype(theano.config.floatX),
+ name='states%d'%i)
+ state_vars['cells%d'%i] = theano.shared(numpy.zeros((num_seqs, d))
+ .astype(theano.config.floatX),
+ name='cells%d'%i)
+ # Construct brick
+ cchlstm = CCHLSTM(io_dim=io_dim,
+ hidden_dims=hidden_dims,
+ cond_cert=cond_cert,
+ activation=activation_function)
+
+ # Random pass
+ passdict = {}
+ for i, p in enumerate(block_prob):
+ passdict['pass%d'%i] = rng.binomial(size=(inp.shape[1], inp.shape[0]), p=1-p)
+
+ # Apply it
+ outs = cchlstm.apply(inputs=inp.dimshuffle(1, 0),
+ **dict(state_vars.items() + passdict.items()))
+ states = []
+ active_prop = []
+ for i in range(len(hidden_dims)):
+ states.append((state_vars['states%d'%i], outs[3*i+1][-1, :, :]))
+ states.append((state_vars['cells%d'%i], outs[3*i+2][-1, :, :]))
+ active_prop.append(outs[3*i+3].mean())
+ active_prop[-1].name = 'active_prop_%d'%i
+
+ out = outs[0].dimshuffle(1, 0, 2)
+
+ # Do prediction and calculate cost
+ pred = out.argmax(axis=2)
+
+ cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(),
+ out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1),
+ io_dim)))
+ error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean()
+
+ # Initialize all bricks
+ for brick in [cchlstm]:
+ brick.weights_init = IsotropicGaussian(0.1)
+ brick.biases_init = Constant(0.)
+ brick.initialize()
+
+        # Apply noise and dropout
+ cg = ComputationGraph([cost, error_rate])
+ if w_noise_std > 0:
+ noise_vars = VariableFilter(roles=[WEIGHT])(cg)
+ cg = apply_noise(cg, noise_vars, w_noise_std)
+ [cost_reg, error_rate_reg] = cg.outputs
+
+ self.sgd_cost = cost_reg
+ self.monitor_vars = [[cost, cost_reg],
+ [error_rate, error_rate_reg],
+ active_prop]
+
+ cost.name = 'cost'
+ cost_reg.name = 'cost_reg'
+ error_rate.name = 'error_rate'
+ error_rate_reg.name = 'error_rate_reg'
+
+ self.out = out
+ self.pred = pred
+
+ self.states = states
+
diff --git a/model/dgsrnn.py b/model/dgsrnn.py
new file mode 100644
index 0000000..d6d93ff
--- /dev/null
+++ b/model/dgsrnn.py
@@ -0,0 +1,205 @@
+import theano
+from theano import tensor
+import numpy
+
+from theano.tensor.shared_randomstreams import RandomStreams
+
+from blocks.algorithms import Momentum, AdaDelta, RMSProp, Adam
+from blocks.bricks import Activation, Tanh, Logistic, Softmax, Rectifier, Linear, MLP, Initializable, Identity
+from blocks.bricks.base import application, lazy
+from blocks.bricks.recurrent import BaseRecurrent, recurrent
+from blocks.initialization import IsotropicGaussian, Constant
+from blocks.utils import shared_floatx_zeros
+
+from blocks.filter import VariableFilter
+from blocks.roles import WEIGHT, INITIAL_STATE, add_role
+from blocks.graph import ComputationGraph, apply_noise, apply_dropout
+
+rng = RandomStreams()
+
+class TRectifier(Activation):
+ @application(inputs=['input_'], outputs=['output'])
+ def apply(self, input_):
+ return tensor.switch(input_ > 1, input_, 0)
+
+# An epoch will be composed of 'num_seqs' sequences of length 'seq_len',
+# divided into chunks of length 'seq_div_size'
+num_seqs = 10
+seq_len = 1000
+seq_div_size = 5
+
+io_dim = 256
+
+state_dim = 1024
+activation = Tanh()
+transition_hidden = [1024, 1024]
+transition_hidden_activations = [Rectifier(), Rectifier()]
+
+output_hidden = []
+output_hidden_activations = []
+
+weight_noise_std = 0.05
+
+output_h_dropout = 0.0
+drop_update = 0.0
+
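+# L1 penalties added to the regularized cost below: l1_state penalizes the
+# mean absolute hidden state, l1_reset penalizes the mean reset-gate
+# activation, pushing the network to update its state sparsely.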
+l1_state = 0.00
+l1_reset = 0.1
+
+step_rule = 'momentum'
+learning_rate = 0.001
+momentum = 0.99
+
+
+param_desc = '%s,t%s,o%s-n%s-d%s,%s-L1:%s,%s-%s' % (
+ repr(state_dim), repr(transition_hidden), repr(output_hidden),
+ repr(weight_noise_std),
+ repr(output_h_dropout), repr(drop_update),
+ repr(l1_state), repr(l1_reset),
+ step_rule
+ )
+
+save_freq = 5
+on_irc = False
+
+# parameters for sample generation
+sample_len = 100
+sample_temperature = 0.7 #0.5
+sample_freq = 1
+
+if step_rule == 'rmsprop':
+ step_rule = RMSProp()
+elif step_rule == 'adadelta':
+ step_rule = AdaDelta()
+elif step_rule == 'momentum':
+ step_rule = Momentum(learning_rate=learning_rate, momentum=momentum)
+elif step_rule == 'adam':
+ step_rule = Adam()
+else:
+ assert(False)
+
+
+
+class DGSRNN(BaseRecurrent, Initializable):
+ def __init__(self, input_dim, state_dim, act, transition_h, tr_h_activations, **kwargs):
+ super(DGSRNN, self).__init__(**kwargs)
+
+ self.input_dim = input_dim
+ self.state_dim = state_dim
+
+ logistic = Logistic()
+
+ self.inter = MLP(dims=[input_dim + state_dim] + transition_h,
+ activations=tr_h_activations,
+ name='inter')
+ self.reset = MLP(dims=[transition_h[-1], state_dim],
+ activations=[logistic],
+ name='reset')
+ self.update = MLP(dims=[transition_h[-1], state_dim],
+ activations=[act],
+ name='update')
+
+ self.children = [self.inter, self.reset, self.update, logistic, act] + tr_h_activations
+
+ # init state
+ self.params = [shared_floatx_zeros((state_dim,), name='init_state')]
+ add_role(self.params[0], INITIAL_STATE)
+
+ def get_dim(self, name):
+ if name == 'state':
+ return self.state_dim
+        return super(DGSRNN, self).get_dim(name)
+
+ @recurrent(sequences=['inputs', 'drop_updates_mask'], states=['state'],
+ outputs=['state', 'reset'], contexts=[])
+ def apply(self, inputs=None, drop_updates_mask=None, state=None):
+ inter_v = self.inter.apply(tensor.concatenate([inputs, state], axis=1))
+ reset_v = self.reset.apply(inter_v)
+ update_v = self.update.apply(inter_v)
+
+ reset_v = reset_v * drop_updates_mask
+
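+        # Gated update: where reset_v ~ 1 the state is overwritten by the
+        # candidate update_v; where reset_v ~ 0 the previous state is kept.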
+ new_state = state * (1 - reset_v) + reset_v * update_v
+
+ return new_state, reset_v
+
+ @application
+ def initial_state(self, state_name, batch_size, *args, **kwargs):
+ return tensor.repeat(self.params[0][None, :],
+ repeats=batch_size,
+ axis=0)
+
+
+class Model():
+ def __init__(self):
+ inp = tensor.lmatrix('bytes')
+
+ in_onehot = tensor.eq(tensor.arange(io_dim, dtype='int16').reshape((1, 1, io_dim)),
+ inp[:, :, None])
+ in_onehot.name = 'in_onehot'
+
+ dgsrnn = DGSRNN(input_dim=io_dim,
+ state_dim=state_dim,
+ act=activation,
+ transition_h=transition_hidden,
+ tr_h_activations=transition_hidden_activations,
+ name='dgsrnn')
+
+ prev_state = theano.shared(numpy.zeros((num_seqs, state_dim)).astype(theano.config.floatX),
+ name='state')
+
+ states, resets = dgsrnn.apply(inputs=in_onehot.dimshuffle(1, 0, 2),
+ drop_updates_mask=rng.binomial(size=(inp.shape[1], inp.shape[0], state_dim),
+ p=1-drop_update,
+ dtype=theano.config.floatX),
+ state=prev_state)
+ states = states.dimshuffle(1, 0, 2)
+ resets = resets.dimshuffle(1, 0, 2)
+
+ self.states = [(prev_state, states[:, -1, :])]
+
+ out_mlp = MLP(dims=[state_dim] + output_hidden + [io_dim],
+ activations=output_hidden_activations + [None],
+ name='output_mlp')
+ states_sh = states.reshape((inp.shape[0]*inp.shape[1], state_dim))
+ out = out_mlp.apply(states_sh).reshape((inp.shape[0], inp.shape[1], io_dim))
+
+
+ # Do prediction and calculate cost
+ pred = out.argmax(axis=2)
+
+ cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(),
+ out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1),
+ io_dim)))
+ error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean()
+
+ # Initialize all bricks
+ for brick in [dgsrnn, out_mlp]:
+ brick.weights_init = IsotropicGaussian(0.001)
+ brick.biases_init = Constant(0.0)
+ brick.initialize()
+
+ # Apply noise and dropout
+ cg = ComputationGraph([cost, error_rate, states, resets])
+ if weight_noise_std > 0:
+ noise_vars = VariableFilter(roles=[WEIGHT])(cg)
+ cg = apply_noise(cg, noise_vars, weight_noise_std)
+ if output_h_dropout > 0:
+ dv = VariableFilter(name='input_', bricks=out_mlp.linear_transformations)(cg)
+ print "Output H dropout on", len(dv), "vars"
+ cg = apply_dropout(cg, dv, output_h_dropout)
+ [cost_reg, error_rate_reg, states, resets] = cg.outputs
+
+ if l1_state > 0:
+ cost_reg = cost_reg + l1_state * abs(states).mean()
+ if l1_reset > 0:
+ cost_reg = cost_reg + l1_reset * abs(resets).mean()
+
+ self.cost = cost
+ self.error_rate = error_rate
+ self.cost_reg = cost_reg
+ self.error_rate_reg = error_rate_reg
+ self.out = out
+ self.pred = pred
+
+
diff --git a/model/gfgru.py b/model/gfgru.py
new file mode 100644
index 0000000..29d4398
--- /dev/null
+++ b/model/gfgru.py
@@ -0,0 +1,294 @@
+import theano
+from theano import tensor
+import numpy
+
+from blocks.algorithms import Momentum, AdaDelta, RMSProp, Adam
+from blocks.bricks import Activation, Tanh, Logistic, Softmax, Rectifier, Linear, MLP, Initializable, Identity
+from blocks.bricks.base import application, lazy
+from blocks.bricks.recurrent import BaseRecurrent, recurrent
+from blocks.initialization import IsotropicGaussian, Constant
+from blocks.utils import shared_floatx_zeros
+
+from blocks.filter import VariableFilter
+from blocks.roles import WEIGHT, INITIAL_STATE, add_role
+from blocks.graph import ComputationGraph, apply_noise, apply_dropout
+
+class TRectifier(Activation):
+ @application(inputs=['input_'], outputs=['output'])
+ def apply(self, input_):
+ return tensor.switch(input_ > 1, input_, 0)
+
+# An epoch will be composed of 'num_seqs' sequences of length 'seq_len',
+# divided into chunks of length 'seq_div_size'
+num_seqs = 10
+seq_len = 2000
+seq_div_size = 100
+
+io_dim = 256
+
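+# Each tuple is (state_dim, state_activation, inter_hidden_dims,
+# inter_hidden_activations), unpacked as (dim, act, hdim, hact) in
+# GFGRU.__init__ below.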
+recurrent_blocks = [
+# (256, Tanh(), [2048], [Rectifier()]),
+# (512, Rectifier(), [1024], [Rectifier()]),
+ (512, Tanh(), [1024], [Rectifier()]),
+ (512, Tanh(), [1024], [Rectifier()]),
+# (2, Tanh(), [2], [Rectifier()]),
+# (2, Tanh(), [], []),
+ ]
+
+control_hidden = [1024]
+control_hidden_activations = [Rectifier()]
+
+output_hidden = [1024]
+output_hidden_activations = [Rectifier()]
+
+weight_noise_std = 0.05
+
+recurrent_h_dropout = 0
+control_h_dropout = 0
+output_h_dropout = 0.5
+
+step_rule = 'adam'
+learning_rate = 0.1
+momentum = 0.99
+
+
+param_desc = '%s,c%s,o%s-n%s-d%s,%s,%s-%s' % (
+ repr(map(lambda (a, b, c, d): (a, c), recurrent_blocks)),
+ repr(control_hidden), repr(output_hidden),
+ repr(weight_noise_std),
+ repr(recurrent_h_dropout), repr(control_h_dropout), repr(output_h_dropout),
+ step_rule
+ )
+
+save_freq = 5
+on_irc = False
+
+# parameters for sample generation
+sample_len = 100
+sample_temperature = 0.7 #0.5
+sample_freq = 1
+
+if step_rule == 'rmsprop':
+ step_rule = RMSProp()
+elif step_rule == 'adadelta':
+ step_rule = AdaDelta()
+elif step_rule == 'momentum':
+ step_rule = Momentum(learning_rate=learning_rate, momentum=momentum)
+elif step_rule == 'adam':
+ step_rule = Adam()
+else:
+ assert(False)
+
+
+
+
+class GFGRU(BaseRecurrent, Initializable):
+ def __init__(self, input_dim, recurrent_blocks, control_hidden, control_hidden_activations, **kwargs):
+ super(GFGRU, self).__init__(**kwargs)
+
+ self.input_dim = input_dim
+ self.recurrent_blocks = recurrent_blocks
+ self.control_hidden = control_hidden
+ self.control_hidden_activations = control_hidden_activations
+
+ # setup children
+ self.children = control_hidden_activations
+ for (_, a, _, b) in recurrent_blocks:
+ self.children.append(a)
+ for c in b:
+ self.children.append(c)
+
+ logistic = Logistic()
+ self.children.append(logistic)
+
+ self.hidden_total_dim = sum(x for (x, _, _, _) in self.recurrent_blocks)
+
+ # control block
+ self.cblocklen = len(self.recurrent_blocks) + 2
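+        # For every block, the controller outputs one read-gating coefficient
+        # per block state, plus an update coefficient and a reset coefficient
+        # (hence the +2); see apply() below.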
+
+ control_idim = self.hidden_total_dim + self.input_dim
+ control_odim = len(self.recurrent_blocks) * self.cblocklen
+ self.control = MLP(dims=[control_idim] + self.control_hidden + [control_odim],
+ activations=self.control_hidden_activations + [logistic],
+ name='control')
+
+ self.children.append(self.control)
+
+ # recurrent blocks
+ self.blocks = []
+ self.params = []
+ for i, (dim, act, hdim, hact) in enumerate(self.recurrent_blocks):
+ idim = self.input_dim + self.hidden_total_dim
+ if i > 0:
+ idim = idim + self.recurrent_blocks[i-1][0]
+
+ idims = [idim] + hdim
+ if hdim == []:
+ inter = Identity()
+ else:
+ inter = MLP(dims=idims, activations=hact, name='inter%d'%i)
+
+ rgate = MLP(dims=[idims[-1], dim], activations=[logistic], name='rgate%d'%i)
+ nstate = MLP(dims=[idims[-1], dim], activations=[act], name='nstate%d'%i)
+
+ for brick in [inter, rgate, nstate]:
+ self.children.append(brick)
+ self.blocks.append((inter, rgate, nstate))
+
+ # init state zeros
+ self.init_states_names = []
+ self.init_states_dict = {}
+ self.params = []
+
+ for i, (dim, _, _, _) in enumerate(self.recurrent_blocks):
+ name = 'init_state_%d'%i
+ svar = shared_floatx_zeros((dim,), name=name)
+ add_role(svar, INITIAL_STATE)
+
+ self.init_states_names.append(name)
+ self.init_states_dict[name] = svar
+ self.params.append(svar)
+
+ def get_dim(self, name):
+ if name in self.init_states_dict:
+ return self.init_states_dict[name].shape.eval()
+ return super(GFGRU, self).get_dim(name)
+
+ def recurrent_h_dropout_vars(self, cg):
+ ret = []
+ for (inter, rgate, nstate) in self.blocks:
+ ret = ret + VariableFilter(name='input_',
+ bricks=inter.linear_transformations + rgate.linear_transformations + nstate.linear_transformations
+ )(cg)
+ return ret
+
+ def control_h_dropout_vars(self, cg):
+ return VariableFilter(name='input_', bricks=self.control.linear_transformations)(cg)
+
+ @recurrent(sequences=['inputs'], contexts=[])
+ def apply(self, inputs=None, **kwargs):
+ states = [kwargs[i] for i in self.init_states_names]
+ concat_states = tensor.concatenate(states, axis=1)
+
+ concat_input_states = tensor.concatenate([inputs, concat_states], axis=1)
+
+ control_v = self.control.apply(concat_input_states)
+
+ new_states = []
+ for i, (inter, rgate, nstate) in enumerate(self.blocks):
+ controls = control_v[:, i * self.cblocklen:(i+1) * self.cblocklen]
+ r_inputs = tensor.concatenate([s * controls[:, j][:, None] for j, s in enumerate(states)], axis=1)
+
+ more_inputs = [inputs]
+ if i > 0:
+ more_inputs.append(new_states[-1])
+ inter_inputs = tensor.concatenate([r_inputs] + more_inputs, axis=1)
+
+ inter_v = inter.apply(inter_inputs)
+
+ rgate_v = rgate.apply(inter_v)
+ nstate_v = nstate.apply(inter_v)
+
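+            # The last control output scales this block's learned reset gate,
+            # the second-to-last scales the candidate state; the new state is
+            # uctl * candidate + (1 - rctl) * previous state.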
+ rctl = controls[:, -1][:, None] * rgate_v
+ uctl = controls[:, -2][:, None]
+ nstate_v = uctl * nstate_v + (1 - rctl) * states[i]
+
+ new_states.append(nstate_v)
+
+ return new_states
+
+ @apply.property('states')
+ def apply_states(self):
+ return self.init_states_names
+
+ @apply.property('outputs')
+ def apply_outputs(self):
+ return self.init_states_names
+
+ @application
+ def initial_state(self, state_name, batch_size, *args, **kwargs):
+ return tensor.repeat(self.init_states_dict[state_name][None, :],
+ repeats=batch_size,
+ axis=0)
+
+
+
+class Model():
+ def __init__(self):
+ inp = tensor.lmatrix('bytes')
+
+ in_onehot = tensor.eq(tensor.arange(io_dim, dtype='int16').reshape((1, 1, io_dim)),
+ inp[:, :, None])
+ in_onehot.name = 'in_onehot'
+
+ gfgru = GFGRU(input_dim=io_dim,
+ recurrent_blocks=recurrent_blocks,
+ control_hidden=control_hidden,
+ control_hidden_activations=control_hidden_activations)
+
+ hidden_total_dim = sum(x for (x, _, _, _) in recurrent_blocks)
+
+ prev_states_dict = {}
+ for i, (dim, _, _, _) in enumerate(recurrent_blocks):
+ prev_state = theano.shared(numpy.zeros((num_seqs, dim)).astype(theano.config.floatX),
+ name='states_save')
+ prev_states_dict['init_state_%d'%i] = prev_state
+
+ states = [x.dimshuffle(1, 0, 2) for x in gfgru.apply(in_onehot.dimshuffle(1, 0, 2), **prev_states_dict)]
+
+ self.states = []
+ for i, _ in enumerate(recurrent_blocks):
+ self.states.append((prev_states_dict['init_state_%d'%i], states[i][:, -1, :]))
+
+ states_concat = tensor.concatenate(states, axis=2)
+
+ out_mlp = MLP(dims=[hidden_total_dim] + output_hidden + [io_dim],
+ activations=output_hidden_activations + [None],
+ name='output_mlp')
+ states_sh = states_concat.reshape((inp.shape[0]*inp.shape[1], hidden_total_dim))
+ out = out_mlp.apply(states_sh).reshape((inp.shape[0], inp.shape[1], io_dim))
+
+
+
+ # Do prediction and calculate cost
+ pred = out.argmax(axis=2)
+
+ cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(),
+ out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1),
+ io_dim)))
+ error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean()
+
+ # Initialize all bricks
+ for brick in [gfgru, out_mlp]:
+ brick.weights_init = IsotropicGaussian(0.01)
+ brick.biases_init = Constant(0.001)
+ brick.initialize()
+
+ # Apply noise and dropout
+ cg = ComputationGraph([cost, error_rate])
+ if weight_noise_std > 0:
+ noise_vars = VariableFilter(roles=[WEIGHT])(cg)
+ cg = apply_noise(cg, noise_vars, weight_noise_std)
+ if recurrent_h_dropout > 0:
+ dv = gfgru.recurrent_h_dropout_vars(cg)
+ print "Recurrent H dropout on", len(dv), "vars"
+ cg = apply_dropout(cg, dv, recurrent_h_dropout)
+ if control_h_dropout > 0:
+ dv = gfgru.control_h_dropout_vars(cg)
+ print "Control H dropout on", len(dv), "vars"
+ cg = apply_dropout(cg, dv, control_h_dropout)
+ if output_h_dropout > 0:
+ dv = VariableFilter(name='input_', bricks=out_mlp.linear_transformations)(cg)
+ print "Output H dropout on", len(dv), "vars"
+ cg = apply_dropout(cg, dv, output_h_dropout)
+ [cost_reg, error_rate_reg] = cg.outputs
+
+
+ self.cost = cost
+ self.error_rate = error_rate
+ self.cost_reg = cost_reg
+ self.error_rate_reg = error_rate_reg
+ self.out = out
+ self.pred = pred
+
+
diff --git a/model/hpc_lstm.py b/model/hpc_lstm.py
new file mode 100644
index 0000000..8c9cd90
--- /dev/null
+++ b/model/hpc_lstm.py
@@ -0,0 +1,115 @@
+# HPC-LSTM : Hierarchical Predictive Coding LSTM
+
+import theano
+from theano import tensor
+import numpy
+
+from blocks.bricks import Softmax, Tanh, Logistic, Linear, MLP, Identity
+from blocks.bricks.recurrent import LSTM
+from blocks.initialization import IsotropicGaussian, Constant
+
+from blocks.filter import VariableFilter
+from blocks.roles import WEIGHT
+from blocks.graph import ComputationGraph, apply_noise, apply_dropout
+
+
+class Model():
+ def __init__(self, config):
+ inp = tensor.imatrix('bytes')
+
+ in_onehot = tensor.eq(tensor.arange(config.io_dim, dtype='int16').reshape((1, 1, config.io_dim)),
+ inp[:, :, None])
+ in_onehot.name = 'in_onehot'
+
+ bricks = []
+ states = []
+
+ # Construct predictive LSTM hierarchy
+ hidden = []
+ costs = []
+ next_target = in_onehot.dimshuffle(1, 0, 2)
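+        # Predictive-coding stack: the first layer's target is the one-hot
+        # input; each LSTM predicts its target from the hidden state of the
+        # previous timestep (gradients are blocked on the target), and the
+        # residual (target - prediction) becomes the next layer's target.
+        # Each layer adds a squared-error cost scaled by cost_factors[i],
+        # with hidden_q[i] acting as a floor on the |target| weighting.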
+ for i, (hdim, cf, q) in enumerate(zip(config.hidden_dims, config.cost_factors, config.hidden_q)):
+ init_state = theano.shared(numpy.zeros((config.num_seqs, hdim)).astype(theano.config.floatX),
+ name='st0_%d'%i)
+ init_cell = theano.shared(numpy.zeros((config.num_seqs, hdim)).astype(theano.config.floatX),
+ name='cell0_%d'%i)
+
+ linear = Linear(input_dim=config.io_dim, output_dim=4*hdim,
+ name="lstm_in_%d"%i)
+ lstm = LSTM(dim=hdim, activation=config.activation_function,
+ name="lstm_rec_%d"%i)
+ linear2 = Linear(input_dim=hdim, output_dim=config.io_dim, name='lstm_out_%d'%i)
+ tanh = Tanh('lstm_out_tanh_%d'%i)
+ bricks += [linear, lstm, linear2, tanh]
+
+ inter = linear.apply(theano.gradient.disconnected_grad(next_target))
+ new_hidden, new_cells = lstm.apply(inter,
+ states=init_state,
+ cells=init_cell)
+ states.append((init_state, new_hidden[-1, :, :]))
+ states.append((init_cell, new_cells[-1, :, :]))
+
+ hidden += [tensor.concatenate([init_state[None,:,:], new_hidden[:-1,:,:]],axis=0)]
+ pred = tanh.apply(linear2.apply(hidden[-1]))
+ diff = next_target - pred
+ costs += [numpy.float32(cf) * ((abs(next_target)+q)*(diff**2)).sum(axis=2).mean()]
+ next_target = diff
+
+
+ # Construct output from hidden states
+ hidden = [s.dimshuffle(1, 0, 2) for s in hidden]
+
+ out_parts = []
+ out_dims = config.out_hidden + [config.io_dim]
+ for i, (dim, state) in enumerate(zip(config.hidden_dims, hidden)):
+ pred_linear = Linear(input_dim=dim, output_dim=out_dims[0],
+ name='pred_linear_%d'%i)
+ bricks.append(pred_linear)
+ out_parts.append(pred_linear.apply(theano.gradient.disconnected_grad(state)))
+
+ # Do prediction and calculate cost
+ out = sum(out_parts)
+
+ if len(out_dims) > 1:
+ out = config.out_hidden_act[0](name='out_act0').apply(out)
+ mlp = MLP(dims=out_dims,
+ activations=[x(name='out_act%d'%i) for i, x in enumerate(config.out_hidden_act[1:])]
+ +[Identity()],
+ name='out_mlp')
+ bricks.append(mlp)
+ out = mlp.apply(out.reshape((inp.shape[0]*inp.shape[1],-1))).reshape((inp.shape[0],inp.shape[1],-1))
+
+ pred = out.argmax(axis=2)
+
+ cost = Softmax().categorical_cross_entropy(inp.flatten(),
+ out.reshape((inp.shape[0]*inp.shape[1],
+ config.io_dim))).mean()
+ error_rate = tensor.neq(inp.flatten(), pred.flatten()).mean()
+
+ sgd_cost = cost + sum(costs)
+
+ # Initialize all bricks
+ for brick in bricks:
+ brick.weights_init = IsotropicGaussian(0.1)
+ brick.biases_init = Constant(0.)
+ brick.initialize()
+
+
+        # put stuff into self that is useful for training or extensions
+ self.sgd_cost = sgd_cost
+
+ sgd_cost.name = 'sgd_cost'
+ for i in range(len(costs)):
+ costs[i].name = 'pred_cost_%d'%i
+ cost.name = 'cost'
+ error_rate.name = 'error_rate'
+ self.monitor_vars = [costs, [cost],
+ [error_rate]]
+
+ self.out = out
+ self.pred = pred
+
+ self.states = states
+
+
+# vim: set sts=4 ts=4 sw=4 tw=0 et :
diff --git a/model/lstm.py b/model/lstm.py
new file mode 100644
index 0000000..abd44e0
--- /dev/null
+++ b/model/lstm.py
@@ -0,0 +1,119 @@
+import theano
+from theano import tensor
+import numpy
+
+from blocks.bricks import Softmax, Linear
+from blocks.bricks.recurrent import LSTM
+from blocks.initialization import IsotropicGaussian, Constant
+
+from blocks.filter import VariableFilter
+from blocks.roles import WEIGHT
+from blocks.graph import ComputationGraph, apply_noise, apply_dropout
+
+
+class Model():
+ def __init__(self, config):
+ inp = tensor.imatrix('bytes')
+
+ in_onehot = tensor.eq(tensor.arange(config.io_dim, dtype='int16').reshape((1, 1, config.io_dim)),
+ inp[:, :, None])
+ in_onehot.name = 'in_onehot'
+
+ # Construct hidden states
+ dims = [config.io_dim] + config.hidden_dims
+ hidden = [in_onehot.dimshuffle(1, 0, 2)]
+ bricks = []
+ states = []
+ for i in xrange(1, len(dims)):
+ init_state = theano.shared(numpy.zeros((config.num_seqs, dims[i])).astype(theano.config.floatX),
+ name='st0_%d'%i)
+ init_cell = theano.shared(numpy.zeros((config.num_seqs, dims[i])).astype(theano.config.floatX),
+ name='cell0_%d'%i)
+
+ linear = Linear(input_dim=dims[i-1], output_dim=4*dims[i],
+ name="lstm_in_%d"%i)
+ bricks.append(linear)
+ inter = linear.apply(hidden[-1])
+
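+            # With i2h_all, every layer above the first also receives a
+            # projection of the raw one-hot input (hidden[0]) in addition to
+            # the output of the layer below it.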
+ if config.i2h_all and i > 1:
+ linear2 = Linear(input_dim=dims[0], output_dim=4*dims[i],
+ name="lstm_in0_%d"%i)
+ bricks.append(linear2)
+ inter = inter + linear2.apply(hidden[0])
+ inter.name = 'inter_bis_%d'%i
+
+ lstm = LSTM(dim=dims[i], activation=config.activation_function,
+ name="lstm_rec_%d"%i)
+ bricks.append(lstm)
+
+ new_hidden, new_cells = lstm.apply(inter,
+ states=init_state,
+ cells=init_cell)
+ states.append((init_state, new_hidden[-1, :, :]))
+ states.append((init_cell, new_cells[-1, :, :]))
+
+ hidden.append(new_hidden)
+
+ hidden = [s.dimshuffle(1, 0, 2) for s in hidden]
+
+ # Construct output from hidden states
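+        # With h2o_all, every hidden layer gets its own linear readout and the
+        # readouts are summed; otherwise only the top layer feeds the output.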
+ out = None
+ layers = zip(dims, hidden)[1:]
+ if not config.h2o_all:
+ layers = [layers[-1]]
+ for i, (dim, state) in enumerate(layers):
+ top_linear = Linear(input_dim=dim, output_dim=config.io_dim,
+ name='top_linear_%d'%i)
+ bricks.append(top_linear)
+ out_i = top_linear.apply(state)
+ out = out_i if out is None else out + out_i
+ out.name = 'out_part_%d'%i
+
+ # Do prediction and calculate cost
+ pred = out.argmax(axis=2)
+
+ cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(),
+ out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1),
+ config.io_dim))).mean()
+ error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean()
+
+ # Initialize all bricks
+ for brick in bricks:
+ brick.weights_init = IsotropicGaussian(0.1)
+ brick.biases_init = Constant(0.)
+ brick.initialize()
+
+ # Apply noise and dropout
+ cg = ComputationGraph([cost, error_rate])
+ if config.w_noise_std > 0:
+ noise_vars = VariableFilter(roles=[WEIGHT])(cg)
+ cg = apply_noise(cg, noise_vars, config.w_noise_std)
+ if config.i_dropout > 0:
+ cg = apply_dropout(cg, hidden[1:], config.i_dropout)
+ [cost_reg, error_rate_reg] = cg.outputs
+
+ # add l1 regularization
+ if config.l1_reg > 0:
+ l1pen = sum(abs(st).mean() for st in hidden[1:])
+ cost_reg = cost_reg + config.l1_reg * l1pen
+
+ cost_reg += 1e-10 # so that it is not the same Theano variable
+ error_rate_reg += 1e-10
+
+        # put stuff into self that is useful for training or extensions
+ self.sgd_cost = cost_reg
+
+ cost.name = 'cost'
+ cost_reg.name = 'cost_reg'
+ error_rate.name = 'error_rate'
+ error_rate_reg.name = 'error_rate_reg'
+ self.monitor_vars = [[cost, cost_reg],
+ [error_rate, error_rate_reg]]
+
+ self.out = out
+ self.pred = pred
+
+ self.states = states
+
+
+# vim: set sts=4 ts=4 sw=4 tw=0 et :