From 2f479926c16d2911d0dd878c21de082abfc5b237 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 8 Mar 2016 13:26:28 +0100 Subject: Revive project --- .gitignore | 2 +- cchlstm.py | 248 --------------------------------------- config/__init__.py | 0 config/hpc-lstm-1.py | 37 ++++++ config/lstm-frigo-irc.py | 39 +++++++ datastream.py | 1 + dgsrnn.py | 205 --------------------------------- gentext.py | 2 +- gfgru.py | 294 ----------------------------------------------- irc.py | 173 ++++++++++++++++++++++++++++ ircext.py | 4 + lstm.py | 160 -------------------------- model/__init__.py | 0 model/cchlstm.py | 248 +++++++++++++++++++++++++++++++++++++++ model/dgsrnn.py | 205 +++++++++++++++++++++++++++++++++ model/gfgru.py | 294 +++++++++++++++++++++++++++++++++++++++++++++++ model/hpc_lstm.py | 115 ++++++++++++++++++ model/lstm.py | 119 +++++++++++++++++++ train.py | 120 ++++++++----------- 19 files changed, 1282 insertions(+), 984 deletions(-) delete mode 100644 cchlstm.py create mode 100644 config/__init__.py create mode 100644 config/hpc-lstm-1.py create mode 100644 config/lstm-frigo-irc.py delete mode 100644 dgsrnn.py delete mode 100644 gfgru.py create mode 100644 irc.py delete mode 100644 lstm.py create mode 100644 model/__init__.py create mode 100644 model/cchlstm.py create mode 100644 model/dgsrnn.py create mode 100644 model/gfgru.py create mode 100644 model/hpc_lstm.py create mode 100644 model/lstm.py diff --git a/.gitignore b/.gitignore index 3d6982a..9dd37c5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ *.pyc *.swp data/* -model_data/* +params/* diff --git a/cchlstm.py b/cchlstm.py deleted file mode 100644 index 78c9a1f..0000000 --- a/cchlstm.py +++ /dev/null @@ -1,248 +0,0 @@ -import theano -from theano import tensor -import numpy - -from theano.tensor.shared_randomstreams import RandomStreams - -from blocks.algorithms import Momentum, AdaDelta, RMSProp -from blocks.bricks import Tanh, Softmax, Linear, MLP, Initializable -from blocks.bricks.lookup import LookupTable -from blocks.bricks.recurrent import LSTM, BaseRecurrent, recurrent -from blocks.initialization import IsotropicGaussian, Constant - -from blocks.filter import VariableFilter -from blocks.roles import WEIGHT -from blocks.graph import ComputationGraph, apply_noise, apply_dropout - -rng = RandomStreams() - -# An epoch will be composed of 'num_seqs' sequences of len 'seq_len' -# divided in chunks of lengh 'seq_div_size' -num_seqs = 50 -seq_len = 2000 -seq_div_size = 100 - -io_dim = 256 - -# Model structure -hidden_dims = [512, 512, 512, 512, 512] -activation_function = Tanh() - -cond_cert = [0.5, 0.5, 0.5, 0.5] -block_prob = [0.1, 0.1, 0.1, 0.1] - -# Regularization -w_noise_std = 0.02 - -# Step rule -step_rule = 'adadelta' -learning_rate = 0.1 -momentum = 0.9 - - -param_desc = '%s(x%sp%s)-n%s-%dx%d(%d)-%s' % ( - repr(hidden_dims), repr(cond_cert), repr(block_prob), - repr(w_noise_std), - num_seqs, seq_len, seq_div_size, - step_rule - ) - -save_freq = 5 -on_irc = False - -# parameters for sample generation -sample_len = 200 -sample_temperature = 0.7 #0.5 -sample_freq = 1 - -if step_rule == 'rmsprop': - step_rule = RMSProp() -elif step_rule == 'adadelta': - step_rule = AdaDelta() -elif step_rule == 'momentum': - step_rule = Momentum(learning_rate=learning_rate, momentum=momentum) -else: - assert(False) - -class CCHLSTM(BaseRecurrent, Initializable): - def __init__(self, io_dim, hidden_dims, cond_cert, activation=None, **kwargs): - super(CCHLSTM, self).__init__(**kwargs) - - self.cond_cert = cond_cert - - 
self.io_dim = io_dim - self.hidden_dims = hidden_dims - - self.children = [] - self.layers = [] - - self.softmax = Softmax() - self.children.append(self.softmax) - - for i, d in enumerate(hidden_dims): - i0 = LookupTable(length=io_dim, - dim=4*d, - name='i0-%d'%i) - self.children.append(i0) - - if i > 0: - i1 = Linear(input_dim=hidden_dims[i-1], - output_dim=4*d, - name='i1-%d'%i) - self.children.append(i1) - else: - i1 = None - - lstm = LSTM(dim=d, activation=activation, - name='LSTM-%d'%i) - self.children.append(lstm) - - o = Linear(input_dim=d, - output_dim=io_dim, - name='o-%d'%i) - self.children.append(o) - - self.layers.append((i0, i1, lstm, o)) - - - @recurrent(contexts=[]) - def apply(self, inputs, **kwargs): - - l0i, _, l0l, l0o = self.layers[0] - l0iv = l0i.apply(inputs) - new_states0, new_cells0 = l0l.apply(states=kwargs['states0'], - cells=kwargs['cells0'], - inputs=l0iv, - iterate=False) - l0ov = l0o.apply(new_states0) - - pos = l0ov - ps = new_states0 - - passnext = tensor.ones((inputs.shape[0],)) - out_sc = [new_states0, new_cells0, passnext] - - for i, (cch, (i0, i1, l, o)) in enumerate(zip(self.cond_cert, self.layers[1:])): - pop = self.softmax.apply(pos) - best = pop.max(axis=1) - passnext = passnext * tensor.le(best, cch) * kwargs['pass%d'%i] - - i0v = i0.apply(inputs) - i1v = i1.apply(ps) - - prev_states = kwargs['states%d'%i] - prev_cells = kwargs['cells%d'%i] - new_states, new_cells = l.apply(inputs=i0v + i1v, - states=prev_states, - cells=prev_cells, - iterate=False) - new_states = tensor.switch(passnext[:, None], new_states, prev_states) - new_cells = tensor.switch(passnext[:, None], new_cells, prev_cells) - out_sc += [new_states, new_cells, passnext] - - ov = o.apply(new_states) - pos = tensor.switch(passnext[:, None], pos + ov, pos) - ps = new_states - - return [pos] + out_sc - - def get_dim(self, name): - dims = {'pred': self.io_dim} - for i, d in enumerate(self.hidden_dims): - dims['states%d'%i] = dims['cells%d'%i] = d - if name in dims: - return dims[name] - return super(CCHLSTM, self).get_dim(name) - - @apply.property('sequences') - def apply_sequences(self): - return ['inputs'] + ['pass%d'%i for i in range(len(self.hidden_dims)-1)] - - @apply.property('states') - def apply_states(self): - ret = [] - for i in range(len(self.hidden_dims)): - ret += ['states%d'%i, 'cells%d'%i] - return ret - - @apply.property('outputs') - def apply_outputs(self): - ret = ['pred'] - for i in range(len(self.hidden_dims)): - ret += ['states%d'%i, 'cells%d'%i, 'active%d'%i] - return ret - - -class Model(): - def __init__(self): - inp = tensor.lmatrix('bytes') - - # Make state vars - state_vars = {} - for i, d in enumerate(hidden_dims): - state_vars['states%d'%i] = theano.shared(numpy.zeros((num_seqs, d)) - .astype(theano.config.floatX), - name='states%d'%i) - state_vars['cells%d'%i] = theano.shared(numpy.zeros((num_seqs, d)) - .astype(theano.config.floatX), - name='cells%d'%i) - # Construct brick - cchlstm = CCHLSTM(io_dim=io_dim, - hidden_dims=hidden_dims, - cond_cert=cond_cert, - activation=activation_function) - - # Random pass - passdict = {} - for i, p in enumerate(block_prob): - passdict['pass%d'%i] = rng.binomial(size=(inp.shape[1], inp.shape[0]), p=1-p) - - # Apply it - outs = cchlstm.apply(inputs=inp.dimshuffle(1, 0), - **dict(state_vars.items() + passdict.items())) - states = [] - active_prop = [] - for i in range(len(hidden_dims)): - states.append((state_vars['states%d'%i], outs[3*i+1][-1, :, :])) - states.append((state_vars['cells%d'%i], outs[3*i+2][-1, :, :])) - 
active_prop.append(outs[3*i+3].mean()) - active_prop[-1].name = 'active_prop_%d'%i - - out = outs[0].dimshuffle(1, 0, 2) - - # Do prediction and calculate cost - pred = out.argmax(axis=2) - - cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(), - out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1), - io_dim))) - error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean() - - # Initialize all bricks - for brick in [cchlstm]: - brick.weights_init = IsotropicGaussian(0.1) - brick.biases_init = Constant(0.) - brick.initialize() - - # Apply noise and dropoutvars - cg = ComputationGraph([cost, error_rate]) - if w_noise_std > 0: - noise_vars = VariableFilter(roles=[WEIGHT])(cg) - cg = apply_noise(cg, noise_vars, w_noise_std) - [cost_reg, error_rate_reg] = cg.outputs - - self.sgd_cost = cost_reg - self.monitor_vars = [[cost, cost_reg], - [error_rate, error_rate_reg], - active_prop] - - cost.name = 'cost' - cost_reg.name = 'cost_reg' - error_rate.name = 'error_rate' - error_rate_reg.name = 'error_rate_reg' - - self.out = out - self.pred = pred - - self.states = states - diff --git a/config/__init__.py b/config/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/config/hpc-lstm-1.py b/config/hpc-lstm-1.py new file mode 100644 index 0000000..e4009d5 --- /dev/null +++ b/config/hpc-lstm-1.py @@ -0,0 +1,37 @@ +from blocks.algorithms import AdaDelta, Momentum +from blocks.bricks import Tanh, Rectifier + +from model.hpc_lstm import Model + +dataset = 'data/logcompil-2016-03-07.txt' +io_dim = 256 + +# An epoch will be composed of 'num_seqs' sequences of len 'seq_len' +# divided in chunks of lengh 'seq_div_size' +num_seqs = 100 +seq_len = 2000 +seq_div_size = 100 + +hidden_dims = [128, 128, 256, 512] +cost_factors = [1., 1., 1., 1.] +hidden_q = [0.02, 0.02, 0.05, 0.05] +activation_function = Tanh() + +out_hidden = [512] +out_hidden_act = [Rectifier] + +step_rule = AdaDelta() +#step_rule = Momentum(learning_rate=0.0001, momentum=0.99) + +# parameter saving freq (number of batches) +monitor_freq = 10 +save_freq = 100 + +# used for sample generation and IRC mode +sample_temperature = 0.7 #0.5 + +# do we want to generate samples at times during training? +sample_len = 1000 +sample_freq = 100 +sample_init = '\nalex\ttu crois?\n' + diff --git a/config/lstm-frigo-irc.py b/config/lstm-frigo-irc.py new file mode 100644 index 0000000..2d0bf3a --- /dev/null +++ b/config/lstm-frigo-irc.py @@ -0,0 +1,39 @@ +from blocks.algorithms import AdaDelta +from blocks.bricks import Tanh + +from model.lstm import Model + +dataset = 'data/logcompil-2016-03-07.txt' +io_dim = 256 + +# An epoch will be composed of 'num_seqs' sequences of len 'seq_len' +# divided in chunks of lengh 'seq_div_size' +num_seqs = 100 +seq_len = 2000 +seq_div_size = 100 + +hidden_dims = [1024, 1024, 1024] +activation_function = Tanh() + +i2h_all = True # input to all hidden layers or only first layer +h2o_all = True # all hiden layers to output or only last layer + +w_noise_std = 0.02 +i_dropout = 0.5 + +l1_reg = 0 + +step_rule = AdaDelta() + +# parameter saving freq (number of batches) +monitor_freq = 10 +save_freq = 100 + +# used for sample generation and IRC mode +sample_temperature = 0.7 #0.5 + +# do we want to generate samples at times during training? 
+sample_len = 1000 +sample_freq = 100 +sample_init = '\nalex\ttu crois?\n' + diff --git a/datastream.py b/datastream.py index 8025945..b7aae25 100644 --- a/datastream.py +++ b/datastream.py @@ -38,6 +38,7 @@ class BinaryFileDataset(Dataset): return os.fstat(self.f.fileno()).st_size class RandomBlockIterator(IterationScheme): + requests_examples=True def __init__(self, item_range, seq_len, num_seqs_per_epoch, **kwargs): self.seq_len = seq_len self.num_seqs = num_seqs_per_epoch diff --git a/dgsrnn.py b/dgsrnn.py deleted file mode 100644 index d6d93ff..0000000 --- a/dgsrnn.py +++ /dev/null @@ -1,205 +0,0 @@ -import theano -from theano import tensor -import numpy - -from theano.tensor.shared_randomstreams import RandomStreams - -from blocks.algorithms import Momentum, AdaDelta, RMSProp, Adam -from blocks.bricks import Activation, Tanh, Logistic, Softmax, Rectifier, Linear, MLP, Initializable, Identity -from blocks.bricks.base import application, lazy -from blocks.bricks.recurrent import BaseRecurrent, recurrent -from blocks.initialization import IsotropicGaussian, Constant -from blocks.utils import shared_floatx_zeros - -from blocks.filter import VariableFilter -from blocks.roles import WEIGHT, INITIAL_STATE, add_role -from blocks.graph import ComputationGraph, apply_noise, apply_dropout - -rng = RandomStreams() - -class TRectifier(Activation): - @application(inputs=['input_'], outputs=['output']) - def apply(self, input_): - return tensor.switch(input_ > 1, input_, 0) - -# An epoch will be composed of 'num_seqs' sequences of len 'seq_len' -# divided in chunks of lengh 'seq_div_size' -num_seqs = 10 -seq_len = 1000 -seq_div_size = 5 - -io_dim = 256 - -state_dim = 1024 -activation = Tanh() -transition_hidden = [1024, 1024] -transition_hidden_activations = [Rectifier(), Rectifier()] - -output_hidden = [] -output_hidden_activations = [] - -weight_noise_std = 0.05 - -output_h_dropout = 0.0 -drop_update = 0.0 - -l1_state = 0.00 -l1_reset = 0.1 - -step_rule = 'momentum' -learning_rate = 0.001 -momentum = 0.99 - - -param_desc = '%s,t%s,o%s-n%s-d%s,%s-L1:%s,%s-%s' % ( - repr(state_dim), repr(transition_hidden), repr(output_hidden), - repr(weight_noise_std), - repr(output_h_dropout), repr(drop_update), - repr(l1_state), repr(l1_reset), - step_rule - ) - -save_freq = 5 -on_irc = False - -# parameters for sample generation -sample_len = 100 -sample_temperature = 0.7 #0.5 -sample_freq = 1 - -if step_rule == 'rmsprop': - step_rule = RMSProp() -elif step_rule == 'adadelta': - step_rule = AdaDelta() -elif step_rule == 'momentum': - step_rule = Momentum(learning_rate=learning_rate, momentum=momentum) -elif step_rule == 'adam': - step_rule = Adam() -else: - assert(False) - - - -class DGSRNN(BaseRecurrent, Initializable): - def __init__(self, input_dim, state_dim, act, transition_h, tr_h_activations, **kwargs): - super(DGSRNN, self).__init__(**kwargs) - - self.input_dim = input_dim - self.state_dim = state_dim - - logistic = Logistic() - - self.inter = MLP(dims=[input_dim + state_dim] + transition_h, - activations=tr_h_activations, - name='inter') - self.reset = MLP(dims=[transition_h[-1], state_dim], - activations=[logistic], - name='reset') - self.update = MLP(dims=[transition_h[-1], state_dim], - activations=[act], - name='update') - - self.children = [self.inter, self.reset, self.update, logistic, act] + tr_h_activations - - # init state - self.params = [shared_floatx_zeros((state_dim,), name='init_state')] - add_role(self.params[0], INITIAL_STATE) - - def get_dim(self, name): - if name == 'state': - return 
self.state_dim - return super(GFGRU, self).get_dim(name) - - @recurrent(sequences=['inputs', 'drop_updates_mask'], states=['state'], - outputs=['state', 'reset'], contexts=[]) - def apply(self, inputs=None, drop_updates_mask=None, state=None): - inter_v = self.inter.apply(tensor.concatenate([inputs, state], axis=1)) - reset_v = self.reset.apply(inter_v) - update_v = self.update.apply(inter_v) - - reset_v = reset_v * drop_updates_mask - - new_state = state * (1 - reset_v) + reset_v * update_v - - return new_state, reset_v - - @application - def initial_state(self, state_name, batch_size, *args, **kwargs): - return tensor.repeat(self.params[0][None, :], - repeats=batch_size, - axis=0) - - -class Model(): - def __init__(self): - inp = tensor.lmatrix('bytes') - - in_onehot = tensor.eq(tensor.arange(io_dim, dtype='int16').reshape((1, 1, io_dim)), - inp[:, :, None]) - in_onehot.name = 'in_onehot' - - dgsrnn = DGSRNN(input_dim=io_dim, - state_dim=state_dim, - act=activation, - transition_h=transition_hidden, - tr_h_activations=transition_hidden_activations, - name='dgsrnn') - - prev_state = theano.shared(numpy.zeros((num_seqs, state_dim)).astype(theano.config.floatX), - name='state') - - states, resets = dgsrnn.apply(inputs=in_onehot.dimshuffle(1, 0, 2), - drop_updates_mask=rng.binomial(size=(inp.shape[1], inp.shape[0], state_dim), - p=1-drop_update, - dtype=theano.config.floatX), - state=prev_state) - states = states.dimshuffle(1, 0, 2) - resets = resets.dimshuffle(1, 0, 2) - - self.states = [(prev_state, states[:, -1, :])] - - out_mlp = MLP(dims=[state_dim] + output_hidden + [io_dim], - activations=output_hidden_activations + [None], - name='output_mlp') - states_sh = states.reshape((inp.shape[0]*inp.shape[1], state_dim)) - out = out_mlp.apply(states_sh).reshape((inp.shape[0], inp.shape[1], io_dim)) - - - # Do prediction and calculate cost - pred = out.argmax(axis=2) - - cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(), - out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1), - io_dim))) - error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean() - - # Initialize all bricks - for brick in [dgsrnn, out_mlp]: - brick.weights_init = IsotropicGaussian(0.001) - brick.biases_init = Constant(0.0) - brick.initialize() - - # Apply noise and dropout - cg = ComputationGraph([cost, error_rate, states, resets]) - if weight_noise_std > 0: - noise_vars = VariableFilter(roles=[WEIGHT])(cg) - cg = apply_noise(cg, noise_vars, weight_noise_std) - if output_h_dropout > 0: - dv = VariableFilter(name='input_', bricks=out_mlp.linear_transformations)(cg) - print "Output H dropout on", len(dv), "vars" - cg = apply_dropout(cg, dv, output_h_dropout) - [cost_reg, error_rate_reg, states, resets] = cg.outputs - - if l1_state > 0: - cost_reg = cost_reg + l1_state * abs(states).mean() - if l1_reset > 0: - cost_reg = cost_reg + l1_reset * abs(resets).mean() - - self.cost = cost - self.error_rate = error_rate - self.cost_reg = cost_reg - self.error_rate_reg = error_rate_reg - self.out = out - self.pred = pred - - diff --git a/gentext.py b/gentext.py index b8a27bf..2079602 100644 --- a/gentext.py +++ b/gentext.py @@ -46,7 +46,7 @@ class GenText(SimpleExtension): sys.stdout.write(self.init_text) while v.shape[1] < self.max_bytes: prob = prob / 1.00001 - pred = numpy.random.multinomial(1, prob[0, :]).nonzero()[0][0] + pred = numpy.random.multinomial(1, prob[0, :]).nonzero()[0][0].astype('int16') v = numpy.concatenate([v, pred[None, None]], axis=1) sys.stdout.write(chr(int(pred))) diff --git 
a/gfgru.py b/gfgru.py deleted file mode 100644 index 29d4398..0000000 --- a/gfgru.py +++ /dev/null @@ -1,294 +0,0 @@ -import theano -from theano import tensor -import numpy - -from blocks.algorithms import Momentum, AdaDelta, RMSProp, Adam -from blocks.bricks import Activation, Tanh, Logistic, Softmax, Rectifier, Linear, MLP, Initializable, Identity -from blocks.bricks.base import application, lazy -from blocks.bricks.recurrent import BaseRecurrent, recurrent -from blocks.initialization import IsotropicGaussian, Constant -from blocks.utils import shared_floatx_zeros - -from blocks.filter import VariableFilter -from blocks.roles import WEIGHT, INITIAL_STATE, add_role -from blocks.graph import ComputationGraph, apply_noise, apply_dropout - -class TRectifier(Activation): - @application(inputs=['input_'], outputs=['output']) - def apply(self, input_): - return tensor.switch(input_ > 1, input_, 0) - -# An epoch will be composed of 'num_seqs' sequences of len 'seq_len' -# divided in chunks of lengh 'seq_div_size' -num_seqs = 10 -seq_len = 2000 -seq_div_size = 100 - -io_dim = 256 - -recurrent_blocks = [ -# (256, Tanh(), [2048], [Rectifier()]), -# (512, Rectifier(), [1024], [Rectifier()]), - (512, Tanh(), [1024], [Rectifier()]), - (512, Tanh(), [1024], [Rectifier()]), -# (2, Tanh(), [2], [Rectifier()]), -# (2, Tanh(), [], []), - ] - -control_hidden = [1024] -control_hidden_activations = [Rectifier()] - -output_hidden = [1024] -output_hidden_activations = [Rectifier()] - -weight_noise_std = 0.05 - -recurrent_h_dropout = 0 -control_h_dropout = 0 -output_h_dropout = 0.5 - -step_rule = 'adam' -learning_rate = 0.1 -momentum = 0.99 - - -param_desc = '%s,c%s,o%s-n%s-d%s,%s,%s-%s' % ( - repr(map(lambda (a, b, c, d): (a, c), recurrent_blocks)), - repr(control_hidden), repr(output_hidden), - repr(weight_noise_std), - repr(recurrent_h_dropout), repr(control_h_dropout), repr(output_h_dropout), - step_rule - ) - -save_freq = 5 -on_irc = False - -# parameters for sample generation -sample_len = 100 -sample_temperature = 0.7 #0.5 -sample_freq = 1 - -if step_rule == 'rmsprop': - step_rule = RMSProp() -elif step_rule == 'adadelta': - step_rule = AdaDelta() -elif step_rule == 'momentum': - step_rule = Momentum(learning_rate=learning_rate, momentum=momentum) -elif step_rule == 'adam': - step_rule = Adam() -else: - assert(False) - - - - -class GFGRU(BaseRecurrent, Initializable): - def __init__(self, input_dim, recurrent_blocks, control_hidden, control_hidden_activations, **kwargs): - super(GFGRU, self).__init__(**kwargs) - - self.input_dim = input_dim - self.recurrent_blocks = recurrent_blocks - self.control_hidden = control_hidden - self.control_hidden_activations = control_hidden_activations - - # setup children - self.children = control_hidden_activations - for (_, a, _, b) in recurrent_blocks: - self.children.append(a) - for c in b: - self.children.append(c) - - logistic = Logistic() - self.children.append(logistic) - - self.hidden_total_dim = sum(x for (x, _, _, _) in self.recurrent_blocks) - - # control block - self.cblocklen = len(self.recurrent_blocks) + 2 - - control_idim = self.hidden_total_dim + self.input_dim - control_odim = len(self.recurrent_blocks) * self.cblocklen - self.control = MLP(dims=[control_idim] + self.control_hidden + [control_odim], - activations=self.control_hidden_activations + [logistic], - name='control') - - self.children.append(self.control) - - # recurrent blocks - self.blocks = [] - self.params = [] - for i, (dim, act, hdim, hact) in enumerate(self.recurrent_blocks): - idim = 
self.input_dim + self.hidden_total_dim - if i > 0: - idim = idim + self.recurrent_blocks[i-1][0] - - idims = [idim] + hdim - if hdim == []: - inter = Identity() - else: - inter = MLP(dims=idims, activations=hact, name='inter%d'%i) - - rgate = MLP(dims=[idims[-1], dim], activations=[logistic], name='rgate%d'%i) - nstate = MLP(dims=[idims[-1], dim], activations=[act], name='nstate%d'%i) - - for brick in [inter, rgate, nstate]: - self.children.append(brick) - self.blocks.append((inter, rgate, nstate)) - - # init state zeros - self.init_states_names = [] - self.init_states_dict = {} - self.params = [] - - for i, (dim, _, _, _) in enumerate(self.recurrent_blocks): - name = 'init_state_%d'%i - svar = shared_floatx_zeros((dim,), name=name) - add_role(svar, INITIAL_STATE) - - self.init_states_names.append(name) - self.init_states_dict[name] = svar - self.params.append(svar) - - def get_dim(self, name): - if name in self.init_states_dict: - return self.init_states_dict[name].shape.eval() - return super(GFGRU, self).get_dim(name) - - def recurrent_h_dropout_vars(self, cg): - ret = [] - for (inter, rgate, nstate) in self.blocks: - ret = ret + VariableFilter(name='input_', - bricks=inter.linear_transformations + rgate.linear_transformations + nstate.linear_transformations - )(cg) - return ret - - def control_h_dropout_vars(self, cg): - return VariableFilter(name='input_', bricks=self.control.linear_transformations)(cg) - - @recurrent(sequences=['inputs'], contexts=[]) - def apply(self, inputs=None, **kwargs): - states = [kwargs[i] for i in self.init_states_names] - concat_states = tensor.concatenate(states, axis=1) - - concat_input_states = tensor.concatenate([inputs, concat_states], axis=1) - - control_v = self.control.apply(concat_input_states) - - new_states = [] - for i, (inter, rgate, nstate) in enumerate(self.blocks): - controls = control_v[:, i * self.cblocklen:(i+1) * self.cblocklen] - r_inputs = tensor.concatenate([s * controls[:, j][:, None] for j, s in enumerate(states)], axis=1) - - more_inputs = [inputs] - if i > 0: - more_inputs.append(new_states[-1]) - inter_inputs = tensor.concatenate([r_inputs] + more_inputs, axis=1) - - inter_v = inter.apply(inter_inputs) - - rgate_v = rgate.apply(inter_v) - nstate_v = nstate.apply(inter_v) - - rctl = controls[:, -1][:, None] * rgate_v - uctl = controls[:, -2][:, None] - nstate_v = uctl * nstate_v + (1 - rctl) * states[i] - - new_states.append(nstate_v) - - return new_states - - @apply.property('states') - def apply_states(self): - return self.init_states_names - - @apply.property('outputs') - def apply_outputs(self): - return self.init_states_names - - @application - def initial_state(self, state_name, batch_size, *args, **kwargs): - return tensor.repeat(self.init_states_dict[state_name][None, :], - repeats=batch_size, - axis=0) - - - -class Model(): - def __init__(self): - inp = tensor.lmatrix('bytes') - - in_onehot = tensor.eq(tensor.arange(io_dim, dtype='int16').reshape((1, 1, io_dim)), - inp[:, :, None]) - in_onehot.name = 'in_onehot' - - gfgru = GFGRU(input_dim=io_dim, - recurrent_blocks=recurrent_blocks, - control_hidden=control_hidden, - control_hidden_activations=control_hidden_activations) - - hidden_total_dim = sum(x for (x, _, _, _) in recurrent_blocks) - - prev_states_dict = {} - for i, (dim, _, _, _) in enumerate(recurrent_blocks): - prev_state = theano.shared(numpy.zeros((num_seqs, dim)).astype(theano.config.floatX), - name='states_save') - prev_states_dict['init_state_%d'%i] = prev_state - - states = [x.dimshuffle(1, 0, 2) for x in 
gfgru.apply(in_onehot.dimshuffle(1, 0, 2), **prev_states_dict)] - - self.states = [] - for i, _ in enumerate(recurrent_blocks): - self.states.append((prev_states_dict['init_state_%d'%i], states[i][:, -1, :])) - - states_concat = tensor.concatenate(states, axis=2) - - out_mlp = MLP(dims=[hidden_total_dim] + output_hidden + [io_dim], - activations=output_hidden_activations + [None], - name='output_mlp') - states_sh = states_concat.reshape((inp.shape[0]*inp.shape[1], hidden_total_dim)) - out = out_mlp.apply(states_sh).reshape((inp.shape[0], inp.shape[1], io_dim)) - - - - # Do prediction and calculate cost - pred = out.argmax(axis=2) - - cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(), - out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1), - io_dim))) - error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean() - - # Initialize all bricks - for brick in [gfgru, out_mlp]: - brick.weights_init = IsotropicGaussian(0.01) - brick.biases_init = Constant(0.001) - brick.initialize() - - # Apply noise and dropout - cg = ComputationGraph([cost, error_rate]) - if weight_noise_std > 0: - noise_vars = VariableFilter(roles=[WEIGHT])(cg) - cg = apply_noise(cg, noise_vars, weight_noise_std) - if recurrent_h_dropout > 0: - dv = gfgru.recurrent_h_dropout_vars(cg) - print "Recurrent H dropout on", len(dv), "vars" - cg = apply_dropout(cg, dv, recurrent_h_dropout) - if control_h_dropout > 0: - dv = gfgru.control_h_dropout_vars(cg) - print "Control H dropout on", len(dv), "vars" - cg = apply_dropout(cg, dv, control_h_dropout) - if output_h_dropout > 0: - dv = VariableFilter(name='input_', bricks=out_mlp.linear_transformations)(cg) - print "Output H dropout on", len(dv), "vars" - cg = apply_dropout(cg, dv, output_h_dropout) - [cost_reg, error_rate_reg] = cg.outputs - - - self.cost = cost - self.error_rate = error_rate - self.cost_reg = cost_reg - self.error_rate_reg = error_rate_reg - self.out = out - self.pred = pred - - diff --git a/irc.py b/irc.py new file mode 100644 index 0000000..f8ca125 --- /dev/null +++ b/irc.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python2 + +import logging +import sys +import importlib + +import theano + +from blocks.extensions import Printing, SimpleExtension, FinishAfter +from blocks.extensions.monitoring import DataStreamMonitoring, TrainingDataMonitoring + +from blocks.graph import ComputationGraph +from blocks.main_loop import MainLoop +from blocks.model import Model +from blocks.algorithms import GradientDescent + +try: + from blocks.extras.extensions.plot import Plot + plot_avail = False +except ImportError: + plot_avail = False + + +import datastream +from paramsaveload import SaveLoadParams +from gentext import GenText +from ircext import IRCClientExt + +logging.basicConfig(level='INFO') +logger = logging.getLogger(__name__) + +sys.setrecursionlimit(500000) + + +class ResetStates(SimpleExtension): + def __init__(self, state_vars, **kwargs): + super(ResetStates, self).__init__(**kwargs) + + self.f = theano.function( + inputs=[], outputs=[], + updates=[(v, v.zeros_like()) for v in state_vars]) + + def do(self, which_callback, *args): + self.f() + +if __name__ == "__main__": + if len(sys.argv) < 2: + print >> sys.stderr, 'Usage: %s [options] config' % sys.argv[0] + sys.exit(1) + model_name = sys.argv[-1] + config = importlib.import_module('%s' % model_name) + + + # Build datastream + train_stream = datastream.setup_datastream('data/logcompil.txt', + config.num_seqs, + config.seq_len, + config.seq_div_size) + + # Build model + m = config.Model() + 
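    # The config module resolved above is a plain Python file under config/ that is
    # expected to expose both the hyper-parameters read by this script and by
    # train.py (num_seqs, seq_len, seq_div_size, step_rule, save_freq, the sample_*
    # settings, ...) and the Model class to instantiate. As the comments in the
    # config files describe, one epoch then consists of 'num_seqs' randomly placed
    # byte sequences of length 'seq_len', each fed to the network as consecutive
    # chunks of 'seq_div_size' bytes, with the recurrent state carried over from
    # one chunk to the next. A minimal hypothetical config sketch, mirroring the
    # fields of config/lstm-frigo-irc.py (illustration only, not part of this
    # commit):
    #
    #     from blocks.algorithms import AdaDelta
    #     from blocks.bricks import Tanh
    #     from model.lstm import Model
    #
    #     dataset = 'data/logcompil-2016-03-07.txt'
    #     io_dim = 256                      # byte-level model: 256 symbols
    #     num_seqs, seq_len, seq_div_size = 100, 2000, 100
    #     hidden_dims = [1024, 1024, 1024]
    #     activation_function = Tanh()
    #     i2h_all = True                    # one-hot input fed to every layer
    #     h2o_all = True                    # output read from every layer
    #     w_noise_std, i_dropout, l1_reg = 0.02, 0.5, 0
    #     step_rule = AdaDelta()
    #     monitor_freq, save_freq = 10, 100
    #     sample_len, sample_freq, sample_temperature = 1000, 100, 0.7
    #     sample_init = '\nalex\ttu crois?\n'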
m.pred.name = 'pred' + + # Train the model + saveloc = 'model_data/%s-%s' % (model_name, config.param_desc) + train_model(m, train_stream, dump_path=saveloc) + + + # Define the model + model = Model(m.sgd_cost) + + # IRC mode : just load the parameters and run an IRC server + if '--irc' in sys.argv: + try: + extensions.append(FinishAfter(before_training=True, after_n_batches=1)) + print "Initializing main loop" + main_loop.run() + print "Jumping into IRC" + irc.run_forever() + except KeyboardInterrupt: + pass + sys.exit(0) + + # Train the model + + cg = ComputationGraph(m.sgd_cost) + algorithm = GradientDescent(cost=m.sgd_cost, + step_rule=config.step_rule, + parameters=cg.parameters) + + algorithm.add_updates(m.states) + + monitor_vars = [v for p in m.monitor_vars for v in p] + extensions = [ + TrainingDataMonitoring( + monitor_vars, + prefix='train', every_n_epochs=1), + Printing(every_n_epochs=1, after_epoch=False), + + ResetStates([v for v, _ in m.states], after_epoch=True) + ] + if plot_avail: + plot_channels = [['train_' + v.name for v in p] for p in m.monitor_vars] + extensions.append( + Plot(document='text_'+model_name, + channels=plot_channels, + server_url='http://localhost:5006', + every_n_epochs=1, after_epoch=False) + ) + if config.save_freq is not None and dump_path is not None: + extensions.append( + SaveLoadParams(path=dump_path+'.pkl', + model=model, + before_training=True, + after_training=True, + after_epoch=False, + every_n_epochs=config.save_freq) + ) + if config.sample_freq is not None: + extensions.append( + GenText(m, '\nalex\ttu crois ?\n', + config.sample_len, config.sample_temperature, + every_n_epochs=config.sample_freq, + after_epoch=False, before_training=True) + ) + if config.on_irc: + irc = IRCClientExt(m, config.sample_temperature, + server='clipper.ens.fr', + port=6667, + nick='frigo', + channels=['#frigotest', '#courssysteme'], + after_batch=True) + irc.do('before_training') + extensions.append(irc) + + if config.on_irc: + irc = IRCClientExt(m, config.sample_temperature, + server='clipper.ens.fr', + port=6667, + nick='frigo', + channels=['#frigotest', '#courssysteme'], + after_batch=True) + irc.do('before_training') + extensions.append(irc) + + main_loop = MainLoop( + model=model, + data_stream=train_stream, + algorithm=algorithm, + extensions=extensions + ) + main_loop.run() + + # IRC mode : just load the parameters and run an IRC server + if '--irc' in sys.argv: + try: + extensions.append(FinishAfter(before_training=True, after_n_batches=1)) + print "Initializing main loop" + main_loop.run() + print "Jumping into IRC" + irc.run_forever() + except KeyboardInterrupt: + pass + sys.exit(0) + + + + + + + + +# vim: set sts=4 ts=4 sw=4 tw=0 et : diff --git a/ircext.py b/ircext.py index 1af2ba8..d8580ad 100644 --- a/ircext.py +++ b/ircext.py @@ -125,5 +125,9 @@ class IRCClientExt(SimpleExtension): def do(self, which_callback, *args): logger.info('Polling...') self.irc.reactor.process_once() + + def run_forever(self): + self.irc.reactor.process_forever() +# vim: set sts=4 ts=4 sw=4 tw=0 et : diff --git a/lstm.py b/lstm.py deleted file mode 100644 index 1750d58..0000000 --- a/lstm.py +++ /dev/null @@ -1,160 +0,0 @@ -import theano -from theano import tensor -import numpy - -from blocks.algorithms import Momentum, AdaDelta, RMSProp -from blocks.bricks import Tanh, Softmax, Linear, MLP -from blocks.bricks.recurrent import LSTM -from blocks.initialization import IsotropicGaussian, Constant - -from blocks.filter import VariableFilter -from blocks.roles import WEIGHT 
-from blocks.graph import ComputationGraph, apply_noise, apply_dropout - -# An epoch will be composed of 'num_seqs' sequences of len 'seq_len' -# divided in chunks of lengh 'seq_div_size' -num_seqs = 20 -seq_len = 5000 -seq_div_size = 200 - -io_dim = 256 - -hidden_dims = [1024, 1024, 1024] -activation_function = Tanh() - -i2h_all = True # input to all hidden layers or only first layer -h2o_all = True # all hiden layers to output or only last layer - -w_noise_std = 0.02 -i_dropout = 0.5 - -l1_reg = 0 - -step_rule = 'adadelta' -learning_rate = 0.1 -momentum = 0.9 - - -param_desc = '%s-%sIH,%sHO-n%s-d%s-l1r%s-%dx%d(%d)-%s' % ( - repr(hidden_dims), - 'all' if i2h_all else 'first', - 'all' if h2o_all else 'last', - repr(w_noise_std), - repr(i_dropout), - repr(l1_reg), - num_seqs, seq_len, seq_div_size, - step_rule - ) - -save_freq = 5 -on_irc = True - -# parameters for sample generation -sample_len = 1000 -sample_temperature = 0.7 #0.5 -sample_freq = None - -if step_rule == 'rmsprop': - step_rule = RMSProp() -elif step_rule == 'adadelta': - step_rule = AdaDelta() -elif step_rule == 'momentum': - step_rule = Momentum(learning_rate=learning_rate, momentum=momentum) -else: - assert(False) - -class Model(): - def __init__(self): - inp = tensor.lmatrix('bytes') - - in_onehot = tensor.eq(tensor.arange(io_dim, dtype='int16').reshape((1, 1, io_dim)), - inp[:, :, None]) - in_onehot.name = 'in_onehot' - - # Construct hidden states - dims = [io_dim] + hidden_dims - hidden = [in_onehot.dimshuffle(1, 0, 2)] - bricks = [] - states = [] - for i in xrange(1, len(dims)): - init_state = theano.shared(numpy.zeros((num_seqs, dims[i])).astype(theano.config.floatX), - name='st0_%d'%i) - init_cell = theano.shared(numpy.zeros((num_seqs, dims[i])).astype(theano.config.floatX), - name='cell0_%d'%i) - - linear = Linear(input_dim=dims[i-1], output_dim=4*dims[i], - name="lstm_in_%d"%i) - bricks.append(linear) - inter = linear.apply(hidden[-1]) - - if i2h_all and i > 1: - linear2 = Linear(input_dim=dims[0], output_dim=4*dims[i], - name="lstm_in0_%d"%i) - bricks.append(linear2) - inter = inter + linear2.apply(hidden[0]) - inter.name = 'inter_bis_%d'%i - - lstm = LSTM(dim=dims[i], activation=activation_function, - name="lstm_rec_%d"%i) - bricks.append(lstm) - - new_hidden, new_cells = lstm.apply(inter, - states=init_state, - cells=init_cell) - states.append((init_state, new_hidden[-1, :, :])) - states.append((init_cell, new_cells[-1, :, :])) - - hidden.append(new_hidden) - - hidden = [s.dimshuffle(1, 0, 2) for s in hidden] - - # Construct output from hidden states - out = None - layers = zip(dims, hidden)[1:] - if not h2o_all: - layers = [layers[-1]] - for i, (dim, state) in enumerate(layers): - top_linear = Linear(input_dim=dim, output_dim=io_dim, - name='top_linear_%d'%i) - bricks.append(top_linear) - out_i = top_linear.apply(state) - out = out_i if out is None else out + out_i - out.name = 'out_part_%d'%i - - # Do prediction and calculate cost - pred = out.argmax(axis=2) - - cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(), - out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1), - io_dim))) - error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean() - - # Initialize all bricks - for brick in bricks: - brick.weights_init = IsotropicGaussian(0.1) - brick.biases_init = Constant(0.) 
- brick.initialize() - - # Apply noise and dropout - cg = ComputationGraph([cost, error_rate]) - if w_noise_std > 0: - noise_vars = VariableFilter(roles=[WEIGHT])(cg) - cg = apply_noise(cg, noise_vars, w_noise_std) - if i_dropout > 0: - cg = apply_dropout(cg, hidden[1:], i_dropout) - [cost_reg, error_rate_reg] = cg.outputs - - # add l1 regularization - if l1_reg > 0: - l1pen = sum(abs(st).mean() for st in hidden[1:]) - cost_reg = cost_reg + l1_reg * l1pen - - self.cost = cost - self.error_rate = error_rate - self.cost_reg = cost_reg - self.error_rate_reg = error_rate_reg - self.out = out - self.pred = pred - - self.states = states - diff --git a/model/__init__.py b/model/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/model/cchlstm.py b/model/cchlstm.py new file mode 100644 index 0000000..78c9a1f --- /dev/null +++ b/model/cchlstm.py @@ -0,0 +1,248 @@ +import theano +from theano import tensor +import numpy + +from theano.tensor.shared_randomstreams import RandomStreams + +from blocks.algorithms import Momentum, AdaDelta, RMSProp +from blocks.bricks import Tanh, Softmax, Linear, MLP, Initializable +from blocks.bricks.lookup import LookupTable +from blocks.bricks.recurrent import LSTM, BaseRecurrent, recurrent +from blocks.initialization import IsotropicGaussian, Constant + +from blocks.filter import VariableFilter +from blocks.roles import WEIGHT +from blocks.graph import ComputationGraph, apply_noise, apply_dropout + +rng = RandomStreams() + +# An epoch will be composed of 'num_seqs' sequences of len 'seq_len' +# divided in chunks of lengh 'seq_div_size' +num_seqs = 50 +seq_len = 2000 +seq_div_size = 100 + +io_dim = 256 + +# Model structure +hidden_dims = [512, 512, 512, 512, 512] +activation_function = Tanh() + +cond_cert = [0.5, 0.5, 0.5, 0.5] +block_prob = [0.1, 0.1, 0.1, 0.1] + +# Regularization +w_noise_std = 0.02 + +# Step rule +step_rule = 'adadelta' +learning_rate = 0.1 +momentum = 0.9 + + +param_desc = '%s(x%sp%s)-n%s-%dx%d(%d)-%s' % ( + repr(hidden_dims), repr(cond_cert), repr(block_prob), + repr(w_noise_std), + num_seqs, seq_len, seq_div_size, + step_rule + ) + +save_freq = 5 +on_irc = False + +# parameters for sample generation +sample_len = 200 +sample_temperature = 0.7 #0.5 +sample_freq = 1 + +if step_rule == 'rmsprop': + step_rule = RMSProp() +elif step_rule == 'adadelta': + step_rule = AdaDelta() +elif step_rule == 'momentum': + step_rule = Momentum(learning_rate=learning_rate, momentum=momentum) +else: + assert(False) + +class CCHLSTM(BaseRecurrent, Initializable): + def __init__(self, io_dim, hidden_dims, cond_cert, activation=None, **kwargs): + super(CCHLSTM, self).__init__(**kwargs) + + self.cond_cert = cond_cert + + self.io_dim = io_dim + self.hidden_dims = hidden_dims + + self.children = [] + self.layers = [] + + self.softmax = Softmax() + self.children.append(self.softmax) + + for i, d in enumerate(hidden_dims): + i0 = LookupTable(length=io_dim, + dim=4*d, + name='i0-%d'%i) + self.children.append(i0) + + if i > 0: + i1 = Linear(input_dim=hidden_dims[i-1], + output_dim=4*d, + name='i1-%d'%i) + self.children.append(i1) + else: + i1 = None + + lstm = LSTM(dim=d, activation=activation, + name='LSTM-%d'%i) + self.children.append(lstm) + + o = Linear(input_dim=d, + output_dim=io_dim, + name='o-%d'%i) + self.children.append(o) + + self.layers.append((i0, i1, lstm, o)) + + + @recurrent(contexts=[]) + def apply(self, inputs, **kwargs): + + l0i, _, l0l, l0o = self.layers[0] + l0iv = l0i.apply(inputs) + new_states0, new_cells0 = 
l0l.apply(states=kwargs['states0'], + cells=kwargs['cells0'], + inputs=l0iv, + iterate=False) + l0ov = l0o.apply(new_states0) + + pos = l0ov + ps = new_states0 + + passnext = tensor.ones((inputs.shape[0],)) + out_sc = [new_states0, new_cells0, passnext] + + for i, (cch, (i0, i1, l, o)) in enumerate(zip(self.cond_cert, self.layers[1:])): + pop = self.softmax.apply(pos) + best = pop.max(axis=1) + passnext = passnext * tensor.le(best, cch) * kwargs['pass%d'%i] + + i0v = i0.apply(inputs) + i1v = i1.apply(ps) + + prev_states = kwargs['states%d'%i] + prev_cells = kwargs['cells%d'%i] + new_states, new_cells = l.apply(inputs=i0v + i1v, + states=prev_states, + cells=prev_cells, + iterate=False) + new_states = tensor.switch(passnext[:, None], new_states, prev_states) + new_cells = tensor.switch(passnext[:, None], new_cells, prev_cells) + out_sc += [new_states, new_cells, passnext] + + ov = o.apply(new_states) + pos = tensor.switch(passnext[:, None], pos + ov, pos) + ps = new_states + + return [pos] + out_sc + + def get_dim(self, name): + dims = {'pred': self.io_dim} + for i, d in enumerate(self.hidden_dims): + dims['states%d'%i] = dims['cells%d'%i] = d + if name in dims: + return dims[name] + return super(CCHLSTM, self).get_dim(name) + + @apply.property('sequences') + def apply_sequences(self): + return ['inputs'] + ['pass%d'%i for i in range(len(self.hidden_dims)-1)] + + @apply.property('states') + def apply_states(self): + ret = [] + for i in range(len(self.hidden_dims)): + ret += ['states%d'%i, 'cells%d'%i] + return ret + + @apply.property('outputs') + def apply_outputs(self): + ret = ['pred'] + for i in range(len(self.hidden_dims)): + ret += ['states%d'%i, 'cells%d'%i, 'active%d'%i] + return ret + + +class Model(): + def __init__(self): + inp = tensor.lmatrix('bytes') + + # Make state vars + state_vars = {} + for i, d in enumerate(hidden_dims): + state_vars['states%d'%i] = theano.shared(numpy.zeros((num_seqs, d)) + .astype(theano.config.floatX), + name='states%d'%i) + state_vars['cells%d'%i] = theano.shared(numpy.zeros((num_seqs, d)) + .astype(theano.config.floatX), + name='cells%d'%i) + # Construct brick + cchlstm = CCHLSTM(io_dim=io_dim, + hidden_dims=hidden_dims, + cond_cert=cond_cert, + activation=activation_function) + + # Random pass + passdict = {} + for i, p in enumerate(block_prob): + passdict['pass%d'%i] = rng.binomial(size=(inp.shape[1], inp.shape[0]), p=1-p) + + # Apply it + outs = cchlstm.apply(inputs=inp.dimshuffle(1, 0), + **dict(state_vars.items() + passdict.items())) + states = [] + active_prop = [] + for i in range(len(hidden_dims)): + states.append((state_vars['states%d'%i], outs[3*i+1][-1, :, :])) + states.append((state_vars['cells%d'%i], outs[3*i+2][-1, :, :])) + active_prop.append(outs[3*i+3].mean()) + active_prop[-1].name = 'active_prop_%d'%i + + out = outs[0].dimshuffle(1, 0, 2) + + # Do prediction and calculate cost + pred = out.argmax(axis=2) + + cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(), + out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1), + io_dim))) + error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean() + + # Initialize all bricks + for brick in [cchlstm]: + brick.weights_init = IsotropicGaussian(0.1) + brick.biases_init = Constant(0.) 
+ brick.initialize() + + # Apply noise and dropoutvars + cg = ComputationGraph([cost, error_rate]) + if w_noise_std > 0: + noise_vars = VariableFilter(roles=[WEIGHT])(cg) + cg = apply_noise(cg, noise_vars, w_noise_std) + [cost_reg, error_rate_reg] = cg.outputs + + self.sgd_cost = cost_reg + self.monitor_vars = [[cost, cost_reg], + [error_rate, error_rate_reg], + active_prop] + + cost.name = 'cost' + cost_reg.name = 'cost_reg' + error_rate.name = 'error_rate' + error_rate_reg.name = 'error_rate_reg' + + self.out = out + self.pred = pred + + self.states = states + diff --git a/model/dgsrnn.py b/model/dgsrnn.py new file mode 100644 index 0000000..d6d93ff --- /dev/null +++ b/model/dgsrnn.py @@ -0,0 +1,205 @@ +import theano +from theano import tensor +import numpy + +from theano.tensor.shared_randomstreams import RandomStreams + +from blocks.algorithms import Momentum, AdaDelta, RMSProp, Adam +from blocks.bricks import Activation, Tanh, Logistic, Softmax, Rectifier, Linear, MLP, Initializable, Identity +from blocks.bricks.base import application, lazy +from blocks.bricks.recurrent import BaseRecurrent, recurrent +from blocks.initialization import IsotropicGaussian, Constant +from blocks.utils import shared_floatx_zeros + +from blocks.filter import VariableFilter +from blocks.roles import WEIGHT, INITIAL_STATE, add_role +from blocks.graph import ComputationGraph, apply_noise, apply_dropout + +rng = RandomStreams() + +class TRectifier(Activation): + @application(inputs=['input_'], outputs=['output']) + def apply(self, input_): + return tensor.switch(input_ > 1, input_, 0) + +# An epoch will be composed of 'num_seqs' sequences of len 'seq_len' +# divided in chunks of lengh 'seq_div_size' +num_seqs = 10 +seq_len = 1000 +seq_div_size = 5 + +io_dim = 256 + +state_dim = 1024 +activation = Tanh() +transition_hidden = [1024, 1024] +transition_hidden_activations = [Rectifier(), Rectifier()] + +output_hidden = [] +output_hidden_activations = [] + +weight_noise_std = 0.05 + +output_h_dropout = 0.0 +drop_update = 0.0 + +l1_state = 0.00 +l1_reset = 0.1 + +step_rule = 'momentum' +learning_rate = 0.001 +momentum = 0.99 + + +param_desc = '%s,t%s,o%s-n%s-d%s,%s-L1:%s,%s-%s' % ( + repr(state_dim), repr(transition_hidden), repr(output_hidden), + repr(weight_noise_std), + repr(output_h_dropout), repr(drop_update), + repr(l1_state), repr(l1_reset), + step_rule + ) + +save_freq = 5 +on_irc = False + +# parameters for sample generation +sample_len = 100 +sample_temperature = 0.7 #0.5 +sample_freq = 1 + +if step_rule == 'rmsprop': + step_rule = RMSProp() +elif step_rule == 'adadelta': + step_rule = AdaDelta() +elif step_rule == 'momentum': + step_rule = Momentum(learning_rate=learning_rate, momentum=momentum) +elif step_rule == 'adam': + step_rule = Adam() +else: + assert(False) + + + +class DGSRNN(BaseRecurrent, Initializable): + def __init__(self, input_dim, state_dim, act, transition_h, tr_h_activations, **kwargs): + super(DGSRNN, self).__init__(**kwargs) + + self.input_dim = input_dim + self.state_dim = state_dim + + logistic = Logistic() + + self.inter = MLP(dims=[input_dim + state_dim] + transition_h, + activations=tr_h_activations, + name='inter') + self.reset = MLP(dims=[transition_h[-1], state_dim], + activations=[logistic], + name='reset') + self.update = MLP(dims=[transition_h[-1], state_dim], + activations=[act], + name='update') + + self.children = [self.inter, self.reset, self.update, logistic, act] + tr_h_activations + + # init state + self.params = [shared_floatx_zeros((state_dim,), name='init_state')] 
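        # 'init_state' is a single learned vector of length state_dim;
        # initial_state() below repeats it along the batch axis with tensor.repeat,
        # so every sequence starts from the same trainable state. In apply(), the
        # sigmoid reset gate (optionally masked by drop_updates_mask) interpolates
        # between keeping the previous state and writing the proposed update:
        # new_state = state * (1 - reset_v) + reset_v * update_v.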
+ add_role(self.params[0], INITIAL_STATE) + + def get_dim(self, name): + if name == 'state': + return self.state_dim + return super(GFGRU, self).get_dim(name) + + @recurrent(sequences=['inputs', 'drop_updates_mask'], states=['state'], + outputs=['state', 'reset'], contexts=[]) + def apply(self, inputs=None, drop_updates_mask=None, state=None): + inter_v = self.inter.apply(tensor.concatenate([inputs, state], axis=1)) + reset_v = self.reset.apply(inter_v) + update_v = self.update.apply(inter_v) + + reset_v = reset_v * drop_updates_mask + + new_state = state * (1 - reset_v) + reset_v * update_v + + return new_state, reset_v + + @application + def initial_state(self, state_name, batch_size, *args, **kwargs): + return tensor.repeat(self.params[0][None, :], + repeats=batch_size, + axis=0) + + +class Model(): + def __init__(self): + inp = tensor.lmatrix('bytes') + + in_onehot = tensor.eq(tensor.arange(io_dim, dtype='int16').reshape((1, 1, io_dim)), + inp[:, :, None]) + in_onehot.name = 'in_onehot' + + dgsrnn = DGSRNN(input_dim=io_dim, + state_dim=state_dim, + act=activation, + transition_h=transition_hidden, + tr_h_activations=transition_hidden_activations, + name='dgsrnn') + + prev_state = theano.shared(numpy.zeros((num_seqs, state_dim)).astype(theano.config.floatX), + name='state') + + states, resets = dgsrnn.apply(inputs=in_onehot.dimshuffle(1, 0, 2), + drop_updates_mask=rng.binomial(size=(inp.shape[1], inp.shape[0], state_dim), + p=1-drop_update, + dtype=theano.config.floatX), + state=prev_state) + states = states.dimshuffle(1, 0, 2) + resets = resets.dimshuffle(1, 0, 2) + + self.states = [(prev_state, states[:, -1, :])] + + out_mlp = MLP(dims=[state_dim] + output_hidden + [io_dim], + activations=output_hidden_activations + [None], + name='output_mlp') + states_sh = states.reshape((inp.shape[0]*inp.shape[1], state_dim)) + out = out_mlp.apply(states_sh).reshape((inp.shape[0], inp.shape[1], io_dim)) + + + # Do prediction and calculate cost + pred = out.argmax(axis=2) + + cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(), + out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1), + io_dim))) + error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean() + + # Initialize all bricks + for brick in [dgsrnn, out_mlp]: + brick.weights_init = IsotropicGaussian(0.001) + brick.biases_init = Constant(0.0) + brick.initialize() + + # Apply noise and dropout + cg = ComputationGraph([cost, error_rate, states, resets]) + if weight_noise_std > 0: + noise_vars = VariableFilter(roles=[WEIGHT])(cg) + cg = apply_noise(cg, noise_vars, weight_noise_std) + if output_h_dropout > 0: + dv = VariableFilter(name='input_', bricks=out_mlp.linear_transformations)(cg) + print "Output H dropout on", len(dv), "vars" + cg = apply_dropout(cg, dv, output_h_dropout) + [cost_reg, error_rate_reg, states, resets] = cg.outputs + + if l1_state > 0: + cost_reg = cost_reg + l1_state * abs(states).mean() + if l1_reset > 0: + cost_reg = cost_reg + l1_reset * abs(resets).mean() + + self.cost = cost + self.error_rate = error_rate + self.cost_reg = cost_reg + self.error_rate_reg = error_rate_reg + self.out = out + self.pred = pred + + diff --git a/model/gfgru.py b/model/gfgru.py new file mode 100644 index 0000000..29d4398 --- /dev/null +++ b/model/gfgru.py @@ -0,0 +1,294 @@ +import theano +from theano import tensor +import numpy + +from blocks.algorithms import Momentum, AdaDelta, RMSProp, Adam +from blocks.bricks import Activation, Tanh, Logistic, Softmax, Rectifier, Linear, MLP, Initializable, Identity +from 
blocks.bricks.base import application, lazy +from blocks.bricks.recurrent import BaseRecurrent, recurrent +from blocks.initialization import IsotropicGaussian, Constant +from blocks.utils import shared_floatx_zeros + +from blocks.filter import VariableFilter +from blocks.roles import WEIGHT, INITIAL_STATE, add_role +from blocks.graph import ComputationGraph, apply_noise, apply_dropout + +class TRectifier(Activation): + @application(inputs=['input_'], outputs=['output']) + def apply(self, input_): + return tensor.switch(input_ > 1, input_, 0) + +# An epoch will be composed of 'num_seqs' sequences of len 'seq_len' +# divided in chunks of lengh 'seq_div_size' +num_seqs = 10 +seq_len = 2000 +seq_div_size = 100 + +io_dim = 256 + +recurrent_blocks = [ +# (256, Tanh(), [2048], [Rectifier()]), +# (512, Rectifier(), [1024], [Rectifier()]), + (512, Tanh(), [1024], [Rectifier()]), + (512, Tanh(), [1024], [Rectifier()]), +# (2, Tanh(), [2], [Rectifier()]), +# (2, Tanh(), [], []), + ] + +control_hidden = [1024] +control_hidden_activations = [Rectifier()] + +output_hidden = [1024] +output_hidden_activations = [Rectifier()] + +weight_noise_std = 0.05 + +recurrent_h_dropout = 0 +control_h_dropout = 0 +output_h_dropout = 0.5 + +step_rule = 'adam' +learning_rate = 0.1 +momentum = 0.99 + + +param_desc = '%s,c%s,o%s-n%s-d%s,%s,%s-%s' % ( + repr(map(lambda (a, b, c, d): (a, c), recurrent_blocks)), + repr(control_hidden), repr(output_hidden), + repr(weight_noise_std), + repr(recurrent_h_dropout), repr(control_h_dropout), repr(output_h_dropout), + step_rule + ) + +save_freq = 5 +on_irc = False + +# parameters for sample generation +sample_len = 100 +sample_temperature = 0.7 #0.5 +sample_freq = 1 + +if step_rule == 'rmsprop': + step_rule = RMSProp() +elif step_rule == 'adadelta': + step_rule = AdaDelta() +elif step_rule == 'momentum': + step_rule = Momentum(learning_rate=learning_rate, momentum=momentum) +elif step_rule == 'adam': + step_rule = Adam() +else: + assert(False) + + + + +class GFGRU(BaseRecurrent, Initializable): + def __init__(self, input_dim, recurrent_blocks, control_hidden, control_hidden_activations, **kwargs): + super(GFGRU, self).__init__(**kwargs) + + self.input_dim = input_dim + self.recurrent_blocks = recurrent_blocks + self.control_hidden = control_hidden + self.control_hidden_activations = control_hidden_activations + + # setup children + self.children = control_hidden_activations + for (_, a, _, b) in recurrent_blocks: + self.children.append(a) + for c in b: + self.children.append(c) + + logistic = Logistic() + self.children.append(logistic) + + self.hidden_total_dim = sum(x for (x, _, _, _) in self.recurrent_blocks) + + # control block + self.cblocklen = len(self.recurrent_blocks) + 2 + + control_idim = self.hidden_total_dim + self.input_dim + control_odim = len(self.recurrent_blocks) * self.cblocklen + self.control = MLP(dims=[control_idim] + self.control_hidden + [control_odim], + activations=self.control_hidden_activations + [logistic], + name='control') + + self.children.append(self.control) + + # recurrent blocks + self.blocks = [] + self.params = [] + for i, (dim, act, hdim, hact) in enumerate(self.recurrent_blocks): + idim = self.input_dim + self.hidden_total_dim + if i > 0: + idim = idim + self.recurrent_blocks[i-1][0] + + idims = [idim] + hdim + if hdim == []: + inter = Identity() + else: + inter = MLP(dims=idims, activations=hact, name='inter%d'%i) + + rgate = MLP(dims=[idims[-1], dim], activations=[logistic], name='rgate%d'%i) + nstate = MLP(dims=[idims[-1], dim], 
activations=[act], name='nstate%d'%i) + + for brick in [inter, rgate, nstate]: + self.children.append(brick) + self.blocks.append((inter, rgate, nstate)) + + # init state zeros + self.init_states_names = [] + self.init_states_dict = {} + self.params = [] + + for i, (dim, _, _, _) in enumerate(self.recurrent_blocks): + name = 'init_state_%d'%i + svar = shared_floatx_zeros((dim,), name=name) + add_role(svar, INITIAL_STATE) + + self.init_states_names.append(name) + self.init_states_dict[name] = svar + self.params.append(svar) + + def get_dim(self, name): + if name in self.init_states_dict: + return self.init_states_dict[name].shape.eval() + return super(GFGRU, self).get_dim(name) + + def recurrent_h_dropout_vars(self, cg): + ret = [] + for (inter, rgate, nstate) in self.blocks: + ret = ret + VariableFilter(name='input_', + bricks=inter.linear_transformations + rgate.linear_transformations + nstate.linear_transformations + )(cg) + return ret + + def control_h_dropout_vars(self, cg): + return VariableFilter(name='input_', bricks=self.control.linear_transformations)(cg) + + @recurrent(sequences=['inputs'], contexts=[]) + def apply(self, inputs=None, **kwargs): + states = [kwargs[i] for i in self.init_states_names] + concat_states = tensor.concatenate(states, axis=1) + + concat_input_states = tensor.concatenate([inputs, concat_states], axis=1) + + control_v = self.control.apply(concat_input_states) + + new_states = [] + for i, (inter, rgate, nstate) in enumerate(self.blocks): + controls = control_v[:, i * self.cblocklen:(i+1) * self.cblocklen] + r_inputs = tensor.concatenate([s * controls[:, j][:, None] for j, s in enumerate(states)], axis=1) + + more_inputs = [inputs] + if i > 0: + more_inputs.append(new_states[-1]) + inter_inputs = tensor.concatenate([r_inputs] + more_inputs, axis=1) + + inter_v = inter.apply(inter_inputs) + + rgate_v = rgate.apply(inter_v) + nstate_v = nstate.apply(inter_v) + + rctl = controls[:, -1][:, None] * rgate_v + uctl = controls[:, -2][:, None] + nstate_v = uctl * nstate_v + (1 - rctl) * states[i] + + new_states.append(nstate_v) + + return new_states + + @apply.property('states') + def apply_states(self): + return self.init_states_names + + @apply.property('outputs') + def apply_outputs(self): + return self.init_states_names + + @application + def initial_state(self, state_name, batch_size, *args, **kwargs): + return tensor.repeat(self.init_states_dict[state_name][None, :], + repeats=batch_size, + axis=0) + + + +class Model(): + def __init__(self): + inp = tensor.lmatrix('bytes') + + in_onehot = tensor.eq(tensor.arange(io_dim, dtype='int16').reshape((1, 1, io_dim)), + inp[:, :, None]) + in_onehot.name = 'in_onehot' + + gfgru = GFGRU(input_dim=io_dim, + recurrent_blocks=recurrent_blocks, + control_hidden=control_hidden, + control_hidden_activations=control_hidden_activations) + + hidden_total_dim = sum(x for (x, _, _, _) in recurrent_blocks) + + prev_states_dict = {} + for i, (dim, _, _, _) in enumerate(recurrent_blocks): + prev_state = theano.shared(numpy.zeros((num_seqs, dim)).astype(theano.config.floatX), + name='states_save') + prev_states_dict['init_state_%d'%i] = prev_state + + states = [x.dimshuffle(1, 0, 2) for x in gfgru.apply(in_onehot.dimshuffle(1, 0, 2), **prev_states_dict)] + + self.states = [] + for i, _ in enumerate(recurrent_blocks): + self.states.append((prev_states_dict['init_state_%d'%i], states[i][:, -1, :])) + + states_concat = tensor.concatenate(states, axis=2) + + out_mlp = MLP(dims=[hidden_total_dim] + output_hidden + [io_dim], + 
activations=output_hidden_activations + [None], + name='output_mlp') + states_sh = states_concat.reshape((inp.shape[0]*inp.shape[1], hidden_total_dim)) + out = out_mlp.apply(states_sh).reshape((inp.shape[0], inp.shape[1], io_dim)) + + + + # Do prediction and calculate cost + pred = out.argmax(axis=2) + + cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(), + out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1), + io_dim))) + error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean() + + # Initialize all bricks + for brick in [gfgru, out_mlp]: + brick.weights_init = IsotropicGaussian(0.01) + brick.biases_init = Constant(0.001) + brick.initialize() + + # Apply noise and dropout + cg = ComputationGraph([cost, error_rate]) + if weight_noise_std > 0: + noise_vars = VariableFilter(roles=[WEIGHT])(cg) + cg = apply_noise(cg, noise_vars, weight_noise_std) + if recurrent_h_dropout > 0: + dv = gfgru.recurrent_h_dropout_vars(cg) + print "Recurrent H dropout on", len(dv), "vars" + cg = apply_dropout(cg, dv, recurrent_h_dropout) + if control_h_dropout > 0: + dv = gfgru.control_h_dropout_vars(cg) + print "Control H dropout on", len(dv), "vars" + cg = apply_dropout(cg, dv, control_h_dropout) + if output_h_dropout > 0: + dv = VariableFilter(name='input_', bricks=out_mlp.linear_transformations)(cg) + print "Output H dropout on", len(dv), "vars" + cg = apply_dropout(cg, dv, output_h_dropout) + [cost_reg, error_rate_reg] = cg.outputs + + + self.cost = cost + self.error_rate = error_rate + self.cost_reg = cost_reg + self.error_rate_reg = error_rate_reg + self.out = out + self.pred = pred + + diff --git a/model/hpc_lstm.py b/model/hpc_lstm.py new file mode 100644 index 0000000..8c9cd90 --- /dev/null +++ b/model/hpc_lstm.py @@ -0,0 +1,115 @@ +# HPC-LSTM : Hierarchical Predictive Coding LSTM + +import theano +from theano import tensor +import numpy + +from blocks.bricks import Softmax, Tanh, Logistic, Linear, MLP, Identity +from blocks.bricks.recurrent import LSTM +from blocks.initialization import IsotropicGaussian, Constant + +from blocks.filter import VariableFilter +from blocks.roles import WEIGHT +from blocks.graph import ComputationGraph, apply_noise, apply_dropout + + +class Model(): + def __init__(self, config): + inp = tensor.imatrix('bytes') + + in_onehot = tensor.eq(tensor.arange(config.io_dim, dtype='int16').reshape((1, 1, config.io_dim)), + inp[:, :, None]) + in_onehot.name = 'in_onehot' + + bricks = [] + states = [] + + # Construct predictive LSTM hierarchy + hidden = [] + costs = [] + next_target = in_onehot.dimshuffle(1, 0, 2) + for i, (hdim, cf, q) in enumerate(zip(config.hidden_dims, config.cost_factors, config.hidden_q)): + init_state = theano.shared(numpy.zeros((config.num_seqs, hdim)).astype(theano.config.floatX), + name='st0_%d'%i) + init_cell = theano.shared(numpy.zeros((config.num_seqs, hdim)).astype(theano.config.floatX), + name='cell0_%d'%i) + + linear = Linear(input_dim=config.io_dim, output_dim=4*hdim, + name="lstm_in_%d"%i) + lstm = LSTM(dim=hdim, activation=config.activation_function, + name="lstm_rec_%d"%i) + linear2 = Linear(input_dim=hdim, output_dim=config.io_dim, name='lstm_out_%d'%i) + tanh = Tanh('lstm_out_tanh_%d'%i) + bricks += [linear, lstm, linear2, tanh] + + inter = linear.apply(theano.gradient.disconnected_grad(next_target)) + new_hidden, new_cells = lstm.apply(inter, + states=init_state, + cells=init_cell) + states.append((init_state, new_hidden[-1, :, :])) + states.append((init_cell, new_cells[-1, :, :])) + + hidden += 
[tensor.concatenate([init_state[None,:,:], new_hidden[:-1,:,:]],axis=0)] + pred = tanh.apply(linear2.apply(hidden[-1])) + diff = next_target - pred + costs += [numpy.float32(cf) * ((abs(next_target)+q)*(diff**2)).sum(axis=2).mean()] + next_target = diff + + + # Construct output from hidden states + hidden = [s.dimshuffle(1, 0, 2) for s in hidden] + + out_parts = [] + out_dims = config.out_hidden + [config.io_dim] + for i, (dim, state) in enumerate(zip(config.hidden_dims, hidden)): + pred_linear = Linear(input_dim=dim, output_dim=out_dims[0], + name='pred_linear_%d'%i) + bricks.append(pred_linear) + out_parts.append(pred_linear.apply(theano.gradient.disconnected_grad(state))) + + # Do prediction and calculate cost + out = sum(out_parts) + + if len(out_dims) > 1: + out = config.out_hidden_act[0](name='out_act0').apply(out) + mlp = MLP(dims=out_dims, + activations=[x(name='out_act%d'%i) for i, x in enumerate(config.out_hidden_act[1:])] + +[Identity()], + name='out_mlp') + bricks.append(mlp) + out = mlp.apply(out.reshape((inp.shape[0]*inp.shape[1],-1))).reshape((inp.shape[0],inp.shape[1],-1)) + + pred = out.argmax(axis=2) + + cost = Softmax().categorical_cross_entropy(inp.flatten(), + out.reshape((inp.shape[0]*inp.shape[1], + config.io_dim))).mean() + error_rate = tensor.neq(inp.flatten(), pred.flatten()).mean() + + sgd_cost = cost + sum(costs) + + # Initialize all bricks + for brick in bricks: + brick.weights_init = IsotropicGaussian(0.1) + brick.biases_init = Constant(0.) + brick.initialize() + + + # put stuff into self that is usefull for training or extensions + self.sgd_cost = sgd_cost + + sgd_cost.name = 'sgd_cost' + for i in range(len(costs)): + costs[i].name = 'pred_cost_%d'%i + cost.name = 'cost' + error_rate.name = 'error_rate' + self.monitor_vars = [costs, [cost], + [error_rate]] + + self.out = out + self.pred = pred + + self.states = states + + +# vim: set sts=4 ts=4 sw=4 tw=0 et : diff --git a/model/lstm.py b/model/lstm.py new file mode 100644 index 0000000..abd44e0 --- /dev/null +++ b/model/lstm.py @@ -0,0 +1,119 @@ +import theano +from theano import tensor +import numpy + +from blocks.bricks import Softmax, Linear +from blocks.bricks.recurrent import LSTM +from blocks.initialization import IsotropicGaussian, Constant + +from blocks.filter import VariableFilter +from blocks.roles import WEIGHT +from blocks.graph import ComputationGraph, apply_noise, apply_dropout + + +class Model(): + def __init__(self, config): + inp = tensor.imatrix('bytes') + + in_onehot = tensor.eq(tensor.arange(config.io_dim, dtype='int16').reshape((1, 1, config.io_dim)), + inp[:, :, None]) + in_onehot.name = 'in_onehot' + + # Construct hidden states + dims = [config.io_dim] + config.hidden_dims + hidden = [in_onehot.dimshuffle(1, 0, 2)] + bricks = [] + states = [] + for i in xrange(1, len(dims)): + init_state = theano.shared(numpy.zeros((config.num_seqs, dims[i])).astype(theano.config.floatX), + name='st0_%d'%i) + init_cell = theano.shared(numpy.zeros((config.num_seqs, dims[i])).astype(theano.config.floatX), + name='cell0_%d'%i) + + linear = Linear(input_dim=dims[i-1], output_dim=4*dims[i], + name="lstm_in_%d"%i) + bricks.append(linear) + inter = linear.apply(hidden[-1]) + + if config.i2h_all and i > 1: + linear2 = Linear(input_dim=dims[0], output_dim=4*dims[i], + name="lstm_in0_%d"%i) + bricks.append(linear2) + inter = inter + linear2.apply(hidden[0]) + inter.name = 'inter_bis_%d'%i + + lstm = LSTM(dim=dims[i], activation=config.activation_function, + name="lstm_rec_%d"%i) + bricks.append(lstm) + + 
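+            # lstm.apply() iterates over the time axis of `inter` (time x batch
+            # x 4*dim).  init_state and init_cell are shared variables holding
+            # the state left over from the previous chunk of the same
+            # sequences; the (shared variable, last step) pairs collected in
+            # `states` become updates in train.py, so state is carried across
+            # chunks and reset by ResetStates after each epoch.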
new_hidden, new_cells = lstm.apply(inter, + states=init_state, + cells=init_cell) + states.append((init_state, new_hidden[-1, :, :])) + states.append((init_cell, new_cells[-1, :, :])) + + hidden.append(new_hidden) + + hidden = [s.dimshuffle(1, 0, 2) for s in hidden] + + # Construct output from hidden states + out = None + layers = zip(dims, hidden)[1:] + if not config.h2o_all: + layers = [layers[-1]] + for i, (dim, state) in enumerate(layers): + top_linear = Linear(input_dim=dim, output_dim=config.io_dim, + name='top_linear_%d'%i) + bricks.append(top_linear) + out_i = top_linear.apply(state) + out = out_i if out is None else out + out_i + out.name = 'out_part_%d'%i + + # Do prediction and calculate cost + pred = out.argmax(axis=2) + + cost = Softmax().categorical_cross_entropy(inp[:, 1:].flatten(), + out[:, :-1, :].reshape((inp.shape[0]*(inp.shape[1]-1), + config.io_dim))).mean() + error_rate = tensor.neq(inp[:, 1:].flatten(), pred[:, :-1].flatten()).mean() + + # Initialize all bricks + for brick in bricks: + brick.weights_init = IsotropicGaussian(0.1) + brick.biases_init = Constant(0.) + brick.initialize() + + # Apply noise and dropout + cg = ComputationGraph([cost, error_rate]) + if config.w_noise_std > 0: + noise_vars = VariableFilter(roles=[WEIGHT])(cg) + cg = apply_noise(cg, noise_vars, config.w_noise_std) + if config.i_dropout > 0: + cg = apply_dropout(cg, hidden[1:], config.i_dropout) + [cost_reg, error_rate_reg] = cg.outputs + + # add l1 regularization + if config.l1_reg > 0: + l1pen = sum(abs(st).mean() for st in hidden[1:]) + cost_reg = cost_reg + config.l1_reg * l1pen + + cost_reg += 1e-10 # so that it is not the same Theano variable + error_rate_reg += 1e-10 + + # put stuff into self that is usefull for training or extensions + self.sgd_cost = cost_reg + + cost.name = 'cost' + cost_reg.name = 'cost_reg' + error_rate.name = 'error_rate' + error_rate_reg.name = 'error_rate_reg' + self.monitor_vars = [[cost, cost_reg], + [error_rate, error_rate_reg]] + + self.out = out + self.pred = pred + + self.states = states + + +# vim: set sts=4 ts=4 sw=4 tw=0 et : diff --git a/train.py b/train.py index 58bff1e..09555f2 100755 --- a/train.py +++ b/train.py @@ -1,58 +1,37 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 import logging -import numpy import sys import importlib -from contextlib import closing +logging.basicConfig(level='INFO') +logger = logging.getLogger(__name__) import theano -from theano import tensor -from theano.tensor.shared_randomstreams import RandomStreams -from blocks.serialization import load_parameter_values, secure_dump, BRICK_DELIMITER -from blocks.extensions import Printing, SimpleExtension +from blocks.extensions import Printing, SimpleExtension, FinishAfter, ProgressBar from blocks.extensions.monitoring import DataStreamMonitoring, TrainingDataMonitoring -from blocks.extensions.saveload import Checkpoint, Load + from blocks.graph import ComputationGraph from blocks.main_loop import MainLoop from blocks.model import Model -from blocks.algorithms import GradientDescent, StepRule, CompositeRule +from blocks.algorithms import GradientDescent try: from blocks.extras.extensions.plot import Plot plot_avail = True except ImportError: plot_avail = False + logger.warning('Plotting extension not available') + import datastream from paramsaveload import SaveLoadParams from gentext import GenText -from ircext import IRCClientExt -logging.basicConfig(level='INFO') -logger = logging.getLogger(__name__) sys.setrecursionlimit(500000) -if __name__ == "__main__": - if 
len(sys.argv) != 2: - print >> sys.stderr, 'Usage: %s config' % sys.argv[0] - sys.exit(1) - model_name = sys.argv[1] - config = importlib.import_module('%s' % model_name) - - -class ElementwiseRemoveNotFinite(StepRule): - def __init__(self, scaler=0.1): - self.scaler = scaler - - def compute_step(self, param, previous_step): - not_finite = tensor.isnan(previous_step) + tensor.isinf(previous_step) - step = tensor.switch(not_finite, self.scaler * param, previous_step) - - return step, [] class ResetStates(SimpleExtension): def __init__(self, state_vars, **kwargs): @@ -65,84 +44,75 @@ class ResetStates(SimpleExtension): def do(self, which_callback, *args): self.f() -def train_model(m, train_stream, dump_path=None): +if __name__ == "__main__": + if len(sys.argv) < 2: + print >> sys.stderr, 'Usage: %s [options] config' % sys.argv[0] + sys.exit(1) + model_name = sys.argv[-1] + config = importlib.import_module('.%s' % model_name, 'config') + + # Build datastream + train_stream = datastream.setup_datastream(config.dataset, + config.num_seqs, + config.seq_len, + config.seq_div_size) - # Define the model - model = Model(m.sgd_cost) + # Build model + m = config.Model(config) - cg = ComputationGraph(m.sgd_cost) + # Train the model + cg = Model(m.sgd_cost) algorithm = GradientDescent(cost=m.sgd_cost, - step_rule=CompositeRule([ - ElementwiseRemoveNotFinite(), - config.step_rule]), + step_rule=config.step_rule, parameters=cg.parameters) algorithm.add_updates(m.states) - monitor_vars = [v for p in m.monitor_vars for v in p] + monitor_vars = list(set(v for p in m.monitor_vars for v in p)) extensions = [ TrainingDataMonitoring( monitor_vars, - prefix='train', every_n_epochs=1), - Printing(every_n_epochs=1, after_epoch=False), + prefix='train', every_n_batches=config.monitor_freq), + Printing(every_n_batches=config.monitor_freq, after_epoch=False), + ProgressBar(), ResetStates([v for v, _ in m.states], after_epoch=True) ] if plot_avail: plot_channels = [['train_' + v.name for v in p] for p in m.monitor_vars] extensions.append( - Plot(document='text_'+model_name+'_'+config.param_desc, + Plot(document='text_'+model_name, channels=plot_channels, - server_url='http://eos6:5006/', - every_n_epochs=1, after_epoch=False) + # server_url='http://localhost:5006', + every_n_batches=config.monitor_freq) ) - if config.save_freq is not None and dump_path is not None: + + if config.save_freq is not None and not '--nosave' in sys.argv: extensions.append( - SaveLoadParams(path=dump_path+'.pkl', - model=model, - before_training=True, + SaveLoadParams(path='params/%s.pkl'%model_name, + model=cg, + before_training=(not '--noload' in sys.argv), after_training=True, - after_epoch=False, - every_n_epochs=config.save_freq) + every_n_batches=config.save_freq) ) + if config.sample_freq is not None: extensions.append( - GenText(m, '\nalex\ttu crois ?\n', + GenText(m, config.sample_init, config.sample_len, config.sample_temperature, - every_n_epochs=config.sample_freq, - after_epoch=False, before_training=True) - ) - if config.on_irc: - extensions.append( - IRCClientExt(m, config.sample_temperature, - server='irc.ulminfo.fr', - port=6667, - nick='frigo', - channels=['#frigotest', '#courssysteme'], - after_batch=True) + before_training=True, + every_n_batches=config.sample_freq) ) main_loop = MainLoop( - model=model, + model=cg, data_stream=train_stream, algorithm=algorithm, extensions=extensions ) main_loop.run() + main_loop.profile.report() -if __name__ == "__main__": - # Build datastream - train_stream = 
datastream.setup_datastream('data/logcompil.txt', - config.num_seqs, - config.seq_len, - config.seq_div_size) - - # Build model - m = config.Model() - m.pred.name = 'pred' - - # Train the model - saveloc = 'model_data/%s-%s' % (model_name, config.param_desc) - train_model(m, train_stream, dump_path=saveloc) +# vim: set sts=4 ts=4 sw=4 tw=0 et : -- cgit v1.2.3
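Note on the new entry point: train.py now takes the name of a module under
config/ as its last command-line argument and reads every hyper-parameter from
it (the model class, the data stream settings, the step rule and the
monitoring/saving/sampling frequencies).  The sketch below is illustrative
only -- it lists the attributes that train.py and model/lstm.py actually read,
with placeholder values; it is not a copy of the configs shipped under config/
in this patch.

    # config/example-lstm.py -- illustrative sketch only
    from blocks.algorithms import AdaDelta
    from blocks.bricks import Tanh

    from model.lstm import Model        # train.py calls config.Model(config)

    dataset = 'data/logcompil.txt'      # passed to datastream.setup_datastream

    num_seqs = 50                       # parallel sequences per batch
    seq_len = 2000                      # total length of each sequence
    seq_div_size = 100                  # chunk length fed to the network at a time

    io_dim = 256                        # byte-level one-hot input/output
    hidden_dims = [512, 512]
    activation_function = Tanh()
    i2h_all = True                      # connect the input to every LSTM layer
    h2o_all = True                      # read the output from every LSTM layer

    w_noise_std = 0.02                  # weight noise on all WEIGHT variables
    i_dropout = 0.5                     # dropout on the hidden states
    l1_reg = 0                          # L1 penalty on the hidden states

    step_rule = AdaDelta()

    monitor_freq = 100                  # all frequencies are in batches
    save_freq = 100                     # parameters go to params/<config name>.pkl
    sample_freq = 100
    sample_init = '\nalex\ttu crois ?\n'
    sample_len = 200
    sample_temperature = 0.7

With such a module saved as config/example-lstm.py, training would be started
as "./train.py example-lstm"; "--nosave" skips the parameter dumps and
"--noload" keeps an existing params/example-lstm.pkl from being loaded before
training.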