diff --git a/pylearn2/models/dbm/layer.py b/pylearn2/models/dbm/layer.py
index 99a69e5f3c..0c57ab146c 100644
--- a/pylearn2/models/dbm/layer.py
+++ b/pylearn2/models/dbm/layer.py
@@ -1524,7 +1524,7 @@ def __init__(self, n_classes, layer_name, irange = None,
         assert isinstance(n_classes, py_integer_types)
 
         self.output_space = VectorSpace(n_classes)
-        self.b = sharedX( np.zeros((n_classes,)), name = 'softmax_b')
+        self.b = sharedX( np.zeros((n_classes,)), name=layer_name+'_b')
 
         if self.center:
             b = self.b.get_value()
@@ -1624,7 +1624,7 @@ def set_input_space(self, space):
                     idx = rng.randint(0, self.input_dim)
                     W[idx, i] = rng.randn() * self.sparse_istdev
 
-        self.W = sharedX(W, 'softmax_W' )
+        self.W = sharedX(W, self.layer_name+'_W' )
 
         self._params = [ self.b, self.W ]
 
diff --git a/pylearn2/models/maxout.py b/pylearn2/models/maxout.py
index 92a2f881b3..d6746d6188 100755
--- a/pylearn2/models/maxout.py
+++ b/pylearn2/models/maxout.py
@@ -1226,14 +1226,14 @@ def handle_pool_shape(idx):
                                           partial_sum=self.partial_sum,
                                           rng=rng)
         W, = self.transformer.get_params()
-        W.name = 'W'
+        W.name = self.layer_name + '_W'
 
         if self.tied_b:
             self.b = sharedX(np.zeros((self.detector_space.num_channels)) +
                              self.init_bias)
         else:
             self.b = sharedX(self.detector_space.get_origin() + self.init_bias)
-        self.b.name = 'b'
+        self.b.name = self.layer_name + '_b'
 
         logger.info('Input shape: {0}'.format(self.input_space.shape))
         logger.info(self.layer_name +
diff --git a/pylearn2/models/mlp.py b/pylearn2/models/mlp.py
index cfe52b5b5c..5623d0616a 100755
--- a/pylearn2/models/mlp.py
+++ b/pylearn2/models/mlp.py
@@ -1187,7 +1187,7 @@ def __init__(self, n_classes, layer_name, irange=None,
         self.output_space = VectorSpace(n_classes)
         if not no_affine:
             self.b = sharedX(np.zeros((n_classes - self.non_redundant,)),
-                             name='softmax_b')
+                             name=layer_name+'_b')
 
             if init_bias_target_marginals:
                 y = init_bias_target_marginals.y
@@ -1331,7 +1331,7 @@ def set_input_space(self, space):
                     idx = rng.randint(0, self.input_dim)
                     W[idx, i] = rng.randn()
 
-            self.W = sharedX(W, 'softmax_W')
+            self.W = sharedX(W, self.layer_name+'_W')
 
         self._params = [self.b, self.W]
 
diff --git a/pylearn2/scripts/tutorials/convolutional_network/convolutional_network.ipynb b/pylearn2/scripts/tutorials/convolutional_network/convolutional_network.ipynb
index 26e0d079ac..cccd921a27 100644
--- a/pylearn2/scripts/tutorials/convolutional_network/convolutional_network.ipynb
+++ b/pylearn2/scripts/tutorials/convolutional_network/convolutional_network.ipynb
@@ -229,7 +229,7 @@
     "                     max_kernel_norm: 1.9365\n",
     "                 }, !obj:pylearn2.models.mlp.Softmax {\n",
     "                     max_col_norm: 1.9365,\n",
-    "                     layer_name: 'y',\n",
+    "                     layer_name: 'softmax',\n",
     "                     n_classes: 10,\n",
     "                     istdev: .05\n",
     "                 }\n",
diff --git a/pylearn2/scripts/tutorials/jobman_integration.ipynb b/pylearn2/scripts/tutorials/jobman_integration.ipynb
index d5ab14d692..fe65e917ad 100644
--- a/pylearn2/scripts/tutorials/jobman_integration.ipynb
+++ b/pylearn2/scripts/tutorials/jobman_integration.ipynb
@@ -40,7 +40,7 @@
     "                 dim: 500,\n",
     "                 sparse_init: 15,\n",
     "             }, !obj:pylearn2.models.mlp.Softmax {\n",
-    "                 layer_name: 'y',\n",
+    "                 layer_name: 'softmax',\n",
     "                 n_classes: 10,\n",
     "                 irange: 0.\n",
     "             }\n",
@@ -100,7 +100,7 @@
     "                 dim: 500,\n",
     "                 sparse_init: 15,\n",
     "             }, !obj:pylearn2.models.mlp.Softmax {\n",
-    "                 layer_name: 'y',\n",
+    "                 layer_name: 'softmax',\n",
     "                 n_classes: 10,\n",
     "                 irange: 0.\n",
     "             }\n",
diff --git a/pylearn2/scripts/tutorials/multilayer_perceptron/multilayer_perceptron.ipynb b/pylearn2/scripts/tutorials/multilayer_perceptron/multilayer_perceptron.ipynb
index 395be48a9b..6edeb433ab 100644
--- a/pylearn2/scripts/tutorials/multilayer_perceptron/multilayer_perceptron.ipynb
+++ b/pylearn2/scripts/tutorials/multilayer_perceptron/multilayer_perceptron.ipynb
@@ -213,7 +213,7 @@
     "                 dim: 500,\n",
     "                 sparse_init: 15,\n",
     "             }, !obj:pylearn2.models.mlp.Softmax {\n",
-    "                 layer_name: 'y',\n",
+    "                 layer_name: 'softmax',\n",
     "                 n_classes: 10,\n",
     "                 irange: 0.\n",
     "             }\n",
@@ -25405,7 +25405,7 @@
     "                 dim: 1000,\n",
     "                 sparse_init: 15\n",
     "             }, !obj:pylearn2.models.mlp.Softmax {\n",
-    "                 layer_name: 'y',\n",
+    "                 layer_name: 'softmax',\n",
     "                 n_classes: 10,\n",
     "                 irange: 0.\n",
     "             }\n",
@@ -56634,7 +56634,7 @@
     "                 dim: 500,\n",
     "                 sparse_init: 15\n",
     "             }, !obj:pylearn2.models.mlp.Softmax {\n",
-    "                 layer_name: 'y',\n",
+    "                 layer_name: 'softmax',\n",
     "                 n_classes: 10,\n",
     "                 irange: 0.\n",
     "             }\n",
diff --git a/pylearn2/scripts/tutorials/stacked_autoencoders/stacked_autoencoders.ipynb b/pylearn2/scripts/tutorials/stacked_autoencoders/stacked_autoencoders.ipynb
index f2fe0af4df..49d9cd6590 100644
--- a/pylearn2/scripts/tutorials/stacked_autoencoders/stacked_autoencoders.ipynb
+++ b/pylearn2/scripts/tutorials/stacked_autoencoders/stacked_autoencoders.ipynb
@@ -1989,7 +1989,7 @@
     "        },\n",
     "        !obj:pylearn2.models.mlp.Softmax {\n",
     "            max_col_norm: 1.9365,\n",
-    "            layer_name: 'y',\n",
+    "            layer_name: 'softmax',\n",
     "            n_classes: 10,\n",
     "            irange: .005\n",
     "        }\n",
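Note: a minimal sketch of the naming behavior this patch introduces. The real
code goes through pylearn2.utils.sharedX, which creates a named Theano shared
variable; the NamedParam class and make_softmax_params helper below are
hypothetical stand-ins so the sketch runs without Theano installed.

    import numpy as np

    class NamedParam(object):
        """Hypothetical stand-in for a named Theano shared variable."""
        def __init__(self, value, name):
            self.value = value
            self.name = name

    def make_softmax_params(layer_name, input_dim, n_classes):
        # After this patch, parameter names derive from layer_name
        # instead of the hard-coded 'softmax_b' / 'softmax_W'.
        b = NamedParam(np.zeros((n_classes,)), name=layer_name + '_b')
        W = NamedParam(np.zeros((input_dim, n_classes)),
                       name=layer_name + '_W')
        return W, b

    # Two Softmax layers in one model now get distinct parameter names,
    # so monitoring channels and saved models are unambiguous:
    W1, b1 = make_softmax_params('softmax', 784, 10)
    W2, b2 = make_softmax_params('aux_softmax', 784, 10)
    print(b1.name, b2.name)  # -> softmax_b aux_softmax_b

The tutorial notebooks are updated accordingly, renaming the output layer
from 'y' to 'softmax' so the resulting parameter names read softmax_W and
softmax_b rather than y_W and y_b.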