|
24 | 24 | exhaustive_dropout_average,
|
25 | 25 | sampled_dropout_average, CompositeLayer,
|
26 | 26 | max_pool, mean_pool, pool_dnn,
|
27 |
| - SigmoidConvNonlinearity, ConvElemwise) |
| 27 | + SigmoidConvNonlinearity, ConvElemwise, |
| 28 | + QuantileRegression) |
28 | 29 | from pylearn2.space import VectorSpace, CompositeSpace, Conv2DSpace
|
29 | 30 | from pylearn2.utils import is_iterable, sharedX
|
30 | 31 | from pylearn2.expr.nnet import pseudoinverse_softmax_numpy
|
31 | 32 |
|
32 |
| - |
33 | 33 | class IdentityLayer(Linear):
|
34 | 34 | dropout_input_mask_value = -np.inf
|
35 | 35 |
|
@@ -1389,3 +1389,38 @@ def test_pooling_with_anon_variable():
|
1389 | 1389 | image_shape=im_shp, try_dnn=False)
|
1390 | 1390 | pool_1 = mean_pool(X_sym, pool_shape=shp, pool_stride=strd,
|
1391 | 1391 | image_shape=im_shp)
|
| 1392 | + |
| 1393 | + |
def test_quantile_regression():
    """
    Train a QuantileRegression output layer on noisy linear data and check
    that it recovers both the generating coefficients and the requested
    quantile of the noise distribution.

    Targets are ``y = X . coeffs + noise`` with ``noise ~ Uniform(0, 1)``.
    Because the q-th quantile of Uniform(0, 1) is q itself, a correctly
    trained quantile regressor should learn weights close to ``coeffs`` and
    a bias close to ``percentile``.
    """
    np.random.seed(2)
    nb_rows = 1000
    X = np.random.normal(size=(nb_rows, 2)).astype(theano.config.floatX)
    # Uniform(0, 1) noise: its q-th quantile is q, which is what the
    # learned bias should converge to for each tested percentile.
    noise = np.random.rand(nb_rows, 1)
    coeffs = np.array([[3.], [4.]])
    y = np.dot(X, coeffs) + noise
    dataset = DenseDesignMatrix(X=X, y=y)
    for percentile in [0.22, 0.5, 0.65]:
        mlp = MLP(
            nvis=2,
            layers=[
                QuantileRegression('quantile_regression_layer',
                                   init_bias=0.0,
                                   percentile=percentile,
                                   irange=0.1)
            ]
        )
        train = Train(dataset, mlp, SGD(0.05, batch_size=100))
        train.algorithm.termination_criterion = EpochCounter(100)
        train.main_loop()
        # Smoke-test that fprop on the trained model compiles and runs.
        inputs = mlp.get_input_space().make_theano_batch()
        outputs = mlp.fprop(inputs)
        theano.function([inputs], outputs, allow_input_downcast=True)(X)
        layer = mlp.layers[0]
        # Weights should recover the generating coefficients; the bias
        # should approximate the requested quantile of the uniform noise.
        assert np.allclose(layer.get_weights(), coeffs, rtol=0.05)
        assert np.allclose(layer.get_biases(), np.array(percentile),
                           rtol=0.05)
0 commit comments