|
11 | 11 | from numpy.testing import assert_allclose
|
12 | 12 |
|
13 | 13 | from pymc_extras.statespace.core.statespace import FILTER_FACTORY, PyMCStateSpace
|
| 14 | +from pymc_extras.statespace.filters.kalman_filter import StandardFilter |
| 15 | +from pymc_extras.statespace.filters.kalman_smoother import KalmanSmoother |
14 | 16 | from pymc_extras.statespace.models import structural as st
|
15 | 17 | from pymc_extras.statespace.models.utilities import make_default_coords
|
16 | 18 | from pymc_extras.statespace.utils.constants import (
|
@@ -878,3 +880,47 @@ def test_insert_batched_rvs(ss_mod, batch_size):
|
878 | 880 | ss_mod._insert_random_variables()
|
879 | 881 | matrices = ss_mod.unpack_statespace()
|
880 | 882 | assert matrices[4].type.shape == (*batch_size, 2, 2)
|
| 883 | + |
| 884 | + |
@pytest.mark.parametrize("batch_size", [(10,), (10, 3, 5)])
def test_insert_batched_rvs_in_kf(ss_mod, batch_size):
    """Batched random variables should carry their batch dimensions through
    the static shapes of every Kalman filter and smoother output."""
    obs = pt.as_tensor(np.random.normal(size=(*batch_size, 7, 1)).astype(floatX))
    obs.name = "data"
    kf = StandardFilter()

    with pm.Model():
        # RVs must exist in the model context so _insert_random_variables can find them
        rho = pm.Normal("rho", shape=batch_size)
        zeta = pm.Normal("zeta", shape=batch_size)
        ss_mod._insert_random_variables()

        matrices = x0, P0, c, d, T, Z, R, H, Q = ss_mod.unpack_statespace()

        # build_graph returns the filter outputs with the loglikelihood last
        *filter_outputs, logp = kf.build_graph(obs, *matrices)
        (
            filtered_states,
            predicted_states,
            observed_states,
            filtered_covariances,
            predicted_covariances,
            observed_covariances,
        ) = filter_outputs

        # Every output should be prefixed by the batch dimensions, followed by
        # the time dimension (7) and the state (2) / observation (1) dims.
        expected_shapes = [
            (logp, (*batch_size, 7)),
            (filtered_states, (*batch_size, 7, 2)),
            (predicted_states, (*batch_size, 7, 2)),
            (observed_states, (*batch_size, 7, 1)),
            (filtered_covariances, (*batch_size, 7, 2, 2)),
            (predicted_covariances, (*batch_size, 7, 2, 2)),
            (observed_covariances, (*batch_size, 7, 1, 1)),
        ]
        for tensor, expected in expected_shapes:
            assert tensor.type.shape == expected

        smoothed_states, smoothed_covariances = KalmanSmoother().build_graph(
            T, R, Q, filtered_states, filtered_covariances
        )
        # TODO: why do we lose the time dimension here?
        assert smoothed_states.type.shape == (*batch_size, None, 2)
        # TODO: why do we lose the time dimension here?
        assert smoothed_covariances.type.shape == (*batch_size, None, 2, 2)
0 commit comments