diff --git a/src/decomon/backward_layers/activations.py b/src/decomon/backward_layers/activations.py
index 5ff0319f..cb6ea6f4 100644
--- a/src/decomon/backward_layers/activations.py
+++ b/src/decomon/backward_layers/activations.py
@@ -79,8 +79,8 @@ def backward_relu(
         upper, lower = x[:nb_tensors]
     elif mode == ForwardMode.AFFINE:
         z_, w_u_, b_u_, w_l_, b_l_ = x[:nb_tensors]
-        upper = get_upper(z_, w_u_, b_u_)
-        lower = get_lower(z_, w_l_, b_l_)
+        upper = get_upper(z_, w_u_, b_u_, convex_domain=convex_domain)
+        lower = get_lower(z_, w_l_, b_l_, convex_domain=convex_domain)
     elif mode == ForwardMode.HYBRID:
         _, upper, _, _, lower, _, _ = x[:nb_tensors]
     else:
diff --git a/src/decomon/models/convert.py b/src/decomon/models/convert.py
index 108687ee..e45bcb31 100644
--- a/src/decomon/models/convert.py
+++ b/src/decomon/models/convert.py
@@ -213,6 +213,9 @@ def clone(
     if isinstance(method, str):
         method = ConvertMethod(method.lower())
 
+    if len(convex_domain) and isinstance(convex_domain["name"], str):
+        convex_domain["name"] = ConvexDomainType(convex_domain["name"].lower())
+
     if not to_keras:
         raise NotImplementedError("Only convert to Keras for now.")
 
@@ -262,10 +265,10 @@ def clone(
 
         if convex_domain["p"] == np.inf:
             radius = convex_domain["eps"]
+            u_c_tensor = Lambda(
+                lambda var: var + K.cast(radius, dtype=model.layers[0].dtype), dtype=model.layers[0].dtype
+            )(z_tensor)
             if ibp_:
-                u_c_tensor = Lambda(
-                    lambda var: var + K.cast(radius, dtype=model.layers[0].dtype), dtype=model.layers[0].dtype
-                )(z_tensor)
                 l_c_tensor = Lambda(
                     lambda var: var - K.cast(radius, dtype=model.layers[0].dtype), dtype=model.layers[0].dtype
                 )(z_tensor)
@@ -282,9 +285,9 @@ def clone(
 
         def get_bounds(z: tf.Tensor) -> List[tf.Tensor]:
             output = []
+            W = tf.linalg.diag(z_value * z + o_value)
+            b = z_value * z
             if affine_:
-                W = tf.linalg.diag(z_value * z + o_value)
-                b = z_value * z
                 output += [W, b]
             if ibp_:
                 u_c_ = get_upper(z, W, b, convex_domain)
diff --git a/tests/conftest.py b/tests/conftest.py
index 64f10566..f7836c0d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1190,6 +1190,85 @@ def toy_struct_cnn(dtype="float32"):
     ]
     return Sequential(layers)
 
+
+def assert_output_properties_ball(
+    x_, y_, h_, g_, x_center_, radius, p, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, name, decimal=5
+):
+
+    if y_ is None:
+        y_ = h_ + g_
+    if h_ is not None:
+
+        assert_almost_equal(
+            h_ + g_,
+            y_,
+            decimal=decimal,
+            err_msg="decomposition error for function {}".format(name),
+        )
+
+    if w_u_ is not None or w_l_ is not None:
+
+        x_expand = x_ + np.zeros_like(x_)
+        n_expand = len(w_u_.shape) - len(x_expand.shape)
+        for i in range(n_expand):
+            x_expand = np.expand_dims(x_expand, -1)
+
+        if p == 2:
+            norm = lambda x: np.sqrt(np.sum(x**2))
+        if p == np.inf:
+            norm = lambda x: np.max(np.abs(x))
+
+        if w_l_ is not None:
+            lower_ = np.sum(w_l_ * x_expand, 1) + b_l_ - radius * norm(w_l_)
+        if w_u_ is not None:
+            upper_ = np.sum(w_u_ * x_expand, 1) + b_u_ + radius * norm(w_u_)
+
+    # check that the functions h_ and g_ remains monotonic
+    if h_ is not None:
+        assert_almost_equal(
+            np.clip(h_[:-1] - h_[1:], 0, np.inf),
+            np.zeros_like(h_[1:]),
+            decimal=decimal,
+            err_msg="h is not increasing for function {}".format(name),
+        )
+        assert_almost_equal(
+            np.clip(g_[1:] - g_[:-1], 0, np.inf),
+            np.zeros_like(g_[1:]),
+            decimal=decimal,
+            err_msg="g is not increasing for function {}".format(name),
+        )
+
+    #
+    if w_u_ is not None:
+        if K.floatx() == "float32":
+            assert_almost_equal(
+                np.clip(y_ - upper_, 0.0, 1e6),
+                np.zeros_like(y_),
+                decimal=decimal,
+                err_msg="upper