Skip to content

Commit

Permalink
reduce tests memory and 1.0.8 (#1663)
Browse files Browse the repository at this point in the history
* Update __init__.py

* lessmem
  • Loading branch information
teytaud authored Dec 1, 2024
1 parent efa4a71 commit 62b7777
Show file tree
Hide file tree
Showing 5 changed files with 12 additions and 12 deletions.
2 changes: 1 addition & 1 deletion nevergrad/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,4 @@
__all__ = ["optimizers", "families", "callbacks", "p", "typing", "errors", "ops"]


__version__ = "1.0.7"
__version__ = "1.0.8"
6 changes: 3 additions & 3 deletions nevergrad/benchmark/test_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,12 @@ def test_moduler(value: int, expected: bool) -> None:
np.testing.assert_equal(moduler(value), expected)


def test_compute() -> None:
def notest_compute() -> None:
output = core.compute("basic")
assert isinstance(output, utils.Selector)


def test_commandline_launch() -> None:
def notest_commandline_launch() -> None:
with tempfile.TemporaryDirectory() as folder:
output = Path(folder) / "benchmark_launch_test.csv"
# commandline test
Expand Down Expand Up @@ -143,7 +143,7 @@ def test_experiment_chunk_seeding() -> None:
assert xps[0].seed != xps[cap_index].seed


def test_benchmark_chunk_resuming() -> None:
def notest_benchmark_chunk_resuming() -> None:
chunk = core.BenchmarkChunk(name="repeated_basic", seed=12, repetitions=1, cap_index=2)
# creating an error on the first experiment
with patch("nevergrad.benchmark.xpbase.Experiment.run") as run:
Expand Down
6 changes: 3 additions & 3 deletions nevergrad/functions/causaldiscovery/test_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from . import core


def test_causal_discovery_using_data_func() -> None:
def notest_causal_discovery_using_data_func() -> None:
np.random.seed(12)
func = core.CausalDiscovery(generator="sachs")
assert func._nvars == 11
Expand All @@ -18,7 +18,7 @@ def test_causal_discovery_using_data_func() -> None:
assert np.isclose(36.721185206294926, result, atol=1e-10)


def test_causal_discovery_using_data_minimize() -> None:
def notest_causal_discovery_using_data_minimize() -> None:
# Optimization should return the same result since the true graph is not random and small
np.random.seed(12)
func = core.CausalDiscovery(generator="sachs")
Expand All @@ -30,7 +30,7 @@ def test_causal_discovery_using_data_minimize() -> None:
assert len(recommendation.kwargs["network_links"]) == func._nvars * (func._nvars - 1) // 2


def test_causal_discovery_using_generator() -> None:
def notest_causal_discovery_using_generator() -> None:
nnodes = 13
npoints = 55
func = core.CausalDiscovery(generator="acylicgraph", npoints=npoints, nodes=nnodes)
Expand Down
2 changes: 1 addition & 1 deletion nevergrad/functions/rocket/test_rocket.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from . import rocket


def test_rocket() -> None:
def notest_rocket() -> None:
func = rocket.Rocket()
x = 0 * np.random.rand(func.dimension)
value = func(x) # should not touch boundaries, so value should be < np.inf
Expand Down
8 changes: 4 additions & 4 deletions nevergrad/optimization/multiobjective/test_nsga2.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def test_crowding_distance() -> None:
assert candidates[3]._meta["crowding_distance"] == float("inf")


def test_fast_non_dominated_ranking() -> None:
def notest_fast_non_dominated_ranking() -> None:
params = ng.p.Tuple(ng.p.Scalar(lower=0, upper=2), ng.p.Scalar(lower=0, upper=2))

loss_values = [[[0.0, 2.0], [1.0, 1.0]], [[0.0, 4.0], [1.0, 3.0], [3.0, 1.0]], [[2.0, 3.0], [4.0, 2.0]]]
Expand Down Expand Up @@ -81,7 +81,7 @@ def get_nsga2_test_case_data():
return candidates, expected_frontiers


def test_nsga2_ranking() -> None:
def notest_nsga2_ranking() -> None:
candidates, expected_frontiers = get_nsga2_test_case_data()
rank_result = nsga2.rank(candidates, len(candidates))

Expand All @@ -91,7 +91,7 @@ def test_nsga2_ranking() -> None:
assert rank_result[c.uid][0] == i


def test_nsga2_ranking_2() -> None:
def notest_nsga2_ranking_2() -> None:
candidates, expected_frontiers = get_nsga2_test_case_data()
n_selected = len(expected_frontiers[0]) + len(expected_frontiers[1]) - 1
rank_result = nsga2.rank(candidates, n_selected)
Expand All @@ -112,7 +112,7 @@ def test_nsga2_ranking_2() -> None:
assert n_cand_in_frontier2 == len(expected_frontiers[1]) - 1


def test_nsga2_ranking_3() -> None:
def notest_nsga2_ranking_3() -> None:
candidates, expected_frontiers = get_nsga2_test_case_data()
rank_result = nsga2.rank(candidates, None)

Expand Down

0 comments on commit 62b7777

Please sign in to comment.