From ac3a7ec05352704a23e53a90efd5e4d051bf3b9c Mon Sep 17 00:00:00 2001
From: Elizabeth Santorella
Date: Wed, 4 Sep 2024 09:07:22 -0700
Subject: [PATCH] Don't test with a test stub when there's a function that does the same thing (#2735)

Summary:
Pull Request resolved: https://github.com/facebook/Ax/pull/2735

Context:
* There is a `get_sobol_benchmark_method` test stub, which is not needed when a Sobol benchmark method is provided, also called `get_sobol_benchmark_method`. It is better to test that function.
* The latter `get_sobol_benchmark_method` requires an argument `distribute_replications`. (Making this mandatory was an intentional choice, because it is easy to forget it.)

This diff:
* Gets rid of the test stub and uses the non-stub version instead
* Adds `distribute_replications` in a bunch of places. I chose `False` arbitrarily since the argument will have no effect here.

Reviewed By: saitcakmak

Differential Revision: D62157106

fbshipit-source-id: 9d6ef4e609502fc94d09be31aa31b1dd7325b111
---
 ax/benchmark/tests/test_benchmark.py | 31 ++++++++++++----------------
 ax/utils/testing/benchmark_stubs.py  | 13 ------------
 2 files changed, 13 insertions(+), 31 deletions(-)

diff --git a/ax/benchmark/tests/test_benchmark.py b/ax/benchmark/tests/test_benchmark.py
index edf51b88f3d..5e2447b9236 100644
--- a/ax/benchmark/tests/test_benchmark.py
+++ b/ax/benchmark/tests/test_benchmark.py
@@ -22,6 +22,7 @@
 from ax.benchmark.benchmark_problem import create_problem_from_botorch
 from ax.benchmark.benchmark_result import BenchmarkResult
 from ax.benchmark.methods.modular_botorch import get_sobol_botorch_modular_acquisition
+from ax.benchmark.methods.sobol import get_sobol_benchmark_method
 from ax.benchmark.problems.registry import get_problem
 from ax.modelbridge.generation_strategy import GenerationNode, GenerationStrategy
 from ax.modelbridge.model_spec import ModelSpec
@@ -35,7 +36,6 @@
     get_moo_surrogate,
     get_multi_objective_benchmark_problem,
     get_single_objective_benchmark_problem,
-    get_sobol_benchmark_method,
     get_soo_surrogate,
     TestDataset,
 )
@@ -89,7 +89,9 @@ def test_batch(self) -> None:
     def test_storage(self) -> None:
         problem = get_single_objective_benchmark_problem()
         res = benchmark_replication(
-            problem=problem, method=get_sobol_benchmark_method(), seed=0
+            problem=problem,
+            method=get_sobol_benchmark_method(distribute_replications=False),
+            seed=0,
         )
         # Experiment is not in storage yet
         self.assertTrue(res.experiment is not None)
@@ -184,7 +186,7 @@ def test_create_benchmark_experiment(self) -> None:
         self.assertEqual(experiment.runner, problem.runner)

     def test_replication_sobol_synthetic(self) -> None:
-        method = get_sobol_benchmark_method()
+        method = get_sobol_benchmark_method(distribute_replications=False)
         problems = [
             get_single_objective_benchmark_problem(),
             get_problem("jenatton", num_trials=6),
@@ -192,18 +194,12 @@
         for problem in problems:
             res = benchmark_replication(problem=problem, method=method, seed=0)

-            self.assertEqual(
-                min(
-                    problem.num_trials, not_none(method.scheduler_options.total_trials)
-                ),
-                len(not_none(res.experiment).trials),
-            )
-
+            self.assertEqual(problem.num_trials, len(not_none(res.experiment).trials))
             self.assertTrue(np.isfinite(res.score_trace).all())
             self.assertTrue(np.all(res.score_trace <= 100))

     def test_replication_sobol_surrogate(self) -> None:
-        method = get_sobol_benchmark_method()
+        method = get_sobol_benchmark_method(distribute_replications=False)
         # This is kind of a weird setup - these are "surrogates" that use a Branin
         # synthetic function. The idea here is to test the machinery around the
@@ -217,10 +213,7 @@ def test_replication_sobol_surrogate(self) -> None:
             res = benchmark_replication(problem=problem, method=method, seed=0)

             self.assertEqual(
-                min(
-                    problem.num_trials,
-                    not_none(method.scheduler_options.total_trials),
-                ),
+                problem.num_trials,
                 len(not_none(res.experiment).trials),
             )

@@ -313,7 +306,9 @@ def test_replication_moo_sobol(self) -> None:
         problem = get_multi_objective_benchmark_problem()
         res = benchmark_replication(
-            problem=problem, method=get_sobol_benchmark_method(), seed=0
+            problem=problem,
+            method=get_sobol_benchmark_method(distribute_replications=False),
+            seed=0,
         )

         self.assertEqual(
@@ -331,7 +326,7 @@ def test_benchmark_one_method_problem(self) -> None:
         problem = get_single_objective_benchmark_problem()
         agg = benchmark_one_method_problem(
             problem=problem,
-            method=get_sobol_benchmark_method(),
+            method=get_sobol_benchmark_method(distribute_replications=False),
             seeds=(0, 1),
         )

@@ -352,7 +347,7 @@ def test_benchmark_multiple_problems_methods(self) -> None:
         aggs = benchmark_multiple_problems_methods(
             problems=[get_single_objective_benchmark_problem(num_trials=6)],
             methods=[
-                get_sobol_benchmark_method(),
+                get_sobol_benchmark_method(distribute_replications=False),
                 get_sobol_botorch_modular_acquisition(
                     model_cls=SingleTaskGP,
                     acquisition_cls=qLogNoisyExpectedImprovement,
diff --git a/ax/utils/testing/benchmark_stubs.py b/ax/utils/testing/benchmark_stubs.py
index 211dcfbd698..a87e1f9e171 100644
--- a/ax/utils/testing/benchmark_stubs.py
+++ b/ax/utils/testing/benchmark_stubs.py
@@ -68,19 +68,6 @@ def get_multi_objective_benchmark_problem(
     )


-def get_sobol_benchmark_method() -> BenchmarkMethod:
-    return BenchmarkMethod(
-        name="SOBOL",
-        generation_strategy=GenerationStrategy(
-            steps=[GenerationStep(model=Models.SOBOL, num_trials=-1)],
-            name="SOBOL",
-        ),
-        scheduler_options=SchedulerOptions(
-            total_trials=4, init_seconds_between_polls=0
-        ),
-    )
-
-
 def get_soo_surrogate() -> SurrogateBenchmarkProblem:
     experiment = get_branin_experiment(with_completed_trial=True)
     surrogate = TorchModelBridge(