diff --git a/hydra/_internal/core_plugins/basic_launcher.py b/hydra/_internal/core_plugins/basic_launcher.py
index 8613fa9838f..0fab519bf6c 100644
--- a/hydra/_internal/core_plugins/basic_launcher.py
+++ b/hydra/_internal/core_plugins/basic_launcher.py
@@ -50,7 +50,7 @@ def setup(
         self.task_function = task_function

     def launch(
-        self, job_overrides: Union[Sequence[Sequence[str]], ExperimentSequence], initial_job_idx: int
+        self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
     ) -> Sequence[JobReturn]:
         setup_globals()
         assert self.hydra_context is not None
@@ -66,7 +66,42 @@ def launch(
             idx = initial_job_idx + idx
             lst = " ".join(filter_overrides(overrides))
             log.info(f"\t#{idx} : {lst}")
-            print(overrides)
+
+            sweep_config = self.hydra_context.config_loader.load_sweep_config(
+                self.config, list(overrides)
+            )
+            with open_dict(sweep_config):
+                sweep_config.hydra.job.id = idx
+                sweep_config.hydra.job.num = idx
+            ret = run_job(
+                hydra_context=self.hydra_context,
+                task_function=self.task_function,
+                config=sweep_config,
+                job_dir_key="hydra.sweep.dir",
+                job_subdir_key="hydra.sweep.subdir",
+            )
+            runs.append(ret)
+        configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
+        return runs
+
+    def launch_experiment_sequence(
+        self, job_overrides: ExperimentSequence, initial_job_idx: int
+    ) -> Sequence[JobReturn]:
+        setup_globals()
+        assert self.hydra_context is not None
+        assert self.config is not None
+        assert self.task_function is not None
+
+        configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
+        sweep_dir = self.config.hydra.sweep.dir
+        Path(str(sweep_dir)).mkdir(parents=True, exist_ok=True)
+        log.info(f"Launching {len(job_overrides)} jobs locally")
+        runs: List[JobReturn] = []
+        for idx, overrides in enumerate(job_overrides):
+            idx = initial_job_idx + idx
+            lst = " ".join(filter_overrides(overrides))
+            log.info(f"\t#{idx} : {lst}")
+
             sweep_config = self.hydra_context.config_loader.load_sweep_config(
                 self.config, list(overrides)
             )
@@ -84,4 +119,4 @@ def launch(
             if isinstance(job_overrides, ExperimentSequence):
                 job_overrides.update_sequence((overrides, ret))
         configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
-        return runs
+        return runs
\ No newline at end of file
diff --git a/hydra/plugins/launcher.py b/hydra/plugins/launcher.py
index 4a10b1d80f9..4a28e82d28e 100644
--- a/hydra/plugins/launcher.py
+++ b/hydra/plugins/launcher.py
@@ -17,7 +17,7 @@
 Launcher plugin interface
 """
 from abc import abstractmethod
-from typing import Sequence, Union
+from typing import Sequence

 from omegaconf import DictConfig

@@ -44,10 +44,21 @@ def setup(

     @abstractmethod
     def launch(
-        self, job_overrides: Union[Sequence[Sequence[str]], ExperimentSequence], initial_job_idx: int
+        self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
     ) -> Sequence[JobReturn]:
         """
         :param job_overrides: a batch of job arguments
         :param initial_job_idx: Initial job idx. used by sweepers that executes several batches
         """
         raise NotImplementedError()
+
+    def launch_experiment_sequence(
+        self, job_overrides: ExperimentSequence, initial_job_idx: int
+    ) -> Sequence[JobReturn]:
+        """
+        :param job_overrides: an experiment sequence yielding job arguments
+        :param initial_job_idx: initial job idx, used by sweepers that execute several batches
+        """
+        raise NotImplementedError(
+            "This launcher does not support launching an experiment sequence."
+        )
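For reviewers: a minimal sketch of what the new hook expects from a launcher. The import path of `ExperimentSequence` is not shown in this diff; it is assumed here to be an iterable of override tuples that also exposes `update_sequence()`, exactly as `BasicLauncher` uses it above. `_launch_one` is a hypothetical helper standing in for the per-job logic.

```python
from typing import Any, List, Sequence

from hydra.core.utils import JobReturn


class SequenceAwareLauncher:
    def launch_experiment_sequence(
        self, job_overrides: Any, initial_job_idx: int  # Any stands in for ExperimentSequence
    ) -> Sequence[JobReturn]:
        runs: List[JobReturn] = []
        for idx, overrides in enumerate(job_overrides):
            # run one job, mirroring the per-override body of launch()
            ret = self._launch_one(overrides, initial_job_idx + idx)  # hypothetical helper
            runs.append(ret)
            # report the result back so the sequence can decide which trial to yield next
            job_overrides.update_sequence((overrides, ret))
        return runs
```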
diff --git a/hydra/plugins/sweeper.py b/hydra/plugins/sweeper.py
index 0e1ad51ff95..4f49957bfec 100644
--- a/hydra/plugins/sweeper.py
+++ b/hydra/plugins/sweeper.py
@@ -17,7 +17,7 @@
 Sweeper plugin interface
 """
 from abc import abstractmethod
-from typing import Any, List, Sequence, Optional, Dict, Tuple
+from typing import Any, List, Sequence, Optional

 from hydra.types import TaskFunction
 from omegaconf import DictConfig
diff --git a/plugins/hydra_joblib_launcher/hydra_plugins/hydra_joblib_launcher/joblib_launcher.py b/plugins/hydra_joblib_launcher/hydra_plugins/hydra_joblib_launcher/joblib_launcher.py
index cae24840146..96df393e9e9 100644
--- a/plugins/hydra_joblib_launcher/hydra_plugins/hydra_joblib_launcher/joblib_launcher.py
+++ b/plugins/hydra_joblib_launcher/hydra_plugins/hydra_joblib_launcher/joblib_launcher.py
@@ -1,6 +1,6 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
 import logging
-from typing import Any, Optional, Sequence, Union
+from typing import Any, Optional, Sequence

 from hydra.core.utils import JobReturn
 from hydra.plugins.launcher import Launcher
@@ -39,7 +39,16 @@ def setup(
         self.hydra_context = hydra_context

     def launch(
-        self, job_overrides: Union[Sequence[Sequence[str]], ExperimentSequence], initial_job_idx: int
+        self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
+    ) -> Sequence[JobReturn]:
+        from . import _core
+
+        return _core.launch(
+            launcher=self, job_overrides=job_overrides, initial_job_idx=initial_job_idx
+        )
+
+    def launch_experiment_sequence(
+        self, job_overrides: ExperimentSequence, initial_job_idx: int
     ) -> Sequence[JobReturn]:
         from . import _core
diff --git a/plugins/hydra_loky_launcher/hydra_plugins/hydra_loky_launcher/loky_launcher.py b/plugins/hydra_loky_launcher/hydra_plugins/hydra_loky_launcher/loky_launcher.py
index e8d555ce6ae..3f6a9d3efe5 100644
--- a/plugins/hydra_loky_launcher/hydra_plugins/hydra_loky_launcher/loky_launcher.py
+++ b/plugins/hydra_loky_launcher/hydra_plugins/hydra_loky_launcher/loky_launcher.py
@@ -52,7 +52,16 @@ def setup(
         self.hydra_context = hydra_context

     def launch(
-        self, job_overrides: Union[Sequence[Sequence[str]], ExperimentSequence], initial_job_idx: int
+        self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
+    ) -> Sequence[JobReturn]:
+        from . import _core
+
+        return _core.launch(
+            launcher=self, job_overrides=job_overrides, initial_job_idx=initial_job_idx
+        )
+
+    def launch_experiment_sequence(
+        self, job_overrides: ExperimentSequence, initial_job_idx: int
     ) -> Sequence[JobReturn]:
         from . import _core
diff --git a/plugins/hydra_multiprocessing_launcher/hydra_plugins/hydra_multiprocessing_launcher/multiprocessing_launcher.py b/plugins/hydra_multiprocessing_launcher/hydra_plugins/hydra_multiprocessing_launcher/multiprocessing_launcher.py
index ba29211bd71..92f68a2a82a 100644
--- a/plugins/hydra_multiprocessing_launcher/hydra_plugins/hydra_multiprocessing_launcher/multiprocessing_launcher.py
+++ b/plugins/hydra_multiprocessing_launcher/hydra_plugins/hydra_multiprocessing_launcher/multiprocessing_launcher.py
@@ -84,7 +84,16 @@ def setup(
         self.executor = NestablePool(**self.mp_config)

     def launch(
-        self, job_overrides: Union[Sequence[Sequence[str]], ExperimentSequence], initial_job_idx: int
+        self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
+    ) -> Sequence[JobReturn]:
+        from . import _core
+
+        return _core.launch(
+            launcher=self, job_overrides=job_overrides, initial_job_idx=initial_job_idx
+        )
+
+    def launch_experiment_sequence(
+        self, job_overrides: ExperimentSequence, initial_job_idx: int
     ) -> Sequence[JobReturn]:
         from . import _core
diff --git a/plugins/hydra_optuna_sweeper/example/conf/config.yaml b/plugins/hydra_optuna_sweeper/example/conf/config.yaml
index 840db01de00..e3b86c71042 100644
--- a/plugins/hydra_optuna_sweeper/example/conf/config.yaml
+++ b/plugins/hydra_optuna_sweeper/example/conf/config.yaml
@@ -10,7 +10,7 @@ hydra:
     study_name: sphere
     storage: null
     n_trials: 20
-    experiment_sequence: hydra_plugins.hydra_optuna_sweeper._impl.OptunaExperimentSequence
+    n_jobs: 1
     max_failure_rate: 0.0
     params:
       x: range(-5.5, 5.5, step=0.5)
diff --git a/plugins/hydra_optuna_sweeper/example/custom-search-space/config.yaml b/plugins/hydra_optuna_sweeper/example/custom-search-space/config.yaml
index 29f821bf9aa..f11a2aaed95 100644
--- a/plugins/hydra_optuna_sweeper/example/custom-search-space/config.yaml
+++ b/plugins/hydra_optuna_sweeper/example/custom-search-space/config.yaml
@@ -9,13 +9,14 @@ hydra:
     study_name: custom-search-space
     storage: null
     n_trials: 20
+    n_jobs: 1
+
     params:
       x: range(-5.5, 5.5, 0.5)
       y: choice(-5, 0, 5)
     # `custom_search_space` should be a dotpath pointing to a
     # callable that provides search-space configuration logic:
     custom_search_space: custom-search-space-objective.configure
-    experiment_sequence: hydra_plugins.hydra_optuna_sweeper._impl.OptunaExperimentSequence

 x: 1
 y: 1
diff --git a/plugins/hydra_optuna_sweeper/example/experiment-sequence-conf/config.yaml b/plugins/hydra_optuna_sweeper/example/experiment-sequence-conf/config.yaml
new file mode 100644
index 00000000000..d32399e2072
--- /dev/null
+++ b/plugins/hydra_optuna_sweeper/example/experiment-sequence-conf/config.yaml
@@ -0,0 +1,23 @@
+defaults:
+  - override hydra/sweeper: optuna_v2
+  - override hydra/sweeper/sampler: tpe
+
+hydra:
+  sweeper:
+    sampler:
+      seed: 123
+    direction: minimize
+    study_name: sphere
+    storage: null
+    n_trials: 20
+    max_failure_rate: 0.0
+    params:
+      x: range(-5.5, 5.5, step=0.5)
+      y: choice(-5, 0, 5)
+
+x: 1
+y: 1
+z: 1
+
+# if true, simulate a failure by raising an exception
+error: false
diff --git a/plugins/hydra_optuna_sweeper/example/multi-objective-conf/config.yaml b/plugins/hydra_optuna_sweeper/example/multi-objective-conf/config.yaml
index 9e96084d0f6..d4cc4f2d749 100644
--- a/plugins/hydra_optuna_sweeper/example/multi-objective-conf/config.yaml
+++ b/plugins/hydra_optuna_sweeper/example/multi-objective-conf/config.yaml
@@ -10,7 +10,7 @@ hydra:
     study_name: multi-objective
     storage: null
     n_trials: 20
-    experiment_sequence: hydra_plugins.hydra_optuna_sweeper._impl.OptunaExperimentSequence
+    n_jobs: 1
     params:
       x: range(0, 5, step=0.5)
       y: range(0, 3, step=0.5)
diff --git a/plugins/hydra_optuna_sweeper/example/sphere_sequence.py b/plugins/hydra_optuna_sweeper/example/sphere_sequence.py
new file mode 100644
index 00000000000..0e0938816b2
--- /dev/null
+++ b/plugins/hydra_optuna_sweeper/example/sphere_sequence.py
@@ -0,0 +1,18 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+import hydra
+from omegaconf import DictConfig
+
+
+@hydra.main(version_base=None, config_path="experiment-sequence-conf", config_name="config")
+def sphere(cfg: DictConfig) -> float:
+    x: float = cfg.x
+    y: float = cfg.y
+
+    if cfg.get("error", False):
+        raise RuntimeError("cfg.error is True")
+
+    return x**2 + y**2
+
+
+if __name__ == "__main__":
+    sphere()
\ No newline at end of file
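For context, the new example is driven by the tests added at the end of this PR. An equivalent manual invocation, using the `run_python_script` helper those tests rely on (working directory assumed to be `plugins/hydra_optuna_sweeper`; overrides are illustrative):

```python
from hydra.test_utils.test_utils import run_python_script

# Equivalent to:
#   python example/sphere_sequence.py --multirun hydra.sweeper.n_trials=20 ...
cmd = [
    "example/sphere_sequence.py",
    "--multirun",
    "hydra.sweeper.n_trials=20",
    "hydra/sweeper/sampler=tpe",
    "hydra.sweeper.sampler.seed=123",
]
run_python_script(cmd)
```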
diff --git a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/_impl.py b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/_impl.py
index b8205215a0e..9fd3ce6758c 100644
--- a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/_impl.py
+++ b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/_impl.py
@@ -273,17 +273,19 @@ def __init__(
         storage: Optional[Any],
         study_name: Optional[str],
         n_trials: int,
+        n_jobs: Optional[int],
         max_failure_rate: float,
         search_space: Optional[DictConfig],
         custom_search_space: Optional[str],
         params: Optional[DictConfig],
-        experiment_sequence: str,
+        experiment_sequence: Optional[str] = None,
     ) -> None:
         self.sampler = sampler
         self.direction = direction
         self.storage = storage
         self.study_name = study_name
         self.n_trials = n_trials
+        self.n_jobs = n_jobs
         self.max_failure_rate = max_failure_rate
         assert self.max_failure_rate >= 0.0
         assert self.max_failure_rate <= 1.0
@@ -345,6 +347,36 @@ def _get_directions(self) -> List[str]:
         elif isinstance(self.direction, str):
             return [self.direction]
         return [self.direction.name]
+
+    def _configure_trials(
+        self,
+        trials: List[Trial],
+        search_space_distributions: Dict[str, BaseDistribution],
+        fixed_params: Dict[str, Any],
+    ) -> Sequence[Sequence[str]]:
+        overrides = []
+        for trial in trials:
+            for param_name, distribution in search_space_distributions.items():
+                assert type(param_name) is str
+                trial._suggest(param_name, distribution)
+            for param_name, value in fixed_params.items():
+                trial.set_user_attr(param_name, value)
+
+            if self.custom_search_space_extender:
+                assert self.config is not None
+                self.custom_search_space_extender(self.config, trial)
+
+            overlap = trial.params.keys() & trial.user_attrs
+            if len(overlap):
+                raise ValueError(
+                    "Overlapping fixed parameters and search space parameters found! "
+                    f"Overlapping parameters: {list(overlap)}"
+                )
+            params = dict(trial.params)
+            params.update(fixed_params)
+
+            overrides.append(tuple(f"{name}={val}" for name, val in params.items()))
+        return overrides

     def _parse_sweeper_params_config(self) -> List[str]:
         if not self.params:
@@ -426,21 +458,95 @@ def sweep(self, arguments: List[str]) -> None:
         log.info(f"Sampler: {type(self.sampler).__name__}")
         log.info(f"Directions: {directions}")

+        batch_size = self.n_jobs
         n_trials_to_go = self.n_trials
-        from copy import deepcopy as copy
-        experiment_sequence = instantiate({
-            "_target_": self.experiment_sequence_inst,
-            "study": study,
-            "num_experiments": n_trials_to_go,
-            "search_space_distributions": search_space_distributions,
-            "fixed_params": fixed_params,
-            "directions": directions,
-            "custom_search_space_extender": self.custom_search_space_extender,
-            "max_failure_rate": self.max_failure_rate,
-            "is_grid_sampler": is_grid_sampler,
-            #"config": self.config
-        })
-        self.launcher.launch(experiment_sequence, 0)
+
+        if self.experiment_sequence_inst is not None:
+            if batch_size is not None:
+                warnings.warn(
+                    "Parameter sweeper.config.n_jobs is unused for optuna_v2."
+                    "\n Job scheduling is delegated to the launcher. Use launcher.config.n_jobs (or equivalent) instead."
+                )
+
+            experiment_sequence = instantiate({
+                "_target_": self.experiment_sequence_inst,
+                "study": study,
+                "num_experiments": n_trials_to_go,
+                "search_space_distributions": search_space_distributions,
+                "fixed_params": fixed_params,
+                "directions": directions,
+                "custom_search_space_extender": self.custom_search_space_extender,
+                "max_failure_rate": self.max_failure_rate,
+                "is_grid_sampler": is_grid_sampler,
+                # "config": self.config
+            })
+            self.launcher.launch_experiment_sequence(experiment_sequence, initial_job_idx=self.job_idx)
+        else:
+            while n_trials_to_go > 0:
+                batch_size = min(n_trials_to_go, batch_size)
+
+                trials = [study.ask() for _ in range(batch_size)]
+                overrides = self._configure_trials(
+                    trials, search_space_distributions, fixed_params
+                )
+
+                returns = self.launcher.launch(overrides, initial_job_idx=self.job_idx)
+                self.job_idx += len(returns)
+                failures = []
+                for trial, ret in zip(trials, returns):
+                    values: Optional[List[float]] = None
+                    state: optuna.trial.TrialState = optuna.trial.TrialState.COMPLETE
+                    try:
+                        if len(directions) == 1:
+                            try:
+                                values = [float(ret.return_value)]
+                            except (ValueError, TypeError):
+                                raise ValueError(
+                                    f"Return value must be float-castable. Got '{ret.return_value}'."
+                                ).with_traceback(sys.exc_info()[2])
+                        else:
+                            try:
+                                values = [float(v) for v in ret.return_value]
+                            except (ValueError, TypeError):
+                                raise ValueError(
+                                    "Return value must be a list or tuple of float-castable values."
+                                    f" Got '{ret.return_value}'."
+                                ).with_traceback(sys.exc_info()[2])
+                        if len(values) != len(directions):
+                            raise ValueError(
+                                "The number of the values and the number of the objectives are"
+                                f" mismatched. Expect {len(directions)}, but actually {len(values)}."
+                            )
+
+                        try:
+                            study.tell(trial=trial, state=state, values=values)
+                        except RuntimeError as e:
+                            if (
+                                is_grid_sampler
+                                and "`Study.stop` is supposed to be invoked inside an objective function or a callback."
+                                in str(e)
+                            ):
+                                pass
+                            else:
+                                raise e
+
+                    except Exception as e:
+                        state = optuna.trial.TrialState.FAIL
+                        study.tell(trial=trial, state=state, values=values)
+                        log.warning(f"Failed experiment: {e}")
+                        failures.append(e)
+
+                # raise if too many failures
+                if len(failures) / len(returns) > self.max_failure_rate:
+                    log.error(
+                        f"Failed {len(failures)} times out of {len(returns)} "
+                        f"with max_failure_rate={self.max_failure_rate}."
+                    )
+                    assert len(failures) > 0
+                    for ret in returns:
+                        ret.return_value  # delegate raising to JobReturn, with actual traceback
+
+                n_trials_to_go -= batch_size

         results_to_serialize: Dict[str, Any]
         if len(directions) < 2:
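The non-sequence branch above keeps the v1 behavior: ask a batch of trials, launch them, then tell the study each result. A standalone Optuna illustration of that ask/tell loop (no Hydra; parameter name and bounds are made up):

```python
import optuna

study = optuna.create_study(direction="minimize")
batch_size = 4
trials = [study.ask() for _ in range(batch_size)]
# suggest parameters per trial and render them as Hydra-style overrides
overrides = [(f"x={trial.suggest_float('x', -5.5, 5.5)}",) for trial in trials]
for trial, override in zip(trials, overrides):
    # in the sweeper, the launcher runs each override; here we evaluate inline
    x = float(override[0].split("=", 1)[1])
    study.tell(trial, x**2)  # report each job's return value back to the study
```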
diff --git a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/config.py b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/config.py
index b7d767900ad..c89a4059d0c 100644
--- a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/config.py
+++ b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/config.py
@@ -145,7 +145,7 @@ class DistributionConfig:


 @dataclass
-class OptunaSweeperConf:
+class OptunaSweeperConfV2:
     _target_: str = "hydra_plugins.hydra_optuna_sweeper.optuna_sweeper.OptunaSweeper"
     defaults: List[Any] = field(default_factory=lambda: defaults)

@@ -170,6 +170,9 @@ class OptunaSweeperConf:
     # Total number of function evaluations
     n_trials: int = 20

+    # Number of parallel workers; unused in optuna_v2 because scheduling is delegated to the launcher
+    n_jobs: Optional[int] = None
+
     # Maximum authorized failure rate for a batch of parameters
     max_failure_rate: float = 0.0

@@ -186,6 +189,56 @@ class OptunaSweeperConf:
     experiment_sequence: str = "hydra_plugins.hydra_optuna_sweeper._impl.OptunaExperimentSequence"


+ConfigStore.instance().store(
+    group="hydra/sweeper",
+    name="optuna_v2",
+    node=OptunaSweeperConfV2,
+    provider="optuna_sweeper",
+)
+
+@dataclass
+class OptunaSweeperConf:
+    _target_: str = "hydra_plugins.hydra_optuna_sweeper.optuna_sweeper.OptunaSweeper"
+    defaults: List[Any] = field(default_factory=lambda: defaults)
+
+    # Sampling algorithm
+    # Please refer to the reference for further details
+    # https://optuna.readthedocs.io/en/stable/reference/samplers.html
+    sampler: SamplerConfig = MISSING
+
+    # Direction of optimization
+    # Union[Direction, List[Direction]]
+    direction: Any = Direction.minimize
+
+    # Storage URL to persist optimization results
+    # For example, you can use SQLite if you set 'sqlite:///example.db'
+    # Please refer to the reference for further details
+    # https://optuna.readthedocs.io/en/stable/reference/storages.html
+    storage: Optional[Any] = None
+
+    # Name of study to persist optimization results
+    study_name: Optional[str] = None
+
+    # Total number of function evaluations
+    n_trials: int = 20
+
+    # Number of parallel workers
+    n_jobs: int = 2
+
+    # Maximum authorized failure rate for a batch of parameters
+    max_failure_rate: float = 0.0
+
+    search_space: Optional[Dict[str, Any]] = None
+
+    params: Optional[Dict[str, str]] = None
+
+    # Allow custom trial configuration via Python methods.
+    # If given, `custom_search_space` should be an instantiate-style dotpath targeting
+    # a callable with signature Callable[[DictConfig, optuna.trial.Trial], None].
+    # https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html
+    custom_search_space: Optional[str] = None
+
+
 ConfigStore.instance().store(
     group="hydra/sweeper",
     name="optuna",
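A quick sanity check of the two nodes registered above, runnable once the plugin module is importable; the defaults reflect the split: v1 batches on the sweeper side via `n_jobs`, v2 leaves scheduling to the launcher:

```python
from hydra_plugins.hydra_optuna_sweeper.config import (
    OptunaSweeperConf,
    OptunaSweeperConfV2,
)

assert OptunaSweeperConf().n_jobs == 2        # v1: sweeper-side batching
assert OptunaSweeperConfV2().n_jobs is None   # v2: launcher decides parallelism
```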
diff --git a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/optuna_sweeper.py b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/optuna_sweeper.py
index 112e8e44aa3..a3fdd8c436c 100644
--- a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/optuna_sweeper.py
+++ b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/optuna_sweeper.py
@@ -18,11 +18,12 @@ def __init__(
         storage: Optional[Any],
         study_name: Optional[str],
         n_trials: int,
+        n_jobs: Optional[int],
         max_failure_rate: float,
         search_space: Optional[DictConfig],
         custom_search_space: Optional[str],
         params: Optional[DictConfig],
-        experiment_sequence: str
+        experiment_sequence: Optional[str] = None,
     ) -> None:
         from ._impl import OptunaSweeperImpl

@@ -32,6 +33,7 @@ def __init__(
             storage,
             study_name,
             n_trials,
+            n_jobs,
             max_failure_rate,
             search_space,
             custom_search_space,
diff --git a/plugins/hydra_optuna_sweeper/tests/conf/test_deprecated_search_space.yaml b/plugins/hydra_optuna_sweeper/tests/conf/test_deprecated_search_space.yaml
index fe78c59a78e..f3d11ed8c61 100644
--- a/plugins/hydra_optuna_sweeper/tests/conf/test_deprecated_search_space.yaml
+++ b/plugins/hydra_optuna_sweeper/tests/conf/test_deprecated_search_space.yaml
@@ -7,7 +7,7 @@ hydra:
     study_name: sphere
     storage: null
     n_trials: 20
-    experiment_sequence: hydra_plugins.hydra_optuna_sweeper._impl.OptunaExperimentSequence
+    n_jobs: 1
     search_space:
       x:
         type: float
diff --git a/plugins/hydra_optuna_sweeper/tests/conf/test_grid.yaml b/plugins/hydra_optuna_sweeper/tests/conf/test_grid.yaml
index e766c706f30..bcf1527a9e8 100644
--- a/plugins/hydra_optuna_sweeper/tests/conf/test_grid.yaml
+++ b/plugins/hydra_optuna_sweeper/tests/conf/test_grid.yaml
@@ -8,7 +8,7 @@ hydra:
     study_name: sphere
     storage: null
     n_trials: 20
-    experiment_sequence: hydra_plugins.hydra_optuna_sweeper._impl.OptunaExperimentSequence
+    n_jobs: 1
     params:
       x: choice(-1, 1)
       y: range(-1.0, 1.0, step=1)
diff --git a/plugins/hydra_optuna_sweeper/tests/test_optuna_sweeper_plugin.py b/plugins/hydra_optuna_sweeper/tests/test_optuna_sweeper_plugin.py
index 26e538eb071..007af7a5dc5 100644
--- a/plugins/hydra_optuna_sweeper/tests/test_optuna_sweeper_plugin.py
+++ b/plugins/hydra_optuna_sweeper/tests/test_optuna_sweeper_plugin.py
@@ -144,6 +144,7 @@ def test_launch_jobs(hydra_sweep_runner: TSweepRunner) -> None:
             "hydra/sweeper=optuna",
             "hydra/launcher=basic",
             "hydra.sweeper.n_trials=8",
+            "hydra.sweeper.n_jobs=3",
         ],
     )
     with sweep:
@@ -160,6 +161,48 @@ def test_optuna_example(with_commandline: bool, tmpdir: Path) -> None:
         "hydra.sweep.dir=" + str(tmpdir),
         "hydra.job.chdir=True",
         "hydra.sweeper.n_trials=20",
+        "hydra.sweeper.n_jobs=1",
         f"hydra.sweeper.storage={storage}",
         f"hydra.sweeper.study_name={study_name}",
         "hydra/sweeper/sampler=tpe",
         "hydra.sweeper.sampler.seed=123",
         "~z",
     ]
     if with_commandline:
         cmd += [
             "x=choice(0, 1, 2)",
             "y=0",  # Fixed parameter
         ]
     run_python_script(cmd)
     returns = OmegaConf.load(f"{tmpdir}/optimization_results.yaml")
     study = optuna.load_study(storage=storage, study_name=study_name)
     best_trial = study.best_trial
     assert isinstance(returns, DictConfig)
     assert returns.name == "optuna"
     assert returns["best_params"]["x"] == best_trial.params["x"]
     if with_commandline:
         assert "y" not in returns["best_params"]
+        assert "y" not in best_trial.params
     else:
         assert returns["best_params"]["y"] == best_trial.params["y"]
     assert returns["best_value"] == best_trial.value
     # Check the search performance of the TPE sampler.
     # The threshold is the 95th percentile calculated with 1000 different seed values
     # to make the test robust against the detailed implementation of the sampler.
     # See https://github.com/facebookresearch/hydra/pull/1746#discussion_r681549830.
     assert returns["best_value"] <= 2.27
+
+
+@mark.parametrize("with_commandline", (True, False))
+def test_optuna_v2_example(with_commandline: bool, tmpdir: Path) -> None:
+    storage = "sqlite:///" + os.path.join(str(tmpdir), "test.db")
+    study_name = "test-optuna-v2-example"
+    cmd = [
+        "example/sphere_sequence.py",
+        "--multirun",
+        "hydra.sweep.dir=" + str(tmpdir),
+        "hydra.job.chdir=True",
+        "hydra.sweeper.n_trials=20",
         f"hydra.sweeper.storage={storage}",
         f"hydra.sweeper.study_name={study_name}",
         "hydra/sweeper/sampler=tpe",
         "hydra.sweeper.sampler.seed=123",
         "~z",
     ]
     if with_commandline:
         cmd += [
             "x=choice(0, 1, 2)",
             "y=0",  # Fixed parameter
         ]
     run_python_script(cmd)
@@ -206,6 +249,7 @@ def test_example_with_grid_sampler(
         "hydra.sweep.dir=" + str(tmpdir),
         "hydra.job.chdir=False",
         f"hydra.sweeper.n_trials={num_trials}",
+        "hydra.sweeper.n_jobs=1",
         f"hydra.sweeper.storage={storage}",
         f"hydra.sweeper.study_name={study_name}",
     ]
@@ -233,6 +277,7 @@ def test_optuna_multi_objective_example(with_commandline: bool, tmpdir: Path) ->
         "hydra.sweep.dir=" + str(tmpdir),
         "hydra.job.chdir=True",
         "hydra.sweeper.n_trials=20",
+        "hydra.sweeper.n_jobs=1",
         "hydra/sweeper/sampler=random",
         "hydra.sweeper.sampler.seed=123",
     ]
@@ -275,6 +320,7 @@ def test_optuna_custom_search_space_example(tmpdir: Path) -> None:
         "hydra.sweep.dir=" + str(tmpdir),
         "hydra.job.chdir=True",
         "hydra.sweeper.n_trials=20",
+        "hydra.sweeper.n_jobs=1",
         "hydra/sweeper/sampler=random",
         "hydra.sweeper.sampler.seed=123",
         f"max_z_difference_from_x={max_z_difference_from_x}",
@@ -324,8 +370,8 @@ def test_warnings(
         storage=None,
         study_name="test",
         n_trials=1,
+        n_jobs=1,
         max_failure_rate=0.0,
-        experiment_sequence='hydra_plugins.hydra_optuna_sweeper._impl.OptunaExperimentSequence',
         custom_search_space=None,
     )
     if search_space is not None:
@@ -352,6 +398,7 @@ def test_failure_rate(max_failure_rate: float, tmpdir: Path) -> None:
         "hydra.sweep.dir=" + str(tmpdir),
         "hydra.job.chdir=True",
         "hydra.sweeper.n_trials=20",
+        "hydra.sweeper.n_jobs=2",
         "hydra/sweeper/sampler=random",
         "hydra.sweeper.sampler.seed=123",
         f"hydra.sweeper.max_failure_rate={max_failure_rate}",
@@ -377,6 +424,7 @@ def test_example_with_deprecated_search_space(
         "hydra.sweep.dir=" + str(tmpdir),
         "hydra.job.chdir=True",
         "hydra.sweeper.n_trials=20",
+        "hydra.sweeper.n_jobs=1",
     ]
     run_python_script(cmd)