Revert back to allowing inducing point method to be optional #486

Closed
wants to merge 10 commits into from
41 changes: 30 additions & 11 deletions aepsych/acquisition/lookahead.py
@@ -16,6 +16,7 @@
from botorch.models.gpytorch import GPyTorchModel
from botorch.utils.transforms import t_batch_mode_transform
from scipy.stats import norm
from torch import Tensor

from .lookahead_utils import (
approximate_lookahead_levelset_at_xstar,
@@ -245,6 +246,8 @@ def construct_inputs_local_lookahead(
class GlobalLookaheadAcquisitionFunction(LookaheadAcquisitionFunction):
def __init__(
self,
lb: Tensor,
ub: Tensor,
model: GPyTorchModel,
lookahead_type: Literal["levelset", "posterior"] = "levelset",
target: Optional[float] = None,
@@ -256,14 +259,16 @@ def __init__(
A global look-ahead acquisition function.

Args:
model (GPyTorchModel): The gpytorch model to use.
lb (Tensor): Lower bounds of the input space, used to generate the query set (Xq).
ub (Tensor): Upper bounds of the input space, used to generate the query set (Xq).
model (GPyTorchModel): The gpytorch model.
lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "levelset").
- If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set.
- If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not.
target (float, optional): Threshold value to target in p-space.
posterior_transform (PosteriorTransform, optional): Optional transformation to apply to the posterior.
query_set_size (int, optional): Number of points in the query set.
Xq (torch.Tensor, optional): (m x d) global reference set.
posterior_transform (PosteriorTransform, optional): Posterior transform to use. Defaults to None.
query_set_size (int, optional): Size of the query set. Defaults to 256.
Xq (Tensor, optional): (m x d) global reference set. Defaults to None.
"""
super().__init__(model=model, target=target, lookahead_type=lookahead_type)
self.posterior_transform = posterior_transform
@@ -282,7 +287,7 @@ def __init__(
assert int(query_set_size) == query_set_size # make sure casting is safe
# if the asserts above pass and Xq is None, query_set_size is not None so this is safe
query_set_size = int(query_set_size) # cast
Xq = make_scaled_sobol(model.lb, model.ub, query_set_size)
Xq = make_scaled_sobol(lb, ub, query_set_size)
self.register_buffer("Xq", Xq)

@t_batch_mode_transform(expected_q=1)
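
The substantive change in this constructor: the Sobol query set `Xq` is now built from caller-supplied `lb`/`ub` instead of `model.lb`/`model.ub`. A minimal sketch of that generation step (bounds and size are illustrative):

```python
import torch
from aepsych.utils import make_scaled_sobol

lb = torch.tensor([0.0, 0.0])   # caller-supplied lower bounds
ub = torch.tensor([1.0, 10.0])  # caller-supplied upper bounds

# Quasi-random query set spanning the box [lb, ub]; shape (256, 2).
Xq = make_scaled_sobol(lb, ub, 256)
```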
@@ -335,8 +340,10 @@ def _compute_acqf(
class ApproxGlobalSUR(GlobalSUR):
def __init__(
self,
lb: Tensor,
ub: Tensor,
model: GPyTorchModel,
lookahead_type="levelset",
lookahead_type: Literal["levelset", "posterior"] = "levelset",
target: Optional[float] = None,
query_set_size: Optional[int] = 256,
Xq: Optional[torch.Tensor] = None,
@@ -346,7 +353,9 @@ def __init__(
Args:

lb (Tensor): Lower bounds of the input space, used to generate the query set (Xq).
ub (Tensor): Upper bounds of the input space, used to generate the query set (Xq).
model (GPyTorchModel): The gpytorch model to use.
lookahed_type (str): The type of look-ahead to perform (default is "levelset").
lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "levelset").
- If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set.
- If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not.
target (float, optional): Threshold value to target in p-space.
query_set_size (int, optional): Number of points in the query set.
Xq (torch.Tensor, optional): (m x d) global reference set.
@@ -355,6 +364,8 @@
lookahead_type == "levelset"
), f"ApproxGlobalSUR only supports lookahead on level set, got {lookahead_type}!"
super().__init__(
lb=lb,
ub=ub,
model=model,
target=target,
lookahead_type=lookahead_type,
@@ -431,23 +442,29 @@ class SMOCU(GlobalLookaheadAcquisitionFunction):

def __init__(
self,
lb: Tensor,
ub: Tensor,
model: GPyTorchModel,
lookahead_type="posterior",
lookahead_type: Literal["levelset", "posterior"] = "posterior",
target: Optional[float] = None,
query_set_size: Optional[int] = 256,
Xq: Optional[torch.Tensor] = None,
k: Optional[float] = 20.0,
) -> None:
"""
Args:
lb (Tensor): Lower bounds of the input space, used to generate the query set (Xq).
ub (Tensor): Upper bounds of the input space, used to generate the query set (Xq).
model (GPyTorchModel): The gpytorch model to use.
lookahead_type (str): The type of look-ahead to perform (default is "posterior").
lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "posterior").
- If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set.
- If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not.
target (float, optional): Threshold value to target in p-space. Default is None.
query_set_size (int, optional): Number of points in the query set. Default is 256.
Xq (torch.Tensor, optional): (m x d) global reference set. Default is None.
k (float, optional): Scaling factor for the softmax approximation, controlling the "softness" of the maximum operation. Default is 20.0.
"""

super().__init__(
lb=lb,
ub=ub,
model=model,
target=target,
lookahead_type=lookahead_type,
@@ -530,7 +547,7 @@ def _compute_acqf(
def construct_inputs_global_lookahead(
model: GPyTorchModel,
training_data: None,
lookahead_type="levelset",
lookahead_type: Literal["levelset", "posterior"] = "levelset",
target: Optional[float] = None,
posterior_transform: Optional[PosteriorTransform] = None,
query_set_size: Optional[int] = 256,
@@ -543,7 +560,9 @@ def construct_inputs_global_lookahead(
Args:
model (GPyTorchModel): The gpytorch model to use.
training_data (None): Placeholder for compatibility; not used in this function.
lookahead_type (str): Type of look-ahead to perform. Default is "levelset".
lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "levelset").
- If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set.
- If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not.
target (float, optional): Target threshold value in probability space. Default is None.
posterior_transform (PosteriorTransform, optional): Optional transformation to apply to the posterior. Default is None.
query_set_size (int, optional): Number of points in the query set. Default is 256.
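Taken together, the lookahead acquisition functions in this file now share a bounds-first constructor. A hedged end-to-end sketch, assuming the post-PR signatures shown in this diff (the data, hyperparameters, and 0.75 target are illustrative):

```python
import torch
from aepsych.acquisition.lookahead import GlobalSUR
from aepsych.models import GPClassificationModel

lb, ub = torch.zeros(2), torch.ones(2)

# Toy binary data over the unit square.
x = torch.rand(30, 2)
y = (x.sum(dim=-1) > 1.0).float()

model = GPClassificationModel(dim=2, inducing_size=20)
model.fit(x, y)

# Bounds are passed explicitly; the acqf builds its Sobol query set from them.
acqf = GlobalSUR(lb=lb, ub=ub, model=model, target=0.75)
values = acqf(torch.rand(4, 1, 2))  # evaluate a t-batch of candidate points
```
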
1 change: 1 addition & 0 deletions aepsych/benchmark/benchmark.py
@@ -155,6 +155,7 @@ def run_experiment(
np.random.seed(seed)
config_dict["common"]["lb"] = str(problem.lb.tolist())
config_dict["common"]["ub"] = str(problem.ub.tolist())
config_dict["common"]["dim"] = str(problem.lb.shape[0])
config_dict["common"]["parnames"] = str(
[f"par{i}" for i in range(len(problem.ub.tolist()))]
)
12 changes: 6 additions & 6 deletions aepsych/benchmark/example_problems.py
@@ -1,6 +1,6 @@
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import os
from typing import List, Optional, Union
from typing import List, Union

import numpy as np
import torch
@@ -11,7 +11,7 @@
novel_discrimination_testfun,
)
from aepsych.models import GPClassificationModel
from aepsych.models.inducing_point_allocators import KMeansAllocator
from aepsych.models.inducing_points import KMeansAllocator

"""The DiscrimLowDim, DiscrimHighDim, ContrastSensitivity6d, and Hartmann6Binary classes
are copied from bernoulli_lse github repository (https://github.com/facebookresearch/bernoulli_lse)
@@ -104,13 +104,13 @@ def __init__(
)
y = torch.LongTensor(self.data[:, 0])
x = torch.Tensor(self.data[:, 1:])
inducing_size = 100

# Fit a model, with a large number of inducing points
self.m = GPClassificationModel(
lb=self.bounds[0],
ub=self.bounds[1],
inducing_size=100,
inducing_point_method=KMeansAllocator(bounds=self.bounds),
dim=6,
inducing_size=inducing_size,
inducing_point_method=KMeansAllocator(dim=6),
)

self.m.fit(
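For reference, a sketch of the construction pattern this diff moves to: the allocator is keyed on dimensionality rather than bounds, and the model no longer takes `lb`/`ub` (arguments mirror the diff above):

```python
from aepsych.models import GPClassificationModel
from aepsych.models.inducing_points import KMeansAllocator

# The allocator only needs the input dimension; bounds are no longer passed.
model = GPClassificationModel(
    dim=6,
    inducing_size=100,
    inducing_point_method=KMeansAllocator(dim=6),
)
```
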
12 changes: 11 additions & 1 deletion aepsych/generators/acqf_thompson_sampler_generator.py
@@ -41,13 +41,17 @@ class AcqfThompsonSamplerGenerator(AEPsychGenerator):

def __init__(
self,
lb: torch.Tensor,
ub: torch.Tensor,
acqf: AcquisitionFunction,
acqf_kwargs: Optional[Dict[str, Any]] = None,
samps: int = 1000,
stimuli_per_trial: int = 1,
) -> None:
"""Initialize OptimizeAcqfGenerator.
Args:
lb (torch.Tensor): Lower bounds for the optimization.
ub (torch.Tensor): Upper bounds for the optimization.
acqf (AcquisitionFunction): Acquisition function to use.
acqf_kwargs (Dict[str, object], optional): Extra arguments to
pass to acquisition function. Defaults to no arguments.
@@ -61,6 +65,8 @@ def __init__(
self.acqf_kwargs = acqf_kwargs
self.samps = samps
self.stimuli_per_trial = stimuli_per_trial
self.lb = lb
self.ub = ub

def _instantiate_acquisition_fn(self, model: ModelProtocol) -> AcquisitionFunction:
"""Instantiate the acquisition function with the model and any extra arguments.
@@ -124,7 +130,7 @@ def _gen(
starttime = time.time()

seed = gen_options.get("seed")
bounds = torch.tensor(np.c_[model.lb, model.ub]).T.cpu()
bounds = torch.tensor(np.c_[self.lb, self.ub]).T.cpu()
bounds_cpu = bounds.cpu()
effective_dim = bounds.shape[-1] * num_points
if effective_dim <= SobolEngine.MAXDIM:
@@ -160,12 +166,16 @@ def from_config(cls, config: Config) -> AcqfThompsonSamplerGenerator:
AcqfThompsonSamplerGenerator: The initialized generator.
"""
classname = cls.__name__
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
acqf = config.getobj(classname, "acqf", fallback=None)
extra_acqf_args = cls._get_acqf_options(acqf, config)
stimuli_per_trial = config.getint(classname, "stimuli_per_trial")
samps = config.getint(classname, "samps", fallback=1000)

return cls(
lb=lb,
ub=ub,
acqf=acqf,
acqf_kwargs=extra_acqf_args,
samps=samps,
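A hedged sketch of constructing the generator under the new signature; `MCLevelSetEstimation` and its kwargs are illustrative stand-ins for any supported acquisition function:

```python
import torch
from aepsych.acquisition import MCLevelSetEstimation
from aepsych.generators import AcqfThompsonSamplerGenerator

# Bounds now live on the generator and are used in _gen, rather than
# being read from model.lb / model.ub at generation time.
gen = AcqfThompsonSamplerGenerator(
    lb=torch.tensor([0.0]),
    ub=torch.tensor([1.0]),
    acqf=MCLevelSetEstimation,
    acqf_kwargs={"target": 0.75, "beta": 3.84},
    samps=1000,
)
```
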
18 changes: 15 additions & 3 deletions aepsych/generators/epsilon_greedy_generator.py
@@ -15,15 +15,25 @@


class EpsilonGreedyGenerator(AEPsychGenerator):
def __init__(self, subgenerator: AEPsychGenerator, epsilon: float = 0.1) -> None:
def __init__(
self,
lb: torch.Tensor,
ub: torch.Tensor,
subgenerator: AEPsychGenerator,
epsilon: float = 0.1,
) -> None:
"""Initialize EpsilonGreedyGenerator.

Args:
lb (torch.Tensor): Lower bounds for the optimization.
ub (torch.Tensor): Upper bounds for the optimization.
subgenerator (AEPsychGenerator): The generator to use when not exploiting.
epsilon (float): The probability of exploration. Defaults to 0.1.
"""
self.subgenerator = subgenerator
self.epsilon = epsilon
self.lb = lb
self.ub = ub

@classmethod
def from_config(cls, config: Config) -> "EpsilonGreedyGenerator":
@@ -36,12 +46,14 @@ def from_config(cls, config: Config) -> "EpsilonGreedyGenerator":
EpsilonGreedyGenerator: The generator.
"""
classname = cls.__name__
lb = torch.tensor(config.getlist(classname, "lb"))
ub = torch.tensor(config.getlist(classname, "ub"))
subgen_cls = config.getobj(
classname, "subgenerator", fallback=OptimizeAcqfGenerator
)
subgen = subgen_cls.from_config(config)
epsilon = config.getfloat(classname, "epsilon", fallback=0.1)
return cls(subgenerator=subgen, epsilon=epsilon)
return cls(lb=lb, ub=ub, subgenerator=subgen, epsilon=epsilon)

def gen(self, num_points: int, model: ModelProtocol) -> torch.Tensor:
"""Query next point(s) to run by sampling from the subgenerator with probability 1-epsilon, and randomly otherwise.
@@ -53,7 +65,7 @@ def gen(self, num_points: int, model: ModelProtocol) -> torch.Tensor:
if num_points > 1:
raise NotImplementedError("Epsilon-greedy batched gen is not implemented!")
if np.random.uniform() < self.epsilon:
sample = np.random.uniform(low=model.lb, high=model.ub)
sample = np.random.uniform(low=self.lb, high=self.ub)
return torch.tensor(sample).reshape(1, -1)
else:
return self.subgenerator.gen(num_points, model)
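
A minimal usage sketch, assuming `OptimizeAcqfGenerator` gains the same `lb`/`ub` arguments elsewhere in this PR (the acquisition function is an illustrative choice):

```python
import torch
from aepsych.acquisition import MCLevelSetEstimation
from aepsych.generators import EpsilonGreedyGenerator, OptimizeAcqfGenerator

lb, ub = torch.zeros(2), torch.ones(2)

gen = EpsilonGreedyGenerator(
    lb=lb,
    ub=ub,
    subgenerator=OptimizeAcqfGenerator(lb=lb, ub=ub, acqf=MCLevelSetEstimation),
    epsilon=0.1,
)
# With probability epsilon, gen.gen(1, model) returns a uniform draw from
# [lb, ub]; otherwise it delegates to the subgenerator.
```
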
15 changes: 13 additions & 2 deletions aepsych/generators/monotonic_rejection_generator.py
@@ -12,6 +12,7 @@
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerator
from aepsych.models.monotonic_rejection_gp import MonotonicRejectionGP
from aepsych.utils import _process_bounds
from botorch.acquisition import AcquisitionFunction
from botorch.logging import logger
from botorch.optim.initializers import gen_batch_initial_conditions
@@ -43,13 +44,17 @@ class MonotonicRejectionGenerator(AEPsychGenerator[MonotonicRejectionGP]):
def __init__(
self,
acqf: MonotonicMCAcquisition,
lb: torch.Tensor,
ub: torch.Tensor,
acqf_kwargs: Optional[Dict[str, Any]] = None,
model_gen_options: Optional[Dict[str, Any]] = None,
explore_features: Optional[Sequence[int]] = None,
) -> None:
"""Initialize MonotonicRejectionGenerator.
Args:
acqf (MonotonicMCAcquisition): Acquisition function to use.
acqf (AcquisitionFunction): Acquisition function to use.
lb (torch.Tensor): Lower bounds for the optimization.
ub (torch.Tensor): Upper bounds for the optimization.
acqf_kwargs (Dict[str, object], optional): Extra arguments to
pass to acquisition function. Defaults to None.
model_gen_options (Dict[str, Any], optional): Dictionary with options for generating candidate, such as
Expand All @@ -63,6 +68,8 @@ def __init__(
self.acqf_kwargs = acqf_kwargs
self.model_gen_options = model_gen_options
self.explore_features = explore_features
self.lb, self.ub, _ = _process_bounds(lb, ub, None)
self.bounds = torch.stack((self.lb, self.ub))

def _instantiate_acquisition_fn(
self, model: MonotonicRejectionGP
@@ -110,7 +117,7 @@ def gen(
)

# Augment bounds with deriv indicator
bounds = torch.cat((model.bounds_, torch.zeros(2, 1)), dim=1)
bounds = torch.cat((self.bounds, torch.zeros(2, 1)), dim=1)
# Fix deriv indicator to 0 during optimization
fixed_features = {(bounds.shape[1] - 1): 0.0}
# Fix explore features to random values
@@ -192,6 +199,8 @@ def from_config(cls, config: Config) -> "MonotonicRejectionGenerator":
classname = cls.__name__
acqf = config.getobj("common", "acqf", fallback=None)
extra_acqf_args = cls._get_acqf_options(acqf, config)
lb = torch.tensor(config.getlist(classname, "lb"))
ub = torch.tensor(config.getlist(classname, "ub"))

options = {}
options["num_restarts"] = config.getint(classname, "restarts", fallback=10)
@@ -217,6 +226,8 @@

return cls(
acqf=acqf,
lb=lb,
ub=ub,
acqf_kwargs=extra_acqf_args,
model_gen_options=options,
explore_features=explore_features,
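For context, `_process_bounds` (imported above) normalizes `lb`/`ub` into matching float tensors and infers the dimension; a small sketch of the bounds handling the constructor now performs:

```python
import torch
from aepsych.utils import _process_bounds

lb, ub, dim = _process_bounds(torch.tensor([0.0, 0.0]), torch.tensor([1.0, 2.0]), None)
bounds = torch.stack((lb, ub))  # (2, d); later augmented with a deriv-indicator column
print(bounds.shape)  # torch.Size([2, 2])
```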