Monotonic rejection model and generator
Summary: Add GPU support to the monotonic rejection model. Because the model is tied to its generator, the generators are made GPU-ready as well.

Differential Revision: D65638150
Commit 940cf31 (1 parent: d096c6a)
Showing 7 changed files with 270 additions and 21 deletions.
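For orientation, the GPU opt-in appears in two places in the tests below: programmatically, a MonotonicRejectionGP is moved to CUDA with .cuda() and wrapped in a Strategy with use_gpu_modeling=True; via config, the generator and model sections each take a use_gpu flag. A minimal sketch follows (option and argument names taken from the tests further down, not from separate documentation; assumes a CUDA-capable PyTorch build and an available GPU):

# Illustrative sketch only; the complete, working versions are in the tests below.
# Assumes a CUDA-capable PyTorch build and an available GPU.

# (1) Programmatic path: move the model to GPU and ask the Strategy to model on GPU.
#     model = MonotonicRejectionGP(...).cuda()
#     strat = Strategy(..., model=model, use_gpu_modeling=True)

# (2) Config-driven path: both the generator and the model accept a use_gpu flag.
gpu_opt_in = """
[MonotonicRejectionGenerator]
use_gpu = True

[MonotonicRejectionGP]
use_gpu = True
"""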
@@ -0,0 +1,6 @@
#!/usr/bin/env python3
# Copyright (c) Meta, Inc. and its affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
@@ -0,0 +1,51 @@
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
from aepsych.acquisition.monotonic_rejection import MonotonicMCLSE
from aepsych.acquisition.objective import ProbitObjective
from aepsych.models.derivative_gp import MixedDerivativeVariationalGP
from botorch.acquisition.objective import IdentityMCObjective
from botorch.utils.testing import BotorchTestCase


class TestMonotonicAcq(BotorchTestCase):
    def test_monotonic_acq_gpu(self):
        # Init
        train_X_aug = torch.tensor(
            [[0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 2.0, 0.0]]
        ).cuda()
        deriv_constraint_points = torch.tensor(
            [[0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [2.0, 2.0, 1.0]]
        ).cuda()
        train_Y = torch.tensor([[1.0], [2.0], [3.0]]).cuda()

        m = MixedDerivativeVariationalGP(
            train_x=train_X_aug, train_y=train_Y, inducing_points=train_X_aug
        ).cuda()
        acq = MonotonicMCLSE(
            model=m,
            deriv_constraint_points=deriv_constraint_points,
            num_samples=5,
            num_rejection_samples=8,
            target=1.9,
        )
        self.assertTrue(isinstance(acq.objective, IdentityMCObjective))
        acq = MonotonicMCLSE(
            model=m,
            deriv_constraint_points=deriv_constraint_points,
            num_samples=5,
            num_rejection_samples=8,
            target=1.9,
            objective=ProbitObjective(),
        ).cuda()
        # forward
        acq(train_X_aug)
        Xfull = torch.cat((train_X_aug, acq.deriv_constraint_points), dim=0)
        posterior = m.posterior(Xfull)
        samples = acq.sampler(posterior)
        self.assertEqual(samples.shape, torch.Size([5, 6, 1]))
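These GPU tests call .cuda() unconditionally and so assume a CUDA device. Not part of this commit, but one conventional way to skip them gracefully on CPU-only machines, using only stock unittest and torch APIs, is a guard along these lines:

import unittest

import torch

# Hypothetical helper (not in the commit): skip CUDA-only tests when no GPU is present.
requires_cuda = unittest.skipUnless(
    torch.cuda.is_available(), "CUDA device required for GPU tests"
)

# Usage sketch: decorate the GPU test classes or methods.
# @requires_cuda
# class TestMonotonicAcq(BotorchTestCase): ...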
@@ -0,0 +1,188 @@
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import os

import torch

# run on single threads to keep us from deadlocking weirdly in CI
if "CI" in os.environ or "SANDCASTLE" in os.environ:
    torch.set_num_threads(1)

import numpy as np
from aepsych import Config
from aepsych.acquisition.monotonic_rejection import MonotonicMCLSE
from aepsych.acquisition.objective import ProbitObjective
from aepsych.generators import MonotonicRejectionGenerator
from aepsych.models import MonotonicRejectionGP
from aepsych.strategy import SequentialStrategy, Strategy
from botorch.acquisition.objective import IdentityMCObjective
from botorch.utils.testing import BotorchTestCase
from gpytorch.likelihoods import BernoulliLikelihood, GaussianLikelihood
from scipy.stats import norm


class MonotonicRejectionGPLSETest(BotorchTestCase):
    def test_regression_gpu(self):
        # Init
        target = 1.5
        model_gen_options = {"num_restarts": 1, "raw_samples": 3, "epochs": 5}
        lb = torch.tensor([0, 0])
        ub = torch.tensor([4, 4])
        m = MonotonicRejectionGP(
            lb=lb,
            ub=ub,
            likelihood=GaussianLikelihood(),
            fixed_prior_mean=target,
            monotonic_idxs=[1],
            num_induc=2,
            num_samples=3,
            num_rejection_samples=4,
        ).cuda()
        strat = Strategy(
            lb=lb,
            ub=ub,
            model=m,
            generator=MonotonicRejectionGenerator(
                MonotonicMCLSE,
                acqf_kwargs={"target": target},
                model_gen_options=model_gen_options,
            ),
            min_asks=1,
            stimuli_per_trial=1,
            outcome_types=["binary"],
            use_gpu_modeling=True,
        )
        # Fit
        train_x = torch.tensor([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
        train_y = torch.tensor([[1.0], [2.0], [3.0]])
        m.fit(train_x=train_x, train_y=train_y)
        self.assertEqual(m.inducing_points.shape, torch.Size([2, 2]))
        self.assertEqual(m.mean_module.constant.item(), 1.5)
        # Predict
        f, var = m.predict(train_x)
        self.assertEqual(f.shape, torch.Size([3]))
        self.assertEqual(var.shape, torch.Size([3]))
        # Gen
        strat.add_data(train_x, train_y)
        Xopt = strat.gen()
        self.assertEqual(Xopt.shape, torch.Size([1, 2]))
        # Acquisition function
        acq = strat.generator._instantiate_acquisition_fn(m)
        self.assertEqual(acq.deriv_constraint_points.shape, torch.Size([2, 3]))
        self.assertTrue(
            torch.equal(acq.deriv_constraint_points[:, -1].cpu(), 2 * torch.ones(2))
        )
        self.assertEqual(acq.target, 1.5)
        self.assertTrue(isinstance(acq.objective, IdentityMCObjective))

    def test_classification_gpu(self):
        # Init
        target = 0.75
        model_gen_options = {"num_restarts": 1, "raw_samples": 3, "epochs": 5}
        lb = torch.tensor([0, 0])
        ub = torch.tensor([4, 4])
        m = MonotonicRejectionGP(
            lb=lb,
            ub=ub,
            likelihood=BernoulliLikelihood(),
            fixed_prior_mean=target,
            monotonic_idxs=[1],
            num_induc=2,
            num_samples=3,
            num_rejection_samples=4,
        ).cuda()
        strat = Strategy(
            lb=lb,
            ub=ub,
            model=m,
            generator=MonotonicRejectionGenerator(
                MonotonicMCLSE,
                acqf_kwargs={"target": target, "objective": ProbitObjective()},
                model_gen_options=model_gen_options,
            ),
            min_asks=1,
            stimuli_per_trial=1,
            outcome_types=["binary"],
            use_gpu_modeling=True,
        )
        # Fit
        train_x = torch.tensor([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
        train_y = torch.tensor([1.0, 1.0, 0.0])
        m.fit(train_x=train_x, train_y=train_y)
        self.assertEqual(m.inducing_points.shape, torch.Size([2, 2]))
        self.assertAlmostEqual(m.mean_module.constant.item(), norm.ppf(0.75))
        # Predict
        f, var = m.predict(train_x)
        self.assertEqual(f.shape, torch.Size([3]))
        self.assertEqual(var.shape, torch.Size([3]))
        # Gen
        strat.add_data(train_x, train_y)
        Xopt = strat.gen()
        self.assertEqual(Xopt.shape, torch.Size([1, 2]))
        # Acquisition function
        acq = strat.generator._instantiate_acquisition_fn(m)
        self.assertEqual(acq.deriv_constraint_points.shape, torch.Size([2, 3]))
        self.assertTrue(
            torch.equal(acq.deriv_constraint_points[:, -1].cpu(), 2 * torch.ones(2))
        )
        self.assertEqual(acq.target, 0.75)
        self.assertTrue(isinstance(acq.objective, ProbitObjective))
        # Update
        m.update(train_x=train_x[:2, :2], train_y=train_y[:2], warmstart=True)
        self.assertEqual(m.train_inputs[0].shape, torch.Size([2, 3]))

    def test_classification_from_config_gpu(self):
        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)

        n_init = 15
        n_opt = 1

        config_str = f"""
        [common]
        parnames = [par1]
        outcome_types = [binary]
        stimuli_per_trial = 1
        strategy_names = [init_strat, opt_strat]

        [par1]
        par_type = continuous
        lower_bound = 0
        upper_bound = 1

        [init_strat]
        generator = SobolGenerator
        min_asks = {n_init}

        [opt_strat]
        generator = MonotonicRejectionGenerator
        model = MonotonicRejectionGP
        acqf = MonotonicMCLSE
        min_asks = {n_opt}

        [MonotonicRejectionGenerator]
        use_gpu = True

        [MonotonicRejectionGP]
        num_induc = 2
        num_samples = 3
        num_rejection_samples = 4
        monotonic_idxs = [0]
        use_gpu = True

        [MonotonicMCLSE]
        target = 0.75
        objective = ProbitObjective
        """
        config = Config(config_str=config_str)
        strat = SequentialStrategy.from_config(config)

        for _i in range(n_init + n_opt):
            next_x = strat.gen()
            strat.add_data(next_x, int(np.random.rand() > next_x))