From d718f8b1161904050de914b0b209743627e23362 Mon Sep 17 00:00:00 2001
From: Kaiwen Wu
Date: Tue, 3 Sep 2024 13:29:44 -0700
Subject: [PATCH] move models and data to GPUs automatically depending on the
 dataset size (#374)

Summary:
Pull Request resolved: https://github.com/facebookresearch/aepsych/pull/374

Differential Revision: D62091553
---
 aepsych/strategy.py | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/aepsych/strategy.py b/aepsych/strategy.py
index 4842154de..55b57009b 100644
--- a/aepsych/strategy.py
+++ b/aepsych/strategy.py
@@ -19,6 +19,7 @@
 from aepsych.generators.base import AEPsychGenerator
 from aepsych.generators.sobol_generator import SobolGenerator
 from aepsych.models.base import ModelProtocol
+from aepsych.models import GPClassificationModel
 from aepsych.utils import (
     _process_bounds,
     make_scaled_sobol,
@@ -305,6 +306,22 @@ def add_data(self, x, y):
         self.x, self.y, self.n = self.normalize_inputs(x, y)
         self._model_is_fresh = False
 
+        if self.x.size(0) >= 100:
+            # TODO: Support more models beyond GPClassificationModel
+            if (
+                isinstance(self.model, GPClassificationModel) and
+                self.model.variational_strategy.inducing_points.size(0) >= 100
+            ):
+                # move the model and data to the GPU if the number of training points is at least 100 and
+                # the number of inducing points is at least 100
+                device = "cuda" if torch.cuda.is_available() else "cpu"
+                self.model.to(device)
+                self.model.lb = self.model.lb.to(device)
+                self.model.ub = self.model.ub.to(device)
+
+                self.x = self.x.to(device)
+                self.y = self.y.to(device)
+
     def fit(self):
         if self.can_fit:
             if self.keep_most_recent is not None:
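
Note: the snippet below is a minimal standalone sketch of the behavior this patch
adds, not part of the patch itself. DummyStrategy, its fields, and the synthetic
data are hypothetical stand-ins for the real aepsych.strategy.Strategy; only the
">= 100 training points" threshold and the cuda/cpu fallback mirror the patched
add_data.

    import torch

    class DummyStrategy:
        """Hypothetical stand-in for aepsych.strategy.Strategy."""

        def __init__(self):
            self.x = torch.empty(0, 2)
            self.y = torch.empty(0)

        def add_data(self, x, y):
            # Accumulate training data, as Strategy.add_data does.
            self.x = torch.cat([self.x, x])
            self.y = torch.cat([self.y, y])
            # Same rule as the patch: once at least 100 training points have
            # been collected, prefer the GPU when one is available.
            if self.x.size(0) >= 100:
                device = "cuda" if torch.cuda.is_available() else "cpu"
                self.x = self.x.to(device)
                self.y = self.y.to(device)

    strat = DummyStrategy()
    strat.add_data(torch.rand(120, 2), torch.randint(0, 2, (120,)).float())
    print(strat.x.device)  # cuda:0 if a GPU is available, otherwise cpu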