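# These snippets are drawn from Emukit's loop tests and loop implementations.
# They assume roughly the imports below; exact module paths vary between
# Emukit releases, so treat this block as indicative rather than definitive.
# Names such as space, x_init, y_init, x_test, y_test, objective and
# user_function are defined by the surrounding test setup, not shown here.
from unittest import mock

import GPy
import numpy as np

from emukit.bayesian_optimization.acquisitions import (ExpectedImprovement, NegativeLowerConfidenceBound,
                                                       ProbabilityOfImprovement)
from emukit.core import ParameterSpace
from emukit.core.interfaces import IDifferentiable, IModel
from emukit.core.loop import (FixedIntervalUpdater, GreedyBatchPointCalculator, LoopState, OuterLoop,
                              SequentialPointCalculator, create_loop_state)
from emukit.core.optimization import (AcquisitionOptimizer, GradientAcquisitionOptimizer,
                                      MultiSourceAcquisitionOptimizer, RandomSearchAcquisitionOptimizer)
from emukit.experimental_design.acquisitions import ModelVariance
from emukit.model_wrappers import GPyModelWrapper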
# Create acquisition optimizer; the multi-source wrapper optimizes the acquisition over each information source
acquisition_optimizer = GradientAcquisitionOptimizer(space)
multi_source_acquisition_optimizer = MultiSourceAcquisitionOptimizer(acquisition_optimizer, space)
# Create GP model
gpy_model = GPy.models.GPRegression(x_init, y_init)
model = GPyModelWrapper(gpy_model)
# Create acquisition
acquisition = ModelVariance(model)
# Create batch candidate point calculator
batch_candidate_point_calculator = GreedyBatchPointCalculator(model, acquisition,
                                                              multi_source_acquisition_optimizer, batch_size=5)
initial_loop_state = LoopState(initial_results)
loop = OuterLoop(batch_candidate_point_calculator, FixedIntervalUpdater(model, 1), initial_loop_state)
loop.run_loop(objective, 10)
assert loop.loop_state.X.shape[0] == 60
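# Note on the count: batch_size=5 over 10 iterations adds 50 points, so the
# assert implies x_init held 10 points (10 + 5 * 10 = 60).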
y_init = user_function(x_init)
gpy_model = GPy.models.GPRegression(x_init, y_init)
model = GPyModelWrapper(gpy_model)
mse = []
def compute_mse(self, loop_state):
    mse.append(np.mean(np.square(model.predict(x_test)[0] - y_test)))
loop_state = create_loop_state(x_init, y_init)
acquisition = ModelVariance(model)
acquisition_optimizer = AcquisitionOptimizer(space)
candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
model_updater = FixedIntervalUpdater(model)
loop = OuterLoop(candidate_point_calculator, model_updater, loop_state)
loop.iteration_end_event.append(compute_mse)
loop.run_loop(user_function, 5)
assert len(mse) == 5
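# compute_mse is registered on iteration_end_event, which OuterLoop fires once
# per iteration with the loop itself as first argument (hence the `self`
# parameter), so five iterations leave five entries in mse.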
def test_every_iteration_model_updater():
    mock_model = mock.create_autospec(IModel)
    mock_model.optimize.return_value = None
    updater = FixedIntervalUpdater(mock_model, 1)

    loop_state_mock = mock.create_autospec(LoopState)
    loop_state_mock.iteration = 1
    loop_state_mock.X = np.random.rand(5, 1)
    loop_state_mock.Y = np.random.rand(5, 1)
    updater.update(loop_state_mock)
    mock_model.optimize.assert_called_once()
# Minimal IModel stub that records the data passed to set_data
class MockModel(IModel):
    def optimize(self):
        pass

    def set_data(self, X: np.ndarray, Y: np.ndarray):
        self._X = X
        self._Y = Y

    @property
    def X(self):
        return self._X

    @property
    def Y(self):
        return self._Y
mock_model = MockModel()
updater = FixedIntervalUpdater(mock_model, 1, lambda loop_state: loop_state.cost)
loop_state_mock = mock.create_autospec(LoopState)
loop_state_mock.iteration = 1
loop_state_mock.X = np.random.rand(5, 1)
cost = np.random.rand(5, 1)
loop_state_mock.cost = cost
updater.update(loop_state_mock)
assert np.array_equiv(mock_model.Y, cost)
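# The lambda passed to FixedIntervalUpdater extracts loop_state.cost as the
# training targets, so update() forwards the cost array to set_data as Y,
# which is what the assert verifies.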
def __init__(self, model: VanillaBayesianQuadrature, acquisition: Acquisition = None,
             model_updater: ModelUpdater = None):
    """
    The loop for vanilla Bayesian quadrature.

    :param model: The vanilla Bayesian quadrature model.
    :param acquisition: The acquisition function that is used to collect new points.
                        Defaults to IntegralVarianceReduction.
    :param model_updater: Defines how and when the quadrature model is updated if new data arrives.
                          Defaults to updating hyper-parameters every iteration.
    """
    if acquisition is None:
        acquisition = IntegralVarianceReduction(model)
    if model_updater is None:
        model_updater = FixedIntervalUpdater(model, 1)
    space = ParameterSpace(model.integral_bounds.convert_to_list_of_continuous_parameters())
    acquisition_optimizer = AcquisitionOptimizer(space)
    candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
    loop_state = create_loop_state(model.X, model.Y)
    super().__init__(candidate_point_calculator, model_updater, loop_state)
    self.model = model
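# A minimal usage sketch (not part of the original snippet): `vbq_model` is an
# assumed, already-constructed VanillaBayesianQuadrature model and
# `user_function` an assumed integrand.
loop = VanillaBayesianQuadratureLoop(model=vbq_model)  # defaults: IntegralVarianceReduction, update every iteration
loop.run_loop(user_function, 10)                       # collect 10 new integrand evaluations
integral_mean, integral_variance = loop.model.integrate()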
model_objective = FabolasModel(X_init=X_init, Y_init=Y_init, s_min=s_min, s_max=s_max)
model_cost = FabolasModel(X_init=X_init, Y_init=cost_init[:, None], s_min=s_min, s_max=s_max)
if marginalize_hypers:
    acquisition_generator = lambda model: ContinuousFidelityEntropySearch(
        model_objective, space=extended_space,
        target_fidelity_index=len(extended_space.parameters) - 1)
    entropy_search = IntegratedHyperParameterAcquisition(model_objective, acquisition_generator)
else:
    entropy_search = ContinuousFidelityEntropySearch(
        model_objective, space=extended_space,
        target_fidelity_index=len(extended_space.parameters) - 1)
acquisition = acquisition_per_expected_cost(entropy_search, model_cost)
model_updater_objective = FixedIntervalUpdater(model_objective, update_interval)
model_updater_cost = FixedIntervalUpdater(model_cost, update_interval, lambda state: state.cost)
acquisition_optimizer = RandomSearchAcquisitionOptimizer(extended_space, num_eval_points=num_eval_points)
candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
loop_state = create_loop_state(model_objective.X, model_objective.Y, model_cost.Y)
super(CostSensitiveBayesianOptimizationLoop, self).__init__(candidate_point_calculator,
                                                            [model_updater_objective, model_updater_cost],
                                                            loop_state)
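# The FABOLAS wiring above mirrors the cost-sensitive loop below: separate
# FabolasModel instances capture the objective and the evaluation cost, the
# entropy-search acquisition is divided by the expected cost via
# acquisition_per_expected_cost, and the cost model's updater trains on
# loop_state.cost.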
"""
Emukit class that implements a loop for modular Bayesian optimization.

:param space: Input space where the optimization is carried out.
:param model: The model that approximates the underlying function.
:param acquisition: The acquisition function that will be used to collect new points (default, EI). If batch
                    size is greater than one, this acquisition must output positive values only.
:param update_interval: Number of iterations between optimization of model hyper-parameters. Defaults to 1.
:param batch_size: How many points to evaluate in one iteration of the optimization loop. Defaults to 1.
"""
self.model = model

if acquisition is None:
    acquisition = ExpectedImprovement(model)

model_updaters = FixedIntervalUpdater(model, update_interval)

acquisition_optimizer = AcquisitionOptimizer(space)
if batch_size == 1:
    candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
else:
    if not isinstance(model, IDifferentiable):
        raise ValueError('Model must implement ' + str(IDifferentiable) +
                         ' for use with Local Penalization batch method.')
    log_acquisition = LogAcquisition(acquisition)
    candidate_point_calculator = LocalPenalizationPointCalculator(log_acquisition, acquisition_optimizer,
                                                                  model, space, batch_size)
loop_state = create_loop_state(model.X, model.Y)
super().__init__(candidate_point_calculator, model_updaters, loop_state)
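# A minimal sketch of driving this loop (not part of the original snippet);
# `space`, `model` and `f` are assumed to be set up as in the earlier snippets.
bo_loop = BayesianOptimizationLoop(space=space, model=model)   # defaults: ExpectedImprovement, batch_size=1
bo_loop.run_loop(f, 10)                                        # run 10 iterations
print(bo_loop.loop_state.X.shape, bo_loop.loop_state.Y.shape)  # all evaluations collected so far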
"""
:param model_objective: The model that approximates the underlying objective function.
:param model_cost: The model that approximates the cost of evaluating the objective function.
:param acquisition: The acquisition function that will be used to collect new points (default, EI).
:param update_interval: Number of iterations between optimization of model hyper-parameters. Defaults to 1.
"""
if not np.all(np.isclose(model_objective.X, model_cost.X)):
    raise ValueError('Emukit currently only supports identical '
                     'training inputs for the cost and objective model')

if acquisition is None:
    expected_improvement = ExpectedImprovement(model_objective)
    acquisition = acquisition_per_expected_cost(expected_improvement, model_cost)

model_updater_objective = FixedIntervalUpdater(model_objective, update_interval)
model_updater_cost = FixedIntervalUpdater(model_cost, update_interval, lambda state: state.cost)

acquisition_optimizer = AcquisitionOptimizer(space)
candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
loop_state = create_loop_state(model_objective.X, model_objective.Y, model_cost.Y)

super(CostSensitiveBayesianOptimizationLoop, self).__init__(candidate_point_calculator,
                                                            [model_updater_objective, model_updater_cost],
                                                            loop_state)
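# acquisition_per_expected_cost wraps expected improvement so candidates are
# scored by improvement per unit of predicted evaluation cost, trading off
# informativeness against how expensive a point is to evaluate.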
raise ValueError('Unrecognised model type: ' + str(model_type))

# Create acquisition
if acquisition_type is AcquisitionType.EI:
    acquisition = ExpectedImprovement(model)
elif acquisition_type is AcquisitionType.PI:
    acquisition = ProbabilityOfImprovement(model)
elif acquisition_type is AcquisitionType.NLCB:
    acquisition = NegativeLowerConfidenceBound(model)
else:
    raise ValueError('Unrecognised acquisition type: ' + str(acquisition_type))

acquisition_optimizer = AcquisitionOptimizer(parameter_space)
candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
loop_state = create_loop_state(x_init, y_init)
model_updater = FixedIntervalUpdater(model, 1)
return OuterLoop(candidate_point_calculator, model_updater, loop_state)
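# The factory returns a fully wired OuterLoop; the caller only needs
# loop.run_loop(user_function, n_iterations) with their own function and budget.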
if acquisition is None:
    acquisition = ModelVariance(model)

# This AcquisitionOptimizer object deals with optimizing the acquisition to find the next point to collect
acquisition_optimizer = AcquisitionOptimizer(space)

# Construct emukit classes
if batch_size == 1:
    candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
elif batch_size > 1:
    candidate_point_calculator = GreedyBatchPointCalculator(model, acquisition, acquisition_optimizer, batch_size)
else:
    raise ValueError('Batch size value of ' + str(batch_size) + ' is invalid.')

model_updater = FixedIntervalUpdater(model, update_interval)
loop_state = create_loop_state(model.X, model.Y)
super().__init__(candidate_point_calculator, model_updater, loop_state)
self.model = model
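# Finally, a minimal sketch of using the experimental design loop assembled
# above; `space`, `model` and `user_function` are assumed placeholders, and the
# import path below is from recent Emukit releases (older ones used
# emukit.experimental_design.model_based).
from emukit.experimental_design import ExperimentalDesignLoop

ed_loop = ExperimentalDesignLoop(space=space, model=model)  # defaults: ModelVariance, batch_size=1
ed_loop.run_loop(user_function, 10)                         # query 10 new design points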