# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_multi_source_sequential_with_source_context():
    """Fixing the information-source parameter via context pins it in the result.

    The context {'source': 1.0} should constrain the InformationSourceParameter
    (third parameter of the space, i.e. column 2 of the returned points).
    """
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace([ContinuousParameter('x', 0, 1),
                            ContinuousParameter('y', 0, 1),
                            InformationSourceParameter(2)])
    acquisition_optimizer = AcquisitionOptimizer(space)
    multi_source_acquisition_optimizer = MultiSourceAcquisitionOptimizer(acquisition_optimizer, space)
    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, multi_source_acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={'source': 1.0})
    # "SequentialPointCalculator" should only ever return 1 value
    assert(len(next_points) == 1)
    # Context value should be what we set. The source parameter is the third
    # entry of the space, so it occupies column 2 (column 1 is 'y', which the
    # optimizer is free to move).
    assert np.isclose(next_points[0, 2], 1.)
def test_multi_source_sequential_with_context():
    """Fixing a non-source continuous parameter via context pins its value."""
    acquisition_stub = mock.create_autospec(Acquisition)
    acquisition_stub.has_gradients = False
    acquisition_stub.evaluate = lambda inputs: np.sum(inputs**2, axis=1)[:, None]

    parameter_space = ParameterSpace([ContinuousParameter('x', 0, 1),
                                      ContinuousParameter('y', 0, 1),
                                      InformationSourceParameter(2)])
    inner_optimizer = AcquisitionOptimizer(parameter_space)
    source_optimizer = MultiSourceAcquisitionOptimizer(inner_optimizer, parameter_space)
    state_stub = mock.create_autospec(LoopState)

    calculator = SequentialPointCalculator(acquisition_stub, source_optimizer)
    points = calculator.compute_next_points(state_stub, context={'x': 0.25})

    # A sequential point calculator always yields exactly one candidate.
    assert len(points) == 1
    # The 'x' column (index 0) must carry the value fixed by the context.
    assert np.isclose(points[0, 0], 0.25)
def test_local_penalization():
    """Local penalization should return one row per requested batch point."""
    space = ParameterSpace([ContinuousParameter('x', 0, 1)])
    optimizer = AcquisitionOptimizer(space)

    x_start = np.random.rand(5, 1)
    y_start = np.random.rand(5, 1)
    wrapped_model = GPyModelWrapper(GPy.models.GPRegression(x_start, y_start))
    improvement = ExpectedImprovement(wrapped_model)

    n_batch = 5
    calculator = LocalPenalizationPointCalculator(
        improvement, optimizer, wrapped_model, space, n_batch)
    state = create_loop_state(x_start, y_start)

    batch = calculator.compute_next_points(state)
    assert batch.shape == (n_batch, 1)
def multi_source_optimizer():
    """Build a MultiSourceAcquisitionOptimizer around a stubbed inner optimizer."""
    inner_stub = mock.create_autospec(AcquisitionOptimizer)
    inner_stub.optimize.return_value = (np.array([[0.]]), None)
    domain = ParameterSpace([ContinuousParameter('x', 0, 1),
                             InformationSourceParameter(2)])
    return MultiSourceAcquisitionOptimizer(inner_stub, domain)
# NOTE(review): fragment of a larger test — the enclosing `def` header is
# outside this chunk and the original indentation has been lost; names such
# as `user_function`, `x_test`, `y_test` and `space` are defined elsewhere.
x_init = np.linspace(0, 1, 5)[:, None]
y_init = user_function(x_init)
# Fit a GP to the initial design and wrap it for emukit.
gpy_model = GPy.models.GPRegression(x_init, y_init)
model = GPyModelWrapper(gpy_model)
mse = []
def compute_mse(self, loop_state):
# Record held-out MSE; registered below on iteration_end_event so it runs
# once per loop iteration.
mse.append(np.mean(np.square(model.predict(x_test)[0] - y_test)))
loop_state = create_loop_state(x_init, y_init)
# Model-variance acquisition drives pure exploration of the input space.
acquisition = ModelVariance(model)
acquisition_optimizer = AcquisitionOptimizer(space)
candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
model_updater = FixedIntervalUpdater(model)
loop = OuterLoop(candidate_point_calculator, model_updater, loop_state)
loop.iteration_end_event.append(compute_mse)
loop.run_loop(user_function, 5)
# One MSE entry should have been recorded per iteration.
assert len(mse) == 5
def test_batch_point_calculator(mock_model):
    """Greedy batch calculation returns as many points as the batch size."""
    acquisition_stub = mock.create_autospec(Acquisition)
    optimizer_stub = mock.create_autospec(AcquisitionOptimizer)
    optimizer_stub.optimize.return_value = (np.zeros((1, 1)), 0)

    n_points = 10
    calculator = GreedyBatchPointCalculator(
        mock_model, acquisition_stub, optimizer_stub, n_points)
    state = create_loop_state(np.zeros((1, 1)), np.zeros((1, 1)))

    batch = calculator.compute_next_points(state)
    assert batch.shape[0] == n_points
:param acquisition: The acquisition function that will be used to collect new points (default, EI).
:param update_interval: Number of iterations between optimization of model hyper-parameters. Defaults to 1.
"""
# NOTE(review): fragment of CostSensitiveBayesianOptimizationLoop.__init__ —
# the `def` header and the start of its docstring are outside this chunk,
# and the original indentation has been lost.
# Cost and objective models must be trained on identical inputs.
if not np.all(np.isclose(model_objective.X, model_cost.X)):
raise ValueError('Emukit currently only supports identical '
'training inputs for the cost and objective model')
# Default acquisition: expected improvement scaled per expected cost.
if acquisition is None:
expected_improvement = ExpectedImprovement(model_objective)
acquisition = acquisition_per_expected_cost(expected_improvement, model_cost)
# One updater per model; the cost model is fed the observed evaluation cost.
model_updater_objective = FixedIntervalUpdater(model_objective, update_interval)
model_updater_cost = FixedIntervalUpdater(model_cost, update_interval, lambda state: state.cost)
acquisition_optimizer = AcquisitionOptimizer(space)
candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
# Initial loop state includes the cost observations alongside X and Y.
loop_state = create_loop_state(model_objective.X, model_objective.Y, model_cost.Y)
super(CostSensitiveBayesianOptimizationLoop, self).__init__(candidate_point_calculator,
[model_updater_objective, model_updater_cost],
loop_state)
:param model: the vanilla Bayesian quadrature method
:param acquisition: The acquisition function that is be used to collect new points.
default, IntegralVarianceReduction
:param model_updater: Defines how and when the quadrature model is updated if new data arrives.
Defaults to updating hyper-parameters every iteration.
"""
# NOTE(review): fragment of a Bayesian-quadrature loop __init__ — the `def`
# header and the start of its docstring are outside this chunk, and the
# original indentation has been lost.
if acquisition is None:
acquisition = IntegralVarianceReduction(model)
if model_updater is None:
model_updater = FixedIntervalUpdater(model, 1)
# The optimization domain is derived from the model's integration bounds.
space = ParameterSpace(model.integral_bounds.convert_to_list_of_continuous_parameters())
acquisition_optimizer = AcquisitionOptimizer(space)
candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
loop_state = create_loop_state(model.X, model.Y)
super().__init__(candidate_point_calculator, model_updater, loop_state)
# Keep a handle to the model for later inspection/prediction by callers.
self.model = model
batch_size: int=1):
"""
An outer loop class for use with Experimental design
:param space: Definition of domain bounds to collect points within
:param model: The model that approximates the underlying function
:param acquisition: experimental design acquisition function object. Default: ModelVariance acquisition
:param update_interval: How many iterations pass before next model optimization
:param batch_size: Number of points to collect in a batch. Defaults to one.
"""
# NOTE(review): fragment of an experimental-design loop __init__ — the
# `def` header (all parameters before `batch_size`) is outside this chunk,
# and the original indentation has been lost.
if acquisition is None:
acquisition = ModelVariance(model)
# This AcquisitionOptimizer object deals with optimizing the acquisition to find the next point to collect
acquisition_optimizer = AcquisitionOptimizer(space)
# Construct emukit classes
# Single-point collection uses the sequential calculator; larger batches
# use greedy batch selection; anything else (<= 0) is rejected.
if batch_size == 1:
candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
elif batch_size > 1:
candidate_point_calculator = \
GreedyBatchPointCalculator(model, acquisition, acquisition_optimizer, batch_size)
else:
raise ValueError('Batch size value of ' + str(batch_size) + ' is invalid.')
model_updater = FixedIntervalUpdater(model, update_interval)
loop_state = create_loop_state(model.X, model.Y)
super().__init__(candidate_point_calculator, model_updater, loop_state)
# Keep a handle to the model for later inspection/prediction by callers.
self.model = model
# NOTE(review): fragment of a Bayesian-optimization loop factory — the
# enclosing `def` and earlier model-type branches are outside this chunk,
# and the original indentation has been lost.
elif model_type is ModelType.BayesianNeuralNetwork:
model = Bohamiann(x_init, y_init, **model_kwargs)
else:
raise ValueError('Unrecognised model type: ' + str(model_type))
# Create acquisition
# Map the requested acquisition type onto the corresponding emukit class;
# reject anything outside the supported enum values.
if acquisition_type is AcquisitionType.EI:
acquisition = ExpectedImprovement(model)
elif acquisition_type is AcquisitionType.PI:
acquisition = ProbabilityOfImprovement(model)
elif acquisition_type is AcquisitionType.NLCB:
acquisition = NegativeLowerConfidenceBound(model)
else:
raise ValueError('Unrecognised acquisition type: ' + str(acquisition_type))
acquisition_optimizer = AcquisitionOptimizer(parameter_space)
candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
loop_state = create_loop_state(x_init, y_init)
# Re-optimize model hyper-parameters every iteration.
model_updater = FixedIntervalUpdater(model, 1)
return OuterLoop(candidate_point_calculator, model_updater, loop_state)