How to use the botorch.exceptions.errors.UnsupportedError exception in botorch

To help you get started, we’ve selected a few botorch examples based on popular ways UnsupportedError is used in public projects.

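The examples below are drawn from the botorch source and test suite. Before diving in, here is a minimal, hypothetical sketch of the basic pattern (the helper function and its message are made up for illustration): raise UnsupportedError for inputs a component cannot handle, and catch it at the call site.

import torch
from botorch.exceptions.errors import UnsupportedError


def scale_candidates(X: torch.Tensor) -> torch.Tensor:
    # hypothetical helper: only a `b x q x d` candidate tensor is supported
    if X.dim() != 3:
        raise UnsupportedError("`X` must be a `b x q x d` tensor.")
    return X / X.abs().max()


try:
    scale_candidates(torch.rand(4, 2))  # 2-d input is rejected
except UnsupportedError as err:
    print(f"unsupported input: {err}")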

github pytorch / botorch / test / optim / test_parameter_constraints.py View on Github
        cs = make_scipy_linear_constraints(
            shapeX=shapeX,
            inequality_constraints=[(indices, coefficients, 1.0)],
            equality_constraints=[(indices, coefficients, 1.0)],
        )
        self.assertEqual(len(cs), 4)
        self.assertTrue({c["type"] for c in cs} == {"ineq", "eq"})
        cs = make_scipy_linear_constraints(
            shapeX=shapeX, inequality_constraints=[(indices, coefficients, 1.0)]
        )
        self.assertEqual(len(cs), 2)
        self.assertTrue(all(c["type"] == "ineq" for c in cs))
        cs = make_scipy_linear_constraints(
            shapeX=shapeX, equality_constraints=[(indices, coefficients, 1.0)]
        )
        self.assertEqual(len(cs), 2)
        self.assertTrue(all(c["type"] == "eq" for c in cs))

        # test that len(shapeX) < 3 raises an error
        with self.assertRaises(UnsupportedError):
            make_scipy_linear_constraints(
                shapeX=torch.Size([2, 1]),
                inequality_constraints=[(indices, coefficients, 1.0)],
                equality_constraints=[(indices, coefficients, 1.0)],
            )
        # test that 2-dim indices work properly
        indices = indices.unsqueeze(0)
        cs = make_scipy_linear_constraints(
            shapeX=shapeX,
            inequality_constraints=[(indices, coefficients, 1.0)],
            equality_constraints=[(indices, coefficients, 1.0)],
        )
        self.assertEqual(len(cs), 4)
        self.assertTrue({c["type"] for c in cs} == {"ineq", "eq"})
        # test that >2-dim indices raises an UnsupportedError
        indices = indices.unsqueeze(0)
github pytorch / botorch / test / sampling / test_sampler.py View on Github
    def test_unsupported_dimension(self):
        sampler = SobolQMCNormalSampler(num_samples=2)
        mean = torch.zeros(1112)
        cov = DiagLazyTensor(torch.ones(1112))
        mvn = MultivariateNormal(mean, cov)
        posterior = GPyTorchPosterior(mvn)
        with self.assertRaises(UnsupportedError) as e:
            sampler(posterior)
        self.assertIn("Requested: 1112", str(e.exception))
github pytorch / botorch / test / models / test_gp_regression_fidelity.py View on Github
    def test_init_error(self):
        train_X = torch.rand(2, 2, device=self.device)
        train_Y = torch.rand(2, 1)
        for lin_truncated in (True, False):
            with self.assertRaises(UnsupportedError):
                SingleTaskMultiFidelityGP(
                    train_X, train_Y, linear_truncated=lin_truncated
                )
github pytorch / botorch / test / optim / test_parameter_constraints.py View on Github
        # test that len(shapeX) < 3 raises an error
        with self.assertRaises(UnsupportedError):
            make_scipy_linear_constraints(
                shapeX=torch.Size([2, 1]),
                inequality_constraints=[(indices, coefficients, 1.0)],
                equality_constraints=[(indices, coefficients, 1.0)],
            )
        # test that 2-dim indices work properly
        indices = indices.unsqueeze(0)
        cs = make_scipy_linear_constraints(
            shapeX=shapeX,
            inequality_constraints=[(indices, coefficients, 1.0)],
            equality_constraints=[(indices, coefficients, 1.0)],
        )
        self.assertEqual(len(cs), 4)
        self.assertTrue({c["type"] for c in cs} == {"ineq", "eq"})
        # test that >2-dim indices raises an UnsupportedError
        indices = indices.unsqueeze(0)
        with self.assertRaises(UnsupportedError):
            make_scipy_linear_constraints(
                shapeX=shapeX,
                inequality_constraints=[(indices, coefficients, 1.0)],
                equality_constraints=[(indices, coefficients, 1.0)],
            )
        # test that out of bounds index raises an error
        indices = torch.tensor([0, 4], dtype=torch.long, device=self.device)
        with self.assertRaises(RuntimeError):
            make_scipy_linear_constraints(
                shapeX=shapeX,
                inequality_constraints=[(indices, coefficients, 1.0)],
                equality_constraints=[(indices, coefficients, 1.0)],
            )
        # test that two-d index out-of-bounds raises an error
        # q out of bounds
        indices = torch.tensor([[0, 0], [1, 0]], dtype=torch.long, device=self.device)
github pytorch / botorch / test / posteriors / test_gpytorch.py View on Github
            mean, covar = posterior.mvn.mean, posterior.mvn.covariance_matrix
            new_posterior = scalarize_posterior(posterior, weights, offset)
            exp_size = torch.Size(batch_shape + [1, 1])
            self.assertEqual(new_posterior.mean.shape, exp_size)
            new_mean_exp = offset + mean @ weights
            self.assertTrue(torch.allclose(new_posterior.mean[..., -1], new_mean_exp))
            self.assertEqual(new_posterior.variance.shape, exp_size)
            new_covar_exp = ((covar @ weights) @ weights).unsqueeze(-1)
            self.assertTrue(
                torch.allclose(new_posterior.variance[..., -1], new_covar_exp)
            )
            # test errors
            with self.assertRaises(RuntimeError):
                scalarize_posterior(posterior, weights[:-1], offset)
            posterior2 = _get_test_posterior(batch_shape, q=2, m=m, **tkwargs)
            with self.assertRaises(UnsupportedError):
                scalarize_posterior(posterior2, weights, offset)
github pytorch / botorch / botorch / optim / parameter_constraints.py View on Github
- "type": Indicates the type of the constraint ("eq" if `eq=True`, "ineq" o/w)
        - "fun": A callable evaluating the constraint value on `x`, a flattened
            version of the input tensor `X`, returning a scalar.
        - "jac": A callable evaluating the constraint's Jacobian on `x`, a flattened
            version of the input tensor `X`, returning a numpy array.
    """
    if len(shapeX) != 3:
        raise UnsupportedError("`shapeX` must be `b x q x d`")
    q, d = shapeX[-2:]
    n = shapeX.numel()
    constraints: List[ScipyConstraintDict] = []
    coeffs = _arrayify(coefficients)
    ctype = "eq" if eq else "ineq"
    if indices.dim() > 2:
        raise UnsupportedError(
            "Linear constraints supported only on individual candidates and "
            "across q-batches, not across general batch shapes."
        )
    elif indices.dim() == 2:
        # indices has two dimensions (potential constraints across q-batch elements)
        if indices[:, 0].max() > q - 1:
            raise RuntimeError(f"Index out of bounds for {q}-batch")
        if indices[:, 1].max() > d - 1:
            raise RuntimeError(f"Index out of bounds for {d}-dim parameter tensor")

        offsets = [shapeX[i:].numel() for i in range(1, len(shapeX))]
        # rule is [i, j, k] is at
        # i * offsets[0] + j * offsets[1] + k
        for i in range(shapeX[0]):
            idxr = []
            for a in indices:
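
The comment about offsets in the snippet above encodes how a multi-index maps into the flattened vector that scipy optimizes over. A small, self-contained sketch of that rule (the shape is made up for illustration):

import torch

# for a hypothetical shapeX = b x q x d = (2, 3, 4), offsets = [q * d, d] = [12, 4],
# so entry X[i, j, k] lands at position i * offsets[0] + j * offsets[1] + k of the
# flattened vector x
shapeX = torch.Size([2, 3, 4])
offsets = [shapeX[i:].numel() for i in range(1, len(shapeX))]
i, j, k = 1, 2, 3
flat_idx = i * offsets[0] + j * offsets[1] + k  # 1*12 + 2*4 + 3 = 23
assert torch.arange(shapeX.numel()).reshape(shapeX)[i, j, k].item() == flat_idx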
github pytorch / botorch / botorch / optim / parameter_constraints.py View on Github
        shapeX: The shape of the torch tensor to construct the constraints for
            (i.e. `b x q x d`). Must have three dimensions.
        eq: If True, return an equality constraint, o/w return an inequality
            constraint (indicated by "eq" / "ineq" value of the `type` key).

    Returns:
        A list of constraint dictionaries with the following keys

        - "type": Indicates the type of the constraint ("eq" if `eq=True`, "ineq" o/w)
        - "fun": A callable evaluating the constraint value on `x`, a flattened
            version of the input tensor `X`, returning a scalar.
        - "jac": A callable evaluating the constraint's Jacobian on `x`, a flattened
            version of the input tensor `X`, returning a numpy array.
    """
    if len(shapeX) != 3:
        raise UnsupportedError("`shapeX` must be `b x q x d`")
    q, d = shapeX[-2:]
    n = shapeX.numel()
    constraints: List[ScipyConstraintDict] = []
    coeffs = _arrayify(coefficients)
    ctype = "eq" if eq else "ineq"
    if indices.dim() > 2:
        raise UnsupportedError(
            "Linear constraints supported only on individual candidates and "
            "across q-batches, not across general batch shapes."
        )
    elif indices.dim() == 2:
        # indices has two dimensions (potential constraints across q-batch elements)
        if indices[:, 0].max() > q - 1:
            raise RuntimeError(f"Index out of bounds for {q}-batch")
        if indices[:, 1].max() > d - 1:
            raise RuntimeError(f"Index out of bounds for {d}-dim parameter tensor")
github pytorch / botorch / botorch / fit.py View on Github
    ):
        try:  # check if backwards-conversion is possible
            model_list = batched_to_model_list(mll.model)
            model_ = model_list_to_batched(model_list)
            mll_ = SumMarginalLogLikelihood(model_list.likelihood, model_list)
            fit_gpytorch_model(
                mll=mll_,
                optimizer=optimizer,
                sequential=True,
                max_retries=max_retries,
                **kwargs,
            )
            model_ = model_list_to_batched(mll_.model)
            mll.model.load_state_dict(model_.state_dict())
            return mll.eval()
        except (NotImplementedError, UnsupportedError, RuntimeError, AttributeError):
            warnings.warn(FAILED_CONVERSION_MSG, BotorchWarning)
            return fit_gpytorch_model(
                mll=mll, optimizer=optimizer, sequential=False, max_retries=max_retries
            )
    # retry with random samples from the priors upon failure
    mll.train()
    original_state_dict = deepcopy(mll.model.state_dict())
    retry = 0
    while retry < max_retries:
        with warnings.catch_warnings(record=True) as ws:
            if retry > 0:  # use normal initial conditions on first try
                mll.model.load_state_dict(original_state_dict)
                sample_all_priors(mll.model)
            mll, _ = optimizer(mll, track_iterations=False, **kwargs)
            if not any(issubclass(w.category, OptimizationWarning) for w in ws):
                mll.eval()
github pytorch / botorch / botorch / models / deterministic.py View on Github
    def posterior(
        self, X: Tensor, output_indices: Optional[List[int]] = None, **kwargs: Any
    ) -> DeterministicPosterior:
        r"""Compute the (deterministic) posterior at X."""
        if kwargs.get("observation_noise") is not None:
            # TODO: Consider returning an MVN here instead
            raise UnsupportedError(
                "Deterministic models do not support observation noise."
            )
        values = self.forward(X)
        if output_indices is not None:
            values = values[..., output_indices]
        return DeterministicPosterior(values=values)
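
From the caller's side, the check above surfaces as follows. A hedged sketch using GenericDeterministicModel from the same module (the wrapped function is made up for illustration):

import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.models.deterministic import GenericDeterministicModel

# a toy deterministic model wrapping an arbitrary function of the inputs
model = GenericDeterministicModel(lambda X: X.sum(dim=-1, keepdim=True))
X = torch.rand(5, 3)

posterior = model.posterior(X)  # fine: no observation noise requested
print(posterior.mean.shape)

try:
    model.posterior(X, observation_noise=True)
except UnsupportedError as err:
    print(err)  # deterministic models do not support observation noise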