How to use the iminuit.Minuit.from_array_func function in iminuit

To help you get started, we’ve selected a few iminuit examples based on popular ways it is used in public projects.

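Minuit.from_array_func builds a Minuit minimizer from a cost function that takes a single array of parameters, which is convenient when the number of parameters is not fixed in advance. It belongs to the iminuit 1.x API (it was removed in iminuit 2.0, where Minuit accepts array-valued functions directly). The sketch below is a minimal straight-line chi-square fit; the toy data, parameter names, and starting values are made up for illustration.

import numpy as np
import iminuit

# toy data for a straight-line fit (values chosen only for illustration)
x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([2.1, 3.9, 6.2, 8.1])
yerr = 0.2

def chi2(par):
    # par is a single NumPy array: par[0] is the intercept, par[1] the slope
    model = par[0] + par[1] * x
    return np.sum(((y - model) / yerr) ** 2)

m = iminuit.Minuit.from_array_func(
    chi2,
    start=(0.0, 1.0),              # initial parameter values
    error=(0.1, 0.1),              # initial step sizes
    limit=(None, (0.0, None)),     # optional (lower, upper) limits per parameter
    name=("intercept", "slope"),   # optional parameter names
    errordef=1.0,                  # 1.0 for least squares, 0.5 for a negative log-likelihood
)
m.migrad()  # run the minimization

Extra keyword arguments such as errordef, print_level, and grad are forwarded to the underlying Minuit constructor; this is how the examples below pass an analytic gradient and their verbosity settings.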

github zfit / zfit / zfit/minimizers/minimizer_minuit.py (view on GitHub)
                # a NaN loss was encountered: record diagnostics and hand off to the zfit NaN strategy
                nan_counter += 1
                info_values = {}
                info_values['loss'] = loss_value
                info_values['old_loss'] = current_loss
                info_values['nan_counter'] = nan_counter
                # but loss value not needed here
                _ = self.strategy.minimize_nan(loss=loss, params=params, minimizer=minimizer,
                                               values=info_values)
            else:
                nan_counter = 0
                current_loss = loss_value
            return gradients_values

        grad_func = grad_func if self._use_tfgrad else None

        minimizer = iminuit.Minuit.from_array_func(fcn=func, start=start_values,
                                                   error=errors, limit=limits, name=params_name,
                                                   grad=grad_func,
                                                   # use_array_call=True,
                                                   print_level=minuit_verbosity,
                                                   # forced_parameters=[f"param_{i}" for i in range(len(start_values))],
                                                   **minimizer_init)

        strategy = minimizer_setter.pop('strategy')
        minimizer.set_strategy(strategy)
        minimizer.tol = self.tolerance / 1e-3  # rescale zfit's tolerance (default 0.1) to iminuit's tol, which carries an internal EDM factor of about 1e-3
        assert not minimizer_setter, "minimizer_setter is not empty, bug. Please report. minimizer_setter: {}".format(
            minimizer_setter)
        self._minuit_minimizer = minimizer
        result = minimizer.migrad(**minimize_options)
        fitresult = FitResult.from_minuit(loss=loss, params=params, result=result, minimizer=self.copy())
        return fitresult
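
After construction, the zfit example tunes the minimizer in place: it sets the MINUIT strategy, rescales zfit's tolerance into Minuit's tol, and only then calls migrad. On a stand-alone Minuit object the same pattern looks roughly like this (the instance m and the chosen values are illustrative):

# m is an existing Minuit instance, e.g. the one from the sketch above
m.set_strategy(1)  # 0 = fast, 1 = default, 2 = thorough (more function calls)
m.tol = 0.1        # tolerance on the estimated distance to minimum (EDM); the stopping goal scales with tol and errordef
m.migrad()
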
github faab5 / errortools / errortools/logistic_regression.py (view on GitHub)
                # validate the per-parameter fix flags supplied by the caller
                if not all([isinstance(f, (bool, int, float,)) for f in parameter_fixes]):
                    raise ValueError("A fix should be True or False")
                if len(parameter_fixes) != len(initial_parameters):
                    raise ValueError("{:d} fixes given for {:d} parameters".format(len(parameter_fixes), len(initial_parameters)))
                parameter_fixes = [bool(f) for f in parameter_fixes]
            elif self.minuit is not None:
                parameter_fixes = [state['is_fixed'] for state in self.minuit.get_param_states()]

            # define function to be minimized
            fcn = lambda p: self.negativeLogPosterior(p, self.X, self.y, self.l1, self.l2)

            # define the gradient of the function to be minimized
            grd = lambda p: self.gradientNegativeLogPosterior(p, self.X, self.y, self.l1, self.l2)

            # initiate minuit minimizer
            self.minuit = iminuit.Minuit.from_array_func(fcn=fcn,
                start=initial_parameters, error=initial_step_sizes,
                limit=parameter_limits, fix=parameter_fixes,
                throw_nan=True, print_level=print_level,
                grad=grd, use_array_call=True, errordef=0.5, pedantic=False)

        self.minuit.print_level = print_level

        # minimize with migrad
        fmin, _ = self.minuit.migrad(ncall=max_function_calls, nsplit=n_splits, resume=True)

        # check validity of minimum
        if not fmin.is_valid:
            if not fmin.has_covariance or not fmin.has_accurate_covar or not fmin.has_posdef_covar or \
                    fmin.has_made_posdef_covar or fmin.hesse_failed:
                # It is known that migrad sometimes fails calculating the covariance matrix,
                # but succeeds on a second try

iminuit

Jupyter-friendly Python frontend for MINUIT2 in C++

License: MIT
Latest version published: 2 months ago
Package Health Score: 84 / 100