Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-iminuit for openSUSE:Factory checked in at 2023-01-07 17:19:58

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-iminuit (Old)
 and      /work/SRC/openSUSE:Factory/.python-iminuit.new.1563 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "python-iminuit" Sat Jan 7 17:19:58 2023 rev:24 rq:1056767 version:2.18.0 Changes: -------- --- /work/SRC/openSUSE:Factory/python-iminuit/python-iminuit.changes 2022-08-18 16:49:39.361514200 +0200 +++ /work/SRC/openSUSE:Factory/.python-iminuit.new.1563/python-iminuit.changes 2023-01-07 17:23:19.879450638 +0100 @@ -1,0 +2,12 @@ +Sat Jan 7 12:22:22 UTC 2023 - Dirk Müller <dmuel...@suse.com> + +- update to v2.18.0: + * Bump actions/checkout from 2 to 3 + * ci: update to Python 3.11 final release + * move tutorials + * added visualize function to Minuit + * Bump pypa/cibuildwheel from 2.10.2 to 2.11.2 + * add more checks for gradients + * Add Python 3.11, drop 3.6 + +------------------------------------------------------------------- Old: ---- iminuit-2.16.0.tar.gz New: ---- iminuit-2.18.0.tar.gz ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ python-iminuit.spec ++++++ --- /var/tmp/diff_new_pack.OhamsZ/_old 2023-01-07 17:23:20.403453764 +0100 +++ /var/tmp/diff_new_pack.OhamsZ/_new 2023-01-07 17:23:20.407453788 +0100 @@ -1,7 +1,7 @@ # -# spec file for package python-iminuit +# spec file # -# Copyright (c) 2022 SUSE LLC +# Copyright (c) 2023 SUSE LLC # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed @@ -22,14 +22,14 @@ %define skip_python36 1 %define modname iminuit Name: python-%{modname} -Version: 2.16.0 +Version: 2.18.0 Release: 0 Summary: Python bindings for MINUIT2 License: MIT URL: https://github.com/scikit-hep/iminuit Source0: https://files.pythonhosted.org/packages/source/i/iminuit/%{modname}-%{version}.tar.gz BuildRequires: %{python_module Cython} -BuildRequires: %{python_module devel} +BuildRequires: %{python_module devel >= 3.7} BuildRequires: %{python_module numpy >= 1.11.3} BuildRequires: %{python_module numpy-devel} BuildRequires: %{python_module pybind11 >= 2.9.0} ++++++ iminuit-2.16.0.tar.gz -> iminuit-2.18.0.tar.gz ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/CONTRIBUTING.md new/iminuit-2.18.0/CONTRIBUTING.md --- old/iminuit-2.16.0/CONTRIBUTING.md 2022-08-16 18:02:13.000000000 +0200 +++ new/iminuit-2.18.0/CONTRIBUTING.md 2022-12-14 11:10:58.000000000 +0100 @@ -1,3 +1 @@ See doc/contribute.rst in this repository or its [html version](https://iminuit.readthedocs.io/en/latest/contribute.html). - -The wheel building system comes from [scikit-hep/azure-wheel-helpers](https://github.com/scikit-hep/azure-wheel-helpers), see that first if you need to update the wheel build system (for a new Python version, for example). 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/PKG-INFO new/iminuit-2.18.0/PKG-INFO --- old/iminuit-2.16.0/PKG-INFO 2022-08-16 18:02:43.544662700 +0200 +++ new/iminuit-2.18.0/PKG-INFO 2022-12-14 11:11:30.625680000 +0100 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: iminuit -Version: 2.16.0 +Version: 2.18.0 Summary: Jupyter-friendly Python frontend for MINUIT2 in C++ Home-page: http://github.com/scikit-hep/iminuit Author: Piti Ongmongkolkul and the iminuit team @@ -19,11 +19,11 @@ Classifier: Programming Language :: C++ Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy @@ -34,7 +34,7 @@ Classifier: Operating System :: POSIX Classifier: Operating System :: Unix Classifier: Operating System :: MacOS -Requires-Python: >=3.6 +Requires-Python: >=3.7 Description-Content-Type: text/x-rst Provides-Extra: test Provides-Extra: doc diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/pyproject.toml new/iminuit-2.18.0/pyproject.toml --- old/iminuit-2.16.0/pyproject.toml 2022-08-16 18:02:13.000000000 +0200 +++ new/iminuit-2.18.0/pyproject.toml 2022-12-14 11:10:58.000000000 +0100 @@ -16,7 +16,7 @@ [tool.cibuildwheel] # update skip when numpy wheels become available -skip = ["*-musllinux_*", "cp310-win32", "cp310-manylinux_i686"] +skip = ["*-musllinux_*", "cp31?-win32", "cp31?-manylinux_i686"] test-requires = "pytest" test-command = "python -m pytest {package}/tests" test-skip = ["*universal2:arm64"] diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/setup.cfg new/iminuit-2.18.0/setup.cfg --- old/iminuit-2.16.0/setup.cfg 2022-08-16 18:02:43.548662700 +0200 +++ new/iminuit-2.18.0/setup.cfg 2022-12-14 11:11:30.625680000 +0100 @@ -22,11 +22,11 @@ Programming Language :: C++ Programming Language :: Python Programming Language :: Python :: 3 - Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 Programming Language :: Python :: 3 :: Only Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: Implementation :: PyPy @@ -42,45 +42,34 @@ package_dir = = src packages = iminuit -python_requires = >=3.6 +python_requires = >=3.7 install_requires = numpy [options.extras_require] test = + coverage cython - flake8 - ipykernel ipywidgets - jax - jaxlib joblib jacobi - jupyter_client matplotlib - mypy - nbconvert - nbformat numba numba-stats - numpy - pre-commit - pydocstyle - pylint pytest - pytest-cov - pytest-xdist scipy - sphinx - sphinx_rtd_theme tabulate - nbsphinx boost_histogram - resample + resample>=1.5 doc = - sphinx>=4.1 + sphinx>=4.1,<5.2 sphinx-rtd-theme - Jinja2==3.0 nbsphinx + nbconvert + nbformat + jupyter_client + ipykernel + jax==0.3.2 + jaxlib==0.3.0 [check-manifest] ignore = diff -urN 
'--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/src/iminuit/cost.py new/iminuit-2.18.0/src/iminuit/cost.py --- old/iminuit-2.16.0/src/iminuit/cost.py 2022-08-16 18:02:13.000000000 +0200 +++ new/iminuit-2.18.0/src/iminuit/cost.py 2022-12-14 11:10:58.000000000 +0100 @@ -19,7 +19,7 @@ histogram of weighted samples - Fit a template to binned data with bin-wise uncertainties on the template: - :class:`BarlowBeestonLite`, which also supports weighted data and weighted templates + :class:`Template`, which also supports weighted data and weighted templates - Fit of a function f(x) to (x, y, yerror) pairs with normal-distributed fluctuations. x is one- or multi-dimensional, y is one-dimensional. @@ -62,6 +62,9 @@ import typing as _tp import warnings +CHISQUARE = 1.0 +NEGATIVE_LOG_LIKELIHOOD = 0.5 + # correct ArrayLike from numpy.typing generates horrible looking signatures # in python's help(), so we use this as a workaround _ArrayLike = _tp.Sequence @@ -100,6 +103,8 @@ Apply Bohm-Zech transform. See Bohm and Zech, NIMA 748 (2014) 1-6. + + :meta private: """ def __init__(self, val: _ArrayLike, var: _ArrayLike): @@ -114,10 +119,16 @@ Estimated variance of observed values. """ val, var = np.atleast_1d(val, var) - self._scale = val / (var + 1e-323) + + self._scale = np.ones_like(val) + np.divide(val, var, out=self._scale, where=var > 0) self._obs = val * self._scale - def __call__(self, val: _ArrayLike, var: _tp.Optional[_ArrayLike] = None): + def __call__( + self, val: _ArrayLike, var: _ArrayLike = None + ) -> _tp.Union[ + _tp.Tuple[np.ndarray, np.ndarray], _tp.Tuple[np.ndarray, np.ndarray, np.ndarray] + ]: """ Return precomputed scaled data and scaled prediction. @@ -218,7 +229,7 @@ return 2 * np.sum(mu - n + n * (_safe_log(n) - _safe_log(mu))) -def barlow_beeston_lite_chi2_jsc( +def template_chi2_jsc( n: _ArrayLike[float], mu: _ArrayLike[float], mu_var: _ArrayLike[float] ) -> float: """ @@ -239,7 +250,7 @@ Returns ------- float - Cost function value. + Asymptotically chi-square-distributed test statistic. Notes ----- @@ -251,23 +262,22 @@ beta_var = mu_var / mu**2 - # need to solve quadratic equation b^2 + (mu beta_var - 1) b - n beta_var = 0 - p = mu * beta_var - 1 - q = -n * beta_var - beta = 0.5 * (-p + np.sqrt(p**2 - 4 * q)) + # Eq. 15 from https://doi.org/10.48550/arXiv.2206.12346 + p = 0.5 - 0.5 * mu * beta_var + beta = p + np.sqrt(p**2 + n * beta_var) return poisson_chi2(n, mu * beta) + np.sum( # type:ignore (beta - 1) ** 2 / beta_var ) -def barlow_beeston_lite_chi2_hpd( +def template_chi2_da( n: _ArrayLike[float], mu: _ArrayLike[float], mu_var: _ArrayLike[float] ) -> float: """ Compute asymptotically chi2-distributed cost for a template fit. - H.P. Dembinski, https://doi.org/10.48550/arXiv.2206.12346 + H.P. Dembinski, A. Abdelmotteleb, https://doi.org/10.48550/arXiv.2206.12346 Parameters ---------- @@ -282,7 +292,7 @@ Returns ------- float - Cost function value. + Asymptotically chi-square-distributed test statistic. """ n, mu, mu_var = np.atleast_1d(n, mu, mu_var) k = mu**2 / mu_var @@ -290,6 +300,50 @@ return poisson_chi2(n, mu * beta) + poisson_chi2(k, k * beta) # type:ignore +def template_nll_asy( + n: _ArrayLike[float], mu: _ArrayLike[float], mu_var: _ArrayLike[float] +) -> float: + """ + Compute marginalized negative log-likelikihood for a template fit. + + This is the negative logarithm of equation 3.15 of the paper by + C.A. Argüelles, A. Schneider, T. Yuan, + https://doi.org/10.1007/JHEP06(2019)030. 
+ + The authors use a Bayesian approach and integrate over the nuisance + parameters. Like the other Barlow-Beeston-lite methods, this is an + approximation. The resulting likelihood cannot be turned into an + asymptotically chi-square distributed test statistic as detailed + in Baker & Cousins, NIM 221 (1984) 437-442. + + Parameters + ---------- + n : array-like + Observed counts. + mu : array-like + Expected counts. This is the sum of the normalised templates scaled + with the component yields. + mu_var : array-like + Expected variance of mu. Must be positive everywhere. + + Returns + ------- + float + Negative log-likelihood function value. + """ + from scipy.special import loggamma as lg + + n, mu, mu_var = np.atleast_1d(n, mu, mu_var) + + alpha = mu**2 / mu_var + 1 + beta = mu / mu_var + return -np.sum( + alpha * np.log(beta) + + lg(n + alpha) + - (lg(n + 1) + (n + alpha) * np.log(1 + beta) + lg(alpha)) + ) + + # If numba is available, use it to accelerate computations in float32 and float64 # precision. Fall back to plain numpy for float128 which is not currently supported # by numba. @@ -299,11 +353,11 @@ @_overload(_safe_log, inline="always") def _ol_safe_log(x): - return _safe_log + return _safe_log # pragma: no cover @_overload(_z_squared, inline="always") def _ol_z_squared(y, ye, ym): - return _z_squared + return _z_squared # pragma: no cover _unbinned_nll_np = _unbinned_nll _unbinned_nll_nb = _njit( @@ -383,7 +437,7 @@ @_overload(_soft_l1_loss, inline="always") def _ol_soft_l1_loss(z_sqr): - return _soft_l1_loss_np + return _soft_l1_loss_np # pragma: no cover _soft_l1_cost_np = _soft_l1_cost _soft_l1_cost_nb = _njit( @@ -403,22 +457,36 @@ class Cost(abc.ABC): - """Base class for all cost functions.""" + """ + Base class for all cost functions. + + :meta private: + """ __slots__ = ("_func_code", "_verbose") @property def errordef(self): - """For internal use.""" - return 1.0 + """ + For internal use. + + :meta private: + """ + return self._errordef() + + def _errordef(self): + return CHISQUARE @property def func_code(self): - """For internal use.""" + """ + For internal use. + + :meta private: + """ return self._func_code @property - @abc.abstractmethod def ndata(self): """ Return number of points in least-squares fits or bins in a binned fit. @@ -426,6 +494,10 @@ Infinity is returned if the cost function is unbinned. This is used by Minuit to compute the reduced chi2, a goodness-of-fit estimate. 
""" + return self._ndata() + + @abc.abstractmethod + def _ndata(self): NotImplemented # pragma: no cover @property @@ -502,9 +574,7 @@ self.value = value super().__init__((), False) - @Cost.ndata.getter # type:ignore - def ndata(self): - """See Cost.ndata.""" + def _ndata(self): return 0 def _call(self, args): @@ -566,12 +636,10 @@ def _call(self, args): r = 0.0 for comp, cargs in self._split(args): - r += comp._call(cargs) + r += comp._call(cargs) / comp.errordef return r - @Cost.ndata.getter # type:ignore - def ndata(self): - """See Cost.ndata.""" + def _ndata(self): return sum(c.ndata for c in self._items) def __len__(self): @@ -609,23 +677,29 @@ args = np.atleast_1d(args) n = sum(hasattr(comp, "visualize") for comp in self) - fig = plt.gcf() - if n > 1: - fig.set_figheight(n * fig.get_figheight()) + + w, h = plt.rcParams["figure.figsize"] + fig, ax = plt.subplots(1, n, figsize=(w, h * n)) if component_kwargs is None: component_kwargs = {} + i = 0 for k, (comp, cargs) in enumerate(self._split(args)): - if hasattr(comp, "visualize"): - i += 1 - plt.subplot(n, 1, i) - kwargs = component_kwargs.get(k, {}) - comp.visualize(cargs, **kwargs) + if not hasattr(comp, "visualize"): + continue + kwargs = component_kwargs.get(k, {}) + plt.sca(ax[i]) + comp.visualize(cargs, **kwargs) + i += 1 class MaskedCost(Cost): - """Base class for cost functions that support data masking.""" + """ + Base class for cost functions that support data masking. + + :meta private: + """ __slots__ = "_data", "_mask", "_masked" @@ -665,15 +739,13 @@ class UnbinnedCost(MaskedCost): - """Base class for unbinned cost functions.""" + """ + Base class for unbinned cost functions. - __slots__ = "_model", "_log" + :meta private: + """ - @Cost.ndata.getter # type:ignore - def ndata(self): - """See Cost.ndata.""" - # unbinned likelihoods have infinite degrees of freedom - return np.inf + __slots__ = "_model", "_log" def __init__(self, data, model: _tp.Callable, verbose: int, log: bool): """For internal use.""" @@ -691,6 +763,10 @@ """Get number density model.""" ... # pragma: no cover + def _ndata(self): + # unbinned likelihoods have infinite degrees of freedom + return np.inf + def visualize(self, args: _ArrayLike, model_points: int = 0): """ Visualize data and model agreement (requires matplotlib). @@ -874,7 +950,11 @@ class BinnedCost(MaskedCost): - """Base class for binned cost functions.""" + """ + Base class for binned cost functions. + + :meta private: + """ __slots__ = "_xe", "_ndim", "_bztrafo" @@ -885,11 +965,6 @@ """Access bin edges.""" return self._xe - @Cost.ndata.getter # type:ignore - def ndata(self): - """See Cost.ndata.""" - return np.prod(self._masked.shape[: self._ndim]) - def __init__(self, args, n, xe, verbose, *updater): """For internal use.""" if not isinstance(xe, _tp.Iterable): @@ -921,6 +996,9 @@ super().__init__(args, n, verbose) + def _ndata(self): + return np.prod(self._masked.shape[: self._ndim]) + def _update_cache(self): super()._update_cache() if self._bztrafo: @@ -959,7 +1037,11 @@ class BinnedCostWithModel(BinnedCost): - """Base class for binned cost functions.""" + """ + Base class for binned cost functions with parametric model. + + :meta private: + """ __slots__ = "_xe_shape", "_model", "_model_arg" @@ -991,23 +1073,49 @@ return d -class BarlowBeestonLite(BinnedCost): +class Template(BinnedCost): """ Binned cost function for a template fit with uncertainties on the template. 
- Compared to the original Beeston-Barlow method, the lite methods uses one nuisance - parameter per bin instead of one nuisance parameter per component per bin, which - is an approximation. This class offers two different lite methods. The default - method used is the one which performs better on average. - - The cost function works for both weighted data and weighted templates. The cost - function assumes that the weights are independent of the data. This is not the - case for sWeights, and the uncertaintes for results obtained with sWeights will - only be approximately correct, see C. Langenbruch, Eur.Phys.J.C 82 (2022) 5, 393. - - Barlow and Beeston, Comput.Phys.Commun. 77 (1993) 219-228, - https://doi.org/10.1016/0010-4655(93)90005-W) - J.S. Conway, PHYSTAT 2011, https://doi.org/10.48550/arXiv.1103.0354 + This cost function is for a mixture model. Samples originate from two or more + components and we are interested in estimating the yield that originates from each + component. In high-energy physics, one component is often a peaking signal over a + smooth background component. Templates are shape estimates for these components which + are obtained from Monte-Carlo simulation. Even if the Monte-Carlo simulation is exact, + the templates introduce some uncertainty since the Monte-Carlo simulation produces + only a finite sample of events that contribute to each template. This cost function + takes that additional uncertainty into account. + + There are several ways to approach this problem. Barlow and Beeston [1]_ found an + exact likelihood for this problem, with one nuisance parameter per component per bin. + Solving this likelihood is somewhat challenging though. The Barlow-Beeston likelihood + also does not handle the additional uncertainty in weighted templates unless the + weights per bin are all equal. + + Other works [2]_ [3]_ [4]_ describe likelihoods that use only one nuisance parameter + per bin, which is an approximation. Some marginalize over the nuisance parameters with + some prior, while others profile over the nuisance parameter. This class implements + several of these methods. The default method is the one which performs best under most + conditions, according to current knowledge. The default may change if this assessment + changes. + + The cost function returns an asymptotically chi-square distributed test statistic, + except for the method "asy", where it is the negative logarithm of the marginalised + likelihood instead. The standard transform [5]_ which we use convert likelihoods into + test statistics only works for (profiled) likelihoods, not for likelihoods + marginalized over a prior. + + All methods implemented here have been generalized to work with both weighted data and + weighted templates, under the assumption that the weights are independent of the data. + This is not the case for sWeights, and the uncertaintes for results obtained with + sWeights will only be approximately correct [6]_. + + .. [1] Barlow and Beeston, Comput.Phys.Commun. 77 (1993) 219-228 + .. [2] Conway, PHYSTAT 2011 proceeding, https://doi.org/10.48550/arXiv.1103.0354 + .. [3] Argüelles, Schneider, Yuan, JHEP 06 (2019) 030 + .. [4] Dembinski and Abdelmotteleb, https://doi.org/10.48550/arXiv.2206.12346 + .. [5] Baker and Cousins, NIM 221 (1984) 437-442 + .. 
[6] Langenbruch, Eur.Phys.J.C 82 (2022) 5, 393 """ __slots__ = "_bbl_data", "_impl" @@ -1019,7 +1127,7 @@ templates: _tp.Sequence[_tp.Sequence], name: _tp.Collection[str] = None, verbose: int = 0, - method: str = "hpd", + method: str = "da", ): """ Initialize cost function with data and model. @@ -1028,31 +1136,29 @@ ---------- n : array-like Histogram counts. If this is an array with dimension D+1, where D is the - number of histogram axes, then the last dimension must have two elements - and is interpreted as pairs of sum of weights and sum of weights squared. + number of histogram axes, then the last dimension must have two elements and + is interpreted as pairs of sum of weights and sum of weights squared. xe : array-like or collection of array-like - Bin edge locations, must be len(n) + 1, where n is the number of bins. - If the histogram has more than one axis, xe must be a collection of the - bin edge locations along each axis. + Bin edge locations, must be len(n) + 1, where n is the number of bins. If the + histogram has more than one axis, xe must be a collection of the bin edge + locations along each axis. templates : collection of array-like - Collection of arrays, which contain the histogram counts of each template. - The template histograms must use the same axes as the data histogram. If - the counts are represented by an array with dimension D+1, where D is the - number of histogram axes, then the last dimension must have two elements - and is interpreted as pairs of sum of weights and sum of weights squared. + Collection of arrays, which contain the histogram counts of each template. The + template histograms must use the same axes as the data histogram. If the + counts are represented by an array with dimension D+1, where D is the number + of histogram axes, then the last dimension must have two elements and is + interpreted as pairs of sum of weights and sum of weights squared. name : collection of str, optional Optional name for the yield of each template. Must have length K. verbose : int, optional - Verbosity level. 0: is no output (default). - 1: print current args and negative log-likelihood value. - method : {"jsc", "hpd"}, optional - Which version of the lite method to use. jsc: Method developed by - J.S. Conway, PHYSTAT 2011, https://doi.org/10.48550/arXiv.1103.0354. - hpd: Method developed by H.P. Dembinski. Default is "hpd", which seems to - perform slightly better on average. The default may change in the future - when more practical experience with both method is gained. Set this - parameter explicitly to ensure that a particular method is used now and - in the future. + Verbosity level. 0: is no output (default). 1: print current args and negative + log-likelihood value. + method : {"jsc", "asy", "da"}, optional + Which method to use. "jsc": Conway's method [2]_. "asy": ASY method [3]_. + "da": DA method [4]_. Default is "da", which to current knowledge offers the + best overall performance. The default may change in the future, so please set + this parameter explicitly in code that has to be stable. For all methods + except the "asy" method, the minimum value is chi-square distributed. 
""" M = len(templates) if M < 1: @@ -1089,13 +1195,24 @@ nt_var.append(tv * f**2) self._bbl_data = (nt, nt_var) - if method == "jsc": - self._impl = barlow_beeston_lite_chi2_jsc - elif method == "hpd": - self._impl = barlow_beeston_lite_chi2_hpd - else: + known_methods = { + "jsc": template_chi2_jsc, + "asy": template_nll_asy, + "hpd": template_chi2_da, + "da": template_chi2_da, + } + try: + self._impl = known_methods[method] + except KeyError: raise ValueError( - f"method {method} is not understood, allowed values: {{'jsc', 'hpd'}}" + f"method {method} is not understood, allowed values: {known_methods}" + ) + + if method == "hpd": + warnings.warn( + "key 'hpd' is deprecated, please use 'da' instead", + category=np.VisibleDeprecationWarning, + stacklevel=2, ) super().__init__(name, n, xe, verbose) @@ -1125,6 +1242,9 @@ ma = mu > 0 return self._impl(n[ma], mu[ma], mu_var[ma]) + def _errordef(self): + return NEGATIVE_LOG_LIKELIHOOD if self._impl is template_nll_asy else CHISQUARE + def visualize(self, args: _ArrayLike): """ Visualize data and model agreement (requires matplotlib). @@ -1365,11 +1485,6 @@ loss(_z_squared(y, ye, ym)) # type:ignore ) - @Cost.ndata.getter # type:ignore - def ndata(self): - """See Cost.ndata.""" - return len(self._masked) - def __init__( self, x: _ArrayLike, @@ -1439,6 +1554,9 @@ ym = _normalize_model_output(ym) return self._cost(y, yerror, ym) + def _ndata(self): + return len(self._masked) + def visualize(self, args: _ArrayLike, model_points: int = 0): """ Visualize data and model agreement (requires matplotlib). @@ -1557,9 +1675,7 @@ return np.sum(delta**2 * self._covinv) return np.einsum("i,ij,j", delta, self._covinv, delta) - @Cost.ndata.getter # type:ignore - def ndata(self): - """See Cost.ndata.""" + def _ndata(self): return len(self._value) def visualize(self, args: _ArrayLike): @@ -1629,3 +1745,23 @@ if isinstance(xe[0], _tp.Iterable): return tuple(len(xei) - 1 for xei in xe) return (len(xe) - 1,) + + +_deprecated_content = { + "BarlowBeestonLite": ("Template", Template), + "barlow_beeston_lite_chi2_jsc": ("template_chi2_jsc", template_chi2_jsc), + "barlow_beeston_lite_chi2_hpd": ("template_chi2_da", template_chi2_da), +} + + +def __getattr__(name: str) -> _tp.Any: + if name in _deprecated_content: + new_name, obj = _deprecated_content[name] + warnings.warn( + f"{name} was renamed to {new_name}, please import {new_name} instead", + np.VisibleDeprecationWarning, + stacklevel=2, + ) + return obj + + raise AttributeError diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/src/iminuit/minuit.py new/iminuit-2.18.0/src/iminuit/minuit.py --- old/iminuit-2.16.0/src/iminuit/minuit.py 2022-08-16 18:02:13.000000000 +0200 +++ new/iminuit-2.18.0/src/iminuit/minuit.py 2022-12-14 11:10:58.000000000 +0100 @@ -1244,6 +1244,14 @@ return self + def visualize(self, plot=None): + """Visualize agreement of current model with data. + + This raises an AttributeError if the cost function has no visualize + method. + """ + return self._visualize(plot)(self.values) + def hesse(self, ncall: int = None) -> "Minuit": """ Run Hesse algorithm to compute asymptotic errors. 
@@ -1672,9 +1680,7 @@ y: str, *, size: int = 50, - bound: _tp.Union[ - float, _tp.Tuple[_tp.Tuple[float, float], _tp.Tuple[float, float]] - ] = 2, + bound: _tp.Union[float, _tp.Iterable[_tp.Tuple[float, float]]] = 2, grid: _tp.Tuple[_ArrayLike, _ArrayLike] = None, subtract_min: bool = False, ) -> _tp.Tuple[np.ndarray, np.ndarray, np.ndarray]: @@ -1731,7 +1737,7 @@ if xv.ndim != 1 or yv.ndim != 1: raise ValueError("grid per parameter must be 1D array-like") else: - if isinstance(bound, tuple): + if isinstance(bound, _tp.Iterable): xb, yb = bound xrange = self._normalize_bound(x, xb) yrange = self._normalize_bound(y, yb) @@ -2119,6 +2125,7 @@ Layout, Dropdown, ) + from ipywidgets.widgets.interaction import show_inline_matplotlib_plots from IPython.display import clear_output from matplotlib import pyplot as plt except ModuleNotFoundError as e: @@ -2128,16 +2135,7 @@ ) raise - pyfcn = self.fcn._fcn - - if plot is None: - if hasattr(pyfcn, "visualize"): - plot = pyfcn.visualize - else: - raise ValueError( - f"class {pyfcn.__class__.__name__} has no visualize method, " - "please use the plot argument to pass a visualization function" - ) + plot = self._visualize(plot) def plot_with_frame(args, from_fit, report_success): trans = plt.gca().transAxes @@ -2239,11 +2237,12 @@ out.block = False self.fixed = save from_fit = True - # mutil._show_inline_matplotlib_plots() with out: clear_output(wait=True) plot_with_frame(args, from_fit, report_success) - mutil._show_inline_matplotlib_plots() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + show_inline_matplotlib_plots() def on_fit_button_clicked(change): for x in parameters: @@ -2262,7 +2261,9 @@ with out: clear_output(wait=True) plot_with_frame(self.values, True, report_success) - mutil._show_inline_matplotlib_plots() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + show_inline_matplotlib_plots() def on_update_button_clicked(change): for x in parameters: @@ -2309,7 +2310,10 @@ for x in parameters: x.slider.observe(on_slider_change, "value") - # mutil._show_inline_matplotlib_plots() + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + show_inline_matplotlib_plots() on_slider_change(None) return HBox([out, ui]) @@ -2436,6 +2440,18 @@ else: p.text(str(self)) + def _visualize(self, plot): + pyfcn = self.fcn._fcn + if plot is None: + if hasattr(pyfcn, "visualize"): + plot = pyfcn.visualize + else: + raise AttributeError( + f"class {pyfcn.__class__.__name__} has no visualize method, " + "please use the plot argument to pass a visualization function" + ) + return plot + def _make_init_state( pos2var: _tp.Tuple[str, ...], args: np.ndarray, kwds: _tp.Dict[str, float] diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/src/iminuit/util.py new/iminuit-2.18.0/src/iminuit/util.py --- old/iminuit-2.16.0/src/iminuit/util.py 2022-08-16 18:02:13.000000000 +0200 +++ new/iminuit-2.18.0/src/iminuit/util.py 2022-12-14 11:10:58.000000000 +0100 @@ -36,6 +36,8 @@ Derived classes need to implement methods _set and _get to access specific properties of the parameter state. + + :meta private: """ __slots__ = ("_minuit", "_ndim") @@ -470,11 +472,11 @@ """ Get chi2/ndof of the fit. - This returns NaN if the cost function is unbinned or does not support - reporting the degrees of freedom. + This returns NaN if the cost function is unbinned, errordef is not 1, + or if the cost function does not report the degrees of freedom. 
""" - if np.isfinite(self._ndof) and self._ndof > 0: - return self.fval / self.errordef / self._ndof + if np.isfinite(self._ndof) and self._ndof > 0 and self.errordef == 1: + return self.fval / self._ndof return np.nan @property @@ -1434,28 +1436,6 @@ return segments -def _show_inline_matplotlib_plots(): - # Code taken from ipywidgets/interactive.py - # - # See comments in the original why this is needed. - # - # Copyright (c) Jupyter Development Team. - # Distributed under the terms of the Modified BSD License. - # - # This version was stripped down and modified to remove a deprecation warning. - try: - import matplotlib as mpl - from matplotlib_inline.backend_inline import flush_figures - except ImportError: # pragma: no cover - return # pragma: no cover - - if ( - mpl.get_backend() == "module://ipykernel.pylab.backend_inline" - or mpl.get_backend() == "module://matplotlib_inline.backend_inline" - ): - flush_figures() # pragma: no cover - - def _smart_sampling(f, xmin, xmax, start=5, tol=5e-3): x = np.linspace(xmin, xmax, start) ynew = f(x) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/src/iminuit/version.py new/iminuit-2.18.0/src/iminuit/version.py --- old/iminuit-2.16.0/src/iminuit/version.py 2022-08-16 18:02:13.000000000 +0200 +++ new/iminuit-2.18.0/src/iminuit/version.py 2022-12-14 11:10:58.000000000 +0100 @@ -8,7 +8,7 @@ # - During development, add suffix .devN with N >= 0 # - For release candidates, add suffix .rcN with N >= 0 # - For beta releases, add suffix .betaN with N >= 0 -version = "2.16.0" +version = "2.18.0" # We list the corresponding ROOT version of the C++ Minuit2 library here root_version = "v6-25-02-2017-gd0b406db5e" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/src/iminuit.egg-info/PKG-INFO new/iminuit-2.18.0/src/iminuit.egg-info/PKG-INFO --- old/iminuit-2.16.0/src/iminuit.egg-info/PKG-INFO 2022-08-16 18:02:43.000000000 +0200 +++ new/iminuit-2.18.0/src/iminuit.egg-info/PKG-INFO 2022-12-14 11:11:30.000000000 +0100 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: iminuit -Version: 2.16.0 +Version: 2.18.0 Summary: Jupyter-friendly Python frontend for MINUIT2 in C++ Home-page: http://github.com/scikit-hep/iminuit Author: Piti Ongmongkolkul and the iminuit team @@ -19,11 +19,11 @@ Classifier: Programming Language :: C++ Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy @@ -34,7 +34,7 @@ Classifier: Operating System :: POSIX Classifier: Operating System :: Unix Classifier: Operating System :: MacOS -Requires-Python: >=3.6 +Requires-Python: >=3.7 Description-Content-Type: text/x-rst Provides-Extra: test Provides-Extra: doc diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/src/iminuit.egg-info/requires.txt new/iminuit-2.18.0/src/iminuit.egg-info/requires.txt --- old/iminuit-2.16.0/src/iminuit.egg-info/requires.txt 2022-08-16 18:02:43.000000000 +0200 +++ 
new/iminuit-2.18.0/src/iminuit.egg-info/requires.txt 2022-12-14 11:11:30.000000000 +0100 @@ -1,38 +1,27 @@ numpy [doc] -sphinx>=4.1 +sphinx<5.2,>=4.1 sphinx-rtd-theme -Jinja2==3.0 nbsphinx +nbconvert +nbformat +jupyter_client +ipykernel +jax==0.3.2 +jaxlib==0.3.0 [test] +coverage cython -flake8 -ipykernel ipywidgets -jax -jaxlib joblib jacobi -jupyter_client matplotlib -mypy -nbconvert -nbformat numba numba-stats -numpy -pre-commit -pydocstyle -pylint pytest -pytest-cov -pytest-xdist scipy -sphinx -sphinx_rtd_theme tabulate -nbsphinx boost_histogram -resample +resample>=1.5 diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/tests/test_cost.py new/iminuit-2.18.0/tests/test_cost.py --- old/iminuit-2.16.0/tests/test_cost.py 2022-08-16 18:02:13.000000000 +0200 +++ new/iminuit-2.18.0/tests/test_cost.py 2022-12-14 11:10:58.000000000 +0100 @@ -11,13 +11,14 @@ LeastSquares, Constant, NormalConstraint, - BarlowBeestonLite, + Template, multinominal_chi2, _soft_l1_loss, PerformanceWarning, ) from typing import Sequence import pickle +from sys import version_info as pyver try: # pytest.importorskip does not work for scipy.stats; @@ -25,9 +26,9 @@ # even if scipy is not installed from scipy.stats import norm, truncexpon, multivariate_normal - scipy_stats_available = True + scipy_available = True except ImportError: - scipy_stats_available = False + scipy_available = False try: @@ -57,7 +58,8 @@ def expon_cdf(x, a): - return 1 - np.exp(-x / a) + with np.errstate(over="ignore"): + return 1 - np.exp(-x / a) @pytest.fixture @@ -96,13 +98,13 @@ return a + b * x -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") def test_norm_logpdf(): x = np.linspace(-3, 3) assert_allclose(norm_logpdf(x, 3, 2), norm.logpdf(x, 3, 2)) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") def test_norm_pdf(): x = np.linspace(-3, 3) assert_allclose(norm_pdf(x, 3, 2), norm.pdf(x, 3, 2)) @@ -131,7 +133,7 @@ assert_equal(m.fmin.reduced_chi2, np.nan) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") def test_UnbinnedNLL_2D(): def model(x_y, mux, muy, sx, sy, rho): return mvnorm(mux, muy, sx, sy, rho).pdf(x_y.T) @@ -198,7 +200,7 @@ c.visualize((1, 2), model_points=10) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") @pytest.mark.skipif(not matplotlib_available, reason="matplotlib is needed") def test_UnbinnedNLL_visualize_2D(): def model(x_y, mux, muy, sx, sy, rho): @@ -245,7 +247,7 @@ assert_equal(m.fmin.reduced_chi2, np.nan) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") def test_ExtendedUnbinnedNLL_2D(): def model(x_y, n, mux, muy, sx, sy, rho): return n * 1000, n * 1000 * mvnorm(mux, muy, sx, sy, rho).pdf(x_y.T) @@ -318,7 +320,7 @@ c.visualize((1, 2, 3)) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") @pytest.mark.skipif(not matplotlib_available, reason="matplotlib is needed") def test_ExtendedUnbinnedNLL_visualize_2D(): def model(x_y, n, mux, muy, sx, sy, 
rho): @@ -340,7 +342,7 @@ assert_equal(c.data, c2.data) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") @pytest.mark.parametrize("verbose", (0, 1)) def test_BinnedNLL(binned, verbose): mle, nx, xe = binned @@ -417,7 +419,7 @@ BinnedNLL(1, 2, lambda x, a: 0) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") def test_BinnedNLL_2D(): truth = (0.1, 0.2, 0.3, 0.4, 0.5) x, y = mvnorm(*truth).rvs(size=1000, random_state=1).T @@ -443,7 +445,7 @@ assert cost(*m.values) > m.fval -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") def test_BinnedNLL_2D_with_zero_bins(): truth = (0.1, 0.2, 0.3, 0.4, 0.5) x, y = mvnorm(*truth).rvs(size=1000, random_state=1).T @@ -500,7 +502,7 @@ c.visualize((1,)) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") @pytest.mark.skipif(not matplotlib_available, reason="matplotlib is needed") def test_BinnedNLL_visualize_2D(): truth = (0.1, 0.2, 0.3, 0.4, 0.5) @@ -523,7 +525,7 @@ assert_equal(c.data, c2.data) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") @pytest.mark.parametrize("verbose", (0, 1)) def test_ExtendedBinnedNLL(binned, verbose): mle, nx, xe = binned @@ -563,7 +565,7 @@ ExtendedBinnedNLL([1], [1], lambda x, a: 0) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") def test_ExtendedBinnedNLL_2D(): truth = (1.0, 0.1, 0.2, 0.3, 0.4, 0.5) x, y = mvnorm(*truth[1:]).rvs(size=int(truth[0] * 1000), random_state=1).T @@ -583,7 +585,7 @@ assert_allclose(m.values, truth, atol=0.1) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") def test_ExtendedBinnedNLL_3D(): truth = (1.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7) n = int(truth[0] * 10000) @@ -638,7 +640,7 @@ c.visualize((1, 2)) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") @pytest.mark.skipif(not matplotlib_available, reason="matplotlib is needed") def test_ExtendedBinnedNLL_visualize_2D(): truth = (1.0, 0.1, 0.2, 0.3, 0.4, 0.5) @@ -892,11 +894,29 @@ assert cs((1, 1)) == lsq((1, 1)) + con((1, 1)) + 1.5 +@pytest.mark.skipif(not scipy_available, reason="scipy is needed") +def test_CostSum_4(): + + t = Template([1, 2], [1, 2, 3], [[1, 1], [0, 1]], method="asy") + assert t.errordef == Minuit.LIKELIHOOD + + m1 = Minuit(t, 1, 1) + m1.migrad() + + cs = CostSum(t) + assert cs.errordef == Minuit.LEAST_SQUARES + + m2 = Minuit(cs, 1, 1) + m2.migrad() + + assert_allclose(m1.errors, m2.errors) + + @pytest.mark.skipif(not matplotlib_available, reason="matplotlib is needed") def test_CostSum_visualize(): lsq = LeastSquares([1, 2, 3], [3, 4, 5], 1, line) con = NormalConstraint(("a", "b"), (1, 1), (1, 1)) - c = lsq + con + c = lsq + con + 1 c.visualize((1, 2)) @@ -1069,18 +1089,29 @@ assert c(1) == 0 -@pytest.mark.parametrize("method", ("jsc", "hpd")) -def test_BarlowBeestonLite(method): - n = np.array([1, 2, 3]) 
+@pytest.mark.parametrize("method", ("jsc", "asy", "da")) +def test_Template(method): + if method == "asy" and not scipy_available: + pytest.skip(reason="scipy needed") xe = np.array([0, 1, 2, 3]) t = np.array([[1, 1, 0], [0, 1, 3]]) + n = t[0] + t[1] - c = BarlowBeestonLite(n, xe, t, method=method) + c = Template(n, xe, t, method=method) m = Minuit(c, 1, 1) m.migrad() assert m.valid - assert_allclose(m.fval, 0, atol=1e-4) - assert_allclose(m.values, [2, 4], atol=1e-2) + assert m.ndof == 1 + if method == "asy": + assert c.errordef == 0.5 + assert_equal(m.fmin.reduced_chi2, np.nan) + # asy produces values far away from truth in this case + assert_allclose(m.values, [1, 3], atol=0.2) + else: + assert c.errordef == 1.0 + assert_allclose(m.fval, 0, atol=1e-4) + assert_allclose(m.fmin.reduced_chi2, 0, atol=1e-5) + assert_allclose(m.values, [2, 4], atol=1e-2) def generate(rng, nmc, truth, bins, tf=1, df=1): @@ -1099,18 +1130,20 @@ return n, xe, np.array(t) -@pytest.mark.skipif(not scipy_stats_available, reason="scipy.stats is needed") -@pytest.mark.parametrize("method", ("jsc", "hpd")) +@pytest.mark.skipif(not scipy_available, reason="scipy.stats is needed") +@pytest.mark.parametrize("method", ("jsc", "asy", "da")) @pytest.mark.parametrize("with_mask", (False, True)) @pytest.mark.parametrize("weighted_data", (False, True)) -def test_BarlowBeestonLite_weighted(method, with_mask, weighted_data): +def test_Template_weighted(method, with_mask, weighted_data): + if method == "asy" and not scipy_available: + pytest.skip(reason="scipy needed") rng = np.random.default_rng(1) truth = 750, 250 z = [] rng = np.random.default_rng(1) for itoy in range(100): ni, xe, ti = generate(rng, 400, truth, 15, 1.5, 1.5 if weighted_data else 1) - c = BarlowBeestonLite(ni, xe, ti, method=method) + c = Template(ni, xe, ti, method=method) if with_mask: cx = 0.5 * (xe[1:] + xe[:-1]) c.mask = cx != 1.5 @@ -1124,40 +1157,38 @@ break assert m.valid z.append((m.values[1] - truth[1]) / m.errors[1]) - assert_allclose(np.mean(z), 0, atol=0.15) + assert_allclose(np.mean(z), 0, atol=0.3) assert_allclose(np.std(z), 1, rtol=0.1) -@pytest.mark.parametrize("method", ("jsc", "hpd")) -def test_BarlowBeestonLite_bad_input(method): +def test_Template_bad_input(): with pytest.raises(ValueError): - BarlowBeestonLite([1, 2], [1, 2, 3], [], method=method) + Template([1, 2], [1, 2, 3], []) with pytest.raises(ValueError, match="do not match"): - BarlowBeestonLite([1, 2], [1, 2, 3], [[1, 2, 3], [1, 2, 3]], method=method) + Template([1, 2], [1, 2, 3], [[1, 2, 3], [1, 2, 3]]) with pytest.raises(ValueError, match="do not match"): - BarlowBeestonLite( + Template( [1, 2], [1, 2, 3], [[[1, 2], [3, 4]], [[1, 2], [3, 4], [5, 6]]], - method=method, ) with pytest.raises(ValueError, match="not understood"): - BarlowBeestonLite([1], [1, 2], [[1]], method="foo") + Template([1], [1, 2], [[1]], method="foo") with pytest.raises(ValueError, match="number of names"): - BarlowBeestonLite([1], [1, 2], [[1]], name=("b", "s")) + Template([1], [1, 2], [[1]], name=("b", "s")) @pytest.mark.skipif(not matplotlib_available, reason="matplotlib is needed") -def test_BarlowBeestonLite_visualize(): +def test_Template_visualize(): xe = [0, 1, 2] n = [1, 2] t = [[1, 2], [5, 4]] - c = BarlowBeestonLite(n, xe, t) + c = Template(n, xe, t) c.visualize((1, 2)) @@ -1166,24 +1197,49 @@ @pytest.mark.skipif(not matplotlib_available, reason="matplotlib is needed") -def test_BarlowBeestonLite_visualize_2D(): +def test_Template_visualize_2D(): xe = ([0, 1, 2], [0, 1, 2]) n = [[1, 2], [3, 4]] 
t = [[[1, 2], [1, 2]], [[5, 4], [5, 4]]] - c = BarlowBeestonLite(n, xe, t) + c = Template(n, xe, t) with pytest.raises(ValueError, match="not implemented for multi-dimensional"): c.visualize((1, 2)) -def test_BarlowBeestonLite_pickle(): +def test_Template_pickle(): n = np.array([1, 2, 3]) xe = np.array([0, 1, 2, 3]) t = np.array([[1, 1, 0], [0, 1, 3]]) - c = BarlowBeestonLite(n, xe, t) + c = Template(n, xe, t) b = pickle.dumps(c) c2 = pickle.loads(b) assert_equal(c.data, c2.data) + + +@pytest.mark.skipif(pyver < (3, 7), reason="module getattr requires Python-3.7+") +def test_deprecated(): + from iminuit import cost + + with pytest.warns(np.VisibleDeprecationWarning): + from iminuit.cost import BarlowBeestonLite + assert BarlowBeestonLite is cost.Template + + with pytest.warns(np.VisibleDeprecationWarning): + from iminuit.cost import barlow_beeston_lite_chi2_jsc + assert barlow_beeston_lite_chi2_jsc is cost.template_chi2_jsc + + with pytest.warns(np.VisibleDeprecationWarning): + from iminuit.cost import barlow_beeston_lite_chi2_hpd + assert barlow_beeston_lite_chi2_hpd is cost.template_chi2_da + + +def test_deprecated_Template_method(): + from iminuit import cost + + with pytest.warns(np.VisibleDeprecationWarning): + t = Template([1], [2, 3], [[1], [2]], method="hpd") + t._impl is cost.template_chi2_da diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/tests/test_draw.py new/iminuit-2.18.0/tests/test_draw.py --- old/iminuit-2.16.0/tests/test_draw.py 2022-08-16 18:02:13.000000000 +0200 +++ new/iminuit-2.18.0/tests/test_draw.py 2022-12-14 11:10:58.000000000 +0100 @@ -177,7 +177,7 @@ ipywidgets_available = True m = Minuit(cost, 1, 1) - with pytest.raises(ValueError, match="no visualize method"): + with pytest.raises(AttributeError, match="no visualize method"): m.interactive(raise_on_exception=True) with plot.assert_call(): diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/tests/test_minuit.py new/iminuit-2.18.0/tests/test_minuit.py --- old/iminuit-2.16.0/tests/test_minuit.py 2022-08-16 18:02:13.000000000 +0200 +++ new/iminuit-2.18.0/tests/test_minuit.py 2022-12-14 11:10:58.000000000 +0100 @@ -211,11 +211,13 @@ m.scipy() -def test_func0(): # check that providing gradient improves convergence +def test_func0(): m1 = func_test_helper(func0) m2 = func_test_helper(func0, grad=func0_grad) assert m1.ngrad == 0 assert m2.ngrad > 0 + # check that providing gradient improves convergence + assert m2.nfcn < m1.nfcn def test_lambda(): @@ -330,6 +332,7 @@ assert m.fixed == (False, True) assert m.limits["a"] == (0, 2) m.migrad() + assert m.fmin.ngrad > 0 assert_allclose(m.values, (1, 1), rtol=1e-2) c = m.covariance assert_allclose(c, ((1, 0), (0, 0)), rtol=1e-2) @@ -1034,20 +1037,23 @@ assert m.nfcn < ncalls_without_limit -def test_ngrad(): +@pytest.mark.parametrize("arg", (1, np.array([1.0, 2.0]))) +def test_ngrad(arg): class Func: ngrad = 0 def __call__(self, x): - return x**2 + return np.sum(x**2) def grad(self, x): self.ngrad += 1 + if np.ndim(x) == 1: + return 2 * x return [2 * x] # check that counting is accurate fcn = Func() - m = Minuit(fcn, 1) + m = Minuit(fcn, arg) m.migrad() assert m.ngrad > 0 assert m.ngrad == fcn.ngrad @@ -1061,6 +1067,15 @@ m.hesse() assert m.ngrad == before + m.reset() + m.migrad() + m2 = Minuit(lambda x: fcn(x), arg) + m2.migrad() + assert m.ngrad > 0 + assert m2.ngrad == 0 + # apparently this is not always the case: + # assert m2.nfcn > m.nfcn + def 
test_errordef(): m = Minuit(lambda x: x**2, 0) @@ -1619,3 +1634,14 @@ assert_allclose(m.errors, 10) m.errors = (1, 2) assert_allclose(m.errors, (1, 2)) + + +def test_visualize(): + m = Minuit(func0, 1, 1) + m.migrad() + with pytest.raises(AttributeError): + m.visualize() + + func0.visualize = lambda args: None + m.visualize() + del func0.visualize diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/iminuit-2.16.0/tests/test_util.py new/iminuit-2.18.0/tests/test_util.py --- old/iminuit-2.16.0/tests/test_util.py 2022-08-16 18:02:13.000000000 +0200 +++ new/iminuit-2.18.0/tests/test_util.py 2022-12-14 11:10:58.000000000 +0100 @@ -321,11 +321,12 @@ assert repr(mes) == f"<MErrors\n {mes['x']!r}\n>" -def test_FMin(): +@pytest.mark.parametrize("errordef", (0.5, 1.0)) +def test_FMin(errordef): fm = Namespace( fval=1.23456e-10, edm=1.23456e-10, - errordef=0.5, + errordef=errordef, is_valid=True, has_valid_parameters=True, has_accurate_covar=True, @@ -371,13 +372,19 @@ assert fmin != util.FMin(fm, "bar", 1, 2, 1, 0.1, 1.2) assert fmin != util.FMin(fm, "foo", 1, 2, 1, 0.1, 1.5) + if errordef == 1: + reduced_chi2 = fmin.fval + else: + reduced_chi2 = np.nan + assert repr(fmin) == ( - "<FMin algorithm='foo' edm=1.23456e-10 edm_goal=0.1 errordef=0.5 fval=1.23456e-10" + f"<FMin algorithm='foo' edm=1.23456e-10 edm_goal=0.1 errordef={errordef}" + " fval=1.23456e-10" " has_accurate_covar=True has_covariance=True has_made_posdef_covar=False" " has_parameters_at_limit=False has_posdef_covar=True" " has_reached_call_limit=False has_valid_parameters=True" " hesse_failed=False is_above_max_edm=False is_valid=True" - " nfcn=1 ngrad=2 reduced_chi2=2.46912e-10 time=1.2>" + f" nfcn=1 ngrad=2 reduced_chi2={reduced_chi2} time=1.2>" )
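[Editorial note for reviewers: taken together, the cost.py and test changes in this update rename BarlowBeestonLite to Template, rename its methods ("hpd" becomes "da", "asy" is new, "da" is the new default), and keep the old names importable with a VisibleDeprecationWarning via the module-level __getattr__ shim. A short sketch of the updated API, mirroring the values used in test_Template above; not part of the package sources.

    import numpy as np
    from iminuit import Minuit
    from iminuit.cost import Template  # was BarlowBeestonLite before 2.18

    xe = np.array([0, 1, 2, 3])            # bin edges
    t = np.array([[1, 1, 0], [0, 1, 3]])   # two template histograms
    n = t[0] + t[1]                        # observed counts

    c = Template(n, xe, t, method="da")    # "da" is the new default method
    m = Minuit(c, 1, 1)                    # initial yields, one per template
    m.migrad()

    # The old import path still works but warns:
    # from iminuit.cost import BarlowBeestonLite  # -> np.VisibleDeprecationWarning

End of editorial note.]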