Script 'mail_helper' called by obssrc Hello community, here is the log from the commit of package python-scikit-learn for openSUSE:Factory checked in at 2025-06-27 23:00:29 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Comparing /work/SRC/openSUSE:Factory/python-scikit-learn (Old) and /work/SRC/openSUSE:Factory/.python-scikit-learn.new.7067 (New) ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "python-scikit-learn" Fri Jun 27 23:00:29 2025 rev:36 rq:1288244 version:1.7.0 Changes: -------- --- /work/SRC/openSUSE:Factory/python-scikit-learn/python-scikit-learn.changes 2025-02-04 18:10:59.683729999 +0100 +++ /work/SRC/openSUSE:Factory/.python-scikit-learn.new.7067/python-scikit-learn.changes 2025-06-27 23:01:15.993160061 +0200 @@ -1,0 +2,12 @@ +Tue Jun 24 11:07:04 UTC 2025 - Markéta Machová <mmach...@suse.com> + +- Update to 1.7.0 + * Improved estimator’s HTML representation + * Custom validation set for histogram-based Gradient Boosting estimators + * Plotting ROC curves from cross-validation results + * Array API support + * Improved API consistency of Multi-layer Perceptron + * Migration toward sparse arrays +- Add upstream scipy-iprint.patch to fix tests with recent scipy + +------------------------------------------------------------------- Old: ---- scikit_learn-1.6.1.tar.gz New: ---- scikit_learn-1.7.0.tar.gz scipy-iprint.patch ----------(New B)---------- New: * Migration toward sparse arrays - Add upstream scipy-iprint.patch to fix tests with recent scipy ----------(New E)---------- ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ python-scikit-learn.spec ++++++ --- /var/tmp/diff_new_pack.gsmaoG/_old 2025-06-27 23:01:17.373216938 +0200 +++ /var/tmp/diff_new_pack.gsmaoG/_new 2025-06-27 23:01:17.377217103 +0200 @@ -41,16 +41,16 @@ %endif # optionally test with extra packages %bcond_with extratest -# enable pytest color output for local debugging: osc --with pytestcolor -%bcond_with pytestcolor Name: python-scikit-learn%{psuffix} -Version: 1.6.1 +Version: 1.7.0 Release: 0 Summary: Python modules for machine learning and data mining License: BSD-3-Clause URL: https://scikit-learn.org/ Source0: https://files.pythonhosted.org/packages/source/s/scikit-learn/scikit_learn-%{version}.tar.gz +# PATCH-FIX-UPSTREAM https://github.com/scikit-learn/scikit-learn/pull/31642 MNT Remove 
deprecated iprint and disp usage in scipy 1.15 LBFGS +Patch0: scipy-iprint.patch BuildRequires: %{python_module Cython >= 3.0.10} BuildRequires: %{python_module devel >= 3.8} BuildRequires: %{python_module joblib >= 1.2.0} @@ -106,9 +106,6 @@ %prep %autosetup -p1 -n scikit_learn-%{version} rm -rf sklearn/.pytest_cache -%if !%{with pytestcolor} -sed -i '/--color=yes/d' setup.cfg -%endif %build %if !%{with test} ++++++ scikit_learn-1.6.1.tar.gz -> scikit_learn-1.7.0.tar.gz ++++++ /work/SRC/openSUSE:Factory/python-scikit-learn/scikit_learn-1.6.1.tar.gz /work/SRC/openSUSE:Factory/.python-scikit-learn.new.7067/scikit_learn-1.7.0.tar.gz differ: char 26, line 1 ++++++ scipy-iprint.patch ++++++ >From 37edfee05f42a46f04573600a98f45c9902d5848 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= <loic.est...@ymail.com> Date: Tue, 24 Jun 2025 10:43:54 +0200 Subject: [PATCH 1/2] Remove deprecated iprint usage in scipy 1.15 LBFGS --- sklearn/linear_model/_glm/_newton_solver.py | 3 ++- sklearn/linear_model/_glm/glm.py | 3 ++- sklearn/linear_model/_huber.py | 7 ++++++- sklearn/linear_model/_logistic.py | 3 ++- sklearn/neural_network/_multilayer_perceptron.py | 3 ++- sklearn/utils/fixes.py | 10 ++++++++++ 6 files changed, 24 insertions(+), 5 deletions(-) diff --git a/sklearn/linear_model/_glm/_newton_solver.py b/sklearn/linear_model/_glm/_newton_solver.py index c5c940bed6c39..7d89c4a5d76e1 100644 --- a/sklearn/linear_model/_glm/_newton_solver.py +++ b/sklearn/linear_model/_glm/_newton_solver.py @@ -14,6 +14,7 @@ from ..._loss.loss import HalfSquaredError from ...exceptions import ConvergenceWarning +from ...utils.fixes import _get_lbfgs_iprint_options_dict from ...utils.optimize import _check_optimize_result from .._linear_loss import LinearModelLoss @@ -187,9 +188,9 @@ def fallback_lbfgs_solve(self, X, y, sample_weight): options={ "maxiter": max_iter, "maxls": 50, # default is 20 - "iprint": self.verbose - 1, "gtol": self.tol, "ftol": 64 * np.finfo(np.float64).eps, + 
**_get_lbfgs_iprint_options_dict(self.verbose - 1), }, args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads), ) diff --git a/sklearn/linear_model/_glm/glm.py b/sklearn/linear_model/_glm/glm.py index 7f138f420dc36..c2fc1a221a9e3 100644 --- a/sklearn/linear_model/_glm/glm.py +++ b/sklearn/linear_model/_glm/glm.py @@ -21,6 +21,7 @@ from ...utils import check_array from ...utils._openmp_helpers import _openmp_effective_n_threads from ...utils._param_validation import Hidden, Interval, StrOptions +from ...utils.fixes import _get_lbfgs_iprint_options_dict from ...utils.optimize import _check_optimize_result from ...utils.validation import _check_sample_weight, check_is_fitted, validate_data from .._linear_loss import LinearModelLoss @@ -273,12 +274,12 @@ def fit(self, X, y, sample_weight=None): options={ "maxiter": self.max_iter, "maxls": 50, # default is 20 - "iprint": self.verbose - 1, "gtol": self.tol, # The constant 64 was found empirically to pass the test suite. # The point is that ftol is very small, but a bit larger than # machine precision for float64, which is the dtype used by lbfgs. 
"ftol": 64 * np.finfo(float).eps, + **_get_lbfgs_iprint_options_dict(self.verbose - 1), }, args=(X, y, sample_weight, l2_reg_strength, n_threads), ) diff --git a/sklearn/linear_model/_huber.py b/sklearn/linear_model/_huber.py index 51f24035a3c83..09d03520fe6e3 100644 --- a/sklearn/linear_model/_huber.py +++ b/sklearn/linear_model/_huber.py @@ -10,6 +10,7 @@ from ..utils._mask import axis0_safe_slice from ..utils._param_validation import Interval from ..utils.extmath import safe_sparse_dot +from ..utils.fixes import _get_lbfgs_iprint_options_dict from ..utils.optimize import _check_optimize_result from ..utils.validation import _check_sample_weight, validate_data from ._base import LinearModel @@ -329,7 +330,11 @@ def fit(self, X, y, sample_weight=None): method="L-BFGS-B", jac=True, args=(X, y, self.epsilon, self.alpha, sample_weight), - options={"maxiter": self.max_iter, "gtol": self.tol, "iprint": -1}, + options={ + "maxiter": self.max_iter, + "gtol": self.tol, + **_get_lbfgs_iprint_options_dict(-1), + }, bounds=bounds, ) diff --git a/sklearn/linear_model/_logistic.py b/sklearn/linear_model/_logistic.py index b85c01ee69f9e..f2a9972500f65 100644 --- a/sklearn/linear_model/_logistic.py +++ b/sklearn/linear_model/_logistic.py @@ -30,6 +30,7 @@ ) from ..utils._param_validation import Hidden, Interval, StrOptions from ..utils.extmath import row_norms, softmax +from ..utils.fixes import _get_lbfgs_iprint_options_dict from ..utils.metadata_routing import ( MetadataRouter, MethodMapping, @@ -464,9 +465,9 @@ def _logistic_regression_path( options={ "maxiter": max_iter, "maxls": 50, # default is 20 - "iprint": iprint, "gtol": tol, "ftol": 64 * np.finfo(float).eps, + **_get_lbfgs_iprint_options_dict(iprint), }, ) n_iter_i = _check_optimize_result( diff --git a/sklearn/neural_network/_multilayer_perceptron.py b/sklearn/neural_network/_multilayer_perceptron.py index a8a00fe3b4ac5..42eecd23c88a5 100644 --- a/sklearn/neural_network/_multilayer_perceptron.py +++ 
b/sklearn/neural_network/_multilayer_perceptron.py @@ -31,6 +31,7 @@ ) from ..utils._param_validation import Interval, Options, StrOptions from ..utils.extmath import safe_sparse_dot +from ..utils.fixes import _get_lbfgs_iprint_options_dict from ..utils.metaestimators import available_if from ..utils.multiclass import ( _check_partial_fit_first_call, @@ -585,8 +586,8 @@ def _fit_lbfgs( options={ "maxfun": self.max_fun, "maxiter": self.max_iter, - "iprint": iprint, "gtol": self.tol, + **_get_lbfgs_iprint_options_dict(iprint), }, args=( X, diff --git a/sklearn/utils/fixes.py b/sklearn/utils/fixes.py index d85ef82680bbb..cdd46c15b3272 100644 --- a/sklearn/utils/fixes.py +++ b/sklearn/utils/fixes.py @@ -392,3 +392,13 @@ def _in_unstable_openblas_configuration(): # See discussions in https://github.com/numpy/numpy/issues/19411 return True # pragma: no cover return False + + +# TODO: Remove when Scipy 1.15 is the minimum supported version +# In scipy 1.15, when LBFGS was converted from Fortran to C the internal info +# details (via 'iprint' options key) were dropped, see +# https://github.com/scipy/scipy/issues/23186#issuecomment-2987801035. +# For scipy 1.15, iprint has no effect and for scipy >= 1.16 a +# DeprecationWarning is emitted. 
+def _get_lbfgs_iprint_options_dict(iprint_value): + return {} if sp_version >= parse_version("1.15") else {"iprint": iprint_value} >From c2317c697fcc39385e7085df0f99a603e14f3366 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= <loic.est...@ymail.com> Date: Tue, 24 Jun 2025 11:52:54 +0200 Subject: [PATCH 2/2] [azure parallel] [scipy-dev] more fixes --- sklearn/linear_model/_glm/_newton_solver.py | 4 ++-- sklearn/linear_model/_glm/glm.py | 4 ++-- sklearn/linear_model/_huber.py | 4 ++-- sklearn/linear_model/_logistic.py | 4 ++-- sklearn/neighbors/_nca.py | 5 ++++- sklearn/neural_network/_multilayer_perceptron.py | 4 ++-- sklearn/utils/fixes.py | 4 ++-- 7 files changed, 16 insertions(+), 13 deletions(-) diff --git a/sklearn/linear_model/_glm/_newton_solver.py b/sklearn/linear_model/_glm/_newton_solver.py index 7d89c4a5d76e1..944789d5fec8a 100644 --- a/sklearn/linear_model/_glm/_newton_solver.py +++ b/sklearn/linear_model/_glm/_newton_solver.py @@ -14,7 +14,7 @@ from ..._loss.loss import HalfSquaredError from ...exceptions import ConvergenceWarning -from ...utils.fixes import _get_lbfgs_iprint_options_dict +from ...utils.fixes import _get_lbfgs_options_dict from ...utils.optimize import _check_optimize_result from .._linear_loss import LinearModelLoss @@ -190,7 +190,7 @@ def fallback_lbfgs_solve(self, X, y, sample_weight): "maxls": 50, # default is 20 "gtol": self.tol, "ftol": 64 * np.finfo(np.float64).eps, - **_get_lbfgs_iprint_options_dict(self.verbose - 1), + **_get_lbfgs_options_dict("iprint", self.verbose - 1), }, args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads), ) diff --git a/sklearn/linear_model/_glm/glm.py b/sklearn/linear_model/_glm/glm.py index c2fc1a221a9e3..eba379f219350 100644 --- a/sklearn/linear_model/_glm/glm.py +++ b/sklearn/linear_model/_glm/glm.py @@ -21,7 +21,7 @@ from ...utils import check_array from ...utils._openmp_helpers import _openmp_effective_n_threads from ...utils._param_validation import Hidden, 
Interval, StrOptions -from ...utils.fixes import _get_lbfgs_iprint_options_dict +from ...utils.fixes import _get_lbfgs_options_dict from ...utils.optimize import _check_optimize_result from ...utils.validation import _check_sample_weight, check_is_fitted, validate_data from .._linear_loss import LinearModelLoss @@ -279,7 +279,7 @@ def fit(self, X, y, sample_weight=None): # The point is that ftol is very small, but a bit larger than # machine precision for float64, which is the dtype used by lbfgs. "ftol": 64 * np.finfo(float).eps, - **_get_lbfgs_iprint_options_dict(self.verbose - 1), + **_get_lbfgs_options_dict("iprint", self.verbose - 1), }, args=(X, y, sample_weight, l2_reg_strength, n_threads), ) diff --git a/sklearn/linear_model/_huber.py b/sklearn/linear_model/_huber.py index 09d03520fe6e3..5528f282ff563 100644 --- a/sklearn/linear_model/_huber.py +++ b/sklearn/linear_model/_huber.py @@ -10,7 +10,7 @@ from ..utils._mask import axis0_safe_slice from ..utils._param_validation import Interval from ..utils.extmath import safe_sparse_dot -from ..utils.fixes import _get_lbfgs_iprint_options_dict +from ..utils.fixes import _get_lbfgs_options_dict from ..utils.optimize import _check_optimize_result from ..utils.validation import _check_sample_weight, validate_data from ._base import LinearModel @@ -333,7 +333,7 @@ def fit(self, X, y, sample_weight=None): options={ "maxiter": self.max_iter, "gtol": self.tol, - **_get_lbfgs_iprint_options_dict(-1), + **_get_lbfgs_options_dict("iprint", -1), }, bounds=bounds, ) diff --git a/sklearn/linear_model/_logistic.py b/sklearn/linear_model/_logistic.py index f2a9972500f65..a7137aa585982 100644 --- a/sklearn/linear_model/_logistic.py +++ b/sklearn/linear_model/_logistic.py @@ -30,7 +30,7 @@ ) from ..utils._param_validation import Hidden, Interval, StrOptions from ..utils.extmath import row_norms, softmax -from ..utils.fixes import _get_lbfgs_iprint_options_dict +from ..utils.fixes import _get_lbfgs_options_dict from 
..utils.metadata_routing import ( MetadataRouter, MethodMapping, @@ -467,7 +467,7 @@ def _logistic_regression_path( "maxls": 50, # default is 20 "gtol": tol, "ftol": 64 * np.finfo(float).eps, - **_get_lbfgs_iprint_options_dict(iprint), + **_get_lbfgs_options_dict("iprint", iprint), }, ) n_iter_i = _check_optimize_result( diff --git a/sklearn/neighbors/_nca.py b/sklearn/neighbors/_nca.py index a4ef3c303b851..c89742edb115a 100644 --- a/sklearn/neighbors/_nca.py +++ b/sklearn/neighbors/_nca.py @@ -25,6 +25,7 @@ from ..preprocessing import LabelEncoder from ..utils._param_validation import Interval, StrOptions from ..utils.extmath import softmax +from ..utils.fixes import _get_lbfgs_options_dict from ..utils.multiclass import check_classification_targets from ..utils.random import check_random_state from ..utils.validation import check_array, check_is_fitted, validate_data @@ -312,7 +313,9 @@ def fit(self, X, y): "jac": True, "x0": transformation, "tol": self.tol, - "options": dict(maxiter=self.max_iter, disp=disp), + "options": dict( + maxiter=self.max_iter, **_get_lbfgs_options_dict("disp", disp) + ), "callback": self._callback, } diff --git a/sklearn/neural_network/_multilayer_perceptron.py b/sklearn/neural_network/_multilayer_perceptron.py index 42eecd23c88a5..7b8dc5d7e2395 100644 --- a/sklearn/neural_network/_multilayer_perceptron.py +++ b/sklearn/neural_network/_multilayer_perceptron.py @@ -31,7 +31,7 @@ ) from ..utils._param_validation import Interval, Options, StrOptions from ..utils.extmath import safe_sparse_dot -from ..utils.fixes import _get_lbfgs_iprint_options_dict +from ..utils.fixes import _get_lbfgs_options_dict from ..utils.metaestimators import available_if from ..utils.multiclass import ( _check_partial_fit_first_call, @@ -587,7 +587,7 @@ def _fit_lbfgs( "maxfun": self.max_fun, "maxiter": self.max_iter, "gtol": self.tol, - **_get_lbfgs_iprint_options_dict(iprint), + **_get_lbfgs_options_dict("iprint", iprint), }, args=( X, diff --git 
a/sklearn/utils/fixes.py b/sklearn/utils/fixes.py index cdd46c15b3272..da010600c7649 100644 --- a/sklearn/utils/fixes.py +++ b/sklearn/utils/fixes.py @@ -400,5 +400,5 @@ def _in_unstable_openblas_configuration(): # https://github.com/scipy/scipy/issues/23186#issuecomment-2987801035. # For scipy 1.15, iprint has no effect and for scipy >= 1.16 a # DeprecationWarning is emitted. -def _get_lbfgs_iprint_options_dict(iprint_value): - return {} if sp_version >= parse_version("1.15") else {"iprint": iprint_value} +def _get_lbfgs_options_dict(key, iprint_value): + return {} if sp_version >= parse_version("1.15") else {key: iprint_value}