Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package tensorflow2 for openSUSE:Factory 
checked in at 2021-04-10 15:27:25
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/tensorflow2 (Old)
 and      /work/SRC/openSUSE:Factory/.tensorflow2.new.2401 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "tensorflow2"

Sat Apr 10 15:27:25 2021 rev:18 rq:883620 version:2.4.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/tensorflow2/tensorflow2.changes  2021-03-24 
16:11:16.767833686 +0100
+++ /work/SRC/openSUSE:Factory/.tensorflow2.new.2401/tensorflow2.changes        
2021-04-10 15:28:29.922457604 +0200
@@ -1,0 +2,11 @@
+Tue Apr  6 16:27:29 UTC 2021 - Ben Greiner <c...@bnavigator.de>
+
+- Don't BuildRequire keras_applications. Tensorflow provides it
+  itself: https://github.com/tensorflow/tensorflow/commit/23c3bdaa
+- These were discovered by the Keras test suite:
+  * add numpy-tensor-small.patch for Numpy >= 1.20
+    gh#tensorflow/tensorflow#47691
+  * add tf-keras-hdf5-3.patch for hdf5 >= 3.0
+    gh#tensorflow/tensorflow#44467
+
+-------------------------------------------------------------------

New:
----
  numpy-tensor-small.patch
  tf-keras-hdf5-3.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ tensorflow2.spec ++++++
--- /var/tmp/diff_new_pack.ooEDVB/_old  2021-04-10 15:28:31.314459242 +0200
+++ /var/tmp/diff_new_pack.ooEDVB/_new  2021-04-10 15:28:31.318459246 +0200
@@ -93,6 +93,7 @@
 %{!?compiler_family:%global compiler_family gnu}
 %{hpc_init -c %compiler_family %{?with_mpi:-m %mpi_flavor} %{?c_f_ver:-v 
%{c_f_ver}} %{?mpi_ver:-V %{mpi_ver}} %{?ext:-e %{ext}}}
 %{?with_mpi:%global hpc_module_pname p%{pname}}
+# hpc macros expect this, but we do not use python-rpm-macros
 %define python_flavor python3
 %define package_name   %{hpc_package_name %_vers}
 %define package_name_provide tensorflow2%{hpc_package_name_tail}
@@ -204,9 +205,12 @@
 Patch14:        fix-lite.patch
 # Fix from upstream for gcc10.1
 Patch20:        removed-clog-build-as-included-in-cpuinfo.patch
+# Fix for numpy 1.20 -- https://stackoverflow.com/questions/66373169 , 
https://github.com/tensorflow/tensorflow/issues/47691
+Patch21:        numpy-tensor-small.patch
+# Fix for hdf5 3.0 -- https://github.com/tensorflow/tensorflow/issues/44467
+Patch22:        tf-keras-hdf5-3.patch
 
 Requires:       python3
-Requires:       python3-Keras-Applications
 Requires:       python3-Keras-Preprocessing
 Requires:       python3-abseil
 Requires:       python3-astor
@@ -244,7 +248,6 @@
 BuildRequires:  bazel-toolchains-source
 BuildRequires:  bazel-workspaces
 #BuildRequires:  bazel-rules-foreign-cc-source
-#BuildRequires:  bazel-rules-python-source
 %endif
 BuildRequires:  curl
 %if %{with cuda}
@@ -303,7 +306,6 @@
 BuildRequires:  python-pybind11-common-devel
 BuildRequires:  python3
 BuildRequires:  python3-Cython
-BuildRequires:  python3-Keras-Applications
 BuildRequires:  python3-Keras-Preprocessing
 BuildRequires:  python3-abseil
 BuildRequires:  python3-astor
@@ -402,7 +404,6 @@
 
 This package provides examples from the website.
 
-
 %package -n libtensorflow%{libmaj}%{?hpc_package_name_tail}
 Summary:        Shared library for tensorflow
 Group:          Libraries
@@ -450,7 +451,6 @@
 %endif
 
 %prep
-%{?!python_module:%define python_module() python-%{**} python3-%{**}}
 # fighting bazel
 %define bazeldir %{_sourcedir}/BAZEL
 %define bz_cachdir %{_sourcedir}/BAZEL_CACHE
@@ -502,6 +502,8 @@
 %patch13 -p 1
 %patch14 -p 1
 %patch20 -p 1
+%patch21 -p 1
+%patch22 -p 1
 
 %define make_depend_src() test -e $(basename %{1}| sed 's/-.*//') && rmdir 
%{?2}%{!?2:$(basename %{1}| sed 's/-.*//')}; test -e %{2} && rmdir %{2}; tar 
xzf %{1}; mv $(basename %{1} | sed 's/\.tar\.gz//' ) %{?2}%{!?2:$(basename 
%{1}| sed 's/-.*//')}
 # extract bazel rules
@@ -841,18 +843,21 @@
 %post -n libtensorflow_framework%{libmaj}%{?hpc_package_name_tail} -p 
/sbin/ldconfig
 %postun -n libtensorflow_framework%{libmaj}%{?hpc_package_name_tail} -p 
/sbin/ldconfig
 
-# Lite version is very different so package it separetly
 %if %{is_lite}
 %files
+# Lite version is very different so package it separetly
 %{package_bindir}/*
+
 %files -n %{package_name}-devel
 %{package_libdir}/libtensorflow-lite.a
 %dir %{_includedir}/tensorflow/lite/
 %{_includedir}/tensorflow/lite/*
 %{package_libdir}/pkgconfig/*.pc
-%else # not lite build
+
+%else
 
 %files
+# not lite build
 %defattr(-,root,root,-)
 %{package_bindir}/estimator_ckpt_converter
 %{package_bindir}/saved_model_cli
@@ -871,6 +876,7 @@
 %if %{with hpc}
 %hpc_modules_files
 %endif
+
 %files -n %{package_name}-devel
 %{package_python_sitelib}/tensorflow_core/include
 #%%{package_python_sitearch}/tensorflow_core/include
@@ -881,16 +887,21 @@
 %if %{without hpc}
 %{package_libdir}/pkgconfig/*.pc
 %endif
+
 %files -n libtensorflow_framework%{libmaj}%{?hpc_package_name_tail}
 %{package_libdir}/libtensorflow_framework.so.%{libmaj}*
+
 %files -n libtensorflow_cc%{libmaj}%{?hpc_package_name_tail}
 %{package_libdir}/libtensorflow_cc.so.%{libmaj}*
+
 %files -n libtensorflow%{libmaj}%{?hpc_package_name_tail}
 %{package_libdir}/libtensorflow.so.%{libmaj}*
+
 %ifarch x86_64
 %files -n libiomp5%{?hpc_package_name_tail}
 %{package_libdir}/libiomp5.so
 %endif
+
 %files -n %{package_name}-doc
 #%%{package_python_sitelib}/tensorflow/examples
 %license THIRD_PARTY_TF_C_LICENSES LICENSE




++++++ numpy-tensor-small.patch ++++++
Index: tensorflow2-2.4.0/tensorflow/python/ops/array_ops.py
===================================================================
--- tensorflow2-2.4.0.orig/tensorflow/python/ops/array_ops.py
+++ tensorflow2-2.4.0/tensorflow/python/ops/array_ops.py
@@ -35,6 +35,7 @@ from tensorflow.python.framework import
 from tensorflow.python.framework.constant_op import constant
 from tensorflow.python.ops import gen_array_ops
 from tensorflow.python.ops import gen_math_ops
+from tensorflow.python.ops import math_ops
 # go/tf-wildcard-import
 # pylint: disable=wildcard-import
 from tensorflow.python.ops.gen_array_ops import *
@@ -2801,7 +2802,7 @@ def matrix_set_diag(
 
 def _constant_if_small(value, shape, dtype, name):
   try:
-    if np.prod(shape) < 1000:
+    if math_ops.reduce_prod(shape) < 1000:
       return constant(value, shape=shape, dtype=dtype, name=name)
   except TypeError:
     # Happens when shape is a Tensor, list with Tensor elements, etc.


++++++ tf-keras-hdf5-3.patch ++++++
Index: tensorflow2-2.4.0/tensorflow/python/keras/saving/hdf5_format.py
===================================================================
--- tensorflow2-2.4.0.orig/tensorflow/python/keras/saving/hdf5_format.py
+++ tensorflow2-2.4.0/tensorflow/python/keras/saving/hdf5_format.py
@@ -179,7 +179,7 @@ def load_model_from_hdf5(filepath, custo
     model_config = f.attrs.get('model_config')
     if model_config is None:
       raise ValueError('No model found in config file.')
-    model_config = json_utils.decode(model_config.decode('utf-8'))
+    model_config = json_utils.decode(model_config)
     model = model_config_lib.model_from_config(model_config,
                                                custom_objects=custom_objects)
 
@@ -193,7 +193,7 @@ def load_model_from_hdf5(filepath, custo
         logging.warning('No training configuration found in the save file, so '
                         'the model was *not* compiled. Compile it manually.')
         return model
-      training_config = json_utils.decode(training_config.decode('utf-8'))
+      training_config = json_utils.decode(training_config)
 
       # Compile model.
       model.compile(**saving_utils.compile_args_from_training_config(
@@ -659,11 +659,11 @@ def load_weights_from_hdf5_group(f, laye
           and weights file.
   """
   if 'keras_version' in f.attrs:
-    original_keras_version = f.attrs['keras_version'].decode('utf8')
+    original_keras_version = f.attrs['keras_version']
   else:
     original_keras_version = '1'
   if 'backend' in f.attrs:
-    original_backend = f.attrs['backend'].decode('utf8')
+    original_backend = f.attrs['backend']
   else:
     original_backend = None
 
@@ -678,7 +678,7 @@ def load_weights_from_hdf5_group(f, laye
   for name in layer_names:
     g = f[name]
     weight_names = load_attributes_from_hdf5_group(g, 'weight_names')
-    if weight_names:
+    if np.any(weight_names):
       filtered_layer_names.append(name)
   layer_names = filtered_layer_names
   if len(layer_names) != len(filtered_layers):
@@ -730,11 +730,11 @@ def load_weights_from_hdf5_group_by_name
           and weights file and skip_match=False.
   """
   if 'keras_version' in f.attrs:
-    original_keras_version = f.attrs['keras_version'].decode('utf8')
+    original_keras_version = f.attrs['keras_version']
   else:
     original_keras_version = '1'
   if 'backend' in f.attrs:
-    original_backend = f.attrs['backend'].decode('utf8')
+    original_backend = f.attrs['backend']
   else:
     original_backend = None
 
@@ -849,13 +849,13 @@ def load_attributes_from_hdf5_group(grou
       data: Attributes data.
   """
   if name in group.attrs:
-    data = [n.decode('utf8') for n in group.attrs[name]]
+    data = group.attrs[name]
   else:
     data = []
     chunk_id = 0
     while '%s%d' % (name, chunk_id) in group.attrs:
       data.extend(
-          [n.decode('utf8') for n in group.attrs['%s%d' % (name, chunk_id)]])
+          group.attrs['%s%d' % (name, chunk_id)])
       chunk_id += 1
   return data
 

Reply via email to