Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-arf for openSUSE:Factory checked in at 2023-12-03 20:49:12
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-arf (Old)
 and      /work/SRC/openSUSE:Factory/.python-arf.new.25432 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-arf"

Sun Dec  3 20:49:12 2023 rev:8 rq:1130508 version:2.6.4

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-arf/python-arf.changes    2022-07-13 13:45:23.378031939 +0200
+++ /work/SRC/openSUSE:Factory/.python-arf.new.25432/python-arf.changes 2023-12-03 20:49:34.339448916 +0100
@@ -1,0 +2,7 @@
+Sat Dec  2 19:45:38 UTC 2023 - Dirk Müller <dmuel...@suse.com>
+
+- update to 2.6.4:
+  * no upstream changelog available
+- drop h5py-fixup.patch, arf-pr10-h5py-open.patch (upstream)
+
+-------------------------------------------------------------------

Old:
----
  arf-2.6.1.tar.gz
  arf-pr10-h5py-open.patch
  h5py-fixup.patch

New:
----
  arf-2.6.4.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-arf.spec ++++++
--- /var/tmp/diff_new_pack.kVT8zF/_old  2023-12-03 20:49:34.959471691 +0100
+++ /var/tmp/diff_new_pack.kVT8zF/_new  2023-12-03 20:49:34.963471838 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package python-arf
 #
-# Copyright (c) 2022 SUSE LLC
+# Copyright (c) 2023 SUSE LLC
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -16,10 +16,9 @@
 #
 
 
-%{?!python_module:%define python_module() python3-%{**}}
-%define skip_python2 1
+%{?sle15_python_module_pythons}
 Name:           python-arf
-Version:        2.6.1
+Version:        2.6.4
 Release:        0
 # Note: I know that "advertisement" words are frowned on, but in this case
 # the package name is an acronym so "advanced" needs to stay in
@@ -27,10 +26,6 @@
 License:        GPL-2.0-only
 URL:            https://github.com/melizalab/arf
 Source:         https://files.pythonhosted.org/packages/source/a/arf/arf-%{version}.tar.gz
-# PATCH-FIX-UPSTREAM arf-pr10-h5py-open.patch -- gh#melizalab/arf#10
-Patch0:         arf-pr10-h5py-open.patch
-# PATCH-FIX-UPSTREAM h5py-fixup.patch -- gh#melizalab/arf#12
-Patch1:         h5py-fixup.patch
 BuildRequires:  %{python_module setuptools}
 BuildRequires:  fdupes
 BuildRequires:  python-rpm-macros

++++++ arf-2.6.1.tar.gz -> arf-2.6.4.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/arf-2.6.1/PKG-INFO new/arf-2.6.4/PKG-INFO
--- old/arf-2.6.1/PKG-INFO      2020-12-01 15:24:54.000000000 +0100
+++ new/arf-2.6.4/PKG-INFO      2022-07-21 03:09:27.761886800 +0200
@@ -1,21 +1,14 @@
-Metadata-Version: 1.2
+Metadata-Version: 2.1
 Name: arf
-Version: 2.6.1
+Version: 2.6.4
 Summary: Advanced Recording Format for acoustic, behavioral, and physiological data
 Home-page: https://github.com/melizalab/arf
 Author: Dan Meliza
+Author-email: d...@meliza.org
 Maintainer: Dan Meliza
-License: UNKNOWN
-Download-URL: https://github.com/melizalab/arf/archive/2.6.1.tar.gz
-Description: 
-        Library for reading and writing Advanced Recording Format files. ARF files
-        are HDF5 files used to store audio and neurophysiological recordings in a
-        rational, hierarchical format. Data are organized around the concept of an
-        entry, which is a set of data channels that all start at the same time.
-        Supported data types include sampled data and event data (i.e. spike times).
-        Requires h5py (at least 2.8) and numpy (at least 1.3).
-        
-Platform: UNKNOWN
+Maintainer-email: d...@meliza.org
+License: BSD 3-Clause License
+Keywords: one,two
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Science/Research
 Classifier: License :: OSI Approved :: GNU General Public License (GPL)
@@ -27,3 +20,136 @@
 Classifier: Operating System :: POSIX :: Linux
 Classifier: Operating System :: MacOS :: MacOS X
 Classifier: Natural Language :: English
+Requires-Python: >=3.6
+License-File: COPYING
+
+## arf
+
+[![Build Status](https://travis-ci.org/melizalab/arf.png?branch=master)](https://travis-ci.org/melizalab/arf)
+
+The Advanced Recording Format [ARF](https://meliza.org/spec:1/arf/) is an open
+standard for storing data from neuronal, acoustic, and behavioral experiments in
+a portable, high-performance, archival format. The goal is to enable labs to
+share data and tools, and to allow valuable data to be accessed and analyzed for
+many years in the future.
+
+**ARF** is built on the [HDF5](http://www.hdfgroup.org/HDF5/) format, and
+all arf files are accessible through standard HDF5 tools, including interfaces
+to HDF5 written for other languages (e.g. MATLAB, Python, etc.). **ARF**
+comprises a set of specifications on how different kinds of data are stored. The
+organization of ARF files is based around the concept of an *entry*, a
+collection of data channels associated with a particular point in time. An entry
+might contain one or more of the following:
+
+-   raw extracellular neural signals recorded from a multichannel probe
+-   spike times extracted from neural data
+-   acoustic signals from a microphone
+-   times when an animal interacted with a behavioral apparatus
+-   the times when a real-time signal analyzer detected vocalization
+
+Entries and datasets have metadata attributes describing how the data were
+collected. Datasets and entries retain these attributes when copied or moved
+between arf files, helping to prevent data from becoming orphaned and
+uninterpretable.
+
+This repository contains:
+
+-   The specification for arf (in specification.md). This is also hosted at https://meliza.org/spec:1/arf/.
+-   A fast, type-safe C++ interface for reading and writing arf files
+-   A python interface for reading and writing arf files (based on h5py).
+
+### contributing
+
+ARF is under active development and we welcome comments and contributions from
+neuroscientists and behavioral biologists interested in using it. We're
+particularly interested in use cases that don't fit the current specification.
+Please post issues or contact Dan Meliza (dan at meliza.org) directly.
+
+The MATLAB interface is out of date and could use some work.
+
+### installation
+
+ARF files require HDF5>=1.8 (<http://www.hdfgroup.org/HDF5>).
+
+The python interface requires Python 3.6 or greater, numpy>=1.19, and h5py>=2.10. The last version to support Python 2 was `2.5.1`. To install the module:
+
+```bash
+pip install arf
+```
+
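A minimal sketch of using the installed module, based on the `open_file`, `create_entry`, and `create_dataset` functions that appear in `arf.py` later in this diff (the file and entry names here are invented):

```python
import numpy as np
import arf

# the "core" driver with backing_store=False keeps the file entirely in memory
fp = arf.open_file("example.arf", mode="w", driver="core", backing_store=False)
entry = arf.create_entry(fp, "entry_001", 0)  # 0 = timestamp (seconds since epoch)
arf.create_dataset(entry, "pcm", np.zeros(1000, dtype="h"),
                   sampling_rate=20000, datatype=arf.DataTypes.ACOUSTIC)
print(list(fp.keys()))  # ['entry_001']
fp.close()
```
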
+To use the C++ interface, you need boost>=1.42 (<http://boost.org>). In addition,
+if writing multithreaded code, HDF5 needs to be compiled with
+`--enable-threadsafe`. The interface is header-only and does not need to be
+compiled. To install:
+
+```bash
+make install
+```
+
+### version information
+
+The specification and implementations provided in this project use a form of
+semantic versioning (<http://semver.org>). Specifications receive a major and
+minor version number. Changes to minor version numbers must be backwards
+compatible (i.e., only added requirements). The current released version of the
+ARF specification is `2.1`.
+
+Implementation versions are synchronized with the major version of the
+specification but otherwise evolve independently. For example, the python `arf`
+package version `2.1.0` is compatible with any ARF version `2.x`.
+
+There was no public release of ARF prior to `2.0`.
+
+### access ARF files with HDF5 tools
+
+This section describes how to inspect ARF files using standard tools, in the
+event that the interfaces described here cease to function.
+
+The structure of an ARF file can be explored using the `h5ls` tool. For example,
+to list entries:
+
+```bash
+$ h5ls file.arf
+test_0001                Group
+test_0002                Group
+test_0003                Group
+test_0004                Group
+```
+
+Each entry appears as a Group. To list the contents of an entry, use path
+notation:
+
+```bash
+$ h5ls file.arf/test_0001
+pcm                      Dataset {609914}
+```
+
+This shows that the data in `test_0001` is stored in a single node, `pcm`, with
+609914 data points. Typically each channel will have its own dataset.
+
+The `h5dump` command can be used to output data in binary format. See the HDF5
+documentation for details on how to structure the output. For example, to
+extract sampled data to a 16-bit little-endian file (i.e., PCM format):
+
+```bash
+h5dump -d /test_0001/pcm -b LE -o test_0001.pcm file.arf
+```
+
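A rough h5py equivalent of the `h5dump` call above, for programmatic export (paths and names taken from this example):

```python
import h5py

# read the sampled data from one entry and write it as raw 16-bit little-endian PCM
with h5py.File("file.arf", "r") as fp:
    data = fp["/test_0001/pcm"][:]
data.astype("<i2").tofile("test_0001.pcm")
```
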
+### related projects
+
+- [arfx](https://github.com/melizalab/arfx) is a commandline tool for manipulating ARF files.
+
+#### open data formats
+
+-   [neurodata without borders](http://www.nwb.org) has similar goals and also
+    uses HDF5 for storage. The data schema is considerably more complex, but it
+    does seem to be achieving growing adoption.
+-   [pandora](https://github.com/G-Node/pandora) is also under active development
+
+#### i/o libraries
+
+-   [neo](https://github.com/NeuralEnsemble/python-neo) is a Python package for
+    working with electrophysiology data in Python, together with support for
+    reading a wide range of neurophysiology file formats.
+-   [neuroshare](http://neuroshare.org) is a set of routines for reading and
+    writing data in various proprietary and open formats.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/arf-2.6.1/arf.egg-info/PKG-INFO new/arf-2.6.4/arf.egg-info/PKG-INFO
--- old/arf-2.6.1/arf.egg-info/PKG-INFO 2020-12-01 15:24:53.000000000 +0100
+++ new/arf-2.6.4/arf.egg-info/PKG-INFO 2022-07-21 03:09:27.000000000 +0200
@@ -1,21 +1,14 @@
-Metadata-Version: 1.2
+Metadata-Version: 2.1
 Name: arf
-Version: 2.6.1
+Version: 2.6.4
 Summary: Advanced Recording Format for acoustic, behavioral, and physiological data
 Home-page: https://github.com/melizalab/arf
 Author: Dan Meliza
+Author-email: d...@meliza.org
 Maintainer: Dan Meliza
-License: UNKNOWN
-Download-URL: https://github.com/melizalab/arf/archive/2.6.1.tar.gz
-Description: 
-        Library for reading and writing Advanced Recording Format files. ARF files
-        are HDF5 files used to store audio and neurophysiological recordings in a
-        rational, hierarchical format. Data are organized around the concept of an
-        entry, which is a set of data channels that all start at the same time.
-        Supported data types include sampled data and event data (i.e. spike times).
-        Requires h5py (at least 2.8) and numpy (at least 1.3).
-        
-Platform: UNKNOWN
+Maintainer-email: d...@meliza.org
+License: BSD 3-Clause License
+Keywords: one,two
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Science/Research
 Classifier: License :: OSI Approved :: GNU General Public License (GPL)
@@ -27,3 +20,136 @@
 Classifier: Operating System :: POSIX :: Linux
 Classifier: Operating System :: MacOS :: MacOS X
 Classifier: Natural Language :: English
+Requires-Python: >=3.6
+License-File: COPYING
+
+## arf
+
+[![Build Status](https://travis-ci.org/melizalab/arf.png?branch=master)](https://travis-ci.org/melizalab/arf)
+
+The Advanced Recording Format [ARF](https://meliza.org/spec:1/arf/) is an open
+standard for storing data from neuronal, acoustic, and behavioral experiments in
+a portable, high-performance, archival format. The goal is to enable labs to
+share data and tools, and to allow valuable data to be accessed and analyzed for
+many years in the future.
+
+**ARF** is built on the [HDF5](http://www.hdfgroup.org/HDF5/) format, and
+all arf files are accessible through standard HDF5 tools, including interfaces
+to HDF5 written for other languages (e.g. MATLAB, Python, etc.). **ARF**
+comprises a set of specifications on how different kinds of data are stored. The
+organization of ARF files is based around the concept of an *entry*, a
+collection of data channels associated with a particular point in time. An entry
+might contain one or more of the following:
+
+-   raw extracellular neural signals recorded from a multichannel probe
+-   spike times extracted from neural data
+-   acoustic signals from a microphone
+-   times when an animal interacted with a behavioral apparatus
+-   the times when a real-time signal analyzer detected vocalization
+
+Entries and datasets have metadata attributes describing how the data were
+collected. Datasets and entries retain these attributes when copied or moved
+between arf files, helping to prevent data from becoming orphaned and
+uninterpretable.
+
+This repository contains:
+
+-   The specification for arf (in specification.md). This is also hosted at https://meliza.org/spec:1/arf/.
+-   A fast, type-safe C++ interface for reading and writing arf files
+-   A python interface for reading and writing arf files (based on h5py).
+
+### contributing
+
+ARF is under active development and we welcome comments and contributions from
+neuroscientists and behavioral biologists interested in using it. We're
+particularly interested in use cases that don't fit the current specification.
+Please post issues or contact Dan Meliza (dan at meliza.org) directly.
+
+The MATLAB interface is out of date and could use some work.
+
+### installation
+
+ARF files require HDF5>=1.8 (<http://www.hdfgroup.org/HDF5>).
+
+The python interface requires Python 3.6 or greater, numpy>=1.19, and h5py>=2.10. The last version to support Python 2 was `2.5.1`. To install the module:
+
+```bash
+pip install arf
+```
+
+To use the C++ interface, you need boost>=1.42 (<http://boost.org>). In addition,
+if writing multithreaded code, HDF5 needs to be compiled with
+`--enable-threadsafe`. The interface is header-only and does not need to be
+compiled. To install:
+
+```bash
+make install
+```
+
+### version information
+
+The specification and implementations provided in this project use a form of
+semantic versioning (<http://semver.org>). Specifications receive a major and
+minor version number. Changes to minor version numbers must be backwards
+compatible (i.e., only added requirements). The current released version of the
+ARF specification is `2.1`.
+
+Implementation versions are synchronized with the major version of the
+specification but otherwise evolve independently. For example, the python `arf`
+package version `2.1.0` is compatible with any ARF version `2.x`.
+
+There was no public release of ARF prior to `2.0`.
+
+### access ARF files with HDF5 tools
+
+This section describes how to inspect ARF files using standard tools, in the
+event that the interfaces described here cease to function.
+
+The structure of an ARF file can be explored using the `h5ls` tool. For example,
+to list entries:
+
+```bash
+$ h5ls file.arf
+test_0001                Group
+test_0002                Group
+test_0003                Group
+test_0004                Group
+```
+
+Each entry appears as a Group. To list the contents of an entry, use path
+notation:
+
+```bash
+$ h5ls file.arf/test_0001
+pcm                      Dataset {609914}
+```
+
+This shows that the data in `test_0001` is stored in a single node, `pcm`, with
+609914 data points. Typically each channel will have its own dataset.
+
+The `h5dump` command can be used to output data in binary format. See the HDF5
+documentation for details on how to structure the output. For example, to
+extract sampled data to a 16-bit little-endian file (i.e., PCM format):
+
+```bash
+h5dump -d /test_0001/pcm -b LE -o test_0001.pcm file.arf
+```
+
+### related projects
+
+- [arfx](https://github.com/melizalab/arfx) is a commandline tool for manipulating ARF files.
+
+#### open data formats
+
+-   [neurodata without borders](http://www.nwb.org) has similar goals and also
+    uses HDF5 for storage. The data schema is considerably more complex, but it
+    does seem to be achieving growing adoption.
+-   [pandora](https://github.com/G-Node/pandora) is also under active development
+
+#### i/o libraries
+
+-   [neo](https://github.com/NeuralEnsemble/python-neo) is a Python package for
+    working with electrophysiology data in Python, together with support for
+    reading a wide range of neurophysiology file formats.
+-   [neuroshare](http://neuroshare.org) is a set of routines for reading and
+    writing data in various proprietary and open formats.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/arf-2.6.1/arf.egg-info/SOURCES.txt new/arf-2.6.4/arf.egg-info/SOURCES.txt
--- old/arf-2.6.1/arf.egg-info/SOURCES.txt      2020-12-01 15:24:53.000000000 +0100
+++ new/arf-2.6.4/arf.egg-info/SOURCES.txt      2022-07-21 03:09:27.000000000 +0200
@@ -3,11 +3,13 @@
 Makefile
 README.md
 arf.py
+pyproject.toml
 setup.cfg
 setup.py
 arf.egg-info/PKG-INFO
 arf.egg-info/SOURCES.txt
 arf.egg-info/dependency_links.txt
+arf.egg-info/not-zip-safe
 arf.egg-info/requires.txt
 arf.egg-info/top_level.txt
 c++/arf.hpp
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/arf-2.6.1/arf.egg-info/not-zip-safe new/arf-2.6.4/arf.egg-info/not-zip-safe
--- old/arf-2.6.1/arf.egg-info/not-zip-safe     1970-01-01 01:00:00.000000000 +0100
+++ new/arf-2.6.4/arf.egg-info/not-zip-safe     2021-01-05 00:55:59.000000000 +0100
@@ -0,0 +1 @@
+
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/arf-2.6.1/arf.egg-info/requires.txt new/arf-2.6.4/arf.egg-info/requires.txt
--- old/arf-2.6.1/arf.egg-info/requires.txt     2020-12-01 15:24:53.000000000 +0100
+++ new/arf-2.6.4/arf.egg-info/requires.txt     2022-07-21 03:09:27.000000000 +0200
@@ -1 +1 @@
-h5py>=2.8
+h5py!=3.3.*,>=2.8
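The new requirement keeps the 2.8 floor but shuts out the whole h5py 3.3 series. A quick way to see how the specifier resolves, using the third-party `packaging` library (an assumption; nothing here depends on it):

```python
from packaging.requirements import Requirement

req = Requirement("h5py!=3.3.*,>=2.8")
for candidate in ["2.8", "3.3.2", "3.7.0"]:
    print(candidate, candidate in req.specifier)
# 2.8 True, 3.3.2 False, 3.7.0 True
```
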
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/arf-2.6.1/arf.py new/arf-2.6.4/arf.py
--- old/arf-2.6.1/arf.py        2020-11-30 21:02:02.000000000 +0100
+++ new/arf-2.6.4/arf.py        2022-07-21 03:03:01.000000000 +0200
@@ -8,18 +8,25 @@
 from __future__ import unicode_literals
 
 spec_version = "2.1"
-__version__ = version = "2.6.1"
+__version__ = version = "2.6.4"
 
 
 def version_info():
     from h5py.version import version as h5py_version, hdf5_version
-    return "Library versions:\n arf: %s\n h5py: %s\n HDF5: %s" % (__version__, 
h5py_version, hdf5_version)
+
+    return "Library versions:\n arf: %s\n h5py: %s\n HDF5: %s" % (
+        __version__,
+        h5py_version,
+        hdf5_version,
+    )
 
 
 class DataTypes:
     """Available data types, by name and integer code: """
+
     UNDEFINED, ACOUSTIC, EXTRAC_HP, EXTRAC_LF, EXTRAC_EEG, INTRAC_CC, INTRAC_VC = range(
-        0, 7)
+        0, 7
+    )
     EVENT, SPIKET, BEHAVET = range(1000, 1003)
     INTERVAL, STIMI, COMPONENTL = range(2000, 2003)
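
The integer codes are grouped by kind: 0-999 for sampled data, 1000-1999 for events, 2000 and up for intervals. A small usage sketch (relying on the `_todict` classmethod in the next hunk):

```python
import arf

print(arf.DataTypes.ACOUSTIC)    # 1    (sampled-data range)
print(arf.DataTypes.SPIKET)      # 1001 (event range)
names = arf.DataTypes._todict()  # reverse mapping {code: name}
print(names[1001])               # 'SPIKET'
```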
 
@@ -27,13 +34,15 @@
     def _doc(cls):
         out = str(cls.__doc__)
         for v, k in sorted(cls._todict().items()):
-            out += '\n%s:%d' % (k, v)
+            out += "\n%s:%d" % (k, v)
         return out
 
     @classmethod
     def _todict(cls):
         """ generate a dict keyed by value """
-        return dict((getattr(cls, attr), attr) for attr in dir(cls) if not attr.startswith('_'))
+        return dict(
+            (getattr(cls, attr), attr) for attr in dir(cls) if not attr.startswith("_")
+        )
 
     @classmethod
     def _fromstring(cls, s):
@@ -52,8 +61,9 @@
     import os
     from h5py.version import version as h5py_version
     from distutils.version import StrictVersion
-    from h5py import h5p
-    from h5py._hl import files
+    from h5py import h5p, File
+    # Caution: This is a private API of h5py, subject to change without notice
+    from h5py._hl import files as _files
 
     try:
         # If the byte string doesn't match the default
@@ -65,23 +75,34 @@
     exists = os.path.exists(name)
     try:
         fcpl = h5p.create(h5p.FILE_CREATE)
-        fcpl.set_link_creation_order(
-            h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED)
+        fcpl.set_link_creation_order(h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED)
     except AttributeError:
         # older version of h5py
-        fp = files.File(name, mode=mode, driver=driver,
-                        libver=libver, **kwargs)
+        fp = File(name, mode=mode, driver=driver, libver=libver, **kwargs)
     else:
+        posargs = []
         if StrictVersion(h5py_version) >= StrictVersion('2.9'):
-            kwargs.update(rdcc_nslots=None, rdcc_nbytes=None, rdcc_w0=None)
-        fapl = files.make_fapl(driver, libver, **kwargs)
-        fp = files.File(files.make_fid(name, mode, userblock_size, fapl, fcpl))
-
-    if not exists and fp.mode == 'r+':
-        set_attributes(fp,
-                       arf_library='python',
-                       arf_library_version=__version__,
-                       arf_version=spec_version)
+            posargs += ['rdcc_nslots', 'rdcc_nbytes', 'rdcc_w0']
+        if StrictVersion(h5py_version) >= StrictVersion('3.5'):
+            posargs += ['locking', 'page_buf_size', 'min_meta_keep', 'min_raw_keep']
+        if StrictVersion(h5py_version) >= StrictVersion('3.7'):
+            # these two must be integers, not None
+            kwargs.update({arg: kwargs.get(arg, 1) for arg in ['alignment_threshold', 'alignment_interval']})
+        if StrictVersion(h5py_version) >= StrictVersion('3.8'):
+            posargs += ['meta_block_size']
+        kwargs.update({arg: kwargs.get(arg, None) for arg in posargs})
+        fapl = _files.make_fapl(driver, libver, **kwargs)
+        fid = _files.make_fid(name, mode, userblock_size, fapl, fcpl=fcpl,
+                              swmr=kwargs.get('swmr', False))
+        fp = File(fid)
+
+    if not exists and fp.mode == "r+":
+        set_attributes(
+            fp,
+            arf_library="python",
+            arf_library_version=__version__,
+            arf_version=spec_version,
+        )
     return fp
 
 
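All of the version probing above stays behind one call signature; a hedged sketch of typical use, with the in-memory driver the test suite also uses:

```python
import arf

# an in-memory file; on disk this would just be arf.open_file("data.arf", "a")
fp = arf.open_file("scratch.arf", mode="w", driver="core", backing_store=False)
print(fp.attrs["arf_version"])  # "2.1", stamped on creation by the code above
fp.close()
```
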
@@ -106,25 +127,31 @@
     """
     # create group using low-level interface to store creation order
     from h5py import h5p, h5g, _hl
+
     try:
         gcpl = h5p.create(h5p.GROUP_CREATE)
-        gcpl.set_link_creation_order(
-            h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED)
+        gcpl.set_link_creation_order(h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED)
     except AttributeError:
         grp = group.create_group(name)
     else:
         name, lcpl = group._e(name, lcpl=True)
         grp = _hl.group.Group(h5g.create(group.id, name, lcpl=lcpl, gcpl=gcpl))
     set_uuid(grp, attributes.pop("uuid", None))
-    set_attributes(grp,
-                   timestamp=convert_timestamp(timestamp),
-                   **attributes)
+    set_attributes(grp, timestamp=convert_timestamp(timestamp), **attributes)
     return grp
 
 
-def create_dataset(group, name, data, units='', datatype=DataTypes.UNDEFINED,
-                   chunks=True, maxshape=None, compression=None,
-                   **attributes):
+def create_dataset(
+    group,
+    name,
+    data,
+    units="",
+    datatype=DataTypes.UNDEFINED,
+    chunks=True,
+    maxshape=None,
+    compression=None,
+    **attributes
+):
     """Create an ARF dataset under group, setting required attributes
 
     Required arguments:
@@ -156,41 +183,43 @@
     Returns the created dataset
     """
     from numpy import asarray
-    srate = attributes.get('sampling_rate', None)
+
+    srate = attributes.get("sampling_rate", None)
     # check data validity before doing anything
-    if not hasattr(data, 'dtype'):
+    if not hasattr(data, "dtype"):
         data = asarray(data)
-        if data.dtype.kind in ('S', 'O', 'U'):
-            raise ValueError(
-                "data must be in array with numeric or compound type")
-    if data.dtype.kind == 'V':
-        if 'start' not in data.dtype.names:
+        if data.dtype.kind in ("S", "O", "U"):
+            raise ValueError("data must be in array with numeric or compound 
type")
+    if data.dtype.kind == "V":
+        if "start" not in data.dtype.names:
             raise ValueError("complex event data requires 'start' field")
         if not isinstance(units, (list, tuple)):
             raise ValueError("complex event data requires sequence of units")
         if not len(units) == len(data.dtype.names):
             raise ValueError("number of units doesn't match number of fields")
-    if units == '':
+    if units == "":
         if srate is None or not srate > 0:
             raise ValueError(
-                "unitless data assumed time series and requires sampling_rate 
attribute")
-    elif units == 'samples':
+                "unitless data assumed time series and requires sampling_rate 
attribute"
+            )
+    elif units == "samples":
         if srate is None or not srate > 0:
             raise ValueError(
-                "data with units of 'samples' requires sampling_rate 
attribute")
+                "data with units of 'samples' requires sampling_rate attribute"
+            )
     # NB: can't really catch case where sampled data has units but doesn't
     # have sampling_rate attribute
 
     dset = group.create_dataset(
-        name, data=data, maxshape=maxshape, chunks=chunks, compression=compression)
+        name, data=data, maxshape=maxshape, chunks=chunks, compression=compression
+    )
     set_attributes(dset, units=units, datatype=datatype, **attributes)
     return dset
 
 
 def create_table(group, name, dtype, **attributes):
     """Create a new array dataset under group with compound datatype and 
maxshape=(None,)"""
-    dset = group.create_dataset(
-        name, shape=(0,), dtype=dtype, maxshape=(None,))
+    dset = group.create_dataset(name, shape=(0,), dtype=dtype, 
maxshape=(None,))
     set_attributes(dset, **attributes)
     return dset
 
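Given the unit checks above, a bare numeric array must carry a `sampling_rate`; a sketch of one valid call and the corresponding failure (names invented):

```python
import numpy as np
import arf

fp = arf.open_file("scratch.arf", "w", driver="core", backing_store=False)
entry = arf.create_entry(fp, "entry_001", 0)

# unitless data are treated as a time series, so sampling_rate is mandatory
arf.create_dataset(entry, "acoustic", np.random.randn(1000), sampling_rate=20000)

try:
    arf.create_dataset(entry, "broken", np.random.randn(1000))  # no sampling_rate
except ValueError as err:
    print(err)  # unitless data assumed time series and requires sampling_rate attribute
```
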
@@ -198,7 +227,7 @@
 def append_data(dset, data):
     """Append data to dset along axis 0. Data must be a single element or
     a 1D array of the same type as the dataset (including compound datatypes)."""
-    N = data.shape[0] if hasattr(data, 'shape') else 1
+    N = data.shape[0] if hasattr(data, "shape") else 1
     if N == 0:
         return
     oldlen = dset.shape[0]
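
`append_data` resizes along axis 0, so a dataset meant to grow has to be created with `maxshape=(None,)`; a short sketch:

```python
import numpy as np
import arf

fp = arf.open_file("scratch.arf", "w", driver="core", backing_store=False)
entry = arf.create_entry(fp, "entry_001", 0)
# maxshape=(None,) leaves axis 0 unbounded so the dataset can be extended
spikes = arf.create_dataset(entry, "spikes", np.array([0.5, 1.2]), units="s",
                            datatype=arf.DataTypes.SPIKET, maxshape=(None,))
arf.append_data(spikes, np.array([2.0, 3.1]))
print(spikes.shape)  # (4,)
```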
@@ -249,14 +278,16 @@
 
     """
     from distutils.version import StrictVersion as Version
+
     try:
-        ver = file.attrs.get('arf_version', None)
+        ver = file.attrs.get("arf_version", None)
         if ver is None:
-            ver = file.attrs['arf_library_version']
+            ver = file.attrs["arf_library_version"]
     except KeyError:
         raise UserWarning(
             "Unable to determine ARF version for {0.filename};"
-            "created by another program?".format(file))
+            "created by another program?".format(file)
+        )
     try:
         # if the attribute is stored as a string, it's ascii-encoded
         ver = ver.decode("ascii")
@@ -264,14 +295,16 @@
         pass
     # should be backwards compatible after 1.1
     file_version = Version(ver)
-    if file_version < Version('1.1'):
+    if file_version < Version("1.1"):
         raise DeprecationWarning(
             "ARF library {} may have trouble reading file "
-            "version {} (< 1.1)".format(version, file_version))
-    elif file_version >= Version('3.0'):
+            "version {} (< 1.1)".format(version, file_version)
+        )
+    elif file_version >= Version("3.0"):
         raise FutureWarning(
             "ARF library {} may be incompatible with file "
-            "version {} (>= 3.0)".format(version, file_version))
+            "version {} (>= 3.0)".format(version, file_version)
+        )
     return file_version
 
 
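For a file this library just wrote, the check simply returns the parsed version; a sketch:

```python
import arf

fp = arf.open_file("scratch.arf", "w", driver="core", backing_store=False)
print(arf.check_file_version(fp))  # 2.1 for files created by this library
fp.close()
```
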
@@ -300,17 +333,19 @@
 
     """
     from h5py import h5
+
     out = []
     try:
         group._id.links.iterate(
-            out.append, idx_type=h5.INDEX_CRT_ORDER, order=h5.ITER_INC)
+            out.append, idx_type=h5.INDEX_CRT_ORDER, order=h5.ITER_INC
+        )
     except (AttributeError, RuntimeError):
         # pre 2.2 shim
         def f(name):
-            if name.find(b'/', 1) == -1:
+            if name.find(b"/", 1) == -1:
                 out.append(name)
-        group._id.links.visit(
-            f, idx_type=h5.INDEX_CRT_ORDER, order=h5.ITER_INC)
+
+        group._id.links.visit(f, idx_type=h5.INDEX_CRT_ORDER, order=h5.ITER_INC)
     return map(group._d, out)
 
 
@@ -331,7 +366,7 @@
     from time import mktime, struct_time
     from numpy import zeros
 
-    out = zeros(2, dtype='int64')
+    out = zeros(2, dtype="int64")
     if isinstance(obj, datetime):
         out[0] = mktime(obj.timetuple())
         out[1] = obj.microsecond
@@ -353,6 +388,7 @@
 def timestamp_to_datetime(timestamp):
     """Convert an ARF timestamp to a datetime.datetime object (naive local 
time)"""
     from datetime import datetime, timedelta
+
     obj = datetime.fromtimestamp(timestamp[0])
     return obj + timedelta(microseconds=int(timestamp[1]))
 
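The two converters round-trip a naive local datetime through the (seconds, microseconds) pair, which the test suite below also exercises:

```python
from datetime import datetime
import arf

ts = arf.convert_timestamp(datetime(2023, 12, 3, 20, 49, 12))
print(ts)                             # int64 pair: [seconds since epoch, microseconds]
print(arf.timestamp_to_datetime(ts))  # 2023-12-03 20:49:12
```
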
@@ -365,30 +401,32 @@
 def set_uuid(obj, uuid=None):
     """Set the uuid attribute of an HDF5 object. Use this method to ensure 
correct dtype """
     from uuid import uuid4, UUID
+
     if uuid is None:
         uuid = uuid4()
     elif isinstance(uuid, bytes):
         if len(uuid) == 16:
             uuid = UUID(bytes=uuid)
         else:
-            uuid = UUID(hex=uuid.decode('ascii'))
+            uuid = UUID(hex=uuid.decode("ascii"))
 
     if "uuid" in obj.attrs:
         del obj.attrs["uuid"]
-    obj.attrs.create("uuid", str(uuid).encode('ascii'), dtype="|S36")
+    obj.attrs.create("uuid", str(uuid).encode("ascii"), dtype="|S36")
 
 
 def get_uuid(obj):
     """Return the uuid for obj, or null uuid if none is set"""
     # TODO: deprecate null uuid ret val
     from uuid import UUID
+
     try:
-        uuid = obj.attrs['uuid']
+        uuid = obj.attrs["uuid"]
     except KeyError:
         return UUID(int=0)
     # convert to unicode for python 3
     try:
-        uuid = uuid.decode('ascii')
+        uuid = uuid.decode("ascii")
     except (LookupError, AttributeError):
         pass
     return UUID(uuid)
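
A sketch of the uuid round trip, including the null-uuid fallback for objects that never received one:

```python
from uuid import uuid4
import arf

fp = arf.open_file("scratch.arf", "w", driver="core", backing_store=False)
entry = arf.create_entry(fp, "entry_001", 0)  # create_entry assigns a uuid already
fresh = uuid4()
arf.set_uuid(entry, fresh)                    # stored as a fixed-width |S36 attribute
assert arf.get_uuid(entry) == fresh
print(arf.get_uuid(fp))                       # UUID(int=0): the file root has none set
```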
@@ -407,19 +445,22 @@
 
 def is_time_series(dset):
     """Return True if dset is a sampled time series (units are not time)"""
-    return (not is_marked_pointproc(dset)
-            and 'sampling_rate' in dset.attrs
-            and dset.attrs['units'] not in ('s', 'samples'))
+    return (
+        not is_marked_pointproc(dset)
+        and "sampling_rate" in dset.attrs
+        and dset.attrs.get("units", None) not in ("s", "samples")
+    )
 
 
 def is_marked_pointproc(dset):
     """Return True if dset is a marked point process (a complex dtype with 
'start' field)"""
-    return dset.dtype.names is not None and 'start' in dset.dtype.names
+    return dset.dtype.names is not None and "start" in dset.dtype.names
 
 
 def is_entry(obj):
     """Return True if the object is an entry (i.e. an hdf5 group)"""
     import h5py as h5
+
     return isinstance(obj, h5.Group)
 
 
@@ -430,5 +471,6 @@
     except IndexError:
         return 1
 
+
 # Variables:
 # End:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/arf-2.6.1/pyproject.toml new/arf-2.6.4/pyproject.toml
--- old/arf-2.6.1/pyproject.toml        1970-01-01 01:00:00.000000000 +0100
+++ new/arf-2.6.4/pyproject.toml        2021-01-04 19:47:02.000000000 +0100
@@ -0,0 +1,2 @@
+[build-system]
+requires = ["setuptools", "wheel"]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/arf-2.6.1/setup.cfg new/arf-2.6.4/setup.cfg
--- old/arf-2.6.1/setup.cfg     2020-12-01 15:24:54.000000000 +0100
+++ new/arf-2.6.4/setup.cfg     2022-07-21 03:09:27.762905800 +0200
@@ -1,6 +1,44 @@
+[bdist_wheel]
+universal = 1
+
 [pep8]
 ignore = E221,E501,E701
 
+[metadata]
+name = arf
+version = attr: arf.__version__
+description = Advanced Recording Format for acoustic, behavioral, and physiological data
+long_description = file: README.md
+keywords = one, two
+license = BSD 3-Clause License
+classifiers = 
+       Development Status :: 5 - Production/Stable
+       Intended Audience :: Science/Research
+       License :: OSI Approved :: GNU General Public License (GPL)
+       Programming Language :: Python
+       Programming Language :: Python :: 3
+       Programming Language :: C++
+       Topic :: Scientific/Engineering
+       Operating System :: Unix
+       Operating System :: POSIX :: Linux
+       Operating System :: MacOS :: MacOS X
+       Natural Language :: English
+author = Dan Meliza
+author_email = d...@meliza.org
+maintainer = Dan Meliza
+maintainer_email = d...@meliza.org
+url = https://github.com/melizalab/arf
+
+[options]
+zip_safe = false
+py_modules = arf
+python_requires = >= 3.6
+setup_requires = 
+       setuptools >=38.3.0
+install_requires = 
+       h5py>=2.8,!=3.3.*
+test_suite = tests
+
 [egg_info]
 tag_build = 
 tag_date = 0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/arf-2.6.1/setup.py new/arf-2.6.4/setup.py
--- old/arf-2.6.1/setup.py      2020-11-30 20:53:02.000000000 +0100
+++ new/arf-2.6.4/setup.py      2021-01-04 19:51:36.000000000 +0100
@@ -1,55 +1,5 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # -*- mode: python -*-
-import sys
-import os
 from setuptools import setup
-from arf import __version__
-
-if sys.version_info[:2] < (3, 6):
-    raise RuntimeError("Python version 3.6 or greater required.")
-
-cls_txt = """
-Development Status :: 5 - Production/Stable
-Intended Audience :: Science/Research
-License :: OSI Approved :: GNU General Public License (GPL)
-Programming Language :: Python
-Programming Language :: Python :: 3
-Programming Language :: C++
-Topic :: Scientific/Engineering
-Operating System :: Unix
-Operating System :: POSIX :: Linux
-Operating System :: MacOS :: MacOS X
-Natural Language :: English
-"""
-
-short_desc = "Advanced Recording Format for acoustic, behavioral, and 
physiological data"
-
-long_desc = """
-Library for reading and writing Advanced Recording Format files. ARF files
-are HDF5 files used to store audio and neurophysiological recordings in a
-rational, hierarchical format. Data are organized around the concept of an
-entry, which is a set of data channels that all start at the same time.
-Supported data types include sampled data and event data (i.e. spike times).
-Requires h5py (at least 2.8) and numpy (at least 1.3).
-"""
-
-install_requires = ["h5py>=2.8"]
-
-setup(
-    name='arf',
-    version=__version__,
-    description=short_desc,
-    long_description=long_desc,
-    classifiers=[x for x in cls_txt.split("\n") if x],
-    author='Dan Meliza',
-    maintainer='Dan Meliza',
-    url="https://github.com/melizalab/arf";,
-    download_url="https://github.com/melizalab/arf/archive/%s.tar.gz"; % 
__version__,
-    install_requires=install_requires,
-
-    py_modules=['arf'],
-    test_suite='tests'
-)
-# Variables:
-# End:
+setup()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/arf-2.6.1/tests/test_arf.py new/arf-2.6.4/tests/test_arf.py
--- old/arf-2.6.1/tests/test_arf.py     2020-09-29 19:29:45.000000000 +0200
+++ new/arf-2.6.4/tests/test_arf.py     2021-01-04 19:59:29.000000000 +0100
@@ -12,72 +12,88 @@
 
 entry_base = "entry_%03d"
 tstamp = time.mktime(time.localtime())
-entry_attributes = {'intattr': 1,
-                    'vecattr': [1, 2, 3],
-                    'arrattr': randn(5),
-                    'strattr': "an attribute",
-                    }
-datasets = [dict(name="acoustic",
-                 data=randn(100000),
-                 sampling_rate=20000,
-                 datatype=arf.DataTypes.ACOUSTIC,
-                 maxshape=(None,),
-                 microphone="DK-1234",
-                 compression=0),
-            dict(name="neural",
-                 data=(randn(100000) * 2 ** 16).astype('h'),
-                 sampling_rate=20000,
-                 datatype=arf.DataTypes.EXTRAC_HP,
-                 compression=9),
-            dict(name="spikes",
-                 data=randint(0, 100000, 100),
-                 datatype=arf.DataTypes.SPIKET,
-                 units="samples",
-                 sampling_rate=20000,  # required
-                 ),
-            dict(name="empty-spikes",
-                 data=nx.array([], dtype='f'),
-                 datatype=arf.DataTypes.SPIKET,
-                 method="broken",
-                 maxshape=(None,),
-                 units="s",
-                 ),
-            dict(name="events",
-                 data=nx.rec.fromrecords(
-                     [(1.0, 1, b"stimulus"), (5.0, 0, b"stimulus")],
-                     names=("start", "state", "name")),  # 'start' required
-                 datatype=arf.DataTypes.EVENT,
-                 units=(b"s",b"",b"")) # only bytes supported by h5py
-            ]
-
-bad_datasets = [dict(name="string datatype",
-                     data="a string"),
-                dict(name="object datatype",
-                     data=bytes),
-                dict(name="missing samplerate/units",
-                     data=randn(1000)),
-                dict(name="missing samplerate for units=samples",
-                     data=randn(1000),
-                     units="samples"),
-                dict(name="missing start field",
-                     data=nx.rec.fromrecords([(1.0, 1), (2.0, 2)],
-                                             names=("time", "state")),
-                     units="s"),
-                dict(name="missing units for complex dtype",
-                     data=nx.rec.fromrecords(
-                         [(1.0, 1, b"stimulus"), (5.0, 0, b"stimulus")],
-                         names=("start", "state", "name"))),
-                dict(name="wrong length units for complex dtype",
-                     data=nx.rec.fromrecords(
-                         [(1.0, 1, b"stimulus"), (5.0, 0, b"stimulus")],
-                         names=("start", "state", "name")),
-                     units=("seconds",)),
-                ]
+entry_attributes = {
+    "intattr": 1,
+    "vecattr": [1, 2, 3],
+    "arrattr": randn(5),
+    "strattr": "an attribute",
+}
+datasets = [
+    dict(
+        name="acoustic",
+        data=randn(100000),
+        sampling_rate=20000,
+        datatype=arf.DataTypes.ACOUSTIC,
+        maxshape=(None,),
+        microphone="DK-1234",
+        compression=0,
+    ),
+    dict(
+        name="neural",
+        data=(randn(100000) * 2 ** 16).astype("h"),
+        sampling_rate=20000,
+        datatype=arf.DataTypes.EXTRAC_HP,
+        compression=9,
+    ),
+    dict(
+        name="spikes",
+        data=randint(0, 100000, 100),
+        datatype=arf.DataTypes.SPIKET,
+        units="samples",
+        sampling_rate=20000,  # required
+    ),
+    dict(
+        name="empty-spikes",
+        data=nx.array([], dtype="f"),
+        datatype=arf.DataTypes.SPIKET,
+        method="broken",
+        maxshape=(None,),
+        units="s",
+    ),
+    dict(
+        name="events",
+        data=nx.rec.fromrecords(
+            [(1.0, 1, b"stimulus"), (5.0, 0, b"stimulus")],
+            names=("start", "state", "name"),
+        ),  # 'start' required
+        datatype=arf.DataTypes.EVENT,
+        units=(b"s", b"", b""),
+    ),  # only bytes supported by h5py
+]
+
+bad_datasets = [
+    dict(name="string datatype", data="a string"),
+    dict(name="object datatype", data=bytes),
+    dict(name="missing samplerate/units", data=randn(1000)),
+    dict(
+        name="missing samplerate for units=samples", data=randn(1000), 
units="samples"
+    ),
+    dict(
+        name="missing start field",
+        data=nx.rec.fromrecords([(1.0, 1), (2.0, 2)], names=("time", "state")),
+        units="s",
+    ),
+    dict(
+        name="missing units for complex dtype",
+        data=nx.rec.fromrecords(
+            [(1.0, 1, b"stimulus"), (5.0, 0, b"stimulus")],
+            names=("start", "state", "name"),
+        ),
+    ),
+    dict(
+        name="wrong length units for complex dtype",
+        data=nx.rec.fromrecords(
+            [(1.0, 1, b"stimulus"), (5.0, 0, b"stimulus")],
+            names=("start", "state", "name"),
+        ),
+        units=("seconds",),
+    ),
+]
 
 
 class TestArfCreation(unittest.TestCase):
     def setUp(self):
-        self.fp = arf.open_file("test", 'w', driver="core", 
backing_store=False)
+        self.fp = arf.open_file("test", "w", driver="core", 
backing_store=False)
         self.entry = arf.create_entry(self.fp, "entry", tstamp)
         self.dataset = arf.create_dataset(self.entry, **datasets[2])
 
@@ -88,13 +104,13 @@
         g = arf.create_entry(self.fp, name, tstamp, **entry_attributes)
         self.assertTrue(name in self.fp)
         self.assertTrue(arf.is_entry(g))
-        self.assertTrue(arf.timestamp_to_float(g.attrs['timestamp']) > 0)
+        self.assertTrue(arf.timestamp_to_float(g.attrs["timestamp"]) > 0)
         for k in entry_attributes:
             self.assertTrue(k in g.attrs)
 
     def create_dataset(self, g, dset):
         d = arf.create_dataset(g, **dset)
-        self.assertEqual(d.shape, dset['data'].shape)
+        self.assertEqual(d.shape, dset["data"].shape)
         self.assertFalse(arf.is_entry(d))
 
     def test00_create_entries(self):
@@ -111,7 +127,7 @@
         for dset in datasets:
             yield self.create_dataset, self.entry, dset
         self.assertEqual(len(self.entry), len(datasets))
-        self.assertEqual(set(self.entry.keys()), set(dset['name'] for dset in datasets))
+        self.assertEqual(set(self.entry.keys()), set(dset["name"] for dset in datasets))
 
     def test04_create_bad_dataset(self):
         for dset in bad_datasets:
@@ -121,46 +137,52 @@
     def test05_set_attributes(self):
         """ tests the set_attributes convenience function """
         arf.set_attributes(self.entry, mystr="myvalue", myint=5000)
-        self.assertEqual(self.entry.attrs['myint'], 5000)
-        self.assertEqual(self.entry.attrs['mystr'], "myvalue")
+        self.assertEqual(self.entry.attrs["myint"], 5000)
+        self.assertEqual(self.entry.attrs["mystr"], "myvalue")
         arf.set_attributes(self.entry, mystr=None)
         self.assertFalse("mystr" in self.entry.attrs)
 
     def test06_null_uuid(self):
         # nulls in a uuid can make various things barf
         from uuid import UUID
-        uuid = UUID(bytes=b''.rjust(16, b'\0'))
+
+        uuid = UUID(bytes=b"".rjust(16, b"\0"))
         arf.set_uuid(self.entry, uuid)
         self.assertEqual(arf.get_uuid(self.entry), uuid)
 
     def test07_copy_entry_with_attrs(self):
         src_entry_attrs = dict(self.entry.attrs)
         src_entry_timestamp = src_entry_attrs.pop("timestamp")
-        tgt_entry = arf.create_entry(self.fp, "copied_entry", src_entry_timestamp, **src_entry_attrs)
-        self.assertEqual(self.entry.attrs['uuid'], tgt_entry.attrs['uuid'])
+        tgt_entry = arf.create_entry(
+            self.fp, "copied_entry", src_entry_timestamp, **src_entry_attrs
+        )
+        self.assertEqual(self.entry.attrs["uuid"], tgt_entry.attrs["uuid"])
 
     def test08_check_file_version(self):
         arf.check_file_version(self.fp)
 
     def test09_append_to_table(self):
-        dtype = nx.dtype({'names': ("f1","f2"), 'formats': [nx.uint, nx.int32]})
-        dset = arf.create_table(self.fp, 'test', dtype=dtype)
+        dtype = nx.dtype({"names": ("f1", "f2"), "formats": [nx.uint, nx.int32]})
+        dset = arf.create_table(self.fp, "test", dtype=dtype)
         self.assertEqual(dset.shape[0], 0)
         arf.append_data(dset, (5, 10))
         self.assertEqual(dset.shape[0], 1)
 
 
-@unittest.skipIf(version.StrictVersion(h5py_version) < version.StrictVersion("2.2"), "not supported on h5py < 2.2")
+@unittest.skipIf(
+    version.StrictVersion(h5py_version) < version.StrictVersion("2.2"),
+    "not supported on h5py < 2.2",
+)
 class TestArfNavigation(unittest.TestCase):
     def setUp(self):
-        self.fp = arf.open_file("test", 'w', driver="core", 
backing_store=False)
+        self.fp = arf.open_file("test", "w", driver="core", 
backing_store=False)
 
     def tearDown(self):
         self.fp.close()
 
     def test01_creation_iter(self):
         self.fp = arf.open_file("test06", mode="a", driver="core", 
backing_store=False)
-        entry_names = ['z', 'y', 'a', 'q', 'zzyfij']
+        entry_names = ["z", "y", "a", "q", "zzyfij"]
         for name in entry_names:
             g = arf.create_entry(self.fp, name, 0)
             arf.create_dataset(g, "dset", (1,), sampling_rate=1)
@@ -176,13 +198,15 @@
             else:
                 selected, offset = arf.select_interval(dset, 0.0, 1.0)
             if arf.is_time_series(dset):
-                nx.testing.assert_array_equal(selected, data["data"][:data["sampling_rate"]])
+                nx.testing.assert_array_equal(
+                    selected, data["data"][: data["sampling_rate"]]
+                )
 
 
 class TestArfUtility(unittest.TestCase):
-
     def test01_timestamp_conversion(self):
         from datetime import datetime
+
         dt = datetime.now()
         ts = arf.convert_timestamp(dt)
         self.assertEqual(arf.timestamp_to_datetime(ts), dt)
@@ -196,8 +220,5 @@
         arf.DataTypes._todict()
 
 
-
-
-
 # # Variables:
 # # End:
