Date: Saturday, March 16, 2019 @ 21:31:55
  Author: archange
Revision: 442245

archrelease: copy trunk to community-staging-x86_64

Added:
  hdf5-openmpi/repos/community-staging-x86_64/
  hdf5-openmpi/repos/community-staging-x86_64/PKGBUILD
    (from rev 442244, hdf5-openmpi/trunk/PKGBUILD)
  hdf5-openmpi/repos/community-staging-x86_64/mpi.patch
    (from rev 442244, hdf5-openmpi/trunk/mpi.patch)
  hdf5-openmpi/repos/community-staging-x86_64/remove-openmpi-1.0-interfaces.patch
    (from rev 442244, hdf5-openmpi/trunk/remove-openmpi-1.0-interfaces.patch)

-------------------------------------+
 PKGBUILD                            |  107 ++++++++++++++++++++++++++++++++++
 mpi.patch                           |   18 +++++
 remove-openmpi-1.0-interfaces.patch |  102 ++++++++++++++++++++++++++++++++
 3 files changed, 227 insertions(+)

Copied: hdf5-openmpi/repos/community-staging-x86_64/PKGBUILD (from rev 442244, hdf5-openmpi/trunk/PKGBUILD)
===================================================================
--- community-staging-x86_64/PKGBUILD                           (rev 0)
+++ community-staging-x86_64/PKGBUILD   2019-03-16 21:31:55 UTC (rev 442245)
@@ -0,0 +1,107 @@
+# Maintainer: Ronald van Haren <ronald.archlinux.org>
+# Maintainer: Bruno Pagani <archa...@archlinux.org>
+# Contributor: Stefan Husmann <stefan-husm...@t-online.de>
+# Contributor: damir <da...@archlinux.org>
+# Contributor: Tom K <t...@runbox.com>
+# Contributor: Jed Brown <j...@59a2.org>
+# Contributor: Simone Pezzuto <junki....@gmail.com>
+
+_pkgname=hdf5
+_mpi=openmpi
+pkgname=${_pkgname}-${_mpi}
+pkgver=1.10.5
+pkgrel=1
+pkgdesc="General purpose library and file format for storing scientific data (${_mpi} version)"
+arch=('x86_64')
+url="https://www.hdfgroup.org/hdf5"
+license=('custom')
+depends=('zlib' 'libaec' 'bash' 'openmpi')
+makedepends=('cmake' 'time' 'gcc-fortran')
+provides=('hdf5' 'hdf5-cpp-fortran' "hdf5-fortran-${_mpi}")
+conflicts=('hdf5')
+replaces=("hdf5-fortran-${_mpi}")
+options=('staticlibs')
+source=("https://support.hdfgroup.org/ftp/HDF5/releases/${_pkgname}-${pkgver:0:4}/${_pkgname}-${pkgver/_/-}/src/${_pkgname}-${pkgver/_/-}.tar.bz2"
+        'mpi.patch')
+sha256sums=('68d6ea8843d2a106ec6a7828564c1689c7a85714a35d8efafa2fee20ca366f44'
+            '603006358175b7a8b35fa44c484cddf45c0381cf50db4fb7c50ea5969d361eca')
+
+prepare() {
+    cd ${_pkgname}-${pkgver/_/-}
+    # FS#33343
+    patch -p1 -i ../mpi.patch
+}
+
+build() {
+    # Crazy workaround: run CMake to generate pkg-config file
+    mkdir -p build && cd build
+    CXX="mpicxx" \
+    CC="mpicc" \
+    FC="mpif90" \
+    F9X="mpif90" \
+    RUNPARALLEL="mpirun" \
+    OMPI_MCA_disable_memory_allocator=1 \
+    cmake ../${_pkgname}-${pkgver/_/-} \
+        -DCMAKE_INSTALL_PREFIX=/usr \
+        -DBUILD_SHARED_LIBS=ON \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DALLOW_UNSUPPORTED=ON \
+        -DHDF5_BUILD_HL_LIB=ON \
+        -DHDF5_BUILD_CPP_LIB=ON \
+        -DHDF5_BUILD_FORTRAN=ON \
+        -DHDF5_ENABLE_PARALLEL=ON \
+        -DHDF5_ENABLE_Z_LIB_SUPPORT=ON \
+        -DHDF5_ENABLE_SZIP_SUPPORT=ON \
+        -DHDF5_ENABLE_SZIP_ENCODING=ON
+    # But don’t build with it, it’s quite broken
+    cd ../${_pkgname}-${pkgver/_/-}
+    ./configure \
+        CXX="mpicxx" \
+        CC="mpicc" \
+        FC="mpif90" \
+        F9X="mpif90" \
+        RUNPARALLEL="mpirun" \
+        OMPI_MCA_disable_memory_allocator=1 \
+        --prefix=/usr \
+        --docdir=/usr/share/doc/hdf5/ \
+        --enable-static \
+        --disable-sharedlib-rpath \
+        --enable-build-mode=production \
+        --enable-hl \
+        --enable-cxx \
+        --enable-fortran \
+        --enable-parallel \
+        --enable-unsupported \
+        --with-pic \
+        --with-zlib \
+        --with-szlib
+    make
+}
+
+check() {
+    cd ${_pkgname}-${pkgver/_/-}
+    # Without this, checks are failing with messages like “error while loading shared libraries: libhdf5.so.101: cannot open shared object file: No such file or directory”
+    export LD_LIBRARY_PATH="${srcdir}"/${_pkgname}-${pkgver/_/-}/src/.libs/
+    export LD_LIBRARY_PATH="$LD_LIBRARY_PATH":"${srcdir}"/${_pkgname}-${pkgver/_/-}/c++/src/.libs/
+    export LD_LIBRARY_PATH="$LD_LIBRARY_PATH":"${srcdir}"/${_pkgname}-${pkgver/_/-}/fortran/src/.libs/
+    export LD_LIBRARY_PATH="$LD_LIBRARY_PATH":"${srcdir}"/${_pkgname}-${pkgver/_/-}/hl/src/.libs/
+    export LD_LIBRARY_PATH="$LD_LIBRARY_PATH":"${srcdir}"/${_pkgname}-${pkgver/_/-}/hl/c++/src/.libs/
+    export LD_LIBRARY_PATH="$LD_LIBRARY_PATH":"${srcdir}"/${_pkgname}-${pkgver/_/-}/hl/fortran/src/.libs/
+    # This is a parallel build, so test failures are always OpenMPI bugs
+    make check || warning "Tests failed"
+}
+
+package() {
+    cd ${_pkgname}-${pkgver/_/-}
+
+    make DESTDIR="${pkgdir}" install
+
+    # Move examples to a proper place
+    install -dm755 "${pkgdir}"/usr/share/doc/${_pkgname}
+    mv "${pkgdir}"/usr/share/{hdf5_examples,doc/${_pkgname}/examples}
+
+    install -Dm644 COPYING -t "${pkgdir}"/usr/share/licenses/${_pkgname}
+
+    # Install pkg-config files from CMake tree
+    install -Dm644 ../build/CMakeFiles/hdf5{,_hl}{,_cpp}-${pkgver}.pc -t "${pkgdir}"/usr/lib/pkgconfig/
+}
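
For reference, a minimal sketch of how the parallel build could be exercised once the package is installed; this is not part of the PKGBUILD, and the file name and compile line are assumptions. Since the CMake-generated pkg-config files are installed as hdf5-1.10.5.pc and friends, something like mpicc smoke.c $(pkg-config --cflags --libs hdf5-1.10.5) followed by mpirun -np 2 ./a.out should be enough to confirm that the MPI-IO driver works:

    /* smoke.c -- illustrative only, not shipped with the package.
     * Creates an HDF5 file through the MPI-IO driver to check the
     * parallel (openmpi) build. */
    #include <mpi.h>
    #include <hdf5.h>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);

        /* File-access property list selecting the MPI-IO virtual file driver */
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

        /* All ranks create the same file collectively */
        hid_t file = H5Fcreate("smoke.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
        H5Fclose(file);
        H5Pclose(fapl);

        MPI_Finalize();
        return 0;
    }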

Copied: hdf5-openmpi/repos/community-staging-x86_64/mpi.patch (from rev 442244, hdf5-openmpi/trunk/mpi.patch)
===================================================================
--- community-staging-x86_64/mpi.patch                          (rev 0)
+++ community-staging-x86_64/mpi.patch  2019-03-16 21:31:55 UTC (rev 442245)
@@ -0,0 +1,18 @@
+Prevent accidental inclusion of the MPI C++ headers
+when hdf5.h is included by a third-party library
+
+https://bugs.gentoo.org/show_bug.cgi?id=420777
+https://bugs.archlinux.org/task/33343
+
+--- a/src/H5public.h
++++ b/src/H5public.h
+@@ -58,6 +58,8 @@
+ #   include <stddef.h>
+ #endif
+ #ifdef H5_HAVE_PARALLEL
++#   define OMPI_SKIP_MPICXX   /* Make sure that cxx specific headers are not included */
++#   define MPICH_SKIP_MPICXX
+ #   include <mpi.h>
+ #ifndef MPI_FILE_NULL         /*MPIO may be defined in mpi.h already       */
+ #   include <mpio.h>
+
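
In other words, the two defines added above keep <mpi.h> from pulling the Open MPI / MPICH C++ bindings into every translation unit that includes hdf5.h from a parallel build. A minimal sketch of the same guard as it would look in consumer code (illustrative only; with this patch applied, hdf5.h already defines the macros itself):

    /* Define the skip macros before the first include of <mpi.h>,
     * otherwise the MPI C++ bindings (mpicxx.h) leak into any
     * third-party code that includes hdf5.h. */
    #ifndef OMPI_SKIP_MPICXX
    #define OMPI_SKIP_MPICXX 1      /* Open MPI */
    #endif
    #ifndef MPICH_SKIP_MPICXX
    #define MPICH_SKIP_MPICXX 1     /* MPICH and derivatives */
    #endif
    #include <mpi.h>
    #include <hdf5.h>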

Copied: hdf5-openmpi/repos/community-staging-x86_64/remove-openmpi-1.0-interfaces.patch (from rev 442244, hdf5-openmpi/trunk/remove-openmpi-1.0-interfaces.patch)
===================================================================
--- community-staging-x86_64/remove-openmpi-1.0-interfaces.patch                               (rev 0)
+++ community-staging-x86_64/remove-openmpi-1.0-interfaces.patch        2019-03-16 21:31:55 UTC (rev 442245)
@@ -0,0 +1,102 @@
+--- A/src/H5Smpio.c    2018-09-04 04:54:44.000000000 +0200
++++ B/src/H5Smpio.c    2018-11-22 16:58:44.365923064 +0100
+@@ -859,7 +859,7 @@
+           *   (2GB-1)number_of_blocks * the_datatype_extent.
+           */
+ 
+-            MPI_Aint stride_in_bytes, inner_extent;
++            MPI_Aint stride_in_bytes, inner_extent, lower_bound;
+             MPI_Datatype block_type;
+ 
+             /* create a contiguous datatype inner_type x number of BLOCKS.
+@@ -879,7 +879,7 @@
+                     HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+             }
+ 
+-            MPI_Type_extent (inner_type, &inner_extent);
++            MPI_Type_get_extent (inner_type, &lower_bound, &inner_extent);
+             stride_in_bytes = inner_extent * (MPI_Aint)d[i].strid;
+ 
+             /* If the element count is larger than what a 32 bit integer can hold,
+@@ -1424,7 +1424,7 @@
+     int           block_len[2];
+     int           mpi_code;               /* MPI return code */
+     MPI_Datatype  inner_type, outer_type, leftover_type, type[2];
+-    MPI_Aint      disp[2], old_extent;
++    MPI_Aint      disp[2], old_extent, lower_bound;
+     herr_t        ret_value = SUCCEED;    /* Return value */
+ 
+     FUNC_ENTER_NOAPI_NOINIT
+@@ -1500,9 +1500,9 @@
+             }
+         }
+ 
+-        MPI_Type_extent (old_type, &old_extent);
++        MPI_Type_get_extent (old_type, &lower_bound, &old_extent);
+ 
+-        /* Set up the arguments for MPI_Type_struct constructor */
++        /* Set up the arguments for MPI_Type_create_struct constructor */
+         type[0] = outer_type;
+         type[1] = leftover_type;
+         block_len[0] = 1;
+--- A/src/H5.c 2018-10-05 07:21:51.000000000 +0200
++++ B/src/H5.c 2018-11-22 16:52:52.353016751 +0100
+@@ -138,7 +138,7 @@
+         if (mpi_initialized && !mpi_finalized) {
+             int key_val;
+ 
+-            if(MPI_SUCCESS != (mpi_code = MPI_Comm_create_keyval(MPI_NULL_COPY_FN, 
++            if(MPI_SUCCESS != (mpi_code = MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, 
+                                                                  (MPI_Comm_delete_attr_function *)H5_mpi_delete_cb, 
+                                                                  &key_val, NULL)))
+                 HMPI_GOTO_ERROR(FAIL, "MPI_Comm_create_keyval failed", mpi_code)
+--- A/testpar/t_cache.c        2018-09-10 16:43:41.000000000 +0200
++++ B/testpar/t_cache.c        2018-11-22 16:58:45.405881436 +0100
+@@ -1217,20 +1217,20 @@
+     struct mssg_t sample; /* used to compute displacements */
+ 
+     /* setup the displacements array */
+-    if ( ( MPI_SUCCESS != MPI_Address(&sample.req, &displs[0]) ) ||
+-         ( MPI_SUCCESS != MPI_Address(&sample.src, &displs[1]) ) ||
+-         ( MPI_SUCCESS != MPI_Address(&sample.dest, &displs[2]) ) ||
+-         ( MPI_SUCCESS != MPI_Address(&sample.mssg_num, &displs[3]) ) ||
+-         ( MPI_SUCCESS != MPI_Address(&sample.base_addr, &displs[4]) ) ||
+-         ( MPI_SUCCESS != MPI_Address(&sample.len, &displs[5]) ) ||
+-         ( MPI_SUCCESS != MPI_Address(&sample.ver, &displs[6]) ) ||
+-         ( MPI_SUCCESS != MPI_Address(&sample.count, &displs[7]) ) ||
+-         ( MPI_SUCCESS != MPI_Address(&sample.magic, &displs[8]) ) ) {
++    if ( ( MPI_SUCCESS != MPI_Get_address(&sample.req, &displs[0]) ) ||
++         ( MPI_SUCCESS != MPI_Get_address(&sample.src, &displs[1]) ) ||
++         ( MPI_SUCCESS != MPI_Get_address(&sample.dest, &displs[2]) ) ||
++         ( MPI_SUCCESS != MPI_Get_address(&sample.mssg_num, &displs[3]) ) ||
++         ( MPI_SUCCESS != MPI_Get_address(&sample.base_addr, &displs[4]) ) ||
++         ( MPI_SUCCESS != MPI_Get_address(&sample.len, &displs[5]) ) ||
++         ( MPI_SUCCESS != MPI_Get_address(&sample.ver, &displs[6]) ) ||
++         ( MPI_SUCCESS != MPI_Get_address(&sample.count, &displs[7]) ) ||
++         ( MPI_SUCCESS != MPI_Get_address(&sample.magic, &displs[8]) ) ) {
+ 
+         nerrors++;
+         success = FALSE;
+         if ( verbose ) {
+-            HDfprintf(stdout, "%d:%s: MPI_Address() call failed.\n",
++            HDfprintf(stdout, "%d:%s: MPI_Get_address() call failed.\n",
+                       world_mpi_rank, FUNC);
+         }
+ 
+@@ -1245,14 +1245,14 @@
+ 
+     if ( success ) {
+ 
+-        result = MPI_Type_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
++        result = MPI_Type_create_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
+ 
+         if ( result != MPI_SUCCESS ) {
+ 
+             nerrors++;
+             success = FALSE;
+             if ( verbose ) {
+-                HDfprintf(stdout, "%d:%s: MPI_Type_struct() call failed.\n",
++                HDfprintf(stdout, "%d:%s: MPI_Type_create_struct() call failed.\n",
+                           world_mpi_rank, FUNC);
+             }
+         }
