Package: valgrind
Version: 1:3.11.0-1
Followup-For: Bug #810295

Please find a debdiff attached. The patch (15_compressed.patch) is backported from upstream and adds support for compressed debuginfo sections.
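
For anyone who wants to check the backport locally, a minimal sketch of a
test case (assuming a gcc new enough to accept -gz=zlib; the file name
test-gz.c is only an example, not part of the patch):

  /* test-gz.c -- trivial heap error for Memcheck to report.
     Build with compressed debug info, using the same flags the patch's
     configure checks probe for:
         gcc -g -gz=zlib     -o test-gz test-gz.c   (ELF gABI, SHF_COMPRESSED)
         gcc -g -gz=zlib-gnu -o test-gz test-gz.c   (GNU-style .zdebug sections)
     then:
         valgrind ./test-gz
     Without this patch valgrind cannot read the compressed .debug_* sections,
     so the "Invalid read" error carries no file/line information; with the
     patch applied the usual source locations are shown. */
  #include <stdlib.h>

  int main(void)
  {
     int *p = malloc(3 * sizeof(int));
     int x = p[3];          /* invalid read just past the allocated block */
     free(p);
     return x != 0;         /* keep the read live */
  }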

-- System Information:
Debian Release: stretch/sid
  APT prefers testing
  APT policy: (500, 'testing')
Architecture: amd64 (x86_64)

Kernel: Linux 3.16.0-4-amd64 (SMP w/4 CPU cores)
Locale: LANG=C, LC_CTYPE=C (charmap=ANSI_X3.4-1968)
Shell: /bin/sh linked to /bin/dash
Init: unable to detect

Versions of packages valgrind depends on:
ii  libc6      2.22-11
ii  libc6-dbg  2.22-11

Versions of packages valgrind recommends:
ii  gdb           7.10-1.1
ii  valgrind-dbg  1:3.11.0-1

Versions of packages valgrind suggests:
pn  alleyoop      <none>
pn  kcachegrind   <none>
pn  valgrind-mpi  <none>
pn  valkyrie      <none>

-- no debconf information
diff -Nru valgrind-3.11.0/debian/changelog valgrind-3.11.0/debian/changelog
--- valgrind-3.11.0/debian/changelog	2015-09-25 11:41:20.000000000 +0000
+++ valgrind-3.11.0/debian/changelog	2016-06-19 00:01:35.000000000 +0000
@@ -1,3 +1,10 @@
+valgrind (1:3.11.0-1.1) UNRELEASED; urgency=medium
+
+  * Non-maintainer upload.
+  * Add 15_compressed.patch - support compressed debug info (closes: #810295)
+
+ -- Max Dmitrichenko <dmitr...@gmail.com>  Sat, 18 Jun 2016 23:32:04 +0000
+
 valgrind (1:3.11.0-1) unstable; urgency=medium
 
   * New upstream release (Closes: #800013)
diff -Nru valgrind-3.11.0/debian/patches/15_compressed.patch valgrind-3.11.0/debian/patches/15_compressed.patch
--- valgrind-3.11.0/debian/patches/15_compressed.patch	1970-01-01 00:00:00.000000000 +0000
+++ valgrind-3.11.0/debian/patches/15_compressed.patch	2016-06-19 00:19:52.000000000 +0000
@@ -0,0 +1,1990 @@
+--- a/NEWS
++++ b/NEWS
+@@ -36,6 +36,10 @@
+ * Intel AVX2 support is more complete (64 bit targets only).  On AVX2
+   capable hosts, the simulated CPUID will now indicate AVX2 support.
+ 
++* Valgrind is able to read compressed debuginfo sections in two formats:
++  - zlib ELF gABI format with SHF_COMPRESSED flag (gcc option -gz=zlib)
++  - zlib GNU format with .zdebug sections (gcc option -gz=zlib-gnu)
++
+ * ==================== TOOL CHANGES ====================
+ 
+ * Memcheck:
+@@ -197,6 +201,7 @@
+ 269360  s390x: Fix addressing mode selection for compare-and-swap
+ 302630  Memcheck: Assertion failed: 'sizeof(UWord) == sizeof(UInt)'
+         == 326797
++303877  valgrind doesn't support compressed debuginfo sections
+ 312989  ioctl handling needs to do POST handling on generic ioctls and [..]
+ 319274  Fix unhandled syscall: unix:410 (sigsuspend_nocancel) on OS X
+ 324181  mmap does not handle MAP_32BIT (handle it now, rather than fail it)
+--- a/configure.ac
++++ b/configure.ac
+@@ -1202,6 +1202,11 @@
+ ])
+ 
+ 
++# Check for ELF32/64_CHDR
++ 
++AC_CHECK_TYPES([Elf32_Chdr, Elf64_Chdr], [], [], [[#include <elf.h>]])
++
++
+ # Check for PTHREAD_RWLOCK_T
+ 
+ AC_MSG_CHECKING([for pthread_rwlock_t])
+@@ -2076,6 +2081,45 @@
+ CFLAGS=$safe_CFLAGS
+ 
+ 
++# does this compiler support -g -gz=zlib ?
++
++AC_MSG_CHECKING([if gcc accepts -g -gz=zlib])
++
++safe_CFLAGS=$CFLAGS
++CFLAGS="-g -gz=zlib"
++
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[
++  return 0;
++]])], [
++ac_have_gz_zlib=yes
++AC_MSG_RESULT([yes])
++], [
++ac_have_gz_zlib=no
++AC_MSG_RESULT([no])
++])
++AM_CONDITIONAL(GZ_ZLIB, test x$ac_have_gz_zlib = xyes)
++CFLAGS=$safe_CFLAGS
++
++
++# does this compiler support -g -gz=zlib-gnu ?
++
++AC_MSG_CHECKING([if gcc accepts -g -gz=zlib-gnu])
++
++safe_CFLAGS=$CFLAGS
++CFLAGS="-g -gz=zlib-gnu"
++
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[
++  return 0;
++]])], [
++ac_have_gz_zlib_gnu=yes
++AC_MSG_RESULT([yes])
++], [
++ac_have_gz_zlib_gnu=no
++AC_MSG_RESULT([no])
++])
++AM_CONDITIONAL(GZ_ZLIB_GNU, test x$ac_have_gz_zlib_gnu = xyes)
++CFLAGS=$safe_CFLAGS
++
+ # does this compiler support nested functions ?
+ 
+ AC_MSG_CHECKING([if gcc accepts nested functions])
+@@ -3386,6 +3430,9 @@
+ ])
+ AM_CONDITIONAL(SOLARIS_RESERVE_SYSSTAT_ZONE_ADDR, test x$solaris_reserve_sysstat_zone_addr = xyes)
+ 
++AC_CHECK_TYPES([Elf32_Chdr, Elf64_Chdr], [], [], [[#include <elf.h>]])
++
++
+ else
+ AM_CONDITIONAL(SOLARIS_SUN_STUDIO_AS, false)
+ AM_CONDITIONAL(SOLARIS_XPG_SYMBOLS_PRESENT, false)
+--- a/coregrind/Makefile.am
++++ b/coregrind/Makefile.am
+@@ -348,6 +348,7 @@
+ 	m_debuginfo/readmacho.c \
+ 	m_debuginfo/readpdb.c \
+ 	m_debuginfo/storage.c \
++	m_debuginfo/tinfl.c \
+ 	m_debuginfo/tytypes.c \
+ 	m_demangle/cp-demangle.c \
+ 	m_demangle/cplus-dem.c \
+--- a/coregrind/m_debuginfo/image.c
++++ b/coregrind/m_debuginfo/image.c
+@@ -45,6 +45,8 @@
+ #include "priv_image.h"            /* self */
+ 
+ #include "minilzo.h"
++#define TINFL_HEADER_FILE_ONLY
++#include "tinfl.c"
+ 
+ /* These values (1024 entries of 8192 bytes each) gives a cache
+    size of 8MB. */
+@@ -53,15 +55,29 @@
+ 
+ #define CACHE_ENTRY_SIZE      (1 << CACHE_ENTRY_SIZE_BITS)
+ 
++#define COMMPRESSED_SLICE_ARRAY_GROW_SIZE 64
++
+ /* An entry in the cache. */
+ typedef
+    struct {
+-      DiOffT off; // file offset for data[0]
+-      SizeT  used; // 1 .. sizeof(data), or 0 to denote not-in-use
+-      UChar  data[CACHE_ENTRY_SIZE];
++      DiOffT off;    // file offset for data[0]
++      SizeT  size;   // sizeof(data)
++      SizeT  used;   // 1 .. sizeof(data), or 0 to denote not-in-use
++      UChar  data[];
+    }
+    CEnt;
+ 
++/* Compressed slice */
++typedef
++   struct {
++      DiOffT offD;  // offset of decompressed data
++      SizeT  szD;   // size of decompressed data
++      DiOffT offC;  // offset of compressed data
++      SizeT  szC;   // size of compressed data
++   }
++   CSlc;
++
++
+ /* Source for files */
+ typedef
+    struct {
+@@ -82,8 +98,10 @@
+ struct _DiImage {
+    // The source -- how to get hold of the file we are reading
+    Source source;
+-   // Total size of the image.
++   // Virtual size of the image = real size + size of uncompressed data
+    SizeT size;
++   // Real size of image
++   SizeT real_size;
+    // The number of entries used.  0 .. CACHE_N_ENTRIES
+    UInt  ces_used;
+    // Pointers to the entries.  ces[0 .. ces_used-1] are non-NULL.
+@@ -91,6 +109,12 @@
+    // The non-NULL entries may be arranged arbitrarily.  We expect to use
+    // a pseudo-LRU scheme though.
+    CEnt* ces[CACHE_N_ENTRIES];
++   // Array of compressed slices
++   CSlc* cslc;
++   // Number of compressed slices used
++   UInt  cslc_used;
++   // Size of cslc array
++   UInt  cslc_size;
+ };
+ 
+ /* A frame.  The first 4 bytes of |data| give the kind of the frame,
+@@ -367,7 +391,7 @@
+ static Bool parse_Frame_le64_le64_le64_bytes (
+                const Frame* fr, const HChar* tag,
+                /*OUT*/ULong* n1, /*OUT*/ULong* n2, /*OUT*/ULong* n3,
+-               /*OUT*/UChar** data, /*OUT*/ULong* n_data 
++               /*OUT*/UChar** data, /*OUT*/ULong* n_data
+             )
+ {
+    vg_assert(VG_(strlen)(tag) == 4);
+@@ -395,7 +419,7 @@
+    /* This assertion is checked by set_CEnt, so checking it here has
+       no benefit, whereas skipping it does remove it from the hottest
+       path. */
+-   /* vg_assert(cent->used > 0 && cent->used <= CACHE_ENTRY_SIZE); */
++   /* vg_assert(cent->used > 0 && cent->used <= cent->size); */
+    /* What we want to return is:
+         cent->off <= off && off < cent->off + cent->used;
+       This is however a very hot path, so here's alternative that uses
+@@ -415,18 +439,41 @@
+    return off - cent->off < cent->used;
+ }
+ 
++/* Returns pointer to CSlc or NULL */
++static inline CSlc* find_cslc ( DiImage* img, DiOffT off )
++{
++   for (UInt i = 0; i < img->cslc_used; i++) {
++      if ( (img->cslc[i].offD <= off)
++           && (img->cslc[i].offD + img->cslc[i].szD > off)
++         )
++         return &img->cslc[i];
++   }
++   return NULL;
++}
++
+ /* Allocate a new CEnt, connect it to |img|, and return its index. */
+-static UInt alloc_CEnt ( DiImage* img )
++static UInt alloc_CEnt ( DiImage* img, SizeT szB )
+ {
+-   vg_assert(img);
++   vg_assert(img != NULL);
+    vg_assert(img->ces_used < CACHE_N_ENTRIES);
++   vg_assert(szB >= CACHE_ENTRY_SIZE);
+    UInt entNo = img->ces_used;
+    img->ces_used++;
+    vg_assert(img->ces[entNo] == NULL);
+-   img->ces[entNo] = ML_(dinfo_zalloc)("di.alloc_CEnt.1", sizeof(CEnt));
++   img->ces[entNo] = ML_(dinfo_zalloc)("di.alloc_CEnt.1",
++                                       offsetof(CEnt, data) + szB);
++   img->ces[entNo]->size = szB;
+    return entNo;
+ }
+ 
++static void realloc_CEnt ( DiImage* img, UInt entNo, SizeT szB ) {
++   vg_assert(img != NULL);
++   vg_assert(szB >= CACHE_ENTRY_SIZE);
++   img->ces[entNo] = ML_(dinfo_realloc)("di.realloc_CEnt.1",
++                                        img->ces[entNo],
++                                        offsetof(CEnt, data) + szB);
++}
++
+ /* Move the given entry to the top and slide those above it down by 1,
+    to make space. */
+ static void move_CEnt_to_top ( DiImage* img, UInt entNo )
+@@ -449,23 +496,24 @@
+ {
+    SizeT len;
+    DiOffT off_orig = off;
+-   vg_assert(img);
++   vg_assert(img != NULL);
+    vg_assert(img->ces_used <= CACHE_N_ENTRIES);
+    vg_assert(entNo >= 0 && entNo < img->ces_used);
+-   vg_assert(off < img->size);
+-   vg_assert(img->ces[entNo] != NULL);
++   vg_assert(off < img->real_size);
++   CEnt* ce = img->ces[entNo];
++   vg_assert(ce != NULL);
+    /* Compute [off, +len) as the slice we are going to read. */
+    off = block_round_down(off);
+-   len = img->size - off;
+-   if (len > CACHE_ENTRY_SIZE) len = CACHE_ENTRY_SIZE;
++   len = img->real_size - off;
++   if (len > ce->size)
++      len = ce->size;
+    /* It is conceivable that the 'len > 0' bit could fail if we make
+       an image with a zero sized file.  But then no 'get' request on
+       that image would be valid. */
+-   vg_assert(len > 0 && len <= CACHE_ENTRY_SIZE);
+-   vg_assert(off + len <= img->size);
++   vg_assert(len > 0 && len <= ce->size);
++   vg_assert(off + len <= img->real_size);
+    vg_assert(off <= off_orig && off_orig < off+len);
+    /* So, read  off .. off+len-1  into the entry. */
+-   CEnt* ce = img->ces[entNo];
+ 
+    if (0) {
+       static UInt t_last = 0;
+@@ -474,7 +522,7 @@
+       UInt delay = now - t_last;
+       t_last = now;
+       nread += len;
+-      VG_(printf)("XXXXXXXX (tot %'llu)  read %'lu  offset %'llu  delay %'u\n", 
++      VG_(printf)("XXXXXXXX (tot %'llu)  read %'lu  offset %'llu  delay %'u\n",
+                   nread, len, off, delay);
+    }
+ 
+@@ -543,10 +591,10 @@
+      end_of_else_clause:
+       {}
+    }
+-   
++
+    ce->off  = off;
+    ce->used = len;
+-   vg_assert(ce->used > 0 && ce->used <= CACHE_ENTRY_SIZE);
++   vg_assert(ce->used > 0 && ce->used <= ce->size);
+ }
+ 
+ __attribute__((noinline))
+@@ -567,16 +615,45 @@
+    if (i == img->ces_used) {
+       /* It's not in any entry.  Either allocate a new entry or
+          recycle the LRU one. */
++
++      CSlc* cslc = find_cslc(img, off);
++      UChar* buf = NULL;
++      if (cslc != NULL) {
++         SizeT len = 0;
++         buf = ML_(dinfo_zalloc)("di.image.get_slowcase.1", cslc->szC);
++         // get compressed data
++         while (len < cslc->szC)
++            len += ML_(img_get_some)(buf + len, img, cslc->offC + len,
++                                     cslc->szC - len);
++      }
++
+       if (img->ces_used == CACHE_N_ENTRIES) {
+          /* All entries in use.  Recycle the (ostensibly) LRU one. */
+-         set_CEnt(img, CACHE_N_ENTRIES-1, off);
+          i = CACHE_N_ENTRIES-1;
++         if ((cslc != NULL) && (cslc->szD > img->ces[i]->size))
++            realloc_CEnt(img, i, cslc->szD);
+       } else {
+          /* Allocate a new one, and fill it in. */
+-         UInt entNo = alloc_CEnt(img);
+-         set_CEnt(img, entNo, off);
+-         i = entNo;
++         SizeT size = CACHE_ENTRY_SIZE;
++         if ((cslc != NULL) && (cslc->szD > CACHE_ENTRY_SIZE))
++            size = cslc->szD;
++         i = alloc_CEnt(img, size);
+       }
++
++      if (cslc != NULL) {
++         SizeT len = tinfl_decompress_mem_to_mem(
++                        img->ces[i]->data, cslc->szD,
++                        buf, cslc->szC,
++                        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF
++                        | TINFL_FLAG_PARSE_ZLIB_HEADER);
++         vg_assert(len == cslc->szD);
++         img->ces[i]->used = cslc->szD;
++         img->ces[i]->off = cslc->offD;
++         ML_(dinfo_free)(buf);
++      } else {
++         set_CEnt(img, i, off);
++      }
++
+    } else {
+       /* We found it at position 'i'. */
+       vg_assert(i > 0);
+@@ -626,15 +703,19 @@
+        || /* size is unrepresentable as a SizeT */
+           size != (DiOffT)(SizeT)(size)) {
+       VG_(close)(sr_Res(fd));
+-      return NULL; 
++      return NULL;
+    }
+ 
+    DiImage* img = ML_(dinfo_zalloc)("di.image.ML_iflf.1", sizeof(DiImage));
+    img->source.is_local = True;
+    img->source.fd       = sr_Res(fd);
+    img->size            = size;
++   img->real_size       = size;
+    img->ces_used        = 0;
+    img->source.name     = ML_(dinfo_strdup)("di.image.ML_iflf.2", fullpath);
++   img->cslc            = NULL;
++   img->cslc_size       = 0;
++   img->cslc_used       = 0;
+    /* img->ces is already zeroed out */
+    vg_assert(img->source.fd >= 0);
+ 
+@@ -643,7 +724,7 @@
+       loading it at this point forcing img->cent[0] to always be
+       non-empty, thereby saving us an is-it-empty check on the fast
+       path in get(). */
+-   UInt entNo = alloc_CEnt(img);
++   UInt entNo = alloc_CEnt(img, CACHE_ENTRY_SIZE);
+    vg_assert(entNo == 0);
+    set_CEnt(img, 0, 0);
+ 
+@@ -675,7 +756,7 @@
+    if (!set_blocking(sd))
+       return NULL;
+    Int one = 1;
+-   Int sr = VG_(setsockopt)(sd, VKI_IPPROTO_TCP, VKI_TCP_NODELAY, 
++   Int sr = VG_(setsockopt)(sd, VKI_IPPROTO_TCP, VKI_TCP_NODELAY,
+                             &one, sizeof(one));
+    vg_assert(sr == 0);
+ 
+@@ -730,7 +811,7 @@
+ 
+    /* See comment on equivalent bit in ML_(img_from_local_file) for
+       rationale. */
+-   UInt entNo = alloc_CEnt(img);
++   UInt entNo = alloc_CEnt(img, CACHE_ENTRY_SIZE);
+    vg_assert(entNo == 0);
+    set_CEnt(img, 0, 0);
+ 
+@@ -756,9 +837,32 @@
+    return NULL;
+ }
+ 
++DiOffT ML_(img_mark_compressed_part)(DiImage* img, DiOffT offset, SizeT szC,
++                                     SizeT szD)
++{
++   DiOffT ret;
++   vg_assert(img != NULL);
++   vg_assert(offset + szC <= img->size);
++
++   if (img->cslc_used == img->cslc_size) {
++      img->cslc_size += COMMPRESSED_SLICE_ARRAY_GROW_SIZE;
++      img->cslc = ML_(dinfo_realloc)("di.image.ML_img_mark_compressed_part.1",
++                                     img->cslc, img->cslc_size * sizeof(CSlc));
++   }
++
++   ret = img->size;
++   img->cslc[img->cslc_used].offC = offset;
++   img->cslc[img->cslc_used].szC = szC;
++   img->cslc[img->cslc_used].offD = img->size;
++   img->cslc[img->cslc_used].szD = szD;
++   img->size += szD;
++   img->cslc_used++;
++   return ret;
++}
++
+ void ML_(img_done)(DiImage* img)
+ {
+-   vg_assert(img);
++   vg_assert(img != NULL);
+    if (img->source.is_local) {
+       /* Close the file; nothing else to do. */
+       vg_assert(img->source.session_id == 0);
+@@ -782,18 +886,25 @@
+       vg_assert(img->ces[i] == NULL);
+    }
+    ML_(dinfo_free)(img->source.name);
++   ML_(dinfo_free)(img->cslc);
+    ML_(dinfo_free)(img);
+ }
+ 
+ DiOffT ML_(img_size)(const DiImage* img)
+ {
+-   vg_assert(img);
++   vg_assert(img != NULL);
+    return img->size;
+ }
+ 
++DiOffT ML_(img_real_size)(const DiImage* img)
++{
++   vg_assert(img != NULL);
++   return img->real_size;
++}
++
+ inline Bool ML_(img_valid)(const DiImage* img, DiOffT offset, SizeT size)
+ {
+-   vg_assert(img);
++   vg_assert(img != NULL);
+    vg_assert(offset != DiOffT_INVALID);
+    return img->size > 0 && offset + size <= (DiOffT)img->size;
+ }
+@@ -830,7 +941,7 @@
+ void ML_(img_get)(/*OUT*/void* dst,
+                   DiImage* img, DiOffT offset, SizeT size)
+ {
+-   vg_assert(img);
++   vg_assert(img != NULL);
+    vg_assert(size > 0);
+    ensure_valid(img, offset, size, "ML_(img_get)");
+    SizeT i;
+@@ -842,7 +953,7 @@
+ SizeT ML_(img_get_some)(/*OUT*/void* dst,
+                         DiImage* img, DiOffT offset, SizeT size)
+ {
+-   vg_assert(img);
++   vg_assert(img != NULL);
+    vg_assert(size > 0);
+    ensure_valid(img, offset, size, "ML_(img_get_some)");
+    UChar* dstU = (UChar*)dst;
+@@ -1002,7 +1113,7 @@
+       0x2d02ef8d
+     };
+ 
+-   vg_assert(img);
++   vg_assert(img != NULL);
+ 
+    /* If the image is local, calculate the CRC here directly.  If it's
+       remote, forward the request to the server. */
+--- a/coregrind/m_debuginfo/priv_image.h
++++ b/coregrind/m_debuginfo/priv_image.h
+@@ -74,9 +74,12 @@
+ /* Destroy an existing image. */
+ void ML_(img_done)(DiImage*);
+ 
+-/* How big is the image? */
++/* Virtual size of the image. */
+ DiOffT ML_(img_size)(const DiImage* img);
+ 
++/* Real size of the image. */
++DiOffT ML_(img_real_size)(const DiImage* img);
++
+ /* Does the section [offset, +size) exist in the image? */
+ Bool ML_(img_valid)(const DiImage* img, DiOffT offset, SizeT size);
+ 
+@@ -127,6 +130,12 @@
+    connection, making the client/server split pointless. */
+ UInt ML_(img_calc_gnu_debuglink_crc32)(DiImage* img);
+ 
++/* Mark compressed part of image defined with (offset, szC).
++   szD is length of uncompressed data (should be known before decompression).
++   Returns (virtual) position in image from which decompressed data can be
++   read. */
++DiOffT ML_(img_mark_compressed_part)(DiImage* img, DiOffT offset, SizeT szC,
++                                     SizeT szD);
+ 
+ /*------------------------------------------------------------*/
+ /*--- DiCursor -- cursors for reading images               ---*/
+--- a/coregrind/m_debuginfo/readelf.c
++++ b/coregrind/m_debuginfo/readelf.c
+@@ -8,7 +8,7 @@
+    This file is part of Valgrind, a dynamic binary instrumentation
+    framework.
+ 
+-   Copyright (C) 2000-2015 Julian Seward 
++   Copyright (C) 2000-2015 Julian Seward
+       jsew...@acm.org
+ 
+    This program is free software; you can redistribute it and/or
+@@ -51,6 +51,7 @@
+ #include "priv_readdwarf.h"        /* 'cos ELF contains DWARF */
+ #include "priv_readdwarf3.h"
+ #include "priv_readexidx.h"
++#include "config.h"
+ 
+ /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
+ #include <elf.h>
+@@ -59,6 +60,33 @@
+ #endif
+ /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
+ 
++#if !defined(HAVE_ELF32_CHDR)
++   typedef struct {
++      Elf32_Word  ch_type;
++      Elf32_Word  ch_size;
++      Elf32_Word  ch_addralign;
++   } Elf32_Chdr;
++#endif
++
++#if !defined(HAVE_ELF64_CHDR)
++   typedef struct {
++      Elf64_Word  ch_type;
++      Elf64_Word  ch_reserved;
++      Elf64_Xword ch_size;
++      Elf64_Xword ch_addralign;
++   } Elf64_Chdr;
++#endif
++
++#if !defined(SHF_COMPRESSED)
++   #define SHF_COMPRESSED (1 << 11)
++#endif
++
++#if !defined(ELFCOMPRESS_ZLIB)
++   #define ELFCOMPRESS_ZLIB 1
++#endif
++
++#define SIZE_OF_ZLIB_HEADER 12
++
+ /*------------------------------------------------------------*/
+ /*--- 32/64-bit parameterisation                           ---*/
+ /*------------------------------------------------------------*/
+@@ -66,7 +94,7 @@
+ /* For all the ELF macros and types which specify '32' or '64',
+    select the correct variant for this platform and give it
+    an 'XX' name.  Then use the 'XX' variant consistently in
+-   the rest of this file. 
++   the rest of this file.
+ */
+ #if VG_WORDSIZE == 4
+ #  define  ElfXX_Ehdr     Elf32_Ehdr
+@@ -80,6 +108,7 @@
+ #  define  ElfXX_Dyn      Elf32_Dyn
+ #  define  ELFXX_ST_BIND  ELF32_ST_BIND
+ #  define  ELFXX_ST_TYPE  ELF32_ST_TYPE
++#  define  ElfXX_Chdr     Elf32_Chdr
+ 
+ #elif VG_WORDSIZE == 8
+ #  define  ElfXX_Ehdr     Elf64_Ehdr
+@@ -93,6 +122,7 @@
+ #  define  ElfXX_Dyn      Elf64_Dyn
+ #  define  ELFXX_ST_BIND  ELF64_ST_BIND
+ #  define  ELFXX_ST_TYPE  ELF64_ST_TYPE
++#  define  ElfXX_Chdr     Elf64_Chdr
+ 
+ #else
+ # error "VG_WORDSIZE should be 4 or 8"
+@@ -160,7 +190,7 @@
+ 
+ static
+ void show_raw_elf_symbol ( DiImage* strtab_img,
+-                           Int i, 
++                           Int i,
+                            const ElfXX_Sym* sym,
+                            DiOffT sym_name_ioff, Addr sym_svma,
+                            Bool ppc64_linux_format )
+@@ -190,7 +220,7 @@
+       sym_name = ML_(img_strdup)(strtab_img, "di.sres.1", sym_name_ioff);
+    VG_(printf)(": svma %#010lx, %ssz %4llu  %s\n",
+                sym_svma, space, (ULong)(sym->st_size + 0UL),
+-               (sym_name ? sym_name : "NONAME") ); 
++               (sym_name ? sym_name : "NONAME") );
+    if (sym_name)
+       ML_(dinfo_free)(sym_name);
+ }
+@@ -223,8 +253,8 @@
+    to piece together the real size, address, name of the symbol from
+    multiple calls to this function.  Ugly and confusing.
+ */
+-static 
+-Bool get_elf_symbol_info ( 
++static
++Bool get_elf_symbol_info (
+         /* INPUTS */
+         struct _DebugInfo* di, /* containing DebugInfo */
+         const ElfXX_Sym* sym,        /* ELF symbol */
+@@ -276,13 +306,13 @@
+ 
+    /* Figure out if we're interested in the symbol.  Firstly, is it of
+       the right flavour?  */
+-   plausible 
+-      = (ELFXX_ST_BIND(sym->st_info) == STB_GLOBAL 
+-         || ELFXX_ST_BIND(sym->st_info) == STB_LOCAL 
++   plausible
++      = (ELFXX_ST_BIND(sym->st_info) == STB_GLOBAL
++         || ELFXX_ST_BIND(sym->st_info) == STB_LOCAL
+          || ELFXX_ST_BIND(sym->st_info) == STB_WEAK
+         )
+         &&
+-        (ELFXX_ST_TYPE(sym->st_info) == STT_FUNC 
++        (ELFXX_ST_TYPE(sym->st_info) == STT_FUNC
+          || ELFXX_ST_TYPE(sym->st_info) == STT_OBJECT
+ #        ifdef STT_GNU_IFUNC
+          || ELFXX_ST_TYPE(sym->st_info) == STT_GNU_IFUNC
+@@ -324,42 +354,42 @@
+       the previously deduced section svma address ranges are wrong. */
+    if (di->text_present
+        && di->text_size > 0
+-       && sym_svma >= text_svma 
++       && sym_svma >= text_svma
+        && sym_svma < text_svma + di->text_size) {
+       *is_text_out = True;
+       (*sym_avmas_out).main += text_bias;
+    } else
+    if (di->data_present
+        && di->data_size > 0
+-       && sym_svma >= data_svma 
++       && sym_svma >= data_svma
+        && sym_svma < data_svma + di->data_size) {
+       *is_text_out = False;
+       (*sym_avmas_out).main += data_bias;
+    } else
+    if (di->sdata_present
+        && di->sdata_size > 0
+-       && sym_svma >= sdata_svma 
++       && sym_svma >= sdata_svma
+        && sym_svma < sdata_svma + di->sdata_size) {
+       *is_text_out = False;
+       (*sym_avmas_out).main += sdata_bias;
+    } else
+    if (di->rodata_present
+        && di->rodata_size > 0
+-       && sym_svma >= rodata_svma 
++       && sym_svma >= rodata_svma
+        && sym_svma < rodata_svma + di->rodata_size) {
+       *is_text_out = False;
+       (*sym_avmas_out).main += rodata_bias;
+    } else
+    if (di->bss_present
+        && di->bss_size > 0
+-       && sym_svma >= bss_svma 
++       && sym_svma >= bss_svma
+        && sym_svma < bss_svma + di->bss_size) {
+       *is_text_out = False;
+       (*sym_avmas_out).main += bss_bias;
+    } else
+    if (di->sbss_present
+        && di->sbss_size > 0
+-       && sym_svma >= sbss_svma 
++       && sym_svma >= sbss_svma
+        && sym_svma < sbss_svma + di->sbss_size) {
+       *is_text_out = False;
+       (*sym_avmas_out).main += sbss_bias;
+@@ -456,7 +486,7 @@
+       symbol defined elsewhere, so ignore it. */
+    if (di->got_present
+        && di->got_size > 0
+-       && (*sym_avmas_out).main >= di->got_avma 
++       && (*sym_avmas_out).main >= di->got_avma
+        && (*sym_avmas_out).main <  di->got_avma + di->got_size) {
+       if (TRACE_SYMTAB_ENABLED) {
+          HChar* sym_name = ML_(img_strdup)(escn_strtab->img,
+@@ -509,7 +539,7 @@
+       Bool   details = 1||False;
+ 
+       if (details)
+-         TRACE_SYMTAB("opdXXX: opd_bias %p, sym_svma_out %p\n", 
++         TRACE_SYMTAB("opdXXX: opd_bias %p, sym_svma_out %p\n",
+                       (void*)(opd_bias), (void*)(*sym_avmas_out).main);
+ 
+       if (!VG_IS_8_ALIGNED((*sym_avmas_out).main)) {
+@@ -562,10 +592,10 @@
+       ML_(img_get)(&fn_descr[0], escn_opd->img,
+                    escn_opd->ioff + offset_in_opd, sizeof(fn_descr));
+ 
+-      if (details) 
+-         TRACE_SYMTAB("opdXXY: offset %d,  fn_descr %p\n", 
++      if (details)
++         TRACE_SYMTAB("opdXXY: offset %d,  fn_descr %p\n",
+                       offset_in_opd, fn_descr);
+-      if (details) 
++      if (details)
+          TRACE_SYMTAB("opdXXZ: *fn_descr %p\n", (void*)(fn_descr[0]));
+ 
+       /* opd_bias is the what we have to add to SVMAs found in .opd to
+@@ -603,38 +633,38 @@
+ 
+    /* If no part of the symbol falls within the mapped range,
+       ignore it. */
+-   
+-   in_text 
++
++   in_text
+       = di->text_present
+         && di->text_size > 0
+         && !((*sym_avmas_out).main + *sym_size_out <= di->text_avma
+              || (*sym_avmas_out).main >= di->text_avma + di->text_size);
+ 
+-   in_data 
++   in_data
+       = di->data_present
+         && di->data_size > 0
+         && !((*sym_avmas_out).main + *sym_size_out <= di->data_avma
+              || (*sym_avmas_out).main >= di->data_avma + di->data_size);
+ 
+-   in_sdata 
++   in_sdata
+       = di->sdata_present
+         && di->sdata_size > 0
+         && !((*sym_avmas_out).main + *sym_size_out <= di->sdata_avma
+              || (*sym_avmas_out).main >= di->sdata_avma + di->sdata_size);
+ 
+-   in_rodata 
++   in_rodata
+       = di->rodata_present
+         && di->rodata_size > 0
+         && !((*sym_avmas_out).main + *sym_size_out <= di->rodata_avma
+              || (*sym_avmas_out).main >= di->rodata_avma + di->rodata_size);
+ 
+-   in_bss 
++   in_bss
+       = di->bss_present
+         && di->bss_size > 0
+         && !((*sym_avmas_out).main + *sym_size_out <= di->bss_avma
+              || (*sym_avmas_out).main >= di->bss_avma + di->bss_size);
+ 
+-   in_sbss 
++   in_sbss
+       = di->sbss_present
+         && di->sbss_size > 0
+         && !((*sym_avmas_out).main + *sym_size_out <= di->sbss_avma
+@@ -742,7 +772,7 @@
+    "normal" case ({x86,amd64,ppc32,arm,mips32,mips64, ppc64le}-linux). */
+ static
+ __attribute__((unused)) /* not referred to on all targets */
+-void read_elf_symtab__normal( 
++void read_elf_symtab__normal(
+         struct _DebugInfo* di, const HChar* tab_name,
+         DiSlice*   escn_symtab,
+         DiSlice*   escn_strtab,
+@@ -781,10 +811,10 @@
+       sym_avmas_really.main = 0;
+       SET_TOCPTR_AVMA(sym_avmas_really, 0);
+       SET_LOCAL_EP_AVMA(sym_avmas_really, 0);
+-      if (get_elf_symbol_info(di, &sym, sym_name, escn_strtab, 
++      if (get_elf_symbol_info(di, &sym, sym_name, escn_strtab,
+                               sym_svma, symtab_in_debug,
+                               escn_opd, di->text_bias,
+-                              &sym_name_really, 
++                              &sym_name_really,
+                               &sym_avmas_really,
+                               &sym_size,
+                               &from_opd, &is_text, &is_ifunc)) {
+@@ -829,7 +859,7 @@
+    ppc64be-linux, which requires special treatment. */
+ 
+ typedef
+-   struct { 
++   struct {
+       Addr   addr;
+       DiOffT name;
+       /* We have to store also the DiImage* so as to give context for
+@@ -868,7 +898,7 @@
+         struct _DebugInfo* di, const HChar* tab_name,
+         DiSlice*   escn_symtab,
+         DiSlice*   escn_strtab,
+-        DiSlice*   escn_opd, /* ppc64be-linux only */ 
++        DiSlice*   escn_opd, /* ppc64be-linux only */
+         Bool       symtab_in_debug
+      )
+ {
+@@ -890,8 +920,8 @@
+    TRACE_SYMTAB("\n--- Reading (ELF, ppc64be-linux) %s (%llu entries) ---\n",
+                 tab_name, escn_symtab->szB/sizeof(ElfXX_Sym) );
+ 
+-   oset = VG_(OSetGen_Create)( offsetof(TempSym,key), 
+-                               (OSetCmp_t)cmp_TempSymKey, 
++   oset = VG_(OSetGen_Create)( offsetof(TempSym,key),
++                               (OSetCmp_t)cmp_TempSymKey,
+                                ML_(dinfo_zalloc), "di.respl.1",
+                                ML_(dinfo_free) );
+ 
+@@ -920,7 +950,7 @@
+       if (get_elf_symbol_info(di, &sym, sym_name, escn_strtab,
+                               sym_svma, symtab_in_debug,
+                               escn_opd, di->text_bias,
+-                              &sym_name_really, 
++                              &sym_name_really,
+                               &sym_avmas_really,
+                               &sym_size,
+                               &from_opd, &is_text, &is_ifunc)) {
+@@ -938,7 +968,7 @@
+             modify_tocptr = False;
+             old_size   = 0;
+ 
+-            if (prev->from_opd && !from_opd 
++            if (prev->from_opd && !from_opd
+                 && (prev->size == 24 || prev->size == 16)
+                 && sym_size != prev->size) {
+                /* Existing one is an opd-redirect, with a bogus size,
+@@ -972,7 +1002,7 @@
+                            old_size,
+                            prev->key.addr,
+                            prev->tocptr,
+-                           prev->size, 
++                           prev->size,
+                            prev->key.name
+                );
+             }
+@@ -1095,7 +1125,7 @@
+                   Word j;
+                   for (j = 0; j < note.n_descsz; j++) {
+                      UChar desc_j = ML_(img_get_UChar)(img, desc_ioff + j);
+-                     VG_(sprintf)(buildid + VG_(strlen)(buildid), 
++                     VG_(sprintf)(buildid + VG_(strlen)(buildid),
+                                   "%02x", (UInt)desc_j);
+                   }
+                }
+@@ -1103,7 +1133,7 @@
+                note_ioff = note_ioff + sizeof(ElfXX_Nhdr)
+                                      + ((note.n_namesz + 3) & ~3)
+                                      + ((note.n_descsz + 3) & ~3);
+-            }            
++            }
+          }
+       }
+ 
+@@ -1135,7 +1165,7 @@
+                   Word j;
+                   for (j = 0; j < note.n_descsz; j++) {
+                      UChar desc_j = ML_(img_get_UChar)(img, desc_ioff + j);
+-                     VG_(sprintf)(buildid + VG_(strlen)(buildid), 
++                     VG_(sprintf)(buildid + VG_(strlen)(buildid),
+                                   "%02x", (UInt)desc_j);
+                   }
+                }
+@@ -1143,7 +1173,7 @@
+                note_ioff = note_ioff + sizeof(ElfXX_Nhdr)
+                                      + ((note.n_namesz + 3) & ~3)
+                                      + ((note.n_descsz + 3) & ~3);
+-            }            
++            }
+          }
+       }
+    }
+@@ -1167,7 +1197,7 @@
+ DiImage* open_debug_file( const HChar* name, const HChar* buildid, UInt crc,
+                           Bool rel_ok, const HChar* serverAddr )
+ {
+-   DiImage* dimg 
++   DiImage* dimg
+      = serverAddr ? ML_(img_from_di_server)(name, serverAddr)
+                   : ML_(img_from_local_file)(name);
+    if (dimg == NULL)
+@@ -1191,8 +1221,8 @@
+       if (debug_buildid == NULL || VG_(strcmp)(buildid, debug_buildid) != 0) {
+          ML_(img_done)(dimg);
+          if (VG_(clo_verbosity) > 1)
+-            VG_(message)(Vg_DebugMsg, 
+-               "  .. build-id mismatch (found %s wanted %s)\n", 
++            VG_(message)(Vg_DebugMsg,
++               "  .. build-id mismatch (found %s wanted %s)\n",
+                debug_buildid, buildid);
+          ML_(dinfo_free)(debug_buildid);
+          return NULL;
+@@ -1205,7 +1235,7 @@
+       if (calccrc != crc) {
+          ML_(img_done)(dimg);
+          if (VG_(clo_verbosity) > 1)
+-            VG_(message)(Vg_DebugMsg, 
++            VG_(message)(Vg_DebugMsg,
+                "  .. CRC mismatch (computed %08x wanted %08x)\n", calccrc, crc);
+          return NULL;
+       }
+@@ -1213,7 +1243,7 @@
+       if (VG_(clo_verbosity) > 1)
+          VG_(message)(Vg_DebugMsg, "  .. CRC is valid\n");
+    }
+-   
++
+    return dimg;
+ }
+ 
+@@ -1270,7 +1300,7 @@
+       VG_(sprintf)(debugpath, "%s/.debug/%s", objdir, debugname);
+       dimg = open_debug_file(debugpath, buildid, crc, rel_ok, NULL);
+       if (dimg != NULL) goto dimg_ok;
+-      
++
+       VG_(sprintf)(debugpath, "/usr/lib/debug%s/%s", objdir, debugname);
+       dimg = open_debug_file(debugpath, buildid, crc, rel_ok, NULL);
+       if (dimg != NULL) goto dimg_ok;
+@@ -1422,6 +1452,50 @@
+    return 0;
+ }
+ 
++/* Check if section is compressed and modify DiSlice if it is.
++   Returns False in case of unsupported compression type.
++*/
++static Bool check_compression(ElfXX_Shdr* h, DiSlice* s) {
++   if (h->sh_flags & SHF_COMPRESSED) {
++      ElfXX_Chdr chdr;
++      ML_(img_get)(&chdr, s->img, s->ioff, sizeof(ElfXX_Chdr));
++      if (chdr.ch_type != ELFCOMPRESS_ZLIB)
++         return False;
++      s->ioff = ML_(img_mark_compressed_part)(s->img,
++                                              s->ioff + sizeof(ElfXX_Chdr),
++                                              s->szB - sizeof(ElfXX_Chdr),
++                                              (SizeT)chdr.ch_size);
++      s->szB = chdr.ch_size;
++    } else if (h->sh_size > SIZE_OF_ZLIB_HEADER) {
++       /* Read the zlib header.  In this case, it should be "ZLIB"
++       followed by the uncompressed section size, 8 bytes in BE order. */
++       UChar tmp[SIZE_OF_ZLIB_HEADER];
++       ML_(img_get)(tmp, s->img, s->ioff, SIZE_OF_ZLIB_HEADER);
++       if (VG_(memcmp)(tmp, "ZLIB", 4) == 0) {
++          SizeT size;
++#         if (VG_WORDSIZE == 8)
++             size = tmp[4]; size <<= 8;
++             size += tmp[5]; size <<= 8;
++             size += tmp[6]; size <<= 8;
++             size += tmp[7]; size <<= 8;
++#         else
++             vg_assert((tmp[4] == 0) && (tmp[5] == 0) && (tmp[6] == 0)
++                       && (tmp[7] == 0));
++             size = 0;
++#         endif
++          size += tmp[8]; size <<= 8;
++          size += tmp[9]; size <<= 8;
++          size += tmp[10]; size <<= 8;
++          size += tmp[11];
++          s->ioff = ML_(img_mark_compressed_part)(s->img,
++                                                  s->ioff + SIZE_OF_ZLIB_HEADER,
++                                                  s->szB - SIZE_OF_ZLIB_HEADER,
++                                                  size);
++          s->szB = size;
++       }
++    }
++    return True;
++}
+ 
+ /* The central function for reading ELF debug info.  For the
+    object/exe specified by the DebugInfo, find ELF sections, then read
+@@ -1639,7 +1713,7 @@
+      shdr_strtab_mioff
+         = ehdr_mioff /* isn't this always zero? */ + a_shdr.sh_offset;
+ 
+-     if (!ML_(img_valid)(mimg, shdr_strtab_mioff, 
++     if (!ML_(img_valid)(mimg, shdr_strtab_mioff,
+                          1/*bogus, but we don't know the real size*/ )) {
+         ML_(symerr)(di, True, "Invalid ELF Section Header String Table");
+         goto out;
+@@ -1697,7 +1771,7 @@
+                       && map->size > 0 /* stay sane */
+                       && a_phdr.p_offset >= map->foff
+                       && a_phdr.p_offset <  map->foff + map->size
+-                      && a_phdr.p_offset + a_phdr.p_filesz 
++                      && a_phdr.p_offset + a_phdr.p_filesz
+                          <= map->foff + map->size) {
+                      RangeAndBias item;
+                      item.svma_base  = a_phdr.p_vaddr;
+@@ -1796,7 +1870,7 @@
+ 
+    /* TOPLEVEL */
+ 
+-   /* If, after looking at all the program headers, we still didn't 
++   /* If, after looking at all the program headers, we still didn't
+       find a soname, add a fake one. */
+    if (di->soname == NULL) {
+       TRACE_SYMTAB("No soname found; using (fake) \"NONE\"\n");
+@@ -1868,7 +1942,7 @@
+       }
+ 
+       TRACE_SYMTAB(" [sec %2ld]  %s %s  al%2u  foff %6ld .. %6lu  "
+-                   "  svma %p  name \"%s\"\n", 
++                   "  svma %p  name \"%s\"\n",
+                    i, inrx ? "rx" : "  ", inrw ? "rw" : "  ", alyn,
+                    foff, foff+size-1, (void*)svma, name);
+ 
+@@ -1919,10 +1993,10 @@
+             di->text_debug_svma = svma;
+             di->text_debug_bias = inrx->bias;
+             TRACE_SYMTAB("acquiring .text svma = %#lx .. %#lx\n",
+-                         di->text_svma, 
++                         di->text_svma,
+                          di->text_svma + di->text_size - 1);
+             TRACE_SYMTAB("acquiring .text avma = %#lx .. %#lx\n",
+-                         di->text_avma, 
++                         di->text_avma,
+                          di->text_avma + di->text_size - 1);
+             TRACE_SYMTAB("acquiring .text bias = %#lx\n", (UWord)di->text_bias);
+          } else {
+@@ -2069,7 +2143,7 @@
+                VG_(message)(Vg_UserMsg,
+                             "Warning: the following file's .bss is "
+                             "mapped r-x only - ignoring .bss syms\n");
+-               VG_(message)(Vg_UserMsg,   " %s\n", di->fsm.filename 
++               VG_(message)(Vg_UserMsg,   " %s\n", di->fsm.filename
+                                                       ? di->fsm.filename
+                                                       : "(null?!)" );
+             }
+@@ -2203,7 +2277,7 @@
+             di->plt_avma = svma + inrw->bias;
+             di->plt_size = size;
+             TRACE_SYMTAB("acquiring .plt avma = %#lx\n", di->plt_avma);
+-         } else 
++         } else
+          if ((!inrw) && (!inrx) && size > 0 && !di->plt_present) {
+             /* File contains a .plt, but it didn't get mapped.
+                Presumably it is not required on this platform.  At
+@@ -2265,10 +2339,10 @@
+             di->exidx_size = size;
+             di->exidx_bias = inrx->bias;
+             TRACE_SYMTAB("acquiring .exidx svma = %#lx .. %#lx\n",
+-                         di->exidx_svma, 
++                         di->exidx_svma,
+                          di->exidx_svma + di->exidx_size - 1);
+             TRACE_SYMTAB("acquiring .exidx avma = %#lx .. %#lx\n",
+-                         di->exidx_avma, 
++                         di->exidx_avma,
+                          di->exidx_avma + di->exidx_size - 1);
+             TRACE_SYMTAB("acquiring .exidx bias = %#lx\n",
+                          (UWord)di->exidx_bias);
+@@ -2288,10 +2362,10 @@
+             di->extab_size = size;
+             di->extab_bias = inrx->bias;
+             TRACE_SYMTAB("acquiring .extab svma = %#lx .. %#lx\n",
+-                         di->extab_svma, 
++                         di->extab_svma,
+                          di->extab_svma + di->extab_size - 1);
+             TRACE_SYMTAB("acquiring .extab avma = %#lx .. %#lx\n",
+-                         di->extab_avma, 
++                         di->extab_avma,
+                          di->extab_avma + di->extab_size - 1);
+             TRACE_SYMTAB("acquiring .extab bias = %#lx\n",
+                          (UWord)di->extab_bias);
+@@ -2354,7 +2428,7 @@
+       DiSlice debug_str_alt_escn  = DiSlice_INVALID; // .debug_str    (alt)
+       DiSlice dwarf1d_escn        = DiSlice_INVALID; // .debug        (dwarf1)
+       DiSlice dwarf1l_escn        = DiSlice_INVALID; // .line         (dwarf1)
+-      DiSlice opd_escn            = DiSlice_INVALID; // .opd (dwarf2, 
++      DiSlice opd_escn            = DiSlice_INVALID; // .opd (dwarf2,
+                                                      //       ppc64be-linux)
+       DiSlice ehframe_escn[N_EHFRAME_SECTS];         // .eh_frame (dwarf2)
+ 
+@@ -2393,15 +2467,20 @@
+                _sec_escn.img  = mimg; \
+                _sec_escn.ioff = (DiOffT)a_shdr.sh_offset; \
+                _sec_escn.szB  = a_shdr.sh_size; \
++               if (!check_compression(&a_shdr, &_sec_escn)) { \
++                  ML_(symerr)(di, True, "   Compression type is unsupported"); \
++                  goto out; \
++               } \
+                nobits         = a_shdr.sh_type == SHT_NOBITS; \
+                vg_assert(_sec_escn.img  != NULL); \
+                vg_assert(_sec_escn.ioff != DiOffT_INVALID); \
+                TRACE_SYMTAB( "%-18s:  ioff %llu .. %llu\n", \
+-                             _sec_name, (ULong)_sec_escn.ioff, \
+-                             ((ULong)_sec_escn.ioff) + _sec_escn.szB - 1); \
++                             _sec_name, (ULong)a_shdr.sh_offset, \
++                             ((ULong)a_shdr.sh_offset) + a_shdr.sh_size - 1); \
+                /* SHT_NOBITS sections have zero size in the file. */ \
+                if (!nobits && \
+-                   a_shdr.sh_offset + _sec_escn.szB > ML_(img_size)(mimg) ) { \
++                   a_shdr.sh_offset + \
++                      a_shdr.sh_size > ML_(img_real_size)(mimg)) { \
+                   ML_(symerr)(di, True, \
+                               "   section beyond image end?!"); \
+                   goto out; \
+@@ -2414,33 +2493,56 @@
+ #        define FIND(_sec_name, _sec_escn) \
+             FINDX(_sec_name, _sec_escn, /**/)
+ 
+-         /*   NAME                  ElfSec */
+-         FIND(".dynsym",            dynsym_escn)
+-         FIND(".dynstr",            dynstr_escn)
+-         FIND(".symtab",            symtab_escn)
+-         FIND(".strtab",            strtab_escn)
++         /*      NAME                  ElfSec */
++         FIND(   ".dynsym",            dynsym_escn)
++         FIND(   ".dynstr",            dynstr_escn)
++         FIND(   ".symtab",            symtab_escn)
++         FIND(   ".strtab",            strtab_escn)
+ #        if defined(VGO_solaris)
+-         FIND(".SUNW_ldynsym",      ldynsym_escn)
++         FIND(   ".SUNW_ldynsym",      ldynsym_escn)
+ #        endif
+ 
+-         FIND(".gnu_debuglink",     debuglink_escn)
+-         FIND(".gnu_debugaltlink",  debugaltlink_escn)
++         FIND(   ".gnu_debuglink",     debuglink_escn)
++         FIND(   ".gnu_debugaltlink",  debugaltlink_escn)
++
++         FIND(   ".debug_line",        debug_line_escn)
++         if (!ML_(sli_is_valid)(debug_line_escn))
++            FIND(".zdebug_line",       debug_line_escn)
++
++         FIND(   ".debug_info",        debug_info_escn)
++         if (!ML_(sli_is_valid)(debug_info_escn))
++            FIND(".zdebug_info",       debug_info_escn)
+ 
+-         FIND(".debug_line",        debug_line_escn)
+-         FIND(".debug_info",        debug_info_escn)
+-         FIND(".debug_types",       debug_types_escn)
+-         FIND(".debug_abbrev",      debug_abbv_escn)
+-         FIND(".debug_str",         debug_str_escn)
+-         FIND(".debug_ranges",      debug_ranges_escn)
+-         FIND(".debug_loc",         debug_loc_escn)
+-         FIND(".debug_frame",       debug_frame_escn)
++         FIND(   ".debug_types",       debug_types_escn)
++         if (!ML_(sli_is_valid)(debug_types_escn))
++            FIND(".zdebug_types",      debug_types_escn)
+ 
+-         FIND(".debug",             dwarf1d_escn)
+-         FIND(".line",              dwarf1l_escn)
++         FIND(   ".debug_abbrev",      debug_abbv_escn)
++         if (!ML_(sli_is_valid)(debug_abbv_escn))
++            FIND(".zdebug_abbrev",     debug_abbv_escn)
+ 
+-         FIND(".opd",               opd_escn)
++         FIND(   ".debug_str",         debug_str_escn)
++         if (!ML_(sli_is_valid)(debug_str_escn))
++            FIND(".zdebug_str",        debug_str_escn)
+ 
+-         FINDX(".eh_frame",         ehframe_escn[ehframe_mix],
++         FIND(   ".debug_ranges",      debug_ranges_escn)
++         if (!ML_(sli_is_valid)(debug_ranges_escn))
++            FIND(".zdebug_ranges",     debug_ranges_escn)
++
++         FIND(   ".debug_loc",         debug_loc_escn)
++         if (!ML_(sli_is_valid)(debug_loc_escn))
++            FIND(".zdebug_loc",    debug_loc_escn)
++
++         FIND(   ".debug_frame",       debug_frame_escn)
++         if (!ML_(sli_is_valid)(debug_frame_escn))
++            FIND(".zdebug_frame",      debug_frame_escn)
++
++         FIND(   ".debug",             dwarf1d_escn)
++         FIND(   ".line",              dwarf1l_escn)
++
++         FIND(   ".opd",               opd_escn)
++
++         FINDX(  ".eh_frame",          ehframe_escn[ehframe_mix],
+                do { ehframe_mix++; vg_assert(ehframe_mix <= N_EHFRAME_SECTS);
+                } while (0)
+          )
+@@ -2476,7 +2578,7 @@
+       if (buildid != NULL || debuglink_escn.img != NULL) {
+          /* Do have a debuglink section? */
+          if (debuglink_escn.img != NULL) {
+-            UInt crc_offset 
++            UInt crc_offset
+                = VG_ROUNDUP(ML_(img_strlen)(debuglink_escn.img,
+                                             debuglink_escn.ioff)+1, 4);
+             vg_assert(crc_offset + sizeof(UInt) <= debuglink_escn.szB);
+@@ -2582,7 +2684,7 @@
+            shdr_strtab_dioff = a_shdr.sh_offset;
+            if (!ML_(img_valid)(dimg, shdr_strtab_dioff,
+                                1/*bogus, but we don't know the real size*/)) {
+-              ML_(symerr)(di, True, 
++              ML_(symerr)(di, True,
+                           "Invalid ELF Section Header String Table"
+                           " (debuginfo file)");
+               goto out;
+@@ -2592,7 +2694,7 @@
+          for (i = 0; i < ehdr_dimg.e_phnum; i++) {
+             ElfXX_Phdr a_phdr;
+             ML_(img_get)(&a_phdr, dimg, INDEX_BIS(ehdr_dimg.e_phoff,
+-                                                  i, phdr_dent_szB), 
++                                                  i, phdr_dent_szB),
+                            sizeof(a_phdr));
+             if (a_phdr.p_type == PT_LOAD) {
+                for (j = 0; j < VG_(sizeXA)(di->fsm.maps); j++) {
+@@ -2625,7 +2727,7 @@
+          for (i = 0; i < ehdr_dimg.e_shnum; i++) {
+ 
+             /* Find debug svma and bias information for sections
+-               we found in the main file. */ 
++               we found in the main file. */
+ 
+ #           define FIND(_sec, _seg) \
+             do { \
+@@ -2690,16 +2792,20 @@
+                   _sec_escn.img  = dimg; \
+                   _sec_escn.ioff = (DiOffT)a_shdr.sh_offset;  \
+                   _sec_escn.szB  = a_shdr.sh_size; \
++                  if (!check_compression(&a_shdr, &_sec_escn)) { \
++                     ML_(symerr)(di, True, "   Compression type is unsupported"); \
++                     goto out; \
++                  } \
+                   nobits         = a_shdr.sh_type == SHT_NOBITS; \
+                   vg_assert(_sec_escn.img  != NULL); \
+                   vg_assert(_sec_escn.ioff != DiOffT_INVALID); \
+                   TRACE_SYMTAB( "%-18s: dioff %llu .. %llu\n", \
+                                 _sec_name, \
+-                                (ULong)_sec_escn.ioff, \
+-                                ((ULong)_sec_escn.ioff) + _sec_escn.szB - 1); \
++                                (ULong)a_shdr.sh_offset, \
++                                ((ULong)a_shdr.sh_offset) + a_shdr.sh_size - 1); \
+                   /* SHT_NOBITS sections have zero size in the file. */ \
+                   if (!nobits && a_shdr.sh_offset \
+-                      + _sec_escn.szB > ML_(img_size)(dimg)) { \
++                      + a_shdr.sh_size > ML_(img_real_size)(_sec_escn.img)) { \
+                      ML_(symerr)(di, True, \
+                                  "   section beyond image end?!"); \
+                      goto out; \
+@@ -2707,24 +2813,45 @@
+                } \
+             } while (0);
+ 
+-            /* NEEDED?        NAME             ElfSec */
+-            FIND(need_symtab, ".symtab",       symtab_escn)
+-            FIND(need_symtab, ".strtab",       strtab_escn)
+-            FIND(need_dwarf2, ".debug_line",   debug_line_escn)
+-            FIND(need_dwarf2, ".debug_info",   debug_info_escn)
+-            FIND(need_dwarf2, ".debug_types",  debug_types_escn)
+-
+-            FIND(need_dwarf2, ".debug_abbrev", debug_abbv_escn)
+-            FIND(need_dwarf2, ".debug_str",    debug_str_escn)
+-            FIND(need_dwarf2, ".debug_ranges", debug_ranges_escn)
+-
+-            FIND(need_dwarf2, ".debug_loc",    debug_loc_escn)
+-            FIND(need_dwarf2, ".debug_frame",  debug_frame_escn)
++            /* NEEDED?               NAME                 ElfSec */
++            FIND(   need_symtab,     ".symtab",           symtab_escn)
++            FIND(   need_symtab,     ".strtab",           strtab_escn)
++            FIND(   need_dwarf2,     ".debug_line",       debug_line_escn)
++            if (!ML_(sli_is_valid)(debug_line_escn))
++               FIND(need_dwarf2,     ".zdebug_line",      debug_line_escn)
++
++            FIND(   need_dwarf2,     ".debug_info",       debug_info_escn)
++            if (!ML_(sli_is_valid)(debug_info_escn))
++               FIND(need_dwarf2,     ".zdebug_info",      debug_info_escn)
++
++            FIND(   need_dwarf2,     ".debug_types",      debug_types_escn)
++            if (!ML_(sli_is_valid)(debug_types_escn))
++               FIND(need_dwarf2,     ".zdebug_types",     debug_types_escn)
++
++            FIND(   need_dwarf2,     ".debug_abbrev",     debug_abbv_escn)
++            if (!ML_(sli_is_valid)(debug_abbv_escn))
++               FIND(need_dwarf2,     ".zdebug_abbrev",    debug_abbv_escn)
++
++            FIND(   need_dwarf2,     ".debug_str",        debug_str_escn)
++            if (!ML_(sli_is_valid)(debug_str_escn))
++               FIND(need_dwarf2,     ".zdebug_str",       debug_str_escn)
++
++            FIND(   need_dwarf2,     ".debug_ranges",     debug_ranges_escn)
++            if (!ML_(sli_is_valid)(debug_ranges_escn))
++               FIND(need_dwarf2,     ".zdebug_ranges",    debug_ranges_escn)
++
++            FIND(   need_dwarf2,     ".debug_loc",        debug_loc_escn)
++            if (!ML_(sli_is_valid)(debug_loc_escn))
++               FIND(need_dwarf2,     ".zdebug_loc",       debug_loc_escn)
++
++            FIND(   need_dwarf2,     ".debug_frame",      debug_frame_escn)
++            if (!ML_(sli_is_valid)(debug_frame_escn))
++               FIND(need_dwarf2,     ".zdebug_frame",     debug_frame_escn)
+ 
+-            FIND(need_dwarf2, ".gnu_debugaltlink", debugaltlink_escn)
++            FIND(   need_dwarf2,     ".gnu_debugaltlink", debugaltlink_escn)
+ 
+-            FIND(need_dwarf1, ".debug",        dwarf1d_escn)
+-            FIND(need_dwarf1, ".line",         dwarf1l_escn)
++            FIND(   need_dwarf1,     ".debug",            dwarf1d_escn)
++            FIND(   need_dwarf1,     ".line",             dwarf1l_escn)
+ 
+ #           undef FIND
+          } /* Find all interesting sections */
+@@ -2756,7 +2883,7 @@
+             VG_(sprintf)(
+                altbuildid + 2 * j, "%02x",
+                (UInt)ML_(img_get_UChar)(debugaltlink_escn.img,
+-                                        debugaltlink_escn.ioff 
++                                        debugaltlink_escn.ioff
+                                         + buildid_offset + j));
+ 
+          /* See if we can find a matching debug file */
+@@ -2807,7 +2934,7 @@
+            shdr_strtab_aioff = a_shdr.sh_offset;
+            if (!ML_(img_valid)(aimg, shdr_strtab_aioff,
+                                1/*bogus, but we don't know the real size*/)) {
+-              ML_(symerr)(di, True, 
++              ML_(symerr)(di, True,
+                           "Invalid ELF Section Header String Table"
+                           " (alternate debuginfo file)");
+               goto out;
+@@ -2834,20 +2961,36 @@
+                   _sec_escn.img  = aimg; \
+                   _sec_escn.ioff = (DiOffT)a_shdr.sh_offset; \
+                   _sec_escn.szB  = a_shdr.sh_size; \
++                  if (!check_compression(&a_shdr, &_sec_escn)) { \
++                     ML_(symerr)(di, True, "   Compression type is " \
++                                           "unsupported"); \
++                     goto out; \
++                  } \
+                   vg_assert(_sec_escn.img  != NULL); \
+                   vg_assert(_sec_escn.ioff != DiOffT_INVALID); \
+                   TRACE_SYMTAB( "%-18s: aioff %llu .. %llu\n", \
+                                 _sec_name, \
+-                                (ULong)_sec_escn.ioff, \
+-                                ((ULong)_sec_escn.ioff) + _sec_escn.szB - 1); \
++                                (ULong)a_shdr.sh_offset, \
++                                ((ULong)a_shdr.sh_offset) + a_shdr.sh_size - 1); \
+                } \
+             } while (0);
+ 
+-            /*   NAME             ElfSec */
+-            FIND(".debug_line",   debug_line_alt_escn)
+-            FIND(".debug_info",   debug_info_alt_escn)
+-            FIND(".debug_abbrev", debug_abbv_alt_escn)
+-            FIND(".debug_str",    debug_str_alt_escn)
++            /*   NAME                 ElfSec */
++            FIND(".debug_line",       debug_line_alt_escn)
++            if (!ML_(sli_is_valid)(debug_line_alt_escn))
++               FIND(".zdebug_line",   debug_line_alt_escn)
++
++            FIND(".debug_info",       debug_info_alt_escn)
++            if (!ML_(sli_is_valid)(debug_info_alt_escn))
++               FIND(".zdebug_info",   debug_info_alt_escn)
++
++            FIND(".debug_abbrev",     debug_abbv_alt_escn)
++            if (!ML_(sli_is_valid)(debug_abbv_alt_escn))
++               FIND(".zdebug_abbrev", debug_abbv_alt_escn)
++
++            FIND(".debug_str",        debug_str_alt_escn)
++            if (!ML_(sli_is_valid)(debug_str_alt_escn))
++               FIND(".zdebug_str",    debug_str_alt_escn)
+ 
+ #           undef FIND
+          } /* Find all interesting sections */
+@@ -2914,7 +3057,7 @@
+          debuginfo reading for that reason, but, in
+          read_unitinfo_dwarf2, do check that debugstr is non-NULL
+          before using it. */
+-      if (ML_(sli_is_valid)(debug_info_escn) 
++      if (ML_(sli_is_valid)(debug_info_escn)
+           && ML_(sli_is_valid)(debug_abbv_escn)
+           && ML_(sli_is_valid)(debug_line_escn)) {
+          /* The old reader: line numbers and unwind info only */
+@@ -2946,7 +3089,7 @@
+       // JRS 31 July 2014: dwarf-1 reading is currently broken and
+       // therefore deactivated.
+       //if (dwarf1d_img && dwarf1l_img) {
+-      //   ML_(read_debuginfo_dwarf1) ( di, dwarf1d_img, dwarf1d_sz, 
++      //   ML_(read_debuginfo_dwarf1) ( di, dwarf1d_img, dwarf1d_sz,
+       //                                    dwarf1l_img, dwarf1l_sz );
+       //}
+ 
+@@ -3021,7 +3164,7 @@
+    }
+    /* TOPLEVEL */
+ 
+-  out: 
++  out:
+    {
+       /* Last, but not least, detach from the image(s). */
+       if (mimg) ML_(img_done)(mimg);
+@@ -3031,7 +3174,7 @@
+       if (svma_ranges) VG_(deleteXA)(svma_ranges);
+ 
+       return res;
+-   } /* out: */ 
++   } /* out: */
+ 
+    /* NOTREACHED */
+ }
+--- /dev/null
++++ b/coregrind/m_debuginfo/tinfl.c
+@@ -0,0 +1,596 @@
++/*--------------------------------------------------------------------*/
++/*--- Tiny zlib decompressor                               tinfl.c ---*/
++/*--------------------------------------------------------------------*/
++
++/* tinfl.c v1.11 - public domain inflate with zlib header parsing/adler32
++   checking (inflate-only subset of miniz.c)
++
++   Rich Geldreich <richge...@gmail.com>, last updated May 20, 2011
++
++   Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt
++   and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt
++
++   The original file has been modified in order to be a part of Valgrind
++   project, a dynamic binary instrumentation framework.
++   RT-RK Institute for Computer Based Systems, 2016 (mips-valgr...@rt-rk.com)
++
++   This program is free software; you can redistribute it and/or
++   modify it under the terms of the GNU General Public License as
++   published by the Free Software Foundation; either version 2 of the
++   License, or (at your option) any later version.
++
++   This program is distributed in the hope that it will be useful, but
++   WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++   02111-1307, USA.
++
++   The GNU General Public License is contained in the file COPYING.
++*/
++
++#ifndef TINFL_HEADER_INCLUDED
++#define TINFL_HEADER_INCLUDED
++
++/* The entire decompressor coroutine is implemented in tinfl_decompress().
++   The other functions are optional high-level helpers. */
++
++#include "pub_core_basics.h"
++
++typedef UChar  mz_uint8;
++typedef Short  mz_int16;
++typedef UShort mz_uint16;
++typedef UInt   mz_uint32;
++typedef UInt   mz_uint;
++typedef ULong  mz_uint64;
++
++#if defined(VGA_x86) || defined(VGA_amd64)
++// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 if integer loads and stores to
++// unaligned addresses are acceptable on the target platform (slightly faster).
++#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
++#endif
++
++#define MINIZ_LITTLE_ENDIAN       ( defined(VG_LITTLEENDIAN) )
++#define MINIZ_HAS_64BIT_REGISTERS ( VG_WORDSIZE == 8 )
++
++// Works around MSVC's spammy "warning C4127: conditional expression is
++// constant" message.
++#ifdef _MSC_VER
++  #define MZ_MACRO_END while (0, 0)
++#else
++  #define MZ_MACRO_END while (0)
++#endif
++
++/* Decompression flags used by tinfl_decompress().
++
++   TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
++   ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
++   input is a raw deflate stream.
++
++   TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
++   beyond the end of the supplied input buffer. If clear, the input buffer
++   contains all remaining input.
++
++   TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
++   enough to hold the entire decompressed stream. If clear, the output buffer
++   is at least the size of the dictionary (typically 32KB).
++
++   TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
++   decompressed bytes.
++*/
++
++enum
++{
++  TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
++  TINFL_FLAG_HAS_MORE_INPUT = 2,
++  TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
++  TINFL_FLAG_COMPUTE_ADLER32 = 8
++};
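++// For input that is entirely in memory, the whole-buffer helpers below
++// (tinfl_decompress_mem_to_heap/_mem) clear TINFL_FLAG_HAS_MORE_INPUT and set
++// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF themselves, while
++// tinfl_decompress_mem_to_callback() uses a wrapping 32KB window;
++// TINFL_FLAG_PARSE_ZLIB_HEADER is supplied by the caller for zlib (as opposed
++// to raw deflate) input.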
++
++// High level decompression functions:
++// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block allocated via Valgrind's allocator (VG_(realloc)).
++// On entry:
++//  pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data to decompress.
++// On return:
++//  Function returns a pointer to the decompressed data, or NULL on failure.
++//  *pOut_len will be set to the decompressed data's size, which could be larger than src_buf_len on incompressible data.
++//  The caller must VG_(free)() the returned block when it's no longer needed.
++void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, SizeT src_buf_len, SizeT *pOut_len, int flags);
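++// Illustrative sketch only (not part of this patch); 'csec_img' and
++// 'csec_szB' are hypothetical names for a compressed section image and its
++// size in bytes:
++//   SizeT out_szB = 0;
++//   void* out = tinfl_decompress_mem_to_heap(csec_img, csec_szB, &out_szB,
++//                                            TINFL_FLAG_PARSE_ZLIB_HEADER);
++//   if (out) { /* use out[0 .. out_szB-1], then VG_(free)(out) */ }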
++
++// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block in memory.
++// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes written on success.
++#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((SizeT)(-1))
++SizeT tinfl_decompress_mem_to_mem(void *pOut_buf, SizeT out_buf_len, const void *pSrc_buf, SizeT src_buf_len, int flags);
++
++// tinfl_decompress_mem_to_callback() decompresses a block in memory to an internal 32KB buffer, and a user provided callback function will be called to flush the buffer.
++// Returns 1 on success or 0 on failure.
++typedef int (*tinfl_put_buf_func_ptr)(const void* pBuf, int len, void *pUser);
++int tinfl_decompress_mem_to_callback(const void *pIn_buf, SizeT *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
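++// Illustrative sketch only; 'Sink' and 'sink_append' are hypothetical:
++//   static int flush_cb(const void* buf, int len, void* user) {
++//     /* return nonzero to keep decompressing, 0 to abort */
++//     return sink_append((Sink*)user, buf, len);
++//   }
++//   tinfl_decompress_mem_to_callback(src, &src_len, flush_cb, &sink,
++//                                    TINFL_FLAG_PARSE_ZLIB_HEADER);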
++
++struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor;
++
++// Max size of LZ dictionary.
++#define TINFL_LZ_DICT_SIZE 32768
++
++// Return status.
++typedef enum
++{
++  TINFL_STATUS_BAD_PARAM = -3,
++  TINFL_STATUS_ADLER32_MISMATCH = -2,
++  TINFL_STATUS_FAILED = -1,
++  TINFL_STATUS_DONE = 0,
++  TINFL_STATUS_NEEDS_MORE_INPUT = 1,
++  TINFL_STATUS_HAS_MORE_OUTPUT = 2
++} tinfl_status;
++
++// Initializes the decompressor to its initial state.
++#define tinfl_init(r) do { (r)->m_state = 0; } MZ_MACRO_END
++#define tinfl_get_adler32(r) (r)->m_check_adler32
++
++// Main low-level decompressor coroutine function. This is the only function actually needed for decompression. All the other functions are just high-level helpers for improved usability.
++// This is a universal API, i.e. it can be used as a building block to build any desired higher level decompression API. In the limit case, it can be called once per every byte input or output.
++tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, SizeT *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, SizeT *pOut_buf_size, const mz_uint32 decomp_flags);
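++// tinfl_decompress_mem_to_callback() below shows the typical re-entry
++// pattern: tinfl_decompress() is called repeatedly against a 32KB dictionary
++// window, the window is flushed between calls, and the loop stops when
++// TINFL_STATUS_DONE (or an error) is returned.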
++
++// Internal/private bits follow.
++enum
++{
++  TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19,
++  TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
++};
++
++typedef struct
++{
++  mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
++  mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
++} tinfl_huff_table;
++
++#if MINIZ_HAS_64BIT_REGISTERS
++  #define TINFL_USE_64BIT_BITBUF 1
++#endif
++
++#if TINFL_USE_64BIT_BITBUF
++  typedef mz_uint64 tinfl_bit_buf_t;
++  #define TINFL_BITBUF_SIZE (64)
++#else
++  typedef mz_uint32 tinfl_bit_buf_t;
++  #define TINFL_BITBUF_SIZE (32)
++#endif
++
++struct tinfl_decompressor_tag
++{
++  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES];
++  tinfl_bit_buf_t m_bit_buf;
++  SizeT m_dist_from_out_buf_start;
++  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
++  mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
++};
++
++#endif // #ifndef TINFL_HEADER_INCLUDED
++
++// ------------------- End of Header: Implementation follows. (If you only want the header, define TINFL_HEADER_FILE_ONLY.)
++
++#ifndef TINFL_HEADER_FILE_ONLY
++
++#include "pub_core_mallocfree.h"
++#include "pub_core_libcbase.h"
++
++#define MZ_MAX(a,b) (((a)>(b))?(a):(b))
++#define MZ_MIN(a,b) (((a)<(b))?(a):(b))
++#define MZ_CLEAR_OBJ(obj) VG_(memset)(&(obj), 0, sizeof(obj))
++
++#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
++  #define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
++  #define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
++#else
++  #define MZ_READ_LE16(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
++  #define MZ_READ_LE32(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
++#endif
++
++#define TINFL_MEMCPY(d, s, l) VG_(memcpy)(d, s, l)
++#define TINFL_MEMSET(p, c, l) VG_(memset)(p, c, l)
++
++#define TINFL_CR_BEGIN switch(r->m_state) { case 0:
++#define TINFL_CR_RETURN(state_index, result) do { status = result; r->m_state = state_index; goto common_exit; case state_index:; } MZ_MACRO_END
++#define TINFL_CR_RETURN_FOREVER(state_index, result) do { for ( ; ; ) { TINFL_CR_RETURN(state_index, result); } } MZ_MACRO_END
++#define TINFL_CR_FINISH }
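++// These macros implement a switch-based coroutine: TINFL_CR_RETURN() records
++// the state number in r->m_state and jumps to common_exit (where the bit
++// buffer and counters are saved too); on the next call to tinfl_decompress()
++// the switch opened by TINFL_CR_BEGIN resumes at the matching 'case' label,
++// just past the point of suspension.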
++
++// TODO: If the caller has indicated that there's no more input, and we attempt to read beyond the input buf, then something is wrong with the input because the inflator never
++// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
++#define TINFL_GET_BYTE(state_index, c) do { \
++  if (pIn_buf_cur >= pIn_buf_end) { \
++    for ( ; ; ) { \
++      if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
++        TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
++        if (pIn_buf_cur < pIn_buf_end) { \
++          c = *pIn_buf_cur++; \
++          break; \
++        } \
++      } else { \
++        c = 0; \
++        break; \
++      } \
++    } \
++  } else c = *pIn_buf_cur++; } MZ_MACRO_END
++
++#define TINFL_NEED_BITS(state_index, n) do { mz_uint c; TINFL_GET_BYTE(state_index, c); bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); num_bits += 8; } while (num_bits < (mz_uint)(n))
++#define TINFL_SKIP_BITS(state_index, n) do { if (num_bits < (mz_uint)(n)) { TINFL_NEED_BITS(state_index, n); } bit_buf >>= (n); num_bits -= (n); } MZ_MACRO_END
++#define TINFL_GET_BITS(state_index, b, n) do { if (num_bits < (mz_uint)(n)) { TINFL_NEED_BITS(state_index, n); } b = bit_buf & ((1 << (n)) - 1); bit_buf >>= (n); num_bits -= (n); } MZ_MACRO_END
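++// TINFL_NEED_BITS() appends whole input bytes to the little-endian bit buffer
++// until at least n bits are available; TINFL_GET_BITS() then extracts the low
++// n bits and TINFL_SKIP_BITS() discards them.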
++
++// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes remaining in the input buffer falls below 2.
++// It reads just enough bytes from the input stream that are needed to decode the next Huffman code (and absolutely no more). It works by trying to fully decode a
++// Huffman code by using whatever bits are currently present in the bit buffer. If this fails, it reads another byte, and tries again until it succeeds or until the
++// bit buffer contains >=15 bits (deflate's max. Huffman code size).
++#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
++  do { \
++    temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
++    if (temp >= 0) { \
++      code_len = temp >> 9; \
++      if ((code_len) && (num_bits >= code_len)) \
++      break; \
++    } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
++       code_len = TINFL_FAST_LOOKUP_BITS; \
++       do { \
++          temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
++       } while ((temp < 0) && (num_bits >= (code_len + 1))); if (temp >= 0) break; \
++    } TINFL_GET_BYTE(state_index, c); bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); num_bits += 8; \
++  } while (num_bits < 15);
++
++// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex than you would initially expect because the zlib API expects the decompressor to never read
++// beyond the final byte of the deflate stream. (In other words, when this macro wants to read another byte from the input, it REALLY needs another byte in order to fully
++// decode the next Huffman code.) Handling this properly is particularly important on raw deflate (non-zlib) streams, which aren't followed by a byte aligned adler-32.
++// The slow path is only executed at the very end of the input buffer.
++#define TINFL_HUFF_DECODE(state_index, sym, pHuff) do { \
++  int temp; mz_uint code_len, c; \
++  if (num_bits < 15) { \
++    if ((pIn_buf_end - pIn_buf_cur) < 2) { \
++       TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
++    } else { \
++       bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); pIn_buf_cur += 2; num_bits += 16; \
++    } \
++  } \
++  if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) \
++    code_len = temp >> 9, temp &= 511; \
++  else { \
++    code_len = TINFL_FAST_LOOKUP_BITS; do { temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; } while (temp < 0); \
++  } sym = temp; bit_buf >>= code_len; num_bits -= code_len; } MZ_MACRO_END
++
++tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, SizeT *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, SizeT *pOut_buf_size, const mz_uint32 decomp_flags)
++{
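++  // The tables below hold the fixed DEFLATE (RFC 1951) base values and
++  // extra-bit counts for length and distance codes, the permutation order of
++  // the code-length alphabet, and the minimum sizes of the literal/length,
++  // distance and code-length Huffman tables.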
++  static const int s_length_base[31] = { 3,4,5,6,7,8,9,10,11,13, 15,17,19,23,27,31,35,43,51,59, 67,83,99,115,131,163,195,227,258,0,0 };
++  static const int s_length_extra[31]= { 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 };
++  static const int s_dist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, 257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0};
++  static const int s_dist_extra[32] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
++  static const mz_uint8 s_length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 };
++  static const int s_min_table_sizes[3] = { 257, 1, 4 };
++
++  tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf;
++  const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
++  mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
++  SizeT out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (SizeT)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start;
++
++  // Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter).
++  if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; }
++
++  num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start;
++  TINFL_CR_BEGIN
++
++  bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1;
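++  // The checks below reject a 2-byte zlib header (RFC 1950) whose CMF/FLG
++  // pair fails the FCHECK test (not divisible by 31), whose compression
++  // method is not 8 (deflate), which has FDICT set, or, when the output
++  // buffer wraps, whose declared window size does not fit in that buffer.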
++  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
++  {
++    TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1);
++    counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
++    if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (SizeT)(1U << (8U + (r->m_zhdr0 >> 4)))));
++    if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); }
++  }
++
++  do
++  {
++    TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1;
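++    // The 3 header bits just read hold BFINAL (bit 0) and BTYPE (bits 1-2):
++    // 0 = stored (raw) block, 1 = fixed Huffman codes, 2 = dynamic Huffman
++    // codes, 3 = reserved/invalid.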
++    if (r->m_type == 0)
++    {
++      TINFL_SKIP_BITS(5, num_bits & 7);
++      for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); }
++      if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); }
++      while ((counter) && (num_bits))
++      {
++        TINFL_GET_BITS(51, dist, 8);
++        while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); }
++        *pOut_buf_cur++ = (mz_uint8)dist;
++        counter--;
++      }
++      while (counter)
++      {
++        SizeT n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); }
++        while (pIn_buf_cur >= pIn_buf_end)
++        {
++          if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT)
++          {
++            TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
++          }
++          else
++          {
++            TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
++          }
++        }
++        n = MZ_MIN(MZ_MIN((SizeT)(pOut_buf_end - pOut_buf_cur), (SizeT)(pIn_buf_end - pIn_buf_cur)), counter);
++        TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n;
++      }
++    }
++    else if (r->m_type == 3)
++    {
++      TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
++    }
++    else
++    {
++      if (r->m_type == 1)
++      {
++        mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i;
++        r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
++        for ( i = 0; i <= 143; ++i) *p++ = 8; for ( ; i <= 255; ++i) *p++ = 9; for ( ; i <= 279; ++i) *p++ = 7; for ( ; i <= 287; ++i) *p++ = 8;
++      }
++      else
++      {
++        for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; }
++        MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; }
++        r->m_table_sizes[2] = 19;
++      }
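++      // Build a decoding table for each Huffman alphabet in use: count the
++      // symbols per code length, assign canonical codes, fill the fast
++      // lookup table for codes of at most TINFL_FAST_LOOKUP_BITS bits, and
++      // chain longer codes into the binary tree kept in m_tree[].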
++      for ( ; (int)r->m_type >= 0; r->m_type--)
++      {
++        int tree_next, tree_cur; tinfl_huff_table *pTable;
++        mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree);
++        for (i = 0; i < r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++;
++        used_syms = 0, total = 0; next_code[0] = next_code[1] = 0;
++        for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); }
++        if ((65536 != total) && (used_syms > 1))
++        {
++          TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
++        }
++        for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index)
++        {
++          mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue;
++          cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1);
++          if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; }
++          if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; }
++          rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
++          for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--)
++          {
++            tree_cur -= ((rev_code >>= 1) & 1);
++            if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1];
++          }
++          tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
++        }
++        if (r->m_type == 2)
++        {
++          for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]); )
++          {
++            mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; }
++            if ((dist == 16) && (!counter))
++            {
++              TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
++            }
++            num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16];
++            TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? r->m_len_codes[counter - 1] : 0, s); counter += s;
++          }
++          if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter)
++          {
++            TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
++          }
++          TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]);
++        }
++      }
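++      // Main decode loop: literal/length symbols below 256 are emitted as
++      // literals, symbol 256 ends the block, and larger symbols select a
++      // match length (plus extra bits), after which a distance code is
++      // decoded and the match is copied from earlier output.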
++      for ( ; ; )
++      {
++        mz_uint8 *pSrc;
++        for ( ; ; )
++        {
++          if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2))
++          {
++            TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
++            if (counter >= 256)
++              break;
++            while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); }
++            *pOut_buf_cur++ = (mz_uint8)counter;
++          }
++          else
++          {
++            int sym2; mz_uint code_len;
++#if TINFL_USE_64BIT_BITBUF
++            if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; }
++#else
++            if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; }
++#endif
++            if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
++              code_len = sym2 >> 9;
++            else
++            {
++              code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0);
++            }
++            counter = sym2; bit_buf >>= code_len; num_bits -= code_len;
++            if (counter & 256)
++              break;
++
++#if !TINFL_USE_64BIT_BITBUF
++            if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; }
++#endif
++            if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
++              code_len = sym2 >> 9;
++            else
++            {
++              code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0);
++            }
++            bit_buf >>= code_len; num_bits -= code_len;
++
++            pOut_buf_cur[0] = (mz_uint8)counter;
++            if (sym2 & 256)
++            {
++              pOut_buf_cur++;
++              counter = sym2;
++              break;
++            }
++            pOut_buf_cur[1] = (mz_uint8)sym2;
++            pOut_buf_cur += 2;
++          }
++        }
++        if ((counter &= 511) == 256) break;
++
++        num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257];
++        if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; }
++
++        TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
++        num_extra = s_dist_extra[dist]; dist = s_dist_base[dist];
++        if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; }
++
++        dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
++        if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
++        {
++          TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
++        }
++
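++        // Copy the match from 'dist' bytes back in the output; the mask lets
++        // the source offset wrap when a ring-buffer sized output window is in
++        // use.  The unaligned 8-byte path below is a fast case; the other
++        // paths handle the end of the buffer and wrap-around.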
++        pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask);
++
++        if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end)
++        {
++          while (counter--)
++          {
++            while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); }
++            *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask];
++          }
++          continue;
++        }
++#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
++        else if ((counter >= 9) && (counter <= dist))
++        {
++          const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
++          do
++          {
++            ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
++            ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
++            pOut_buf_cur += 8;
++          } while ((pSrc += 8) < pSrc_end);
++          if ((counter &= 7) < 3)
++          {
++            if (counter)
++            {
++              pOut_buf_cur[0] = pSrc[0];
++              if (counter > 1)
++                pOut_buf_cur[1] = pSrc[1];
++              pOut_buf_cur += counter;
++            }
++            continue;
++          }
++        }
++#endif
++        do
++        {
++          pOut_buf_cur[0] = pSrc[0];
++          pOut_buf_cur[1] = pSrc[1];
++          pOut_buf_cur[2] = pSrc[2];
++          pOut_buf_cur += 3; pSrc += 3;
++        } while ((int)(counter -= 3) > 2);
++        if ((int)counter > 0)
++        {
++          pOut_buf_cur[0] = pSrc[0];
++          if ((int)counter > 1)
++            pOut_buf_cur[1] = pSrc[1];
++          pOut_buf_cur += counter;
++        }
++      }
++    }
++  } while (!(r->m_final & 1));
++  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
++  {
++    TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; }
++  }
++  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
++  TINFL_CR_FINISH
++
++common_exit:
++  r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start;
++  *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
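++  // Incremental adler-32 (RFC 1950) over the bytes just produced: s1 is the
++  // running byte sum and s2 the running sum of s1, both reduced mod 65521;
++  // at most 5552 bytes are summed between reductions so the 32-bit
++  // accumulators cannot overflow.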
++  if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0))
++  {
++    const mz_uint8 *ptr = pOut_buf_next; SizeT buf_len = *pOut_buf_size;
++    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; SizeT block_len = buf_len % 5552;
++    while (buf_len)
++    {
++      for (i = 0; i + 7 < block_len; i += 8, ptr += 8)
++      {
++        s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1;
++        s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1;
++      }
++      for ( ; i < block_len; ++i) s1 += *ptr++, s2 += s1;
++      s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552;
++    }
++    r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH;
++  }
++  return status;
++}
++
++// Higher level helper functions.
++void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, SizeT src_buf_len, SizeT *pOut_len, int flags)
++{
++  tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; SizeT src_buf_ofs = 0, out_buf_capacity = 0;
++  *pOut_len = 0;
++  tinfl_init(&decomp);
++  for ( ; ; )
++  {
++    SizeT src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
++    tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8*)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8*)pBuf, pBuf ? (mz_uint8*)pBuf + *pOut_len : NULL, &dst_buf_size,
++      (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
++    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT))
++    {
++      VG_(free)(pBuf); *pOut_len = 0; return NULL;
++    }
++    src_buf_ofs += src_buf_size;
++    *pOut_len += dst_buf_size;
++    if (status == TINFL_STATUS_DONE) break;
++    new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
++    pNew_buf = VG_(realloc)("tinfl.tinfl_decompress_mem_to_heap.1", pBuf, new_out_buf_capacity);
++    if (!pNew_buf)
++    {
++      VG_(free)(pBuf); *pOut_len = 0; return NULL;
++    }
++    pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity;
++  }
++  return pBuf;
++}
++
++SizeT tinfl_decompress_mem_to_mem(void *pOut_buf, SizeT out_buf_len, const void *pSrc_buf, SizeT src_buf_len, int flags)
++{
++  tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp);
++  status = tinfl_decompress(&decomp, (const mz_uint8*)pSrc_buf, &src_buf_len, (mz_uint8*)pOut_buf, (mz_uint8*)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
++  return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len;
++}
++
++int tinfl_decompress_mem_to_callback(const void *pIn_buf, SizeT *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
++{
++  int result = 0;
++  tinfl_decompressor decomp;
++  mz_uint8 *pDict = (mz_uint8*)VG_(malloc)("tinfl.tinfl_decompress_mem_to_callback.1", TINFL_LZ_DICT_SIZE); SizeT in_buf_ofs = 0, dict_ofs = 0;
++  if (!pDict)
++    return TINFL_STATUS_FAILED;
++  tinfl_init(&decomp);
++  for ( ; ; )
++  {
++    SizeT in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
++    tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8*)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
++      (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
++    in_buf_ofs += in_buf_size;
++    if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
++      break;
++    if (status != TINFL_STATUS_HAS_MORE_OUTPUT)
++    {
++      result = (status == TINFL_STATUS_DONE);
++      break;
++    }
++    dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
++  }
++  VG_(free)(pDict);
++  *pIn_buf_size = in_buf_ofs;
++  return result;
++}
++
++#endif // #ifndef TINFL_HEADER_FILE_ONLY
diff -Nru valgrind-3.11.0/debian/patches/series valgrind-3.11.0/debian/patches/series
--- valgrind-3.11.0/debian/patches/series	2015-09-25 11:41:20.000000000 +0000
+++ valgrind-3.11.0/debian/patches/series	2016-06-18 22:48:12.000000000 +0000
@@ -7,3 +7,4 @@
 10_mips-page-size.patch
 11_mips-link-tool.patch
 14_mips-fpxx.patch
+15_compressed.patch
