This is an automated email from the ASF dual-hosted git repository.

ronny pushed a commit to branch update-snappy
in repository https://gitbox.apache.org/repos/asf/couchdb-snappy.git

commit b29c86973197cb48c5d5753147dd7bdc560366ed
Author: Louis-Philippe Gauthier <[email protected]>
AuthorDate: Wed Jul 6 19:04:09 2016 -0400

    Update Snappy to 1.1.3 (#14)
---
 c_src/snappy/NEWS                    |  23 ++++
 c_src/snappy/snappy-internal.h       |  10 +-
 c_src/snappy/snappy-sinksource.cc    |  33 +++++
 c_src/snappy/snappy-sinksource.h     |  57 +++++++-
 c_src/snappy/snappy-stubs-internal.h |   6 +-
 c_src/snappy/snappy-stubs-public.h   |   8 +-
 c_src/snappy/snappy.cc               | 259 ++++++++++++++++++++++++++++++++++-
 c_src/snappy/snappy.h                |  27 +++-
 8 files changed, 395 insertions(+), 28 deletions(-)

diff --git a/c_src/snappy/NEWS b/c_src/snappy/NEWS
index f21e9d0..4eb7a1d 100644
--- a/c_src/snappy/NEWS
+++ b/c_src/snappy/NEWS
@@ -1,3 +1,26 @@
+Snappy v1.1.3, July 6th 2015:
+
+This is the first release to be done from GitHub, which means that
+some minor things like the ChangeLog format have changed (git log
+format instead of svn log).
+
+  * Add support for Uncompress() from a Source to a Sink.
+
+  * Various minor changes to improve MSVC support; in particular,
+    the unit tests now compile and run under MSVC.
+
+
+Snappy v1.1.2, February 28th 2014:
+
+This is a maintenance release with no changes to the actual library
+source code.
+
+  * Stop distributing benchmark data files that have unclear
+    or unsuitable licensing.
+
+  * Add support for padding chunks in the framing format.
+
+
 Snappy v1.1.1, October 15th 2013:
 
   * Add support for uncompressing to iovecs (scatter I/O).
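
A minimal usage sketch of the Source-to-Sink Uncompress() added in this
release (not part of the commit). It assumes the caller has already sized
the output buffer, e.g. via snappy::GetUncompressedLength(); note that
UncheckedByteArraySink performs no bounds checking:

    #include "snappy.h"
    #include "snappy-sinksource.h"

    // Decompress 'input' into 'output', which must be large enough to hold
    // the uncompressed data.
    bool DecompressToBuffer(const char* input, size_t input_len, char* output) {
      snappy::ByteArraySource source(input, input_len);
      snappy::UncheckedByteArraySink sink(output);
      return snappy::Uncompress(&source, &sink);
    }
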
diff --git a/c_src/snappy/snappy-internal.h b/c_src/snappy/snappy-internal.h
index c99d331..0653dc6 100644
--- a/c_src/snappy/snappy-internal.h
+++ b/c_src/snappy/snappy-internal.h
@@ -28,8 +28,8 @@
 //
 // Internals shared between the Snappy implementation and its unittest.
 
-#ifndef UTIL_SNAPPY_SNAPPY_INTERNAL_H_
-#define UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+#ifndef THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
+#define THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
 
 #include "snappy-stubs-internal.h"
 
@@ -93,7 +93,7 @@ static inline int FindMatchLength(const char* s1,
   // the first non-matching bit and use that to calculate the total
   // length of the match.
   while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
-    if (PREDICT_FALSE(UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
+    if (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
       s2 += 8;
       matched += 8;
     } else {
@@ -108,7 +108,7 @@ static inline int FindMatchLength(const char* s1,
     }
   }
   while (PREDICT_TRUE(s2 < s2_limit)) {
-    if (PREDICT_TRUE(s1[matched] == *s2)) {
+    if (s1[matched] == *s2) {
       ++s2;
       ++matched;
     } else {
@@ -147,4 +147,4 @@ static inline int FindMatchLength(const char* s1,
 }  // end namespace internal
 }  // end namespace snappy
 
-#endif  // UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+#endif  // THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
diff --git a/c_src/snappy/snappy-sinksource.cc b/c_src/snappy/snappy-sinksource.cc
index 5844552..369a132 100644
--- a/c_src/snappy/snappy-sinksource.cc
+++ b/c_src/snappy/snappy-sinksource.cc
@@ -40,6 +40,21 @@ char* Sink::GetAppendBuffer(size_t length, char* scratch) {
   return scratch;
 }
 
+char* Sink::GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size) {
+  *allocated_size = scratch_size;
+  return scratch;
+}
+
+void Sink::AppendAndTakeOwnership(
+    char* bytes, size_t n,
+    void (*deleter)(void*, const char*, size_t),
+    void *deleter_arg) {
+  Append(bytes, n);
+  (*deleter)(deleter_arg, bytes, n);
+}
+
 ByteArraySource::~ByteArraySource() { }
 
 size_t ByteArraySource::Available() const { return left_; }
@@ -68,4 +83,22 @@ char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
   return dest_;
 }
 
+void UncheckedByteArraySink::AppendAndTakeOwnership(
+    char* data, size_t n,
+    void (*deleter)(void*, const char*, size_t),
+    void *deleter_arg) {
+  if (data != dest_) {
+    memcpy(dest_, data, n);
+    (*deleter)(deleter_arg, data, n);
+  }
+  dest_ += n;
+}
+
+char* UncheckedByteArraySink::GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size) {
+  *allocated_size = desired_size_hint;
+  return dest_;
 }
+
+}  // namespace snappy
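
The default AppendAndTakeOwnership() above still copies (via Append) and
frees the buffer immediately. A custom Sink can instead adopt the buffer and
skip the copy entirely. A sketch under that assumption; the class name
BlockListSink is hypothetical and not part of this commit:

    #include <string.h>
    #include <vector>
    #include "snappy-sinksource.h"

    // Hypothetical sink that takes ownership of appended buffers.
    class BlockListSink : public snappy::Sink {
     public:
      virtual ~BlockListSink() {
        // Free every adopted buffer with the deleter it arrived with.
        for (size_t i = 0; i < blocks_.size(); ++i)
          (*blocks_[i].deleter)(blocks_[i].deleter_arg,
                                blocks_[i].data, blocks_[i].size);
      }
      virtual void Append(const char* bytes, size_t n) {
        // Plain Append does not own 'bytes', so it must copy.
        char* copy = new char[n];
        memcpy(copy, bytes, n);
        blocks_.push_back(Block(copy, n, &DeleteArray, NULL));
      }
      virtual void AppendAndTakeOwnership(
          char* bytes, size_t n,
          void (*deleter)(void*, const char*, size_t), void* deleter_arg) {
        // No copy: remember the buffer and free it later via 'deleter'.
        blocks_.push_back(Block(bytes, n, deleter, deleter_arg));
      }
     private:
      struct Block {
        const char* data;
        size_t size;
        void (*deleter)(void*, const char*, size_t);
        void* deleter_arg;
        Block(const char* d, size_t s,
              void (*del)(void*, const char*, size_t), void* a)
            : data(d), size(s), deleter(del), deleter_arg(a) {}
      };
      static void DeleteArray(void* arg, const char* bytes, size_t size) {
        delete[] bytes;
      }
      std::vector<Block> blocks_;
    };
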
diff --git a/c_src/snappy/snappy-sinksource.h b/c_src/snappy/snappy-sinksource.h
index faabfa1..8afcdaa 100644
--- a/c_src/snappy/snappy-sinksource.h
+++ b/c_src/snappy/snappy-sinksource.h
@@ -26,12 +26,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
-#define UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+#ifndef THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
+#define THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
 
 #include <stddef.h>
 
-
 namespace snappy {
 
 // A Sink is an interface that consumes a sequence of bytes.
@@ -60,6 +59,47 @@ class Sink {
   // The default implementation always returns the scratch buffer.
   virtual char* GetAppendBuffer(size_t length, char* scratch);
 
+  // For higher performance, Sink implementations can provide custom
+  // AppendAndTakeOwnership() and GetAppendBufferVariable() methods.
+  // These methods can reduce the number of copies done during
+  // compression/decompression.
+
+  // Append "bytes[0,n-1] to the sink. Takes ownership of "bytes"
+  // and calls the deleter function as (*deleter)(deleter_arg, bytes, n)
+  // to free the buffer. deleter function must be non NULL.
+  //
+  // The default implementation just calls Append and frees "bytes".
+  // Other implementations may avoid a copy while appending the buffer.
+  virtual void AppendAndTakeOwnership(
+      char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
+      void *deleter_arg);
+
+  // Returns a writable buffer for appending and writes the buffer's capacity to
+  // *allocated_size. Guarantees *allocated_size >= min_size.
+  // May return a pointer to the caller-owned scratch buffer which must have
+  // scratch_size >= min_size.
+  //
+  // The returned buffer is only valid until the next operation
+  // on this ByteSink.
+  //
+  // After writing at most *allocated_size bytes, call Append() with the
+  // pointer returned from this function and the number of bytes written.
+  // Many Append() implementations will avoid copying bytes if this function
+  // returned an internal buffer.
+  //
+  // If the sink implementation allocates or reallocates an internal buffer,
+  // it should use the desired_size_hint if appropriate. If a caller cannot
+  // provide a reasonable guess at the desired capacity, it should set
+  // desired_size_hint = 0.
+  //
+  // If a non-scratch buffer is returned, the caller may only pass
+  // a prefix of it to Append(). That is, it is not correct to pass an
+  // interior pointer to Append().
+  //
+  // The default implementation always returns the scratch buffer.
+  virtual char* GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size);
 
  private:
   // No copying
@@ -122,6 +162,12 @@ class UncheckedByteArraySink : public Sink {
   virtual ~UncheckedByteArraySink();
   virtual void Append(const char* data, size_t n);
   virtual char* GetAppendBuffer(size_t len, char* scratch);
+  virtual char* GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size);
+  virtual void AppendAndTakeOwnership(
+      char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
+      void *deleter_arg);
 
   // Return the current output pointer so that a caller can see how
   // many bytes were produced.
@@ -131,7 +177,6 @@ class UncheckedByteArraySink : public Sink {
   char* dest_;
 };
 
+}  // namespace snappy
 
-}
-
-#endif  // UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+#endif  // THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
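
Caller-side pattern for the GetAppendBufferVariable() contract documented
above (a sketch; 'sink', 'expected_len' and ProduceBytes() are hypothetical
stand-ins for the caller's own state and producer):

    char scratch[64];
    size_t allocated_size;
    char* buf = sink->GetAppendBufferVariable(
        16 /* min_size */, expected_len /* desired_size_hint */,
        scratch, sizeof(scratch), &allocated_size);
    size_t written = ProduceBytes(buf, allocated_size);  // <= allocated_size
    sink->Append(buf, written);  // no copy if buf is the sink's own memory
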
diff --git a/c_src/snappy/snappy-stubs-internal.h b/c_src/snappy/snappy-stubs-internal.h
index 12393b6..ddca1a8 100644
--- a/c_src/snappy/snappy-stubs-internal.h
+++ b/c_src/snappy/snappy-stubs-internal.h
@@ -28,8 +28,8 @@
 //
 // Various stubs for the open-source version of Snappy.
 
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
 
 #ifdef HAVE_CONFIG_H
 #include "config.h"
@@ -488,4 +488,4 @@ inline char* string_as_array(string* str) {
 
 }  // namespace snappy
 
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
diff --git a/c_src/snappy/snappy-stubs-public.h b/c_src/snappy/snappy-stubs-public.h
index ecda439..c156ba4 100644
--- a/c_src/snappy/snappy-stubs-public.h
+++ b/c_src/snappy/snappy-stubs-public.h
@@ -33,8 +33,8 @@
 // which is a public header. Instead, snappy-stubs-public.h is generated
 // from snappy-stubs-public.h.in at configure time.
 
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
 
 #if 1
 #include <stdint.h>
@@ -50,7 +50,7 @@
 
 #define SNAPPY_MAJOR 1
 #define SNAPPY_MINOR 1
-#define SNAPPY_PATCHLEVEL 1
+#define SNAPPY_PATCHLEVEL 3
 #define SNAPPY_VERSION \
     ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
 
@@ -95,4 +95,4 @@ struct iovec {
 
 }  // namespace snappy
 
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
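
With this bump SNAPPY_VERSION evaluates to (1 << 16) | (1 << 8) | 3 =
0x010103 (65795), so downstream code can gate on the new 1.1.3 APIs; a
sketch:

    #if SNAPPY_VERSION >= 0x010103  // 1.1.3 or later
    // Uncompress(Source*, Sink*), UncompressAsMuchAsPossible() and
    // IsValidCompressed(Source*) are available.
    #endif
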
diff --git a/c_src/snappy/snappy.cc b/c_src/snappy/snappy.cc
index f8d0d23..b6ca7ec 100644
--- a/c_src/snappy/snappy.cc
+++ b/c_src/snappy/snappy.cc
@@ -138,7 +138,7 @@ namespace {
 const int kMaxIncrementCopyOverflow = 10;
 
 inline void IncrementalCopyFastPath(const char* src, char* op, ssize_t len) {
-  while (op - src < 8) {
+  while (PREDICT_FALSE(op - src < 8)) {
     UnalignedCopy64(src, op);
     len -= op - src;
     op += op - src;
@@ -215,7 +215,7 @@ static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
 
 static inline char* EmitCopy(char* op, size_t offset, int len) {
   // Emit 64 byte copies but make sure to keep at least four bytes reserved
-  while (len >= 68) {
+  while (PREDICT_FALSE(len >= 68)) {
     op = EmitCopyLessThan64(op, offset, 64);
     len -= 64;
   }
@@ -863,6 +863,7 @@ static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
 
   // Process the entire input
   decompressor->DecompressAllTags(writer);
+  writer->Flush();
   return (decompressor->eof() && writer->CheckLength());
 }
 
@@ -1115,6 +1116,7 @@ class SnappyIOVecWriter {
     return true;
   }
 
+  inline void Flush() {}
 };
 
 bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
@@ -1145,7 +1147,8 @@ class SnappyArrayWriter {
  public:
   inline explicit SnappyArrayWriter(char* dst)
       : base_(dst),
-        op_(dst) {
+        op_(dst),
+        op_limit_(dst) {
   }
 
   inline void SetExpectedLength(size_t len) {
@@ -1215,6 +1218,10 @@ class SnappyArrayWriter {
     op_ = op + len;
     return true;
   }
+  inline size_t Produced() const {
+    return op_ - base_;
+  }
+  inline void Flush() {}
 };
 
 bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
@@ -1241,7 +1248,6 @@ bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
   return RawUncompress(compressed, n, string_as_array(uncompressed));
 }
 
-
 // A Writer that drops everything on the floor and just does validation
 class SnappyDecompressionValidator {
  private:
@@ -1249,7 +1255,7 @@ class SnappyDecompressionValidator {
   size_t produced_;
 
  public:
-  inline SnappyDecompressionValidator() : produced_(0) { }
+  inline SnappyDecompressionValidator() : expected_(0), produced_(0) { }
   inline void SetExpectedLength(size_t len) {
     expected_ = len;
   }
@@ -1270,6 +1276,7 @@ class SnappyDecompressionValidator {
     produced_ += len;
     return produced_ <= expected_;
   }
+  inline void Flush() {}
 };
 
 bool IsValidCompressedBuffer(const char* compressed, size_t n) {
@@ -1278,6 +1285,11 @@ bool IsValidCompressedBuffer(const char* compressed, size_t n) {
   return InternalUncompress(&reader, &writer);
 }
 
+bool IsValidCompressed(Source* compressed) {
+  SnappyDecompressionValidator writer;
+  return InternalUncompress(compressed, &writer);
+}
+
 void RawCompress(const char* input,
                  size_t input_length,
                  char* compressed,
@@ -1301,6 +1313,241 @@ size_t Compress(const char* input, size_t input_length, string* compressed) {
   return compressed_length;
 }
 
+// -----------------------------------------------------------------------
+// Sink interface
+// -----------------------------------------------------------------------
 
-} // end namespace snappy
+// A type that decompresses into a Sink. The template parameter
+// Allocator must export one method "char* Allocate(int size);", which
+// allocates a buffer of "size" and appends that to the destination.
+template <typename Allocator>
+class SnappyScatteredWriter {
+  Allocator allocator_;
+
+  // We need random access into the data generated so far.  Therefore
+  // we keep track of all of the generated data as an array of blocks.
+  // All of the blocks except the last have length kBlockSize.
+  vector<char*> blocks_;
+  size_t expected_;
+
+  // Total size of all fully generated blocks so far
+  size_t full_size_;
+
+  // Pointer into current output block
+  char* op_base_;       // Base of output block
+  char* op_ptr_;        // Pointer to next unfilled byte in block
+  char* op_limit_;      // Pointer just past block
+
+  inline size_t Size() const {
+    return full_size_ + (op_ptr_ - op_base_);
+  }
+
+  bool SlowAppend(const char* ip, size_t len);
+  bool SlowAppendFromSelf(size_t offset, size_t len);
+
+ public:
+  inline explicit SnappyScatteredWriter(const Allocator& allocator)
+      : allocator_(allocator),
+        full_size_(0),
+        op_base_(NULL),
+        op_ptr_(NULL),
+        op_limit_(NULL) {
+  }
+
+  inline void SetExpectedLength(size_t len) {
+    assert(blocks_.empty());
+    expected_ = len;
+  }
+
+  inline bool CheckLength() const {
+    return Size() == expected_;
+  }
+
+  // Return the number of bytes actually uncompressed so far
+  inline size_t Produced() const {
+    return Size();
+  }
+
+  inline bool Append(const char* ip, size_t len) {
+    size_t avail = op_limit_ - op_ptr_;
+    if (len <= avail) {
+      // Fast path
+      memcpy(op_ptr_, ip, len);
+      op_ptr_ += len;
+      return true;
+    } else {
+      return SlowAppend(ip, len);
+    }
+  }
+
+  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
+    char* op = op_ptr_;
+    const int space_left = op_limit_ - op;
+    if (length <= 16 && available >= 16 + kMaximumTagLength &&
+        space_left >= 16) {
+      // Fast path, used for the majority (about 95%) of invocations.
+      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
+      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
+      op_ptr_ = op + length;
+      return true;
+    } else {
+      return false;
+    }
+  }
 
+  inline bool AppendFromSelf(size_t offset, size_t len) {
+    // See SnappyArrayWriter::AppendFromSelf for an explanation of
+    // the "offset - 1u" trick.
+    if (offset - 1u < op_ptr_ - op_base_) {
+      const size_t space_left = op_limit_ - op_ptr_;
+      if (space_left >= len + kMaxIncrementCopyOverflow) {
+        // Fast path: src and dst in current block.
+        IncrementalCopyFastPath(op_ptr_ - offset, op_ptr_, len);
+        op_ptr_ += len;
+        return true;
+      }
+    }
+    return SlowAppendFromSelf(offset, len);
+  }
+
+  // Called at the end of the decompress. We ask the allocator to
+  // write all blocks to the sink.
+  inline void Flush() { allocator_.Flush(Produced()); }
+};
+
+template<typename Allocator>
+bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
+  size_t avail = op_limit_ - op_ptr_;
+  while (len > avail) {
+    // Completely fill this block
+    memcpy(op_ptr_, ip, avail);
+    op_ptr_ += avail;
+    assert(op_limit_ - op_ptr_ == 0);
+    full_size_ += (op_ptr_ - op_base_);
+    len -= avail;
+    ip += avail;
+
+    // Bounds check
+    if (full_size_ + len > expected_) {
+      return false;
+    }
+
+    // Make new block
+    size_t bsize = min<size_t>(kBlockSize, expected_ - full_size_);
+    op_base_ = allocator_.Allocate(bsize);
+    op_ptr_ = op_base_;
+    op_limit_ = op_base_ + bsize;
+    blocks_.push_back(op_base_);
+    avail = bsize;
+  }
+
+  memcpy(op_ptr_, ip, len);
+  op_ptr_ += len;
+  return true;
+}
+
+template<typename Allocator>
+bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
+                                                         size_t len) {
+  // Overflow check
+  // See SnappyArrayWriter::AppendFromSelf for an explanation of
+  // the "offset - 1u" trick.
+  const size_t cur = Size();
+  if (offset - 1u >= cur) return false;
+  if (expected_ - cur < len) return false;
+
+  // Currently we shouldn't ever hit this path because Compress() chops the
+  // input into blocks and does not create cross-block copies. However, it is
+  // nice if we do not rely on that, since we can get better compression if we
+  // allow cross-block copies and thus might want to change the compressor in
+  // the future.
+  size_t src = cur - offset;
+  while (len-- > 0) {
+    char c = blocks_[src >> kBlockLog][src & (kBlockSize-1)];
+    Append(&c, 1);
+    src++;
+  }
+  return true;
+}
+
+class SnappySinkAllocator {
+ public:
+  explicit SnappySinkAllocator(Sink* dest): dest_(dest) {}
+  ~SnappySinkAllocator() {}
+
+  char* Allocate(int size) {
+    Datablock block(new char[size], size);
+    blocks_.push_back(block);
+    return block.data;
+  }
+
+  // We flush only at the end, because the writer wants
+  // random access to the blocks and once we hand the
+  // block over to the sink, we can't access it anymore.
+  // Also we don't write more than has been actually written
+  // to the blocks.
+  void Flush(size_t size) {
+    size_t size_written = 0;
+    size_t block_size;
+    for (int i = 0; i < blocks_.size(); ++i) {
+      block_size = min<size_t>(blocks_[i].size, size - size_written);
+      dest_->AppendAndTakeOwnership(blocks_[i].data, block_size,
+                                    &SnappySinkAllocator::Deleter, NULL);
+      size_written += block_size;
+    }
+    blocks_.clear();
+  }
+
+ private:
+  struct Datablock {
+    char* data;
+    size_t size;
+    Datablock(char* p, size_t s) : data(p), size(s) {}
+  };
+
+  static void Deleter(void* arg, const char* bytes, size_t size) {
+    delete[] bytes;
+  }
+
+  Sink* dest_;
+  vector<Datablock> blocks_;
+
+  // Note: copying this object is allowed
+};
+
+size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) {
+  SnappySinkAllocator allocator(uncompressed);
+  SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
+  InternalUncompress(compressed, &writer);
+  return writer.Produced();
+}
+
+bool Uncompress(Source* compressed, Sink* uncompressed) {
+  // Read the uncompressed length from the front of the compressed input
+  SnappyDecompressor decompressor(compressed);
+  uint32 uncompressed_len = 0;
+  if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
+    return false;
+  }
+
+  char c;
+  size_t allocated_size;
+  char* buf = uncompressed->GetAppendBufferVariable(
+      1, uncompressed_len, &c, 1, &allocated_size);
+
+  // If we can get a flat buffer, then use it, otherwise do block by block
+  // uncompression
+  if (allocated_size >= uncompressed_len) {
+    SnappyArrayWriter writer(buf);
+    bool result = InternalUncompressAllTags(
+        &decompressor, &writer, uncompressed_len);
+    uncompressed->Append(buf, writer.Produced());
+    return result;
+  } else {
+    SnappySinkAllocator allocator(uncompressed);
+    SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
+    return InternalUncompressAllTags(&decompressor, &writer, uncompressed_len);
+  }
+}
+
+} // end namespace snappy
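
A best-effort usage sketch for UncompressAsMuchAsPossible() as defined
above. StringSink is hypothetical here (any Sink whose Append() grows a
backing store would do); 'data' and 'n' are assumed inputs:

    snappy::ByteArraySource source(data, n);
    StringSink sink(&output);  // hypothetical Sink appending to a std::string
    size_t valid_bytes = snappy::UncompressAsMuchAsPossible(&source, &sink);
    // On corrupt input, valid_bytes may fall short of the length stored in
    // the stream header; the bytes that were emitted are still valid.
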
diff --git a/c_src/snappy/snappy.h b/c_src/snappy/snappy.h
index e879e79..4568db8 100644
--- a/c_src/snappy/snappy.h
+++ b/c_src/snappy/snappy.h
@@ -36,8 +36,8 @@
 // using BMDiff and then compressing the output of BMDiff with
 // Snappy.
 
-#ifndef UTIL_SNAPPY_SNAPPY_H__
-#define UTIL_SNAPPY_SNAPPY_H__
+#ifndef THIRD_PARTY_SNAPPY_SNAPPY_H__
+#define THIRD_PARTY_SNAPPY_SNAPPY_H__
 
 #include <stddef.h>
 #include <string>
@@ -84,6 +84,18 @@ namespace snappy {
   bool Uncompress(const char* compressed, size_t compressed_length,
                   string* uncompressed);
 
+  // Decompresses "compressed" to "*uncompressed".
+  //
+  // returns false if the message is corrupted and could not be decompressed
+  bool Uncompress(Source* compressed, Sink* uncompressed);
+
+  // This routine uncompresses as much of the "compressed" as possible
+  // into sink.  It returns the number of valid bytes added to sink
+  // (extra invalid bytes may have been added due to errors; the caller
+  // should ignore those). The emitted data typically has length
+  // GetUncompressedLength(), but may be shorter if an error is
+  // encountered.
+  size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed);
 
   // ------------------------------------------------------------------------
   // Lower-level character array based routines.  May be useful for
@@ -164,6 +176,14 @@ namespace snappy {
   bool IsValidCompressedBuffer(const char* compressed,
                                size_t compressed_length);
 
+  // Returns true iff the contents of "compressed" can be uncompressed
+  // successfully.  Does not return the uncompressed data.  Takes
+  // time proportional to the length of *compressed, but is usually at least
+  // a factor of four faster than actual decompression.
+  // On success, consumes all of *compressed.  On failure, consumes an
+  // unspecified prefix of *compressed.
+  bool IsValidCompressed(Source* compressed);
+
   // The size of a compression block. Note that many parts of the compression
   // code assumes that kBlockSize <= 65536; in particular, the hash table
   // can only store 16-bit offsets, and EmitCopy() also assumes the offset
@@ -180,5 +200,4 @@ namespace snappy {
   static const size_t kMaxHashTableSize = 1 << kMaxHashTableBits;
 }  // end namespace snappy
 
-
-#endif  // UTIL_SNAPPY_SNAPPY_H__
+#endif  // THIRD_PARTY_SNAPPY_SNAPPY_H__
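
A validation sketch using the new Source overload declared above; 'data'
and 'n' are assumed inputs:

    snappy::ByteArraySource source(data, n);
    if (snappy::IsValidCompressed(&source)) {
      // Safe to decompress: the whole stream parsed without producing output.
    }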
