couchdb-commits mailing list archives

From d..@apache.org
Subject git commit: updated refs/heads/1904-update-snappy-1-1-0 to cc20daa
Date Sun, 06 Oct 2013 18:41:50 GMT
Updated Branches:
  refs/heads/1904-update-snappy-1-1-0 [created] cc20daafa


snappy: import 1.1.0

The previous release was 1.0.5; the minor version has been bumped because
iostream is no longer required as a dependency.
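For context, a minimal standalone sketch (not part of this commit; the
function name is illustrative) of the pattern the diff below applies
throughout: in snappy 1.0.5 the DCHECK_* macros expanded to a LogMessage
class that streamed to std::cerr, which is why <iostream> was a dependency;
snappy 1.1.0 replaces those checks with plain assert() from <assert.h>, so
the include can be dropped.

#include <assert.h>

// Before (1.0.5): DCHECK_GE(s2_limit, s2) pulled in the iostream-based
// logging machinery. After (1.1.0): a plain assert is sufficient.
static inline int FindMatchLengthSketch(const char* s1,
                                        const char* s2,
                                        const char* s2_limit) {
  assert(s2_limit >= s2);
  int matched = 0;
  // Count how many leading bytes of s1 and s2 agree, without reading
  // past s2_limit.
  while (s2 < s2_limit && *s1 == *s2) {
    ++s1;
    ++s2;
    ++matched;
  }
  return matched;
}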


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/cc20daaf
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/cc20daaf
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/cc20daaf

Branch: refs/heads/1904-update-snappy-1-1-0
Commit: cc20daafaa9681ab53ab418e0e24d9300e8244fd
Parents: fba51db
Author: Dave Cottlehuber <dch@apache.org>
Authored: Sun Oct 6 17:00:31 2013 +0200
Committer: Dave Cottlehuber <dch@apache.org>
Committed: Sun Oct 6 20:41:12 2013 +0200

----------------------------------------------------------------------
 src/snappy/Makefile.am                          |  4 +-
 src/snappy/google-snappy/config.h.in            |  3 +
 src/snappy/google-snappy/snappy-internal.h      |  4 +-
 .../google-snappy/snappy-stubs-internal.h       | 84 +-------------------
 src/snappy/google-snappy/snappy.cc              | 78 ++++++++++--------
 src/snappy/google-snappy/snappy.h               | 24 ++++--
 src/snappy/snappy.app.in                        |  2 +-
 src/snappy/snappy_nif.cc                        |  7 +-
 8 files changed, 72 insertions(+), 134 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/cc20daaf/src/snappy/Makefile.am
----------------------------------------------------------------------
diff --git a/src/snappy/Makefile.am b/src/snappy/Makefile.am
index ff75f07..cd0e192 100644
--- a/src/snappy/Makefile.am
+++ b/src/snappy/Makefile.am
@@ -10,8 +10,8 @@
 ## License for the specific language governing permissions and limitations under
 ## the License.
 
-snappyebindir = $(localerlanglibdir)/snappy-1.0.5/ebin
-snappyprivdir = $(localerlanglibdir)/snappy-1.0.5/priv
+snappyebindir = $(localerlanglibdir)/snappy-1.1.0/ebin
+snappyprivdir = $(localerlanglibdir)/snappy-1.1.0/priv
 
 snappy_cxx_srcs = \
 	snappy_nif.cc \

http://git-wip-us.apache.org/repos/asf/couchdb/blob/cc20daaf/src/snappy/google-snappy/config.h.in
----------------------------------------------------------------------
diff --git a/src/snappy/google-snappy/config.h.in b/src/snappy/google-snappy/config.h.in
index 28f57c2..8f912f6 100644
--- a/src/snappy/google-snappy/config.h.in
+++ b/src/snappy/google-snappy/config.h.in
@@ -72,6 +72,9 @@
 /* Define to 1 if you have the <sys/stat.h> header file. */
 #undef HAVE_SYS_STAT_H
 
+/* Define to 1 if you have the <sys/time.h> header file. */
+#undef HAVE_SYS_TIME_H
+
 /* Define to 1 if you have the <sys/types.h> header file. */
 #undef HAVE_SYS_TYPES_H
 

http://git-wip-us.apache.org/repos/asf/couchdb/blob/cc20daaf/src/snappy/google-snappy/snappy-internal.h
----------------------------------------------------------------------
diff --git a/src/snappy/google-snappy/snappy-internal.h b/src/snappy/google-snappy/snappy-internal.h
index a32eda5..c99d331 100644
--- a/src/snappy/google-snappy/snappy-internal.h
+++ b/src/snappy/google-snappy/snappy-internal.h
@@ -85,7 +85,7 @@ char* CompressFragment(const char* input,
 static inline int FindMatchLength(const char* s1,
                                   const char* s2,
                                   const char* s2_limit) {
-  DCHECK_GE(s2_limit, s2);
+  assert(s2_limit >= s2);
   int matched = 0;
 
   // Find out how long the match is. We loop over the data 64 bits at a
@@ -122,7 +122,7 @@ static inline int FindMatchLength(const char* s1,
                                   const char* s2,
                                   const char* s2_limit) {
   // Implementation based on the x86-64 version, above.
-  DCHECK_GE(s2_limit, s2);
+  assert(s2_limit >= s2);
   int matched = 0;
 
   while (s2 <= s2_limit - 4 &&

http://git-wip-us.apache.org/repos/asf/couchdb/blob/cc20daaf/src/snappy/google-snappy/snappy-stubs-internal.h
----------------------------------------------------------------------
diff --git a/src/snappy/google-snappy/snappy-stubs-internal.h b/src/snappy/google-snappy/snappy-stubs-internal.h
index 6033cdf..12393b6 100644
--- a/src/snappy/google-snappy/snappy-stubs-internal.h
+++ b/src/snappy/google-snappy/snappy-stubs-internal.h
@@ -35,7 +35,6 @@
 #include "config.h"
 #endif
 
-#include <iostream>
 #include <string>
 
 #include <assert.h>
@@ -95,87 +94,6 @@ namespace snappy {
 static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
 static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
 
-// Logging.
-
-#define LOG(level) LogMessage()
-#define VLOG(level) true ? (void)0 : \
-    snappy::LogMessageVoidify() & snappy::LogMessage()
-
-class LogMessage {
- public:
-  LogMessage() { }
-  ~LogMessage() {
-    cerr << endl;
-  }
-
-  LogMessage& operator<<(const std::string& msg) {
-    cerr << msg;
-    return *this;
-  }
-  LogMessage& operator<<(int x) {
-    cerr << x;
-    return *this;
-  }
-};
-
-// Asserts, both versions activated in debug mode only,
-// and ones that are always active.
-
-#define CRASH_UNLESS(condition) \
-    PREDICT_TRUE(condition) ? (void)0 : \
-    snappy::LogMessageVoidify() & snappy::LogMessageCrash()
-
-class LogMessageCrash : public LogMessage {
- public:
-  LogMessageCrash() { }
-  ~LogMessageCrash() {
-    cerr << endl;
-    abort();
-  }
-};
-
-// This class is used to explicitly ignore values in the conditional
-// logging macros.  This avoids compiler warnings like "value computed
-// is not used" and "statement has no effect".
-
-class LogMessageVoidify {
- public:
-  LogMessageVoidify() { }
-  // This has to be an operator with a precedence lower than << but
-  // higher than ?:
-  void operator&(const LogMessage&) { }
-};
-
-#define CHECK(cond) CRASH_UNLESS(cond)
-#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
-#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
-#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
-#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
-#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
-#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
-
-#ifdef NDEBUG
-
-#define DCHECK(cond) CRASH_UNLESS(true)
-#define DCHECK_LE(a, b) CRASH_UNLESS(true)
-#define DCHECK_GE(a, b) CRASH_UNLESS(true)
-#define DCHECK_EQ(a, b) CRASH_UNLESS(true)
-#define DCHECK_NE(a, b) CRASH_UNLESS(true)
-#define DCHECK_LT(a, b) CRASH_UNLESS(true)
-#define DCHECK_GT(a, b) CRASH_UNLESS(true)
-
-#else
-
-#define DCHECK(cond) CHECK(cond)
-#define DCHECK_LE(a, b) CHECK_LE(a, b)
-#define DCHECK_GE(a, b) CHECK_GE(a, b)
-#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
-#define DCHECK_NE(a, b) CHECK_NE(a, b)
-#define DCHECK_LT(a, b) CHECK_LT(a, b)
-#define DCHECK_GT(a, b) CHECK_GT(a, b)
-
-#endif
-
 // Potentially unaligned loads and stores.
 
 // x86 and PowerPC can simply do these loads and stores native.
@@ -200,6 +118,8 @@ class LogMessageVoidify {
 // This is a mess, but there's not much we can do about it.
 
 #elif defined(__arm__) && \
+      !defined(__ARM_ARCH_4__) && \
+      !defined(__ARM_ARCH_4T__) && \
       !defined(__ARM_ARCH_5__) && \
       !defined(__ARM_ARCH_5T__) && \
       !defined(__ARM_ARCH_5TE__) && \

http://git-wip-us.apache.org/repos/asf/couchdb/blob/cc20daaf/src/snappy/google-snappy/snappy.cc
----------------------------------------------------------------------
diff --git a/src/snappy/google-snappy/snappy.cc b/src/snappy/google-snappy/snappy.cc
index 4d4eb42..1230321 100644
--- a/src/snappy/google-snappy/snappy.cc
+++ b/src/snappy/google-snappy/snappy.cc
@@ -95,7 +95,7 @@ enum {
 // Note that this does not match the semantics of either memcpy()
 // or memmove().
 static inline void IncrementalCopy(const char* src, char* op, int len) {
-  DCHECK_GT(len, 0);
+  assert(len > 0);
   do {
     *op++ = *src++;
   } while (--len > 0);
@@ -195,17 +195,17 @@ static inline char* EmitLiteral(char* op,
 }
 
 static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
-  DCHECK_LE(len, 64);
-  DCHECK_GE(len, 4);
-  DCHECK_LT(offset, 65536);
+  assert(len <= 64);
+  assert(len >= 4);
+  assert(offset < 65536);
 
   if ((len < 12) && (offset < 2048)) {
     size_t len_minus_4 = len - 4;
     assert(len_minus_4 < 8);            // Must fit in 3 bits
-    *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
+    *op++ = COPY_1_BYTE_OFFSET + ((len_minus_4) << 2) + ((offset >> 8) << 5);
     *op++ = offset & 0xff;
   } else {
-    *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
+    *op++ = COPY_2_BYTE_OFFSET + ((len-1) << 2);
     LittleEndian::Store16(op, offset);
     op += 2;
   }
@@ -253,8 +253,6 @@ uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
   while (htsize < kMaxHashTableSize && htsize < input_size) {
     htsize <<= 1;
   }
-  CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
-  CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";
 
   uint16* table;
   if (htsize <= ARRAYSIZE(small_table_)) {
@@ -294,8 +292,8 @@ static inline EightBytesReference GetEightBytesAt(const char* ptr) {
 }
 
 static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
-  DCHECK_GE(offset, 0);
-  DCHECK_LE(offset, 4);
+  assert(offset >= 0);
+  assert(offset <= 4);
   return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
 }
 
@@ -308,8 +306,8 @@ static inline EightBytesReference GetEightBytesAt(const char* ptr) {
 }
 
 static inline uint32 GetUint32AtOffset(const char* v, int offset) {
-  DCHECK_GE(offset, 0);
-  DCHECK_LE(offset, 4);
+  assert(offset >= 0);
+  assert(offset <= 4);
   return UNALIGNED_LOAD32(v + offset);
 }
 
@@ -334,10 +332,10 @@ char* CompressFragment(const char* input,
                        const int table_size) {
   // "ip" is the input pointer, and "op" is the output pointer.
   const char* ip = input;
-  CHECK_LE(input_size, kBlockSize);
-  CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
+  assert(input_size <= kBlockSize);
+  assert((table_size & (table_size - 1)) == 0); // table must be power of two
   const int shift = 32 - Bits::Log2Floor(table_size);
-  DCHECK_EQ(static_cast<int>(kuint32max >> shift), table_size - 1);
+  assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
   const char* ip_end = input + input_size;
   const char* base_ip = ip;
   // Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
@@ -349,7 +347,7 @@ char* CompressFragment(const char* input,
     const char* ip_limit = input + input_size - kInputMarginBytes;
 
     for (uint32 next_hash = Hash(++ip, shift); ; ) {
-      DCHECK_LT(next_emit, ip);
+      assert(next_emit < ip);
       // The body of this loop calls EmitLiteral once and then EmitCopy one or
       // more times.  (The exception is that when we're close to exhausting
       // the input we goto emit_remainder.)
@@ -382,7 +380,7 @@ char* CompressFragment(const char* input,
       do {
         ip = next_ip;
         uint32 hash = next_hash;
-        DCHECK_EQ(hash, Hash(ip, shift));
+        assert(hash == Hash(ip, shift));
         uint32 bytes_between_hash_lookups = skip++ >> 5;
         next_ip = ip + bytes_between_hash_lookups;
         if (PREDICT_FALSE(next_ip > ip_limit)) {
@@ -390,8 +388,8 @@ char* CompressFragment(const char* input,
         }
         next_hash = Hash(next_ip, shift);
         candidate = base_ip + table[hash];
-        DCHECK_GE(candidate, base_ip);
-        DCHECK_LT(candidate, ip);
+        assert(candidate >= base_ip);
+        assert(candidate < ip);
 
         table[hash] = ip - base_ip;
       } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
@@ -400,7 +398,7 @@ char* CompressFragment(const char* input,
       // Step 2: A 4-byte match has been found.  We'll later see if more
       // than 4 bytes match.  But, prior to the match, input
       // bytes [next_emit, ip) are unmatched.  Emit them as "literal bytes."
-      DCHECK_LE(next_emit + 16, ip_end);
+      assert(next_emit + 16 <= ip_end);
       op = EmitLiteral(op, next_emit, ip - next_emit, true);
 
       // Step 3: Call EmitCopy, and then see if another EmitCopy could
@@ -421,7 +419,7 @@ char* CompressFragment(const char* input,
         int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
         ip += matched;
         size_t offset = base - candidate;
-        DCHECK_EQ(0, memcmp(base, candidate, matched));
+        assert(0 == memcmp(base, candidate, matched));
         op = EmitCopy(op, offset, matched);
         // We could immediately start working at ip now, but to improve
         // compression we first update table[Hash(ip - 1, ...)].
@@ -554,9 +552,9 @@ static uint16 MakeEntry(unsigned int extra,
                         unsigned int len,
                         unsigned int copy_offset) {
   // Check that all of the fields fit within the allocated space
-  DCHECK_EQ(extra,       extra & 0x7);          // At most 3 bits
-  DCHECK_EQ(copy_offset, copy_offset & 0x7);    // At most 3 bits
-  DCHECK_EQ(len,         len & 0x7f);           // At most 7 bits
+  assert(extra       == (extra & 0x7));          // At most 3 bits
+  assert(copy_offset == (copy_offset & 0x7));    // At most 3 bits
+  assert(len         == (len & 0x7f));           // At most 7 bits
   return len | (copy_offset << 8) | (extra << 11);
 }
 
@@ -614,9 +612,15 @@ static void ComputeTable() {
   }
 
   // Check that each entry was initialized exactly once.
-  CHECK_EQ(assigned, 256);
+  if (assigned != 256) {
+    fprintf(stderr, "ComputeTable: assigned only %d of 256\n", assigned);
+    abort();
+  }
   for (int i = 0; i < 256; i++) {
-    CHECK_NE(dst[i], 0xffff);
+    if (dst[i] == 0xffff) {
+      fprintf(stderr, "ComputeTable: did not assign byte %d\n", i);
+      abort();
+    }
   }
 
   if (FLAGS_snappy_dump_decompression_table) {
@@ -631,7 +635,11 @@ static void ComputeTable() {
 
   // Check that computed table matched recorded table
   for (int i = 0; i < 256; i++) {
-    CHECK_EQ(dst[i], char_table[i]);
+    if (dst[i] != char_table[i]) {
+      fprintf(stderr, "ComputeTable: byte %d: computed (%x), expect (%x)\n",
+              i, static_cast<int>(dst[i]), static_cast<int>(char_table[i]));
+      abort();
+    }
   }
 }
 #endif /* !NDEBUG */
@@ -676,7 +684,7 @@ class SnappyDecompressor {
   // On succcess, stores the length in *result and returns true.
   // On failure, returns false.
   bool ReadUncompressedLength(uint32* result) {
-    DCHECK(ip_ == NULL);       // Must not have read anything yet
+    assert(ip_ == NULL);       // Must not have read anything yet
     // Length is encoded in 1..5 bytes
     *result = 0;
     uint32 shift = 0;
@@ -720,7 +728,7 @@ class SnappyDecompressor {
       if ((c & 0x3) == LITERAL) {
         size_t literal_length = (c >> 2) + 1u;
         if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
-          DCHECK_LT(literal_length, 61);
+          assert(literal_length < 61);
           ip += literal_length;
           MAYBE_REFILL();
           continue;
@@ -787,11 +795,11 @@ bool SnappyDecompressor::RefillTag() {
   }
 
   // Read the tag character
-  DCHECK_LT(ip, ip_limit_);
+  assert(ip < ip_limit_);
   const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
   const uint32 entry = char_table[c];
   const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
-  DCHECK_LE(needed, sizeof(scratch_));
+  assert(needed <= sizeof(scratch_));
 
   // Read more bytes from reader if needed
   uint32 nbuf = ip_limit_ - ip;
@@ -812,7 +820,7 @@ bool SnappyDecompressor::RefillTag() {
       nbuf += to_add;
       reader_->Skip(to_add);
     }
-    DCHECK_EQ(nbuf, needed);
+    assert(nbuf == needed);
     ip_ = scratch_;
     ip_limit_ = scratch_ + needed;
   } else if (nbuf < 5) {
@@ -880,7 +888,7 @@ size_t Compress(Source* reader, Sink* writer) {
     // Get next block to compress (without copying if possible)
     size_t fragment_size;
     const char* fragment = reader->Peek(&fragment_size);
-    DCHECK_NE(fragment_size, 0) << ": premature end of input";
+    assert(fragment_size != 0);  // premature end of input
     const size_t num_to_read = min(N, kBlockSize);
     size_t bytes_read = fragment_size;
 
@@ -907,11 +915,11 @@ size_t Compress(Source* reader, Sink* writer) {
         bytes_read += n;
         reader->Skip(n);
       }
-      DCHECK_EQ(bytes_read, num_to_read);
+      assert(bytes_read == num_to_read);
       fragment = scratch;
       fragment_size = num_to_read;
     }
-    DCHECK_EQ(fragment_size, num_to_read);
+    assert(fragment_size == num_to_read);
 
     // Get encoding table for compression
     int table_size;

http://git-wip-us.apache.org/repos/asf/couchdb/blob/cc20daaf/src/snappy/google-snappy/snappy.h
----------------------------------------------------------------------
diff --git a/src/snappy/google-snappy/snappy.h b/src/snappy/google-snappy/snappy.h
index 8c2075f..03ef6ce 100644
--- a/src/snappy/google-snappy/snappy.h
+++ b/src/snappy/google-snappy/snappy.h
@@ -56,6 +56,13 @@ namespace snappy {
   // number of bytes written.
   size_t Compress(Source* source, Sink* sink);
 
+  // Find the uncompressed length of the given stream, as given by the header.
+  // Note that the true length could deviate from this; the stream could e.g.
+  // be truncated.
+  //
+  // Also note that this leaves "*source" in a state that is unsuitable for
+  // further operations, such as RawUncompress(). You will need to rewind
+  // or recreate the source yourself before attempting any further calls.
   bool GetUncompressedLength(Source* source, uint32* result);
 
   // ------------------------------------------------------------------------
@@ -135,15 +142,16 @@ namespace snappy {
   bool IsValidCompressedBuffer(const char* compressed,
                                size_t compressed_length);
 
-  // *** DO NOT CHANGE THE VALUE OF kBlockSize ***
+  // The size of a compression block. Note that many parts of the compression
+  // code assumes that kBlockSize <= 65536; in particular, the hash table
+  // can only store 16-bit offsets, and EmitCopy() also assumes the offset
+  // is 65535 bytes or less. Note also that if you change this, it will
+  // affect the framing format (see framing_format.txt).
   //
-  // New Compression code chops up the input into blocks of at most
-  // the following size.  This ensures that back-references in the
-  // output never cross kBlockSize block boundaries.  This can be
-  // helpful in implementing blocked decompression.  However the
-  // decompression code should not rely on this guarantee since older
-  // compression code may not obey it.
-  static const int kBlockLog = 15;
+  // Note that there might be older data around that is compressed with larger
+  // block sizes, so the decompression code should not rely on the
+  // non-existence of long backreferences.
+  static const int kBlockLog = 16;
   static const size_t kBlockSize = 1 << kBlockLog;
 
   static const int kMaxHashTableBits = 14;

http://git-wip-us.apache.org/repos/asf/couchdb/blob/cc20daaf/src/snappy/snappy.app.in
----------------------------------------------------------------------
diff --git a/src/snappy/snappy.app.in b/src/snappy/snappy.app.in
index 25d37b5..ecf5b00 100644
--- a/src/snappy/snappy.app.in
+++ b/src/snappy/snappy.app.in
@@ -1,7 +1,7 @@
 {application, snappy,
  [
   {description, "snappy compressor/decompressor Erlang NIF wrapper"},
-  {vsn, "1.0.5"},
+  {vsn, "1.1.0"},
   {registered, []},
   {applications, [
                   kernel,

http://git-wip-us.apache.org/repos/asf/couchdb/blob/cc20daaf/src/snappy/snappy_nif.cc
----------------------------------------------------------------------
diff --git a/src/snappy/snappy_nif.cc b/src/snappy/snappy_nif.cc
index ae28d91..30b9c66 100644
--- a/src/snappy/snappy_nif.cc
+++ b/src/snappy/snappy_nif.cc
@@ -15,7 +15,6 @@
  * the License.
  **/
 
-#include <iostream>
 #include <cstring>
 
 #include "erl_nif_compat.h"
@@ -41,7 +40,7 @@ class SnappyNifSink : public snappy::Sink
     public:
         SnappyNifSink(ErlNifEnv* e);
         ~SnappyNifSink();
-        
+
         void Append(const char* data, size_t n);
         char* GetAppendBuffer(size_t len, char* scratch);
         ErlNifBinary& getBin();
@@ -80,7 +79,7 @@ char*
 SnappyNifSink::GetAppendBuffer(size_t len, char* scratch)
 {
     size_t sz;
-    
+
     if((length + len) > bin.size) {
         sz = (len * 4) < 8192 ? 8192 : (len * 4);
 
@@ -119,7 +118,7 @@ static inline ERL_NIF_TERM
 make_ok(ErlNifEnv* env, ERL_NIF_TERM mesg)
 {
     ERL_NIF_TERM ok = make_atom(env, "ok");
-    return enif_make_tuple2(env, ok, mesg);   
+    return enif_make_tuple2(env, ok, mesg);
 }
 
 

