Subject: svn commit: r1513658 [4/4] - in /hadoop/common/branches/HDFS-4949/hadoop-common-project: hadoop-auth-examples/src/main/webapp/ hadoop-auth-examples/src/main/webapp/annonymous/ hadoop-auth-examples/src/main/webapp/kerberos/ hadoop-auth-examples/src/main...
Date: Tue, 13 Aug 2013 21:19:57 -0000
To: common-commits@hadoop.apache.org
From: cmccabe@apache.org

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c Tue Aug 13 21:19:53 2013 @@ -1,19 +1,19 @@ /* LZ4 - Fast LZ compression algorithm - Copyright (C) 2011, Yann Collet. - BSD License + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -25,621 +25,672 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html + - LZ4 source repository : http://code.google.com/p/lz4/ +*/ + +/* +Note : this source file requires "lz4_encoder.h" */ //************************************** -// Copy from: -// URL: http://lz4.googlecode.com/svn/trunk/lz4.c -// Repository Root: http://lz4.googlecode.com/svn -// Repository UUID: 650e7d94-2a16-8b24-b05c-7c0b3f6821cd -// Revision: 43 -// Node Kind: file -// Last Changed Author: yann.collet.73@gmail.com -// Last Changed Rev: 43 -// Last Changed Date: 2011-12-16 15:41:46 -0800 (Fri, 16 Dec 2011) -// Sha1: 9db7b2c57698c528d79572e6bce2e7dc33fa5998 +// Tuning parameters //************************************** +// MEMORY_USAGE : +// Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) +// Increasing memory usage improves compression ratio +// Reduced memory usage can improve speed, due to cache effect +// Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache +#define MEMORY_USAGE 14 + +// HEAPMODE : +// Select how default compression function will allocate memory for its hash table, +// in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)). +// Default allocation strategy is to use stack (HEAPMODE 0) +// Note : explicit functions *_stack* and *_heap* are unaffected by this setting +#define HEAPMODE 0 + //************************************** -// Compilation Directives +// CPU Feature Detection //************************************** -#if __STDC_VERSION__ >= 199901L /* "restrict" is a known keyword */ +// 32 or 64 bits ? +#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \ + || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \ + || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \ + || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) // Detects 64 bits mode +# define LZ4_ARCH64 1 #else -#define restrict // Disable restrict +# define LZ4_ARCH64 0 +#endif + +// Little Endian or Big Endian ? +// Overwrite the #define below if you know your architecture endianness +#if defined (__GLIBC__) +# include <endian.h> +# if (__BYTE_ORDER == __BIG_ENDIAN) +# define LZ4_BIG_ENDIAN 1 +# endif +#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN)) +# define LZ4_BIG_ENDIAN 1 +#elif defined(__sparc) || defined(__sparc__) \ + || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \ + || defined(__hpux) || defined(__hppa) \ + || defined(_MIPSEB) || defined(__s390__) +# define LZ4_BIG_ENDIAN 1 +#else +// Little Endian assumed. PDP Endian and other very rare endian formats are unsupported. +#endif + +// Unaligned memory access is automatically enabled for "common" CPU, such as x86.
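To make the tuning and detection blocks above concrete, here is a minimal standalone sketch, not part of lz4.c itself (the program, its variable names, and its output format are illustrative assumptions): it evaluates the N -> 2^N MEMORY_USAGE sizing formula and probes byte order at run time, which should agree with the LZ4_BIG_ENDIAN detection on a given target.

#include <stdio.h>

int main(void)
{
    /* MEMORY_USAGE follows N -> 2^N bytes: the default of 14 gives a 16KB hash table. */
    int memory_usage = 14;
    printf("hash table size: %d bytes\n", 1 << memory_usage);

    /* Byte-order probe: the lowest-addressed byte of the integer 1 is 1 only on little-endian CPUs. */
    unsigned int one = 1;
    unsigned char *first = (unsigned char *)&one;
    printf("%s-endian\n", (*first == 1) ? "little" : "big");
    return 0;
}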
+// For other CPUs, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to ensure the aligned access property +// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance +#if defined(__ARM_FEATURE_UNALIGNED) +# define LZ4_FORCE_UNALIGNED_ACCESS 1 +#endif + +// Define this parameter if your target system or compiler does not support hardware bit count +#if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count +# define LZ4_FORCE_SW_BITCOUNT #endif +// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE : +// This option may provide a small boost to performance for some big endian cpu, although probably modest. +// You may set this option to 1 if data will remain within closed environment. +// This option is useless on Little_Endian CPU (such as x86) +//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 + //************************************** -// Includes +// Compiler Options //************************************** -#include <stdlib.h> // for malloc -#include <string.h> // for memset +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) // C99 +/* "restrict" is a known keyword */ +#else +# define restrict // Disable restrict +#endif + +#ifdef _MSC_VER // Visual Studio +# define forceinline static __forceinline +# include <intrin.h> // For Visual 2005 +# if LZ4_ARCH64 // 64-bits +# pragma intrinsic(_BitScanForward64) // For Visual 2005 +# pragma intrinsic(_BitScanReverse64) // For Visual 2005 +# else // 32-bits +# pragma intrinsic(_BitScanForward) // For Visual 2005 +# pragma intrinsic(_BitScanReverse) // For Visual 2005 +# endif +# pragma warning(disable : 4127) // disable: C4127: conditional expression is constant +#else +# ifdef __GNUC__ +# define forceinline static inline __attribute__((always_inline)) +# else +# define forceinline static inline +# endif +#endif + +#ifdef _MSC_VER +# define lz4_bswap16(x) _byteswap_ushort(x) +#else +# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))) +#endif + +#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__) +# define expect(expr,value) (__builtin_expect ((expr),(value)) ) +#else +# define expect(expr,value) (expr) +#endif + +#define likely(expr) expect((expr) != 0, 1) +#define unlikely(expr) expect((expr) != 0, 0) //************************************** -// Performance parameter +// Includes //************************************** -// Increasing this value improves compression ratio -// Lowering this value reduces memory usage -// Lowering may also improve speed, typically on reaching cache size limits (L1 32KB for Intel, 64KB for AMD) -// Memory usage formula for 32 bits systems : N->2^(N+2) Bytes (examples : 17 -> 512KB ; 12 -> 16KB) -#define HASH_LOG 12 +#include <stdlib.h> // for malloc +#include <string.h> // for memset +#include "lz4.h" //************************************** // Basic Types //************************************** -#if defined(_MSC_VER) // Visual Studio does not support 'stdint' natively -#define BYTE unsigned __int8 -#define U16 unsigned __int16 -#define U32 unsigned __int32 -#define S32 __int32 -#else -#include <stdint.h> -#define BYTE uint8_t -#define U16 uint16_t -#define U32 uint32_t -#define S32 int32_t +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99 +# include <stdint.h> + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; +#else + typedef unsigned
char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; #endif +#if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS) +# define _PACKED __attribute__ ((packed)) +#else +# define _PACKED +#endif + +#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__) +# ifdef __IBMC__ +# pragma pack(1) +# else +# pragma pack(push, 1) +# endif +#endif + +typedef struct { U16 v; } _PACKED U16_S; +typedef struct { U32 v; } _PACKED U32_S; +typedef struct { U64 v; } _PACKED U64_S; +typedef struct {size_t v;} _PACKED size_t_S; + +#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__) +# pragma pack(pop) +#endif + +#define A16(x) (((U16_S *)(x))->v) +#define A32(x) (((U32_S *)(x))->v) +#define A64(x) (((U64_S *)(x))->v) +#define AARCH(x) (((size_t_S *)(x))->v) + //************************************** // Constants //************************************** +#define HASHTABLESIZE (1 << MEMORY_USAGE) + #define MINMATCH 4 -#define SKIPSTRENGTH 6 -#define STACKLIMIT 13 -#define HEAPMODE (HASH_LOG>STACKLIMIT) // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()). -#define COPYTOKEN 4 + #define COPYLENGTH 8 #define LASTLITERALS 5 #define MFLIMIT (COPYLENGTH+MINMATCH) #define MINLENGTH (MFLIMIT+1) +#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1)) +#define SKIPSTRENGTH 6 // Increasing this value will make the compression run slower on incompressible data + #define MAXD_LOG 16 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1) -#define HASHTABLESIZE (1 << HASH_LOG) -#define HASH_MASK (HASHTABLESIZE - 1) - -#define ML_BITS 4 -#define ML_MASK ((1U<v) -#define A16(x) (((U16_S *)(x))->v) - //************************************** // Macros //************************************** -#define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG)) -#define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p)) -#define LZ4_COPYPACKET(s,d) A32(d) = A32(s); d+=4; s+=4; A32(d) = A32(s); d+=4; s+=4; -#define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d>3); +# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clzll(val) >> 3); +# else + int r; + if (!(val>>32)) { r=4; } else { r=0; val>>=32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif +# else +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanForward64( &r, val ); + return (int)(r>>3); +# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_ctzll(val) >> 3); +# else + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif +# endif +} -int LZ4_compressCtx(void** ctx, - char* source, - char* dest, - int isize) -{ -#if HEAPMODE - struct refTables *srt = (struct refTables *) (*ctx); - const BYTE** HashTable; #else - const BYTE* HashTable[HASHTABLESIZE] = {0}; -#endif - const BYTE* ip = (BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const iend = ip + isize; - const BYTE* const mflimit = iend - MFLIMIT; -#define matchlimit (iend - LASTLITERALS) +forceinline int LZ4_NbCommonBytes (register U32 val) +{ +# if defined(LZ4_BIG_ENDIAN) +# if defined(_MSC_VER) && 
!defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanReverse( &r, val ); + return (int)(r>>3); +# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clz(val) >> 3); +# else + int r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif +# else +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanForward( &r, val ); + return (int)(r>>3); +# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_ctz(val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif +# endif +} - BYTE* op = (BYTE*) dest; - -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - const size_t DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; #endif - int len, length; - const int skipStrength = SKIPSTRENGTH; - U32 forwardH; - // Init - if (isizehashTable; - memset((void*)HashTable, 0, sizeof(srt->hashTable)); -#else - (void) ctx; -#endif - - - // First Byte - HashTable[LZ4_HASH_VALUE(ip)] = ip; - ip++; forwardH = LZ4_HASH_VALUE(ip); - - // Main Loop - for ( ; ; ) - { - int findMatchAttempts = (1U << skipStrength) + 3; - const BYTE* forwardIp = ip; - const BYTE* ref; - BYTE* token; - - // Find a match - do { - U32 h = forwardH; - int step = findMatchAttempts++ >> skipStrength; - ip = forwardIp; - forwardIp = ip + step; - - if (forwardIp > mflimit) { goto _last_literals; } - - forwardH = LZ4_HASH_VALUE(forwardIp); - ref = HashTable[h]; - HashTable[h] = ip; - - } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip))); - - // Catch up - while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; } - - // Encode Literal length - length = ip - anchor; - token = op++; - if (length>=(int)RUN_MASK) { *token=(RUN_MASK< 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; } - else *token = (length<>8; } -#endif - - // Start Counting - ip+=MINMATCH; ref+=MINMATCH; // MinMatch verified - anchor = ip; - while (ip> 27]; -#else - if (A32(ref) == A32(ip)) { ip+=4; ref+=4; continue; } - if (A16(ref) == A16(ip)) { ip+=2; ref+=2; } - if (*ref == *ip) ip++; -#endif - goto _endCount; - } - if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; } - if ((ip=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } - else *token += len; - - // Test end of chunk - if (ip > mflimit) { anchor = ip; break; } - - // Fill table - HashTable[LZ4_HASH_VALUE(ip-2)] = ip-2; - - // Test next position - ref = HashTable[LZ4_HASH_VALUE(ip)]; - HashTable[LZ4_HASH_VALUE(ip)] = ip; - if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; } - - // Prepare next loop - anchor = ip++; - forwardH = LZ4_HASH_VALUE(ip); - } - -_last_literals: - // Encode Last Literals - { - int lastRun = iend - anchor; - if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK< 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } - else *op++ = (lastRun<> ((MINMATCH*8)-HASHLOG64K)) -#define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p)) -int LZ4_compress64kCtx(void** ctx, - char* source, - char* dest, - int isize) -{ -#if HEAPMODE - struct refTables *srt = (struct 
refTables *) (*ctx); - U16* HashTable; -#else - U16 HashTable[HASHTABLESIZE<<1] = {0}; -#endif +/* +int LZ4_compress_stack_limitedOutput( + const char* source, + char* dest, + int inputSize, + int maxOutputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'. +If it cannot achieve it, compression will stop, and result of the function will be zero. +return : the number of bytes written in buffer 'dest', or 0 if the compression fails +*/ +#define FUNCTION_NAME LZ4_compress_stack_limitedOutput +#define LIMITED_OUTPUT +#include "lz4_encoder.h" - const BYTE* ip = (BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const base = ip; - const BYTE* const iend = ip + isize; - const BYTE* const mflimit = iend - MFLIMIT; -#define matchlimit (iend - LASTLITERALS) - BYTE* op = (BYTE*) dest; - -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - const size_t DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; -#endif - int len, length; - const int skipStrength = SKIPSTRENGTH; - U32 forwardH; +/* +int LZ4_compress64k_stack( + const char* source, + char* dest, + int inputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest'. +This function compresses better than LZ4_compress_stack(), on the condition that +'inputSize' must be < to LZ4_64KLIMIT, or the function will fail. +Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize). +return : the number of bytes written in buffer 'dest', or 0 if compression fails +*/ +#define FUNCTION_NAME LZ4_compress64k_stack +#define COMPRESS_64K +#include "lz4_encoder.h" - // Init - if (isizehashTable); - memset((void*)HashTable, 0, sizeof(srt->hashTable)); -#else - (void) ctx; -#endif - - - // First Byte - ip++; forwardH = LZ4_HASH64K_VALUE(ip); - - // Main Loop - for ( ; ; ) - { - int findMatchAttempts = (1U << skipStrength) + 3; - const BYTE* forwardIp = ip; - const BYTE* ref; - BYTE* token; - - // Find a match - do { - U32 h = forwardH; - int step = findMatchAttempts++ >> skipStrength; - ip = forwardIp; - forwardIp = ip + step; - - if (forwardIp > mflimit) { goto _last_literals; } - - forwardH = LZ4_HASH64K_VALUE(forwardIp); - ref = base + HashTable[h]; - HashTable[h] = ip - base; - - } while (A32(ref) != A32(ip)); - - // Catch up - while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; } - - // Encode Literal length - length = ip - anchor; - token = op++; - if (length>=(int)RUN_MASK) { *token=(RUN_MASK< 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; } - else *token = (length<>8; } -#endif - - // Start Counting - ip+=MINMATCH; ref+=MINMATCH; // MinMatch verified - anchor = ip; - while (ip> 27]; -#else - if (A32(ref) == A32(ip)) { ip+=4; ref+=4; continue; } - if (A16(ref) == A16(ip)) { ip+=2; ref+=2; } - if (*ref == *ip) ip++; -#endif - goto _endCount; - } - if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; } - if ((ip=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } - else *token += len; - - // Test end of chunk - if (ip > mflimit) { anchor = ip; break; } - - // Test next position - ref = base + HashTable[LZ4_HASH64K_VALUE(ip)]; - HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base; - if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; } - - // Prepare next loop - anchor = ip++; - forwardH = 
LZ4_HASH64K_VALUE(ip); - } - -_last_literals: - // Encode Last Literals - { - int lastRun = iend - anchor; - if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK< 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } - else *op++ = (lastRun< compression not done + if (inputSize < LZ4_64KLIMIT) + result = LZ4_compress64k_heap(ctx, source, dest, inputSize); + else result = LZ4_compress_heap(ctx, source, dest, inputSize); + LZ4_free(ctx); + return result; #else - if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize); - return LZ4_compressCtx(NULL, source, dest, isize); + if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack(source, dest, inputSize); + return LZ4_compress_stack(source, dest, inputSize); #endif } +int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) +{ +#if HEAPMODE + void* ctx = LZ4_create(); + int result; + if (ctx == NULL) return 0; // Failed allocation => compression not done + if (inputSize < LZ4_64KLIMIT) + result = LZ4_compress64k_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize); + else result = LZ4_compress_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize); + LZ4_free(ctx); + return result; +#else + if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack_limitedOutput(source, dest, inputSize, maxOutputSize); + return LZ4_compress_stack_limitedOutput(source, dest, inputSize, maxOutputSize); +#endif +} //**************************** -// Decompression CODE +// Decompression functions //**************************** -// Note : The decoding functions LZ4_uncompress() and LZ4_uncompress_unknownOutputSize() -// are safe against "buffer overflow" attack type -// since they will *never* write outside of the provided output buffer : -// they both check this condition *before* writing anything. -// A corrupted packet however can make them *read* within the first 64K before the output buffer. - -int LZ4_uncompress(char* source, - char* dest, - int osize) -{ - // Local Variables - const BYTE* restrict ip = (const BYTE*) source; - const BYTE* restrict ref; - - BYTE* restrict op = (BYTE*) dest; - BYTE* const oend = op + osize; - BYTE* cpy; - - BYTE token; - - U32 dec[4]={0, 3, 2, 3}; - int len, length; - - - // Main Loop - while (1) - { - // get runlength - token = *ip++; - if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; } - - // copy literals - cpy = op+length; - if (cpy>oend-COPYLENGTH) - { - if (cpy > oend) goto _output_error; - memcpy(op, ip, length); - ip += length; - break; // Necessarily EOF - } - LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy; - - - // get offset -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - ref = cpy - A16(ip); ip+=2; -#else - { int delta = *ip++; delta += *ip++ << 8; ref = cpy - delta; } -#endif - - // get matchlength - if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; } - - // copy repeated sequence - if (op-ref oend-COPYLENGTH) - { - if (cpy > oend) goto _output_error; - LZ4_WILDCOPY(ref, op, (oend-COPYLENGTH)); - while(op oend-MFLIMIT)) oexit = oend-MFLIMIT; // targetOutputSize too high => decode everything + if ((endOnInput) && unlikely(outputSize==0)) return ((inputSize==1) && (*ip==0)) ? 
0 : -1; // Empty output buffer + if ((!endOnInput) && unlikely(outputSize==0)) return (*ip==0?1:-1); + + + // Main Loop + while (1) + { + unsigned token; + size_t length; + + // get runlength + token = *ip++; + if ((length=(token>>ML_BITS)) == RUN_MASK) + { + unsigned s=255; + while (((endOnInput)?ip(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) ) + || ((!endOnInput) && (cpy>oend-COPYLENGTH))) + { + if (partialDecoding) + { + if (cpy > oend) goto _output_error; // Error : write attempt beyond end of output buffer + if ((endOnInput) && (ip+length > iend)) goto _output_error; // Error : read attempt beyond end of input buffer + } + else + { + if ((!endOnInput) && (cpy != oend)) goto _output_error; // Error : block decoding must stop exactly there + if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; // Error : input must be consumed + } + memcpy(op, ip, length); + ip += length; + op += length; + break; // Necessarily EOF, due to parsing restrictions + } + LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy; + + // get offset + LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2; + if ((prefix64k==noPrefix) && unlikely(ref < (BYTE* const)dest)) goto _output_error; // Error : offset outside destination buffer + + // get matchlength + if ((length=(token&ML_MASK)) == ML_MASK) + { + for ( ; (!endOnInput) || (ipoend-(COPYLENGTH)-(STEPSIZE-4)) + { + if (cpy > oend-LASTLITERALS) goto _output_error; // Error : last 5 bytes must be literals + LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH)); + while(op Differences are described with each of the commands. Error information is sent to stderr and the output is sent to stdout. +appendToFile + + Usage: <<<hdfs dfs -appendToFile <localsrc> ... <dst> >>> + + Append single src, or multiple srcs from local file system to the + destination file system. Also reads input from stdin and appends to + destination file system. + + * <<<hdfs dfs -appendToFile localfile /user/hadoop/hadoopfile>>> + + * <<<hdfs dfs -appendToFile localfile1 localfile2 /user/hadoop/hadoopfile>>> + + * <<<hdfs dfs -appendToFile localfile hdfs://nn.example.com/hadoop/hadoopfile>>> + + * <<<hdfs dfs -appendToFile - hdfs://nn.example.com/hadoop/hadoopfile>>> + Reads the input from stdin. + + Exit Code: + + Returns 0 on success and 1 on error. + cat Usage: <<<hdfs dfs -cat URI [URI ...]>>> @@ -76,7 +97,7 @@ chmod Change the permissions of files. With -R, make the change recursively through the directory structure. The user must be the owner of the file, or - else a super-user. Additional information is in the + else a super-user. Additional information is in the
chown Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java Tue Aug 13 21:19:53 2013 @@ -71,7 +71,8 @@ public abstract class FSTestWrapper impl public String getAbsoluteTestRootDir() throws IOException { if (absTestRootDir == null) { - if (testRootDir.startsWith("/")) { + Path testRootPath = new Path(testRootDir); + if (testRootPath.isAbsolute()) { absTestRootDir = testRootDir; } else { absTestRootDir = getWorkingDirectory().toString() + "/" Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSWrapper.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSWrapper.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSWrapper.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSWrapper.java Tue Aug 13 21:19:53 2013 @@ -109,4 +109,7 @@ public interface FSWrapper { abstract public FileStatus[] listStatus(final Path f) throws AccessControlException, FileNotFoundException, UnsupportedFileSystemException, IOException; + + abstract public FileStatus[] globStatus(Path pathPattern, PathFilter filter) + throws IOException; } Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestWrapper.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestWrapper.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestWrapper.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestWrapper.java Tue Aug 13 21:19:53 2013 @@ -332,4 +332,10 @@ public final class FileContextTestWrappe FileNotFoundException, UnsupportedFileSystemException, IOException { return fc.util().listStatus(f); } + + @Override + public FileStatus[] globStatus(Path pathPattern, PathFilter filter) + throws IOException { + return fc.util().globStatus(pathPattern, filter); + } } Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java?rev=1513658&r1=1513657&r2=1513658&view=diff 
============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java Tue Aug 13 21:19:53 2013 @@ -397,4 +397,10 @@ public final class FileSystemTestWrapper FileNotFoundException, UnsupportedFileSystemException, IOException { return fs.listStatus(f); } + + @Override + public FileStatus[] globStatus(Path pathPattern, PathFilter filter) + throws IOException { + return fs.globStatus(pathPattern, filter); + } } Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java Tue Aug 13 21:19:53 2013 @@ -20,13 +20,10 @@ package org.apache.hadoop.fs; import java.io.*; import java.net.URI; import java.util.EnumSet; -import org.apache.hadoop.fs.FileContext; + import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.CreateFlag; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.test.GenericTestUtils; import static org.junit.Assert.*; @@ -51,6 +48,13 @@ public abstract class SymlinkBaseTest { abstract protected String testBaseDir2() throws IOException; abstract protected URI testURI(); + // Returns true if the filesystem is emulating symlink support. Certain + // checks will be bypassed if that is the case. 
+ // + protected boolean emulatingSymlinksOnWindows() { + return false; + } + protected IOException unwrapException(IOException e) { return e; } @@ -156,8 +160,11 @@ public abstract class SymlinkBaseTest { @Test(timeout=10000) /** Try to create a directory given a path that refers to a symlink */ public void testMkdirExistingLink() throws IOException { + Path file = new Path(testBaseDir1() + "/targetFile"); + createAndWriteFile(file); + Path dir = new Path(testBaseDir1()+"/link"); - wrapper.createSymlink(new Path("/doesNotExist"), dir, false); + wrapper.createSymlink(file, dir, false); try { wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false); fail("Created a dir where a symlink exists"); @@ -224,6 +231,7 @@ public abstract class SymlinkBaseTest { @Test(timeout=10000) /** Stat a link to a file */ public void testStatLinkToFile() throws IOException { + assumeTrue(!emulatingSymlinksOnWindows()); Path file = new Path(testBaseDir1()+"/file"); Path linkToFile = new Path(testBaseDir1()+"/linkToFile"); createAndWriteFile(file); @@ -232,8 +240,7 @@ public abstract class SymlinkBaseTest { assertTrue(wrapper.isSymlink(linkToFile)); assertTrue(wrapper.isFile(linkToFile)); assertFalse(wrapper.isDir(linkToFile)); - assertEquals(file.toUri().getPath(), - wrapper.getLinkTarget(linkToFile).toString()); + assertEquals(file, wrapper.getLinkTarget(linkToFile)); // The local file system does not fully resolve the link // when obtaining the file status if (!"file".equals(getScheme())) { @@ -277,8 +284,7 @@ public abstract class SymlinkBaseTest { assertFalse(wrapper.isFile(linkToDir)); assertTrue(wrapper.isDir(linkToDir)); - assertEquals(dir.toUri().getPath(), - wrapper.getLinkTarget(linkToDir).toString()); + assertEquals(dir, wrapper.getLinkTarget(linkToDir)); } @Test(timeout=10000) @@ -351,6 +357,12 @@ public abstract class SymlinkBaseTest { /* Assert that the given link to a file behaves as expected. */ private void checkLink(Path linkAbs, Path expectedTarget, Path targetQual) throws IOException { + + // If we are emulating symlinks then many of these checks will fail + // so we skip them. + // + assumeTrue(!emulatingSymlinksOnWindows()); + Path dir = new Path(testBaseDir1()); // isFile/Directory assertTrue(wrapper.isFile(linkAbs)); @@ -400,7 +412,7 @@ public abstract class SymlinkBaseTest { failureExpected = false; } try { - readFile(new Path(getScheme()+"://"+testBaseDir1()+"/linkToFile")); + readFile(new Path(getScheme()+":///"+testBaseDir1()+"/linkToFile")); assertFalse(failureExpected); } catch (Exception e) { if (!failureExpected) { @@ -646,6 +658,7 @@ public abstract class SymlinkBaseTest { @Test(timeout=10000) /** Create symlink through a symlink */ public void testCreateLinkViaLink() throws IOException { + assumeTrue(!emulatingSymlinksOnWindows()); Path dir1 = new Path(testBaseDir1()); Path file = new Path(testBaseDir1(), "file"); Path linkToDir = new Path(testBaseDir2(), "linkToDir"); @@ -688,6 +701,7 @@ public abstract class SymlinkBaseTest { @Test(timeout=10000) /** Test create symlink using the same path */ public void testCreateLinkTwice() throws IOException { + assumeTrue(!emulatingSymlinksOnWindows()); Path file = new Path(testBaseDir1(), "file"); Path link = new Path(testBaseDir1(), "linkToFile"); createAndWriteFile(file); @@ -783,7 +797,7 @@ public abstract class SymlinkBaseTest { Path linkToDir = new Path(testBaseDir2(), "linkToDir"); Path fileViaLink = new Path(linkToDir, "test/file"); // Symlink to .. is not a problem since the .. 
is squashed early - assertEquals(testBaseDir1(), dotDot.toString()); + assertEquals(new Path(testBaseDir1()), dotDot); createAndWriteFile(file); wrapper.createSymlink(dotDot, linkToDir, false); readFile(fileViaLink); @@ -876,7 +890,8 @@ public abstract class SymlinkBaseTest { assertFalse(wrapper.exists(linkViaLink)); // Check that we didn't rename the link target assertTrue(wrapper.exists(file)); - assertTrue(wrapper.getFileLinkStatus(linkNewViaLink).isSymlink()); + assertTrue(wrapper.getFileLinkStatus(linkNewViaLink).isSymlink() || + emulatingSymlinksOnWindows()); readFile(linkNewViaLink); } @@ -1014,7 +1029,8 @@ public abstract class SymlinkBaseTest { createAndWriteFile(file); wrapper.createSymlink(file, link1, false); wrapper.rename(link1, link2); - assertTrue(wrapper.getFileLinkStatus(link2).isSymlink()); + assertTrue(wrapper.getFileLinkStatus(link2).isSymlink() || + emulatingSymlinksOnWindows()); readFile(link2); readFile(file); assertFalse(wrapper.exists(link1)); @@ -1038,8 +1054,11 @@ public abstract class SymlinkBaseTest { } wrapper.rename(link, file1, Rename.OVERWRITE); assertFalse(wrapper.exists(link)); - assertTrue(wrapper.getFileLinkStatus(file1).isSymlink()); - assertEquals(file2, wrapper.getLinkTarget(file1)); + + if (!emulatingSymlinksOnWindows()) { + assertTrue(wrapper.getFileLinkStatus(file1).isSymlink()); + assertEquals(file2, wrapper.getLinkTarget(file1)); + } } @Test(timeout=10000) @@ -1078,16 +1097,21 @@ public abstract class SymlinkBaseTest { @Test(timeout=10000) /** Rename a symlink to itself */ public void testRenameSymlinkToItself() throws IOException { + Path file = new Path(testBaseDir1(), "file"); + createAndWriteFile(file); + Path link = new Path(testBaseDir1(), "linkToFile1"); - wrapper.createSymlink(new Path("/doestNotExist"), link, false); + wrapper.createSymlink(file, link, false); try { wrapper.rename(link, link); + fail("Failed to get expected IOException"); } catch (IOException e) { assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } // Fails with overwrite as well try { wrapper.rename(link, link, Rename.OVERWRITE); + fail("Failed to get expected IOException"); } catch (IOException e) { assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } @@ -1096,6 +1120,7 @@ public abstract class SymlinkBaseTest { @Test(timeout=10000) /** Rename a symlink */ public void testRenameSymlink() throws IOException { + assumeTrue(!emulatingSymlinksOnWindows()); Path file = new Path(testBaseDir1(), "file"); Path link1 = new Path(testBaseDir1(), "linkToFile1"); Path link2 = new Path(testBaseDir1(), "linkToFile2"); @@ -1193,6 +1218,7 @@ public abstract class SymlinkBaseTest { @Test(timeout=10000) /** Test rename the symlink's target */ public void testRenameLinkTarget() throws IOException { + assumeTrue(!emulatingSymlinksOnWindows()); Path file = new Path(testBaseDir1(), "file"); Path fileNew = new Path(testBaseDir1(), "fileNew"); Path link = new Path(testBaseDir1(), "linkToFile"); Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java (original) +++ 
hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java Tue Aug 13 21:19:53 2013 @@ -793,6 +793,8 @@ public class TestFileUtil { } } List actualClassPaths = Arrays.asList(classPathAttr.split(" ")); + Collections.sort(expectedClassPaths); + Collections.sort(actualClassPaths); Assert.assertEquals(expectedClassPaths, actualClassPaths); } finally { if (jarFile != null) { Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java Tue Aug 13 21:19:53 2013 @@ -28,11 +28,38 @@ import org.apache.hadoop.conf.Configurat import org.apache.hadoop.io.AvroTestUtil; import org.apache.hadoop.util.Shell; -import junit.framework.TestCase; +import com.google.common.base.Joiner; -import static org.junit.Assert.fail; +import junit.framework.TestCase; public class TestPath extends TestCase { + /** + * Merge a bunch of Path objects into a sorted semicolon-separated + * path string. + */ + public static String mergeStatuses(Path paths[]) { + String pathStrings[] = new String[paths.length]; + int i = 0; + for (Path path : paths) { + pathStrings[i++] = path.toUri().getPath(); + } + Arrays.sort(pathStrings); + return Joiner.on(";").join(pathStrings); + } + + /** + * Merge a bunch of FileStatus objects into a sorted semicolon-separated + * path string. 
+ */ + public static String mergeStatuses(FileStatus statuses[]) { + Path paths[] = new Path[statuses.length]; + int i = 0; + for (FileStatus status : statuses) { + paths[i++] = status.getPath(); + } + return mergeStatuses(paths); + } + @Test (timeout = 30000) public void testToString() { toStringTest("/"); @@ -352,10 +379,11 @@ public class TestPath extends TestCase { // ensure globStatus with "*" finds all dir contents stats = lfs.globStatus(new Path(testRoot, "*")); Arrays.sort(stats); - assertEquals(paths.length, stats.length); - for (int i=0; i < paths.length; i++) { - assertEquals(paths[i].getParent(), stats[i].getPath()); + Path parentPaths[] = new Path[paths.length]; + for (int i = 0; i < paths.length; i++) { + parentPaths[i] = paths[i].getParent(); } + assertEquals(mergeStatuses(parentPaths), mergeStatuses(stats)); // ensure that globStatus with an escaped "\*" only finds "*" stats = lfs.globStatus(new Path(testRoot, "\\*")); @@ -365,9 +393,7 @@ public class TestPath extends TestCase { // try to glob the inner file for all dirs stats = lfs.globStatus(new Path(testRoot, "*/f")); assertEquals(paths.length, stats.length); - for (int i=0; i < paths.length; i++) { - assertEquals(paths[i], stats[i].getPath()); - } + assertEquals(mergeStatuses(paths), mergeStatuses(stats)); // try to get the inner file for only the "*" dir stats = lfs.globStatus(new Path(testRoot, "\\*/f")); Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java Tue Aug 13 21:19:53 2013 @@ -30,6 +30,7 @@ import java.net.URI; import java.net.URISyntaxException; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.util.Shell; import org.junit.Test; /** @@ -62,6 +63,16 @@ abstract public class TestSymlinkLocalFS } @Override + protected boolean emulatingSymlinksOnWindows() { + // Java 6 on Windows has very poor symlink support. Specifically, + // File#length and File#renameTo do not work as expected. + // (see HADOOP-9061 for additional details) + // Hence some symlink tests will be skipped. + // + return (Shell.WINDOWS && !Shell.isJava7OrAbove()); + } + + @Override public void testCreateDanglingLink() throws IOException { // Dangling symlinks are not supported on Windows local file system. assumeTrue(!Path.WINDOWS); @@ -171,6 +182,7 @@ abstract public class TestSymlinkLocalFS * file scheme (eg file://host/tmp/test). */ public void testGetLinkStatusPartQualTarget() throws IOException { + assumeTrue(!emulatingSymlinksOnWindows()); Path fileAbs = new Path(testBaseDir1()+"/file"); Path fileQual = new Path(testURI().toString(), fileAbs); Path dir = new Path(testBaseDir1()); @@ -205,4 +217,14 @@ abstract public class TestSymlinkLocalFS // Expected. } } + + /** Test create symlink to . */ + @Override + public void testCreateLinkToDot() throws IOException { + try { + super.testCreateLinkToDot(); + } catch (IllegalArgumentException iae) { + // Expected.
+ } + } } Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileContext.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileContext.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileContext.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileContext.java Tue Aug 13 21:19:53 2013 @@ -17,8 +17,13 @@ */ package org.apache.hadoop.fs; +import org.apache.hadoop.util.Shell; import org.junit.BeforeClass; +import java.io.IOException; + +import static org.junit.Assume.assumeTrue; + public class TestSymlinkLocalFSFileContext extends TestSymlinkLocalFS { @BeforeClass @@ -27,4 +32,9 @@ public class TestSymlinkLocalFSFileConte wrapper = new FileContextTestWrapper(context); } + @Override + public void testRenameFileWithDestParentSymlink() throws IOException { + assumeTrue(!Shell.WINDOWS); + super.testRenameFileWithDestParentSymlink(); + } } Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileSystem.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileSystem.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileSystem.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileSystem.java Tue Aug 13 21:19:53 2013 @@ -17,13 +17,20 @@ */ package org.apache.hadoop.fs; +import java.io.FileNotFoundException; import java.io.IOException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Options.Rename; +import org.apache.hadoop.util.Shell; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + public class TestSymlinkLocalFSFileSystem extends TestSymlinkLocalFS { @BeforeClass @@ -54,4 +61,36 @@ public class TestSymlinkLocalFSFileSyste @Override @Test(timeout=1000) public void testAccessFileViaInterSymlinkAbsTarget() throws IOException {} + + @Override + public void testRenameFileWithDestParentSymlink() throws IOException { + assumeTrue(!Shell.WINDOWS); + super.testRenameFileWithDestParentSymlink(); + } + + @Override + @Test(timeout=10000) + /** Rename a symlink to itself */ + public void testRenameSymlinkToItself() throws IOException { + Path file = new Path(testBaseDir1(), "file"); + createAndWriteFile(file); + + Path link = new Path(testBaseDir1(), "linkToFile1"); + wrapper.createSymlink(file, link, false); + try { + wrapper.rename(link, link); + fail("Failed to get expected IOException"); + } catch (IOException e) { + assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); + } + // Fails with overwrite as well + try { + wrapper.rename(link, link, 
Rename.OVERWRITE); + fail("Failed to get expected IOException"); + } catch (IOException e) { + // Todo: Fix this test when HADOOP-9819 is fixed. + assertTrue(unwrapException(e) instanceof FileAlreadyExistsException || + unwrapException(e) instanceof FileNotFoundException); + } + } } Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java Tue Aug 13 21:19:53 2013 @@ -134,6 +134,14 @@ public class TestCodec { public void testLz4Codec() throws IOException { if (NativeCodeLoader.isNativeCodeLoaded()) { if (Lz4Codec.isNativeCodeLoaded()) { + conf.setBoolean( + CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY, + false); + codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec"); + codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec"); + conf.setBoolean( + CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY, + true); codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec"); codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec"); } else { Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java Tue Aug 13 21:19:53 2013 @@ -100,6 +100,7 @@ public class TestRPC { void ping() throws IOException; void slowPing(boolean shouldSlow) throws IOException; + void sleep(long delay) throws IOException, InterruptedException; String echo(String value) throws IOException; String[] echo(String[] value) throws IOException; Writable echo(Writable value) throws IOException; @@ -146,6 +147,11 @@ public class TestRPC { } @Override + public void sleep(long delay) throws InterruptedException { + Thread.sleep(delay); + } + + @Override public String echo(String value) throws IOException { return value; } @Override @@ -932,6 +938,28 @@ public class TestRPC { } } + @Test + public void testConnectionPing() throws Exception { + Configuration conf = new Configuration(); + int pingInterval = 50; + conf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true); + conf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, pingInterval); + final Server server = new RPC.Builder(conf) + .setProtocol(TestProtocol.class).setInstance(new TestImpl()) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true) + .build(); + server.start(); + + final TestProtocol proxy = RPC.getProxy(TestProtocol.class, + TestProtocol.versionID, 
server.getListenerAddress(), conf); + try { + // this call will throw exception if server couldn't decode the ping + proxy.sleep(pingInterval*4); + } finally { + if (proxy != null) RPC.stopProxy(proxy); + } + } + public static void main(String[] args) throws IOException { new TestRPC().testCallsInternal(conf); Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java?rev=1513658&r1=1513657&r2=1513658&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java Tue Aug 13 21:19:53 2013 @@ -29,6 +29,7 @@ import java.lang.annotation.Annotation; import java.net.InetSocketAddress; import java.security.PrivilegedExceptionAction; import java.security.Security; +import java.util.ArrayList; import java.util.Collection; import java.util.Set; import java.util.regex.Pattern; @@ -44,8 +45,6 @@ import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; import javax.security.sasl.SaslServer; -import junit.framework.Assert; - import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -62,11 +61,11 @@ import org.apache.hadoop.security.SaslPl import org.apache.hadoop.security.SaslRpcClient; import org.apache.hadoop.security.SaslRpcServer; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; +import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection; import org.apache.hadoop.security.SecurityInfo; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.TestUserGroupInformation; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; @@ -77,9 +76,28 @@ import org.apache.log4j.Level; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; /** Unit tests for using Sasl over RPC. 
*/ +@RunWith(Parameterized.class) public class TestSaslRPC { + @Parameters + public static Collection data() { + Collection params = new ArrayList(); + for (QualityOfProtection qop : QualityOfProtection.values()) { + params.add(new Object[]{ qop }); + } + return params; + } + + QualityOfProtection expectedQop; + + public TestSaslRPC(QualityOfProtection qop) { + expectedQop = qop; + } + private static final String ADDRESS = "0.0.0.0"; public static final Log LOG = @@ -115,8 +133,12 @@ public class TestSaslRPC { @Before public void setup() { + LOG.info("---------------------------------"); + LOG.info("Testing QOP:"+expectedQop); + LOG.info("---------------------------------"); conf = new Configuration(); conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); + conf.set("hadoop.rpc.protection", expectedQop.name().toLowerCase()); UserGroupInformation.setConfiguration(conf); enableSecretManager = null; forceSecretManager = null; @@ -226,15 +248,16 @@ public class TestSaslRPC { serverPrincipal = SERVER_PRINCIPAL_KEY) @TokenInfo(TestTokenSelector.class) public interface TestSaslProtocol extends TestRPC.TestProtocol { - public AuthenticationMethod getAuthMethod() throws IOException; + public AuthMethod getAuthMethod() throws IOException; public String getAuthUser() throws IOException; } public static class TestSaslImpl extends TestRPC.TestImpl implements TestSaslProtocol { @Override - public AuthenticationMethod getAuthMethod() throws IOException { - return UserGroupInformation.getCurrentUser().getAuthenticationMethod(); + public AuthMethod getAuthMethod() throws IOException { + return UserGroupInformation.getCurrentUser() + .getAuthenticationMethod().getAuthMethod(); } @Override public String getAuthUser() throws IOException { @@ -341,8 +364,11 @@ public class TestSaslRPC { try { proxy = RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID, addr, conf); + AuthMethod authMethod = proxy.getAuthMethod(); + assertEquals(TOKEN, authMethod); //QOP must be auth - Assert.assertEquals(SaslRpcServer.SASL_PROPS.get(Sasl.QOP), "auth"); + assertEquals(expectedQop.saslQop, + RPC.getConnectionIdForProxy(proxy).getSaslQop()); proxy.ping(); } finally { server.stop(); @@ -393,6 +419,7 @@ public class TestSaslRPC { newConf.set(CommonConfigurationKeysPublic. 
HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, ""); + Client client = null; TestSaslProtocol proxy1 = null; TestSaslProtocol proxy2 = null; TestSaslProtocol proxy3 = null; @@ -402,7 +429,7 @@ public class TestSaslRPC { proxy1 = RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID, addr, newConf); proxy1.getAuthMethod(); - Client client = WritableRpcEngine.getClient(conf); + client = WritableRpcEngine.getClient(newConf); Set conns = client.getConnectionIds(); assertEquals("number of connections in cache is wrong", 1, conns.size()); // same conf, connection should be re-used @@ -428,9 +455,13 @@ public class TestSaslRPC { assertNotSame(connsArray[2].getMaxIdleTime(), timeouts[1]); } finally { server.stop(); - RPC.stopProxy(proxy1); - RPC.stopProxy(proxy2); - RPC.stopProxy(proxy3); + // this is dirty, but clear out connection cache for next run + if (client != null) { + client.getConnectionIds().clear(); + } + if (proxy1 != null) RPC.stopProxy(proxy1); + if (proxy2 != null) RPC.stopProxy(proxy2); + if (proxy3 != null) RPC.stopProxy(proxy3); } } @@ -793,14 +824,13 @@ public class TestSaslRPC { final AuthMethod serverAuth, final UseToken tokenType) throws Exception { - String currentUser = UserGroupInformation.getCurrentUser().getUserName(); - final Configuration serverConf = new Configuration(conf); serverConf.set(HADOOP_SECURITY_AUTHENTICATION, serverAuth.toString()); UserGroupInformation.setConfiguration(serverConf); - final UserGroupInformation serverUgi = - UserGroupInformation.createRemoteUser(currentUser + "-SERVER/localhost@NONE"); + final UserGroupInformation serverUgi = (serverAuth == KERBEROS) + ? UserGroupInformation.createRemoteUser("server/localhost@NONE") + : UserGroupInformation.createRemoteUser("server"); serverUgi.setAuthenticationMethod(serverAuth); final TestTokenSecretManager sm = new TestTokenSecretManager(); @@ -835,7 +865,7 @@ public class TestSaslRPC { UserGroupInformation.setConfiguration(clientConf); final UserGroupInformation clientUgi = - UserGroupInformation.createRemoteUser(currentUser + "-CLIENT"); + UserGroupInformation.createRemoteUser("client"); clientUgi.setAuthenticationMethod(clientAuth); final InetSocketAddress addr = NetUtils.getConnectAddress(server); @@ -873,14 +903,13 @@ public class TestSaslRPC { TestSaslProtocol.versionID, addr, clientConf); proxy.ping(); - // verify sasl completed - if (serverAuth != SIMPLE) { - assertEquals(SaslRpcServer.SASL_PROPS.get(Sasl.QOP), "auth"); - } - // make sure the other side thinks we are who we said we are!!! assertEquals(clientUgi.getUserName(), proxy.getAuthUser()); - return proxy.getAuthMethod().toString(); + AuthMethod authMethod = proxy.getAuthMethod(); + // verify sasl completed with correct QOP + assertEquals((authMethod != SIMPLE) ? expectedQop.saslQop : null, + RPC.getConnectionIdForProxy(proxy).getSaslQop()); + return authMethod.toString(); } finally { if (proxy != null) { RPC.stopProxy(proxy);