hadoop-common-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1196458 [3/3] - in /hadoop/common/branches/HDFS-1623: common/ hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authent...
Date: Wed, 02 Nov 2011 05:34:59 GMT
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c Wed Nov  2 05:34:31 2011
@@ -21,6 +21,7 @@
  *   All rights reserved. Use of this source code is governed by a
  *   BSD-style license that can be found in the LICENSE file.
  */
+#include <assert.h>
 #include <arpa/inet.h>
 #include <stdint.h>
 #include <unistd.h>
@@ -30,47 +31,123 @@
 #include "bulk_crc32.h"
 #include "gcc_optimizations.h"
 
+#define USE_PIPELINED
+
 typedef uint32_t (*crc_update_func_t)(uint32_t, const uint8_t *, size_t);
 static uint32_t crc_init();
 static uint32_t crc_val(uint32_t crc);
 static uint32_t crc32_zlib_sb8(uint32_t crc, const uint8_t *buf, size_t length);
 static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length);
 
+#ifdef USE_PIPELINED
+static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, const uint8_t *p_buf, size_t block_size, int num_blocks);
+#endif // USE_PIPELINED
+static int cached_cpu_supports_crc32; // initialized by constructor below
+static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* data, size_t length);
+
 int bulk_verify_crc(const uint8_t *data, size_t data_len,
                     const uint32_t *sums, int checksum_type,
                     int bytes_per_checksum,
                     crc32_error_t *error_info) {
 
+#ifdef USE_PIPELINED
+  uint32_t crc1, crc2, crc3;
+  int n_blocks = data_len / bytes_per_checksum;
+  int remainder = data_len % bytes_per_checksum;
+  int do_pipelined = 0;
+#endif
+  uint32_t crc;
   crc_update_func_t crc_update_func;
   switch (checksum_type) {
     case CRC32_ZLIB_POLYNOMIAL:
       crc_update_func = crc32_zlib_sb8;
       break;
     case CRC32C_POLYNOMIAL:
-      crc_update_func = crc32c_sb8;
+      if (likely(cached_cpu_supports_crc32)) {
+        crc_update_func = crc32c_hardware;
+#ifdef USE_PIPELINED
+        do_pipelined = 1;
+#endif
+      } else {
+        crc_update_func = crc32c_sb8;
+      }
       break;
     default:
       return INVALID_CHECKSUM_TYPE;
   }
 
+#ifdef USE_PIPELINED
+  if (do_pipelined) {
+    /* Process three blocks at a time */
+    while (likely(n_blocks >= 3)) {
+      crc1 = crc2 = crc3 = crc_init();  
+      pipelined_crc32c(&crc1, &crc2, &crc3, data, bytes_per_checksum, 3);
+
+      if ((crc = ntohl(crc_val(crc1))) != *sums)
+        goto return_crc_error;
+      sums++;
+      data += bytes_per_checksum;
+      if ((crc = ntohl(crc_val(crc2))) != *sums)
+        goto return_crc_error;
+      sums++;
+      data += bytes_per_checksum;
+      if ((crc = ntohl(crc_val(crc3))) != *sums)
+        goto return_crc_error;
+      sums++;
+      data += bytes_per_checksum;
+      n_blocks -= 3;
+    }
+
+    /* One or two blocks */
+    if (n_blocks) {
+      crc1 = crc2 = crc3 = crc_init(); /* crc3 is passed below even when unused; don't leave it uninitialized */
+      pipelined_crc32c(&crc1, &crc2, &crc3, data, bytes_per_checksum, n_blocks);
+
+      if ((crc = ntohl(crc_val(crc1))) != *sums)
+        goto return_crc_error;
+      data += bytes_per_checksum;
+      sums++;
+      if (n_blocks == 2) {
+        if ((crc = ntohl(crc_val(crc2))) != *sums)
+          goto return_crc_error;
+        sums++;
+        data += bytes_per_checksum;
+      }
+    }
+ 
+    /* For something smaller than a block */
+    if (remainder) {
+      crc1 = crc2 = crc3 = crc_init();
+      pipelined_crc32c(&crc1, &crc2, &crc3, data, remainder, 1);
+
+      if ((crc = ntohl(crc_val(crc1))) != *sums)
+        goto return_crc_error;
+    }
+    return CHECKSUMS_VALID;
+  }
+#endif
+
   while (likely(data_len > 0)) {
     int len = likely(data_len >= bytes_per_checksum) ? bytes_per_checksum : data_len;
-    uint32_t crc = crc_init();
+    crc = crc_init();
     crc = crc_update_func(crc, data, len);
     crc = ntohl(crc_val(crc));
     if (unlikely(crc != *sums)) {
-      if (error_info != NULL) {
-        error_info->got_crc = crc;
-        error_info->expected_crc = *sums;
-        error_info->bad_data = data;
-      }
-      return INVALID_CHECKSUM_DETECTED;
+      goto return_crc_error;
     }
     data += len;
     data_len -= len;
     sums++;
   }
   return CHECKSUMS_VALID;
+
+return_crc_error:
+  if (error_info != NULL) {
+    error_info->got_crc = crc;
+    error_info->expected_crc = *sums;
+    error_info->bad_data = data;
+  }
+  return INVALID_CHECKSUM_DETECTED;
 }
 
 
@@ -154,3 +232,417 @@ static uint32_t crc32_zlib_sb8(
   }
   return crc;    
 }
+
+///////////////////////////////////////////////////////////////////////////
+// Begin code for SSE4.2 specific hardware support of CRC32C
+///////////////////////////////////////////////////////////////////////////
+
+#if (defined(__amd64__) || defined(__i386)) && defined(__GNUC__)
+#  define SSE42_FEATURE_BIT (1 << 20)
+#  define CPUID_FEATURES 1
+/**
+ * Call the cpuid instruction to determine CPU feature flags.
+ */
+static uint32_t cpuid(uint32_t eax_in) {
+  uint32_t eax, ebx, ecx, edx;
+#  if defined(__PIC__) && !defined(__LP64__)
+// 32-bit PIC code uses the ebx register for the base offset --
+// have to save and restore it on the stack
+  asm("pushl %%ebx\n\t"
+      "cpuid\n\t"
+      "movl %%ebx, %[ebx]\n\t"
+      "popl %%ebx" : "=a" (eax), [ebx] "=r"(ebx),  "=c"(ecx), "=d"(edx) : "a" (eax_in)
+      : "cc");
+#  else
+  asm("cpuid" : "=a" (eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(eax_in)
+      : "cc");
+#  endif
+
+  return ecx;
+}
+
+/**
+ * On library load, initialize the cached value above indicating whether
+ * the CPU supports SSE4.2's crc32 instruction.
+ */
+void __attribute__ ((constructor)) init_cpu_support_flag(void) {
+  uint32_t ecx = cpuid(CPUID_FEATURES);
+  cached_cpu_supports_crc32 = ecx & SSE42_FEATURE_BIT;
+}
+
+
+//
+// Definitions of the SSE4.2 crc32 operations. Using these instead of
+// the GCC __builtin_* intrinsics allows this code to compile without
+// -msse4.2, since we do dynamic CPU detection at runtime.
+//
+
+#  ifdef __LP64__
+inline uint64_t _mm_crc32_u64(uint64_t crc, uint64_t value) {
+  asm("crc32q %[value], %[crc]\n" : [crc] "+r" (crc) : [value] "rm" (value));
+  return crc;
+}
+#  endif
+
+inline uint32_t _mm_crc32_u32(uint32_t crc, uint32_t value) {
+  asm("crc32l %[value], %[crc]\n" : [crc] "+r" (crc) : [value] "rm" (value));
+  return crc;
+}
+
+inline uint32_t _mm_crc32_u16(uint32_t crc, uint16_t value) {
+  asm("crc32w %[value], %[crc]\n" : [crc] "+r" (crc) : [value] "rm" (value));
+  return crc;
+}
+
+inline uint32_t _mm_crc32_u8(uint32_t crc, uint8_t value) {
+  asm("crc32b %[value], %[crc]\n" : [crc] "+r" (crc) : [value] "rm" (value));
+  return crc;
+}
+ 
+
+#  ifdef __LP64__
+/**
+ * Hardware-accelerated CRC32C calculation using the 64-bit instructions.
+ */
+static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* p_buf, size_t length) {
+  // start directly at p_buf, even if it's an unaligned address. According
+  // to the original author of this code, doing a small run of single bytes
+  // to word-align the 64-bit instructions doesn't seem to help, but
+  // we haven't reconfirmed those benchmarks ourselves.
+  uint64_t crc64bit = crc;
+  size_t i;
+  for (i = 0; i < length / sizeof(uint64_t); i++) {
+    crc64bit = _mm_crc32_u64(crc64bit, *(uint64_t*) p_buf);
+    p_buf += sizeof(uint64_t);
+  }
+
+  // This ugly switch is slightly faster for short strings than the straightforward loop
+  uint32_t crc32bit = (uint32_t) crc64bit;
+  length &= sizeof(uint64_t) - 1;
+  switch (length) {
+    case 7:
+      crc32bit = _mm_crc32_u8(crc32bit, *p_buf++);
+    case 6:
+      crc32bit = _mm_crc32_u16(crc32bit, *(uint16_t*) p_buf);
+      p_buf += 2;
+    // case 5 is below: 4 + 1
+    case 4:
+      crc32bit = _mm_crc32_u32(crc32bit, *(uint32_t*) p_buf);
+      break;
+    case 3:
+      crc32bit = _mm_crc32_u8(crc32bit, *p_buf++);
+    case 2:
+      crc32bit = _mm_crc32_u16(crc32bit, *(uint16_t*) p_buf);
+      break;
+    case 5:
+      crc32bit = _mm_crc32_u32(crc32bit, *(uint32_t*) p_buf);
+      p_buf += 4;
+    case 1:
+      crc32bit = _mm_crc32_u8(crc32bit, *p_buf);
+      break;
+    case 0:
+      break;
+    default:
+      // This should never happen; the assert catches it in debug builds
+      assert(0 && "ended up with 8 or more bytes at tail of calculation");
+  }
+
+  return crc32bit;
+}
+
+#ifdef USE_PIPELINED
+/**
+ * Pipelined version of hardware-accelerated CRC32C calculation using
+ * the 64-bit crc32q instruction.
+ * One crc32c instruction takes three cycles, but two more with no data
+ * dependency can be in flight in the pipeline, achieving something close
+ * to one instruction per cycle. Here we feed three blocks in round-robin.
+ *
+ *   crc1, crc2, crc3 : Hold the initial checksum for each block on entry;
+ *           on return they hold the updated checksums.
+ *   p_buf : The base address of the data buffer. The buffer should be
+ *           at least as big as block_size * num_blocks.
+ *   block_size : The size of each block in bytes.
+ *   num_blocks : The number of blocks to work on. Min = 1, Max = 3.
+ */
+static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, const uint8_t *p_buf, size_t block_size, int num_blocks) {
+  uint64_t c1 = *crc1;
+  uint64_t c2 = *crc2;
+  uint64_t c3 = *crc3;
+  uint64_t *data = (uint64_t*)p_buf;
+  int counter = block_size / sizeof(uint64_t);
+  int remainder = block_size % sizeof(uint64_t);
+  uint8_t *bdata;
+
+  /* We use a switch here because the loop has to be tight in order
+   * to fill the pipeline. Any other statement inside the loop,
+   * or in between the crc32 instructions, can slow things down. Calling
+   * the individual crc32 instructions three times from C also causes
+   * gcc to insert other instructions in between.
+   *
+   * Do not rearrange the following code unless you have verified that
+   * the generated machine code is as efficient as before.
+   */
+  switch (num_blocks) {
+    case 3:
+      /* Do three blocks */
+      while (likely(counter)) {
+        __asm__ __volatile__(
+        "crc32q (%7), %0;\n\t"
+        "crc32q (%7,%6,1), %1;\n\t"
+        "crc32q (%7,%6,2), %2;\n\t"
+         : "=r"(c1), "=r"(c2), "=r"(c3)
+         : "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(data)
+        );
+        data++;
+        counter--;
+      }
+
+      /* Take care of the remainder. It is at most seven bytes,
+       * so performing byte-level crc32 won't take much time.
+       */
+      bdata = (uint8_t*)data;
+      while (likely(remainder)) {
+        __asm__ __volatile__(
+        "crc32b (%7), %0;\n\t"
+        "crc32b (%7,%6,1), %1;\n\t"
+        "crc32b (%7,%6,2), %2;\n\t"
+         : "=r"(c1), "=r"(c2), "=r"(c3)
+         : "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(bdata)
+        );
+        bdata++;
+        remainder--;
+      }
+      break;
+    case 2:
+      /* Do two blocks */
+      while (likely(counter)) {
+        __asm__ __volatile__(
+        "crc32q (%5), %0;\n\t"
+        "crc32q (%5,%4,1), %1;\n\t"
+         : "=r"(c1), "=r"(c2) 
+         : "r"(c1), "r"(c2), "r"(block_size), "r"(data)
+        );
+        data++;
+        counter--;
+      }
+
+      bdata = (uint8_t*)data;
+      while (likely(remainder)) {
+        __asm__ __volatile__(
+        "crc32b (%5), %0;\n\t"
+        "crc32b (%5,%4,1), %1;\n\t"
+         : "=r"(c1), "=r"(c2) 
+         : "r"(c1), "r"(c2), "r"(block_size), "r"(bdata)
+        );
+        bdata++;
+        remainder--;
+      }
+      break;
+    case 1:
+      /* single block */
+      while (likely(counter)) {
+        __asm__ __volatile__(
+        "crc32q (%2), %0;\n\t"
+         : "=r"(c1) 
+         : "r"(c1), "r"(data)
+        );
+        data++;
+        counter--;
+      }
+      bdata = (uint8_t*)data;
+      while (likely(remainder)) {
+        __asm__ __volatile__(
+        "crc32b (%2), %0;\n\t"
+         : "=r"(c1) 
+         : "r"(c1), "r"(bdata)
+        );
+        bdata++;
+        remainder--;
+      }
+      break;
+    case 0:
+      return;
+    default:
+      assert(0 && "BUG: Invalid number of checksum blocks");
+  }
+
+  *crc1 = c1;
+  *crc2 = c2;
+  *crc3 = c3;
+  return;
+}
+#endif /* USE_PIPELINED */
+
+# else  // 32-bit
+
+/**
+ * Hardware-accelerated CRC32C calculation using the 32-bit instructions.
+ */
+static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* p_buf, size_t length) {
+  // start directly at p_buf, even if it's an unaligned address. According
+  // to the original author of this code, doing a small run of single bytes
+  // to word-align the 32-bit instructions doesn't seem to help, but
+  // we haven't reconfirmed those benchmarks ourselves.
+  size_t i;
+  for (i = 0; i < length / sizeof(uint32_t); i++) {
+    crc = _mm_crc32_u32(crc, *(uint32_t*) p_buf);
+    p_buf += sizeof(uint32_t);
+  }
+
+  // This ugly switch is slightly faster for short strings than the straightforward loop
+  length &= sizeof(uint32_t) - 1;
+  switch (length) {
+    case 3:
+      crc = _mm_crc32_u8(crc, *p_buf++);
+    case 2:
+      crc = _mm_crc32_u16(crc, *(uint16_t*) p_buf);
+      break;
+    case 1:
+      crc = _mm_crc32_u8(crc, *p_buf);
+      break;
+    case 0:
+      break;
+    default:
+      // This should never happen; the assert catches it in debug builds
+      assert(0 && "ended up with 4 or more bytes at tail of calculation");
+  }
+
+  return crc;
+}
+
+#ifdef USE_PIPELINED
+/**
+ * Pipelined version of hardware-accelerated CRC32C calculation using
+ * the 32-bit crc32l instruction.
+ * One crc32c instruction takes three cycles, but two more with no data
+ * dependency can be in flight in the pipeline, achieving something close
+ * to one instruction per cycle. Here we feed three blocks in round-robin.
+ *
+ *   crc1, crc2, crc3 : Hold the initial checksum for each block on entry;
+ *                on return they hold the updated checksums.
+ *   p_buf      : The base address of the data buffer. The buffer should be
+ *                at least as big as block_size * num_blocks.
+ *   block_size : The size of each block in bytes.
+ *   num_blocks : The number of blocks to work on. Min = 1, Max = 3.
+ */
+static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, const uint8_t *p_buf, size_t block_size, int num_blocks) {
+  uint32_t c1 = *crc1;
+  uint32_t c2 = *crc2;
+  uint32_t c3 = *crc3;
+  int counter = block_size / sizeof(uint32_t);
+  int remainder = block_size % sizeof(uint32_t);
+  uint32_t *data = (uint32_t*)p_buf;
+  uint8_t *bdata;
+
+  /* We use a switch here because the loop has to be tight in order
+   * to fill the pipeline. Any other statement inside the loop,
+   * or in between the crc32 instructions, can slow things down. Calling
+   * the individual crc32 instructions three times from C also causes
+   * gcc to insert other instructions in between.
+   *
+   * Do not rearrange the following code unless you have verified that
+   * the generated machine code is as efficient as before.
+   */
+  switch (num_blocks) {
+    case 3:
+      /* Do three blocks */
+      while (likely(counter)) {
+        __asm__ __volatile__(
+        "crc32l (%7), %0;\n\t"
+        "crc32l (%7,%6,1), %1;\n\t"
+        "crc32l (%7,%6,2), %2;\n\t"
+         : "=r"(c1), "=r"(c2), "=r"(c3)
+         : "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(data)
+        );
+        data++;
+        counter--;
+      }
+      /* Take care of the remainder. It is at most three bytes,
+       * so performing byte-level crc32 won't take much time.
+       */
+      bdata = (uint8_t*)data;
+      while (likely(remainder)) {
+        __asm__ __volatile__(
+        "crc32b (%7), %0;\n\t"
+        "crc32b (%7,%6,1), %1;\n\t"
+        "crc32b (%7,%6,2), %2;\n\t"
+         : "=r"(c1), "=r"(c2), "=r"(c3)
+         : "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(bdata)
+        );
+        bdata++;
+        remainder--;
+      }
+      break;
+    case 2:
+      /* Do two blocks */
+      while (likely(counter)) {
+        __asm__ __volatile__(
+        "crc32l (%5), %0;\n\t"
+        "crc32l (%5,%4,1), %1;\n\t"
+         : "=r"(c1), "=r"(c2) 
+         : "r"(c1), "r"(c2), "r"(block_size), "r"(data)
+        );
+        data++;
+        counter--;
+      }
+
+      bdata = (uint8_t*)data;
+      while (likely(remainder)) {
+        __asm__ __volatile__(
+        "crc32b (%5), %0;\n\t"
+        "crc32b (%5,%4,1), %1;\n\t"
+         : "=r"(c1), "=r"(c2) 
+         : "r"(c1), "r"(c2), "r"(block_size), "r"(bdata)
+        );
+        bdata++;
+        remainder--;
+      }
+      break;
+    case 1:
+      /* single block */
+      while (likely(counter)) {
+        __asm__ __volatile__(
+        "crc32l (%2), %0;\n\t"
+         : "=r"(c1) 
+         : "r"(c1), "r"(data)
+        );
+        data++;
+        counter--;
+      }
+      bdata = (uint8_t*)data;
+      while (likely(remainder)) {
+        __asm__ __volatile__(
+        "crc32b (%2), %0;\n\t"
+         : "=r"(c1) 
+         : "r"(c1), "r"(bdata)
+        );
+        bdata++;
+        remainder--;
+      }
+      break;
+    case 0:
+       return;
+    default:
+      assert(0 && "BUG: Invalid number of checksum blocks");
+  }
+
+  *crc1 = c1;
+  *crc2 = c2;
+  *crc3 = c3;
+  return;
+}
+
+#endif /* USE_PIPELINED */
+
+# endif // 64-bit vs 32-bit
+
+#else // end x86 architecture
+
+static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* data, size_t length) {
+  // never called!
+  assert(0 && "hardware crc called on an unsupported platform");
+  return 0;
+}
+
+#endif
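
Editorial note: the heart of this change is the round-robin pipelining of three
independent CRC32C streams. The sketch below is not part of this commit; it
illustrates the same latency-hiding idea using the SSE4.2 intrinsics from
<nmmintrin.h>, so it must be built with -msse4.2 and does no runtime CPU
detection, unlike the inline-assembly wrappers above. It also skips the
crc_init()/crc_val() bit manipulation and byte-order handling, and assumes the
block size is a multiple of eight bytes. The function and file names here
(crc32c_three_blocks, crc32c_pipeline_sketch.c) are hypothetical.

/* crc32c_pipeline_sketch.c: illustration only, not Hadoop code.
 * Build with, e.g.:  gcc -O2 -msse4.2 crc32c_pipeline_sketch.c
 */
#include <nmmintrin.h>   /* _mm_crc32_u64; requires -msse4.2 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Update three running CRC32C values over three consecutive, equally sized
 * blocks. The three crc32q chains are independent, so the CPU can keep all
 * of them in flight at once, hiding the instruction's multi-cycle latency. */
static void crc32c_three_blocks(const uint8_t *buf, size_t block_size,
                                uint32_t *c1, uint32_t *c2, uint32_t *c3) {
  uint64_t a = *c1, b = *c2, c = *c3;
  const uint64_t *p = (const uint64_t *) buf;    /* assumes 8-byte alignment */
  size_t words = block_size / sizeof(uint64_t);  /* assumes block_size % 8 == 0 */
  size_t i;

  for (i = 0; i < words; i++) {
    a = _mm_crc32_u64(a, p[i]);                  /* block 1 */
    b = _mm_crc32_u64(b, p[i + words]);          /* block 2 */
    c = _mm_crc32_u64(c, p[i + 2 * words]);      /* block 3 */
  }
  *c1 = (uint32_t) a;
  *c2 = (uint32_t) b;
  *c3 = (uint32_t) c;
}

int main(void) {
  static uint64_t storage[3 * 64];               /* three 512-byte blocks */
  uint32_t c1 = 0, c2 = 0, c3 = 0;               /* raw register values; no init/final xor */
  memset(storage, 0xab, sizeof(storage));
  crc32c_three_blocks((const uint8_t *) storage, 512, &c1, &c2, &c3);
  printf("%08x %08x %08x\n", c1, c2, c3);
  return 0;
}

On SSE4.2-capable CPUs the interleaved loop can approach one crc32 instruction
per cycle, which is the effect the pipelined_crc32c() comment above describes.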

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh Wed Nov  2 05:34:31 2011
@@ -55,6 +55,16 @@ usage: $0 <parameters>
      --dfs-support-append=false|true                                 Enable append
      --hadoop-proxy-users='user1:groups:hosts;user2:groups:hosts'    Setup proxy users for hadoop
      --hbase-user=hbase                                              User which hbase is running as. Defaults to hbase
+     --mapreduce-cluster-mapmemory-mb=memory                         Virtual memory of a map slot for the MR framework. Defaults to -1
+     --mapreduce-cluster-reducememory-mb=memory                      Virtual memory of a reduce slot for the MR framework. Defaults to -1
+     --mapreduce-jobtracker-maxmapmemory-mb=memory                   Maximum virtual memory of a single map task. Defaults to -1
+                                                                     This value should be set to (mapreduce.cluster.mapmemory.mb * mapreduce.tasktracker.map.tasks.maximum)
+     --mapreduce-jobtracker-maxreducememory-mb=memory                Maximum virtual memory of a single reduce task. Defaults to -1
+                                                                     This value should be set to (mapreduce.cluster.reducememory.mb * mapreduce.tasktracker.reduce.tasks.maximum)
+     --mapreduce-map-memory-mb=memory                                Virtual memory of a single map slot for a job. Defaults to -1
+                                                                     This value should be <= mapred.cluster.max.map.memory.mb
+     --mapreduce-reduce-memory-mb=memory                             Virtual memory of a single reduce slot for a job. Defaults to -1
+                                                                     This value should be <= mapred.cluster.max.reduce.memory.mb
   "
   exit 1
 }
@@ -139,6 +149,7 @@ function addPropertyToXMLConf
 #########################################
 function setupProxyUsers
 {
+  local conf_file="${HADOOP_CONF_DIR}/core-site.xml"
   #if hadoop proxy users are sent, setup hadoop proxy
   if [ ! -z $HADOOP_PROXY_USERS ]
   then
@@ -156,10 +167,10 @@ function setupProxyUsers
       #determine the property names and values
       proxy_groups_property="hadoop.proxyuser.${user}.groups"
       proxy_groups_val="$groups"
-      addPropertyToXMLConf "${HADOOP_CONF_DIR}/hdfs-site.xml" "$proxy_groups_property" "$proxy_groups_val"
+      addPropertyToXMLConf "$conf_file" "$proxy_groups_property" "$proxy_groups_val"
       proxy_hosts_property="hadoop.proxyuser.${user}.hosts"
       proxy_hosts_val="$hosts"
-      addPropertyToXMLConf "${HADOOP_CONF_DIR}/hdfs-site.xml" "$proxy_hosts_property" "$proxy_hosts_val"
+      addPropertyToXMLConf "$conf_file" "$proxy_hosts_property" "$proxy_hosts_val"
       IFS=';'
     done
     IFS=$oldIFS
@@ -198,6 +209,12 @@ OPTS=$(getopt \
   -l 'hadoop-proxy-users:' \
   -l 'dfs-support-append:' \
   -l 'hbase-user:' \
+  -l 'mapreduce-cluster-mapmemory-mb:' \
+  -l 'mapreduce-cluster-reducememory-mb:' \
+  -l 'mapreduce-jobtracker-maxmapmemory-mb:' \
+  -l 'mapreduce-jobtracker-maxreducememory-mb:' \
+  -l 'mapreduce-map-memory-mb:' \
+  -l 'mapreduce-reduce-memory-mb:' \
   -o 'h' \
   -- "$@") 
   
@@ -333,6 +350,30 @@ while true ; do
       HBASE_USER=$2; shift 2
       AUTOMATED=1
       ;;
+    --mapreduce-cluster-mapmemory-mb)
+      MAPREDUCE_CLUSTER_MAPMEMORY_MB=$2; shift 2
+      AUTOMATED=1
+      ;;
+    --mapreduce-cluster-reducememory-mb)
+      MAPREDUCE_CLUSTER_REDUCEMEMORY_MB=$2; shift 2
+      AUTOMATED=1
+      ;;
+    --mapreduce-jobtracker-maxmapmemory-mb)
+      MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB=$2; shift 2
+      AUTOMATED=1
+      ;;
+    --mapreduce-jobtracker-maxreducememory-mb)
+      MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB=$2; shift 2
+      AUTOMATED=1
+      ;;
+    --mapreduce-map-memory-mb)
+      MAPREDUCE_MAP_MEMORY_MB=$2; shift 2
+      AUTOMATED=1
+      ;;
+    --mapreduce-reduce-memory-mb)
+      MAPREDUCE_REDUCE_MEMORY_MB=$2; shift 2
+      AUTOMATED=1
+      ;;
     --)
       shift ; break
       ;;
@@ -364,6 +405,12 @@ HADOOP_MR_USER=${HADOOP_MR_USER:-mr}
 DFS_WEBHDFS_ENABLED=${DFS_WEBHDFS_ENABLED:-false}
 DFS_SUPPORT_APPEND=${DFS_SUPPORT_APPEND:-false}
 HBASE_USER=${HBASE_USER:-hbase}
+MAPREDUCE_CLUSTER_MAPMEMORY_MB=${MAPREDUCE_CLUSTER_MAPMEMORY_MB:--1}
+MAPREDUCE_CLUSTER_REDUCEMEMORY_MB=${MAPREDUCE_CLUSTER_REDUCEMEMORY_MB:--1}
+MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB=${MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB:--1}
+MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB=${MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB:--1}
+MAPREDUCE_MAP_MEMORY_MB=${MAPREDUCE_MAP_MEMORY_MB:--1}
+MAPREDUCE_REDUCE_MEMORY_MB=${MAPREDUCE_REDUCE_MEMORY_MB:--1}
 KEYTAB_DIR=${KEYTAB_DIR:-/etc/security/keytabs}
 HDFS_KEYTAB=${HDFS_KEYTAB:-/home/hdfs/hdfs.keytab}
 MR_KEYTAB=${MR_KEYTAB:-/home/mr/mr.keytab}
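
Editorial note: the relationships spelled out in the new help text are easiest
to see with concrete numbers. A hypothetical invocation of just the new options
might look like the following; the numbers are placeholders, not
recommendations, and any remaining parameters of hadoop-setup-conf.sh are
assumed to be left at their defaults or supplied separately.

# With mapreduce.tasktracker.map.tasks.maximum=4 and
# mapreduce.tasktracker.reduce.tasks.maximum=2 (assumed values):
#   maxmapmemory    = 1536 MB * 4 = 6144 MB
#   maxreducememory = 2048 MB * 2 = 4096 MB
# The per-job values (1536/2048) stay within the cluster maxima.
hadoop-setup-conf.sh \
  --mapreduce-cluster-mapmemory-mb=1536 \
  --mapreduce-cluster-reducememory-mb=2048 \
  --mapreduce-jobtracker-maxmapmemory-mb=6144 \
  --mapreduce-jobtracker-maxreducememory-mb=4096 \
  --mapreduce-map-memory-mb=1536 \
  --mapreduce-reduce-memory-mb=2048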

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-hdfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-hdfs.sh?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-hdfs.sh (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-hdfs.sh Wed Nov  2 05:34:31 2011
@@ -70,6 +70,10 @@ while true ; do
       HADOOP_MR_USER=$2; shift 2
       AUTOMATED=1
       ;;
+    --yarn-user)
+      HADOOP_YARN_USER=$2; shift 2
+      AUTOMATED=1
+      ;;
     --hdfs-user-keytab)
       HDFS_KEYTAB=$2; shift 2
       AUTOMATED=1
@@ -91,6 +95,7 @@ done
 
 HADOOP_GROUP=${HADOOP_GROUP:-hadoop}
 HADOOP_HDFS_USER=${HADOOP_HDFS_USER:-hdfs}
+HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
 HADOOP_MAPREDUCE_USER=${HADOOP_MR_USER:-mapred}
 
 if [ "${KERBEROS_REALM}" != "" ]; then

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh Wed Nov  2 05:34:31 2011
@@ -44,12 +44,12 @@ done
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"
-HADOOP_JOBTRACKER_OPTS="-Dsecurity.audit.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dmapred.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"
-HADOOP_TASKTRACKER_OPTS="-Dsecurity.audit.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"
-HADOOP_DATANODE_OPTS="-Dsecurity.audit.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"
+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"
+HADOOP_JOBTRACKER_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dmapred.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"
+HADOOP_TASKTRACKER_OPTS="-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"
+HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-policy.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-policy.xml?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-policy.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-policy.xml Wed Nov  2 05:34:31 2011
@@ -85,6 +85,7 @@
     A special value of "*" means all users are allowed.</description>
   </property>
 
+ 
   <property>
     <name>security.job.submission.protocol.acl</name>
     <value>*</value>
@@ -124,7 +125,7 @@
     users are allowed.</description>
   </property>
 
-<property>
+  <property>
     <name>security.refresh.policy.protocol.acl</name>
     <value>${HADOOP_HDFS_USER}</value>
     <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
@@ -135,5 +136,85 @@
   </property>
 
 
+  <!-- YARN Protocols -->
+
+  <property>
+    <name>security.resourcetracker.protocol.acl</name>
+    <value>${HADOOP_YARN_USER}</value>
+    <description>ACL for ResourceTracker protocol, used by the
+    ResourceManager and NodeManager to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>${HADOOP_YARN_USER}</value>
+    <description>ACL for RMAdminProtocol, for admin commands. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.resourcemanager.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientRMProtocol, used by the ResourceManager 
+    and applications submission clients to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationmaster.resourcemanager.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for AMRMProtocol, used by the ResourceManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.containermanager.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ContainerManager protocol, used by the NodeManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.resourcelocalizer.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceLocalizer protocol, used by the NodeManager 
+    and ResourceLocalizer to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for MRClientProtocol, used by job clients to
+    communicate with the MR ApplicationMaster to query job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
 
 </configuration>

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties Wed Nov  2 05:34:31 2011
@@ -81,7 +81,8 @@ log4j.appender.TLA.layout.ConversionPatt
 #
 #Security appender
 #
-security.audit.logger=INFO,console
+hadoop.security.logger=INFO,console
+log4j.category.SecurityLogger=${hadoop.security.logger}
 hadoop.security.log.file=SecurityAuth.audit
 log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
 log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
@@ -89,9 +90,6 @@ log4j.appender.DRFAS.layout=org.apache.l
 log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
 
-#new logger
-# Define some default values that can be overridden by system properties
-hadoop.security.logger=INFO,console
 
 #
 # hdfs audit logging

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/mapred-site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/mapred-site.xml?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/mapred-site.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/mapred-site.xml Wed Nov  2 05:34:31 2011
@@ -280,4 +280,34 @@
     <name>mapred.jobtracker.retirejob.interval</name>
     <value>0</value>
   </property>
+
+  <property>
+    <name>mapreduce.cluster.mapmemory.mb</name>
+    <value>${MAPREDUCE_CLUSTER_MAPMEMORY_MB}</value>
+  </property>
+
+  <property>
+    <name>mapreduce.cluster.reducememory.mb</name>
+    <value>${MAPREDUCE_CLUSTER_REDUCEMEMORY_MB}</value>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.maxmapmemory.mb</name>
+    <value>${MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB}</value>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.maxreducememory.mb</name>
+    <value>${MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB}</value>
+  </property>
+
+  <property>
+    <name>mapreduce.map.memory.mb</name>
+    <value>${MAPREDUCE_MAP_MEMORY_MB}</value>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.memory.mb</name>
+    <value>${MAPREDUCE_REDUCE_MEMORY_MB}</value>
+  </property>
 </configuration>

Propchange: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Nov  2 05:34:31 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:1152502-1179483
+/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:1152502-1196451
 /hadoop/core/branches/branch-0.19/core/src/test/core:713112
 /hadoop/core/trunk/src/test/core:776175-785643,785929-786278

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java Wed Nov  2 05:34:31 2011
@@ -180,7 +180,7 @@ public class CLITestHelper {
           LOG.info("         Comparision result:   [" + 
                    (resultBoolean ? "pass" : "fail") + "]");
           LOG.info("            Expected output:   [" + 
-                   cd.getExpectedOutput() + "]");
+                   expandCommand(cd.getExpectedOutput()) + "]");
           LOG.info("              Actual output:   [" + 
                    cd.getActualOutput() + "]");
         }
@@ -290,7 +290,7 @@ public class CLITestHelper {
           comparatorType);
         ComparatorBase comp = (ComparatorBase) comparatorClass.newInstance();
         compareOutput = comp.compare(cmdResult.getCommandOutput(), 
-          compdata.getExpectedOutput());
+          expandCommand(compdata.getExpectedOutput()));
       } catch (Exception e) {
         LOG.info("Error in instantiating the comparator" + e);
       }

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java Wed Nov  2 05:34:31 2011
@@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.ftpserver.command.impl.STAT;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -301,7 +302,7 @@ public class TestFsShellReturnCode {
     // arguments is valid - fsshell should work
     FsShell shell = new FsShell();
     Configuration conf = new Configuration();
-    FsConfig.setDefaultFS(conf, "hhhh://doesnotexist/");
+    conf.set(FS_DEFAULT_NAME_KEY, "hhhh://doesnotexist/");
     shell.setConf(conf);
     String [] args = new String[2];
     args[0] = "-ls";

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java Wed Nov  2 05:34:31 2011
@@ -17,80 +17,180 @@
  */
 package org.apache.hadoop.fs.shell;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.Arrays;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 public class TestPathData {
   protected static Configuration conf;
   protected static FileSystem fs;
   protected static String dirString;
-  protected static Path dir;
+  protected static Path testDir;
   protected static PathData item;
-
+  
+  protected static String[] d1Paths =
+    new String[] { "d1/f1", "d1/f1.1", "d1/f2" };
+  protected static String[] d2Paths =
+    new String[] { "d2/f3" };
+        
   @BeforeClass
   public static void initialize() throws Exception {
     conf = new Configuration();
-    fs = FileSystem.getLocal(conf); 
+    fs = FileSystem.getLocal(conf);
+    testDir = new Path(
+        System.getProperty("test.build.data", "build/test/data") + "/testPD"
+    );
+    // don't want scheme on the path, just an absolute path
+    testDir = new Path(fs.makeQualified(testDir).toUri().getPath());
+    FileSystem.setDefaultUri(conf, fs.getUri());    
+    fs.setWorkingDirectory(testDir);
+    fs.mkdirs(new Path("d1"));
+    fs.createNewFile(new Path("d1", "f1"));
+    fs.createNewFile(new Path("d1", "f1.1"));
+    fs.createNewFile(new Path("d1", "f2"));
+    fs.mkdirs(new Path("d2"));
+    fs.create(new Path("d2","f3"));
   }
 
   @Test
-  public void testWithFsAndPath() throws Exception {
-    dirString = "/tmp";
-    dir = new Path(dirString);
-    item = new PathData(fs, dir);
+  public void testWithDirStringAndConf() throws Exception {
+    dirString = "d1";
+    item = new PathData(dirString, conf);
     checkPathData();
-  }
 
-  @Test
-  public void testWithStringAndConf() throws Exception {
-    dirString = "/tmp";
-    dir = new Path(dirString);
+    // properly implementing symlink support in various commands will require
+    // trailing slashes to be retained
+    dirString = "d1/";
     item = new PathData(dirString, conf);
     checkPathData();
   }
 
   @Test
   public void testUnqualifiedUriContents() throws Exception {
-    dirString = "/tmp";
+    dirString = "d1";
     item = new PathData(dirString, conf);
     PathData[] items = item.getDirectoryContents();
-    for (PathData item : items) {
-      assertTrue(item.toString().startsWith(dirString));
-    }
+    assertEquals(
+        sortedString("d1/f1", "d1/f1.1", "d1/f2"),
+        sortedString(items)
+    );
   }
 
   @Test
   public void testQualifiedUriContents() throws Exception {
-    dirString = "file:/tmp";
+    dirString = fs.makeQualified(new Path("d1")).toString();
     item = new PathData(dirString, conf);
     PathData[] items = item.getDirectoryContents();
-    for (PathData item : items) {
-      assertTrue(item.toString().startsWith(dirString));
-    }
+    assertEquals(
+        sortedString(dirString+"/f1", dirString+"/f1.1", dirString+"/f2"),
+        sortedString(items)
+    );
+  }
+
+  @Test
+  public void testCwdContents() throws Exception {
+    dirString = Path.CUR_DIR;
+    item = new PathData(dirString, conf);
+    PathData[] items = item.getDirectoryContents();
+    assertEquals(
+        sortedString("d1", "d2"),
+        sortedString(items)
+    );
+  }
+
+
+  @Test
+  public void testToFile() throws Exception {
+    item = new PathData(".", conf);
+    assertEquals(new File(testDir.toString()), item.toFile());
+    item = new PathData("d1/f1", conf);
+    assertEquals(new File(testDir+"/d1/f1"), item.toFile());
+    item = new PathData(testDir+"/d1/f1", conf);
+    assertEquals(new File(testDir+"/d1/f1"), item.toFile());
+  }
+
+  @Test
+  public void testAbsoluteGlob() throws Exception {
+    PathData[] items = PathData.expandAsGlob(testDir+"/d1/f1*", conf);
+    assertEquals(
+        sortedString(testDir+"/d1/f1", testDir+"/d1/f1.1"),
+        sortedString(items)
+    );
+  }
+
+  @Test
+  public void testRelativeGlob() throws Exception {
+    PathData[] items = PathData.expandAsGlob("d1/f1*", conf);
+    assertEquals(
+        sortedString("d1/f1", "d1/f1.1"),
+        sortedString(items)
+    );
+  }
+
+  @Test
+  public void testRelativeGlobBack() throws Exception {
+    fs.setWorkingDirectory(new Path("d1"));
+    PathData[] items = PathData.expandAsGlob("../d2/*", conf);
+    assertEquals(
+        sortedString("../d2/f3"),
+        sortedString(items)
+    );
   }
 
   @Test
   public void testWithStringAndConfForBuggyPath() throws Exception {
     dirString = "file:///tmp";
-    dir = new Path(dirString);
+    testDir = new Path(dirString);
     item = new PathData(dirString, conf);
     // this may fail some day if Path is fixed to not crunch the uri
     // if the authority is null, however we need to test that the PathData
     // toString() returns the given string, while Path toString() does
     // the crunching
-    assertEquals("file:/tmp", dir.toString());
+    assertEquals("file:/tmp", testDir.toString());
     checkPathData();
   }
 
   public void checkPathData() throws Exception {
-    assertEquals(fs, item.fs);
-    assertEquals(dirString, item.toString());
-    assertEquals(dir, item.path);
-    assertTrue(item.stat != null);
-    assertTrue(item.stat.isDirectory());
+    assertEquals("checking fs", fs, item.fs);
+    assertEquals("checking string", dirString, item.toString());
+    assertEquals("checking path",
+        fs.makeQualified(new Path(item.toString())), item.path
+    );
+    assertTrue("checking exist", item.stat != null);
+    assertTrue("checking isDir", item.stat.isDirectory());
+  }
+  
+  /* JUnit does a lousy job of comparing arrays: if the array lengths differ,
+   * it just says so without showing the contents. This sorts the paths and
+   * builds a string of "i:<value>, ..." entries suitable for a string compare.
+   */
+  private static String sortedString(Object ... list) {
+    String[] strings = new String[list.length];
+    for (int i=0; i < list.length; i++) {
+      strings[i] = String.valueOf(list[i]);
+    }
+    Arrays.sort(strings);
+    
+    StringBuilder result = new StringBuilder();
+    for (int i=0; i < strings.length; i++) {
+      if (result.length() > 0) {
+        result.append(", ");
+      }
+      result.append(i+":<"+strings[i]+">");
+    }
+    return result.toString();
+  }
+  
+  private static String sortedString(PathData ... items) {
+    return sortedString((Object[])items);
   }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java Wed Nov  2 05:34:31 2011
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.net.URISyntaxException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -33,6 +34,8 @@ import org.apache.hadoop.io.DataOutputBu
 
 import org.junit.AfterClass;
 import org.junit.Test;
+import org.mockito.Mockito;
+
 import static org.junit.Assert.*;
 
 /**
@@ -81,6 +84,27 @@ public class TestViewfsFileStatus {
     assertEquals(content.length, deSer.getLen());
   }
 
+  // Tests that ViewFileSystem.getFileChecksum calls res.targetFileSystem
+  // .getFileChecksum with res.remainingPath and not with f
+  @Test
+  public void testGetFileChecksum() throws IOException {
+    FileSystem mockFS = Mockito.mock(FileSystem.class);
+    InodeTree.ResolveResult<FileSystem> res =
+      new InodeTree.ResolveResult<FileSystem>(null, mockFS , null,
+        new Path("someFile"));
+    @SuppressWarnings("unchecked")
+    InodeTree<FileSystem> fsState = Mockito.mock(InodeTree.class);
+    Mockito.when(fsState.resolve("/tmp/someFile", true)).thenReturn(res);
+    ViewFileSystem vfs = Mockito.mock(ViewFileSystem.class);
+    vfs.fsState = fsState;
+
+    Mockito.when(vfs.getFileChecksum(new Path("/tmp/someFile")))
+      .thenCallRealMethod();
+    vfs.getFileChecksum(new Path("/tmp/someFile"));
+
+    Mockito.verify(mockFS).getFileChecksum(new Path("someFile"));
+  }
+
   @AfterClass
   public static void cleanup() throws IOException {
     FileUtil.fullyDelete(TEST_DIR);

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java Wed Nov  2 05:34:31 2011
@@ -29,13 +29,15 @@ import java.net.URI;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.FileContextTestHelper.fileType;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileContextTestHelper.fileType;
+import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.viewfs.ViewFs.MountPoint;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.Token;
@@ -43,6 +45,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 
 /**
@@ -408,6 +411,27 @@ public class ViewFsBaseTest {
     }
   }
   
+  @Test
+  public void testGetFileChecksum() throws AccessControlException
+    , UnresolvedLinkException, IOException {
+    AbstractFileSystem mockAFS = Mockito.mock(AbstractFileSystem.class);
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+      new InodeTree.ResolveResult<AbstractFileSystem>(null, mockAFS , null,
+        new Path("someFile"));
+    @SuppressWarnings("unchecked")
+    InodeTree<AbstractFileSystem> fsState = Mockito.mock(InodeTree.class);
+    Mockito.when(fsState.resolve(Mockito.anyString()
+      , Mockito.anyBoolean())).thenReturn(res);
+    ViewFs vfs = Mockito.mock(ViewFs.class);
+    vfs.fsState = fsState;
+
+    Mockito.when(vfs.getFileChecksum(new Path("/tmp/someFile")))
+      .thenCallRealMethod();
+    vfs.getFileChecksum(new Path("/tmp/someFile"));
+
+    Mockito.verify(mockAFS).getFileChecksum(new Path("someFile"));
+  }
+
   @Test(expected=FileNotFoundException.class) 
   public void testgetFSonDanglingLink() throws IOException {
     fcView.getFileStatus(new Path("/danglingLink"));

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java Wed Nov  2 05:34:31 2011
@@ -19,6 +19,7 @@ package org.apache.hadoop.io.nativeio;
 
 import java.io.File;
 import java.io.FileDescriptor;
+import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicReference;
@@ -210,6 +211,66 @@ public class TestNativeIO {
     assertPermissions(toChmod, 0644);
   }
 
+
+  @Test
+  public void testPosixFadvise() throws Exception {
+    FileInputStream fis = new FileInputStream("/dev/zero");
+    try {
+      NativeIO.posix_fadvise(fis.getFD(), 0, 0,
+                             NativeIO.POSIX_FADV_SEQUENTIAL);
+    } catch (UnsupportedOperationException uoe) {
+      // we should just skip the unit test on machines where we don't
+      // have fadvise support
+      assumeTrue(false);
+    } finally {
+      fis.close();
+    }
+
+    try {
+      NativeIO.posix_fadvise(fis.getFD(), 0, 1024,
+                             NativeIO.POSIX_FADV_SEQUENTIAL);
+
+      fail("Did not throw on bad file");
+    } catch (NativeIOException nioe) {
+      assertEquals(Errno.EBADF, nioe.getErrno());
+    }
+    
+    try {
+      NativeIO.posix_fadvise(null, 0, 1024,
+                             NativeIO.POSIX_FADV_SEQUENTIAL);
+
+      fail("Did not throw on null file");
+    } catch (NullPointerException npe) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testSyncFileRange() throws Exception {
+    FileOutputStream fos = new FileOutputStream(
+      new File(TEST_DIR, "testSyncFileRange"));
+    try {
+      fos.write("foo".getBytes());
+      NativeIO.sync_file_range(fos.getFD(), 0, 1024,
+                               NativeIO.SYNC_FILE_RANGE_WRITE);
+      // no way to verify that this actually has synced,
+      // but if it doesn't throw, we can assume it worked
+    } catch (UnsupportedOperationException uoe) {
+      // we should just skip the unit test on machines where we don't
+      // have sync_file_range support
+      assumeTrue(false);
+    } finally {
+      fos.close();
+    }
+    try {
+      NativeIO.sync_file_range(fos.getFD(), 0, 1024,
+                               NativeIO.SYNC_FILE_RANGE_WRITE);
+      fail("Did not throw on bad file");
+    } catch (NativeIOException nioe) {
+      assertEquals(Errno.EBADF, nioe.getErrno());
+    }
+  }
+
   private void assertPermissions(File f, int expected) throws IOException {
     FileSystem localfs = FileSystem.getLocal(new Configuration());
     FsPermission perms = localfs.getFileStatus(

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java Wed Nov  2 05:34:31 2011
@@ -583,6 +583,12 @@ public class TestIPC {
         NetworkTraces.RESPONSE_TO_HADOOP_0_21_0_RPC);
   }
   
+  @Test
+  public void testHttpGetResponse() throws Exception {
+    doIpcVersionTest("GET / HTTP/1.0\r\n\r\n".getBytes(),
+        Server.RECEIVED_HTTP_REQ_RESPONSE.getBytes());
+  }
+  
   private void doIpcVersionTest(
       byte[] requestData,
       byte[] expectedResponse) throws Exception {

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/jmx/TestJMXJsonServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/jmx/TestJMXJsonServlet.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/jmx/TestJMXJsonServlet.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/jmx/TestJMXJsonServlet.java Wed Nov  2 05:34:31 2011
@@ -51,7 +51,7 @@ public class TestJMXJsonServlet extends 
     assertTrue("'"+p+"' does not match "+value, m.find());
   }
   
-  @Test public void testQury() throws Exception {
+  @Test public void testQuery() throws Exception {
     String result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Runtime"));
     LOG.info("/jmx?qry=java.lang:type=Runtime RESULT: "+result);
     assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Runtime\"", result);

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java Wed Nov  2 05:34:31 2011
@@ -163,6 +163,27 @@ public class TestNetUtils {
     assertRemoteDetailsIncluded(wrapped);
     assertInException(wrapped, "/UnknownHost");
   }
+  
+  @Test
+  public void testCreateSocketAddress() throws Throwable {
+    InetSocketAddress addr = NetUtils.createSocketAddr(
+        "127.0.0.1:12345", 1000, "myconfig");
+    assertEquals("127.0.0.1", addr.getAddress().getHostAddress());
+    assertEquals(12345, addr.getPort());
+    
+    addr = NetUtils.createSocketAddr(
+        "127.0.0.1", 1000, "myconfig");
+    assertEquals("127.0.0.1", addr.getAddress().getHostAddress());
+    assertEquals(1000, addr.getPort());
+
+    try {
+      addr = NetUtils.createSocketAddr(
+          "127.0.0.1:blahblah", 1000, "myconfig");
+      fail("Should have failed to parse bad port");
+    } catch (IllegalArgumentException iae) {
+      assertInException(iae, "myconfig");
+    }
+  }
 
   private void assertRemoteDetailsIncluded(IOException wrapped)
       throws Throwable {

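A short sketch of the overload the new test covers: the third argument names the configuration property being parsed, so a malformed value yields an IllegalArgumentException that mentions that property. The host names and property name below are illustrative only.

    import java.net.InetSocketAddress;
    import org.apache.hadoop.net.NetUtils;

    public class CreateSocketAddrSketch {
      public static void main(String[] args) {
        // Explicit port in the string wins over the default port argument.
        InetSocketAddress a = NetUtils.createSocketAddr(
            "namenode.example.com:8020", 9000, "fs.defaultFS");

        // No port in the string, so the default (9000) is used.
        InetSocketAddress b = NetUtils.createSocketAddr(
            "namenode.example.com", 9000, "fs.defaultFS");

        System.out.println(a + " / " + b);
      }
    }
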
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java Wed Nov  2 05:34:31 2011
@@ -360,6 +360,8 @@ public class TestDelegationToken {
         byte[] storedPassword = dtSecretManager.retrievePassword(id);
         byte[] password = dtSecretManager.createPassword(id, key);
         Assert.assertTrue(Arrays.equals(password, storedPassword));
+        // verify via the secret manager API
+        dtSecretManager.verifyToken(id, password);
       }
     } finally {
       dtSecretManager.stopThreads();

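Sketch of the added verification path, written as it would sit inside the test's loop above: rather than re-deriving the password and comparing byte arrays, the caller hands the identifier and password back to the secret manager. It is assumed here (not shown in this hunk) that verifyToken throws an InvalidToken-style exception on mismatch and returns silently when the pair is valid.

    // id is the delegation token identifier obtained earlier from the
    // secret manager; password was stored alongside the token.
    byte[] password = dtSecretManager.retrievePassword(id);
    dtSecretManager.verifyToken(id, password);  // no exception => token is valid
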
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/CoreTestDriver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/CoreTestDriver.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/CoreTestDriver.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/CoreTestDriver.java Wed Nov  2 05:34:31 2011
@@ -50,11 +50,14 @@ public class CoreTestDriver {
   }
 
   public void run(String argv[]) {
+    int exitCode = -1;
     try {
-      pgd.driver(argv);
+      exitCode = pgd.driver(argv);
     } catch(Throwable e) {
       e.printStackTrace();
     }
+
+    System.exit(exitCode);
   }
 
   public static void main(String argv[]){

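The pattern this change adopts, shown outside the diff for clarity: ProgramDriver.driver() returns an int status, and passing it to System.exit lets scripts invoking the test driver detect failures. The registered class and program name below are hypothetical.

    import org.apache.hadoop.util.ProgramDriver;

    public class ExampleTestDriver {
      // Trivial program to register; real drivers register their test classes here.
      public static class ExampleTest {
        public static void main(String[] args) {
          System.out.println("example test ran");
        }
      }

      public static void main(String[] args) throws Throwable {
        ProgramDriver pgd = new ProgramDriver();
        pgd.addClass("exampletest", ExampleTest.class, "An example test program.");
        int exitCode = -1;
        try {
          exitCode = pgd.driver(args);   // 0 on success, non-zero on failure
        } catch (Throwable t) {
          t.printStackTrace();
        }
        System.exit(exitCode);
      }
    }
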
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java Wed Nov  2 05:34:31 2011
@@ -21,11 +21,14 @@ package org.apache.hadoop.test;
 import static com.google.common.base.Preconditions.*;
 
 import org.hamcrest.Description;
+import org.junit.Assert;
 
 import static org.mockito.Mockito.*;
 import org.mockito.stubbing.Answer;
+import org.mockito.internal.matchers.GreaterThan;
 import org.mockito.invocation.InvocationOnMock;
-import static org.mockito.AdditionalMatchers.*;
+
+import org.mockito.ArgumentCaptor;
 import org.mockito.ArgumentMatcher;
 
 import org.apache.commons.logging.Log;
@@ -44,6 +47,7 @@ import static org.apache.hadoop.metrics2
 public class MetricsAsserts {
 
   final static Log LOG = LogFactory.getLog(MetricsAsserts.class);
+  private static final double EPSILON = 0.00001;
 
   public static MetricsSystem mockMetricsSystem() {
     MetricsSystem ms = mock(MetricsSystem.class);
@@ -139,7 +143,15 @@ public class MetricsAsserts {
    */
   public static void assertGauge(String name, int expected,
                                  MetricsRecordBuilder rb) {
-    verify(rb).addGauge(eqName(info(name, "")), eq(expected));
+    Assert.assertEquals("Bad value for metric " + name,
+        expected, getIntGauge(name, rb));
+  }
+
+  public static int getIntGauge(String name, MetricsRecordBuilder rb) {
+    ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(Integer.class);
+    verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
+    checkCaptured(captor, name);
+    return captor.getValue();
   }
 
   /**
@@ -150,9 +162,18 @@ public class MetricsAsserts {
    */
   public static void assertCounter(String name, int expected,
                                    MetricsRecordBuilder rb) {
-    verify(rb).addCounter(eqName(info(name, "")), eq(expected));
+    Assert.assertEquals("Bad value for metric " + name,
+        expected, getIntCounter(name, rb));
   }
 
+  public static int getIntCounter(String name, MetricsRecordBuilder rb) {
+    ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(
+        Integer.class);
+    verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture());
+    checkCaptured(captor, name);
+    return captor.getValue();
+  }
+  
   /**
    * Assert a long gauge metric as expected
    * @param name  of the metric
@@ -161,7 +182,15 @@ public class MetricsAsserts {
    */
   public static void assertGauge(String name, long expected,
                                  MetricsRecordBuilder rb) {
-    verify(rb).addGauge(eqName(info(name, "")), eq(expected));
+    Assert.assertEquals("Bad value for metric " + name,
+        expected, getLongGauge(name, rb));
+  }
+
+  public static long getLongGauge(String name, MetricsRecordBuilder rb) {
+    ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class);
+    verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
+    checkCaptured(captor, name);
+    return captor.getValue();
   }
 
    /**
@@ -172,7 +201,15 @@ public class MetricsAsserts {
    */
   public static void assertGauge(String name, double expected,
                                  MetricsRecordBuilder rb) {
-    verify(rb).addGauge(eqName(info(name, "")), eq(expected));
+    Assert.assertEquals("Bad value for metric " + name,
+        expected, getDoubleGauge(name, rb), EPSILON);
+  }
+
+  public static double getDoubleGauge(String name, MetricsRecordBuilder rb) {
+    ArgumentCaptor<Double> captor = ArgumentCaptor.forClass(Double.class);
+    verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
+    checkCaptured(captor, name);
+    return captor.getValue();
   }
 
   /**
@@ -183,7 +220,23 @@ public class MetricsAsserts {
    */
   public static void assertCounter(String name, long expected,
                                    MetricsRecordBuilder rb) {
-    verify(rb).addCounter(eqName(info(name, "")), eq(expected));
+    Assert.assertEquals("Bad value for metric " + name,
+        expected, getLongCounter(name, rb));
+  }
+
+  public static long getLongCounter(String name, MetricsRecordBuilder rb) {
+    ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class);
+    verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture());
+    checkCaptured(captor, name);
+    return captor.getValue();
+  }
+
+  /**
+   * Check that this metric was captured exactly once.
+   */
+  private static void checkCaptured(ArgumentCaptor<?> captor, String name) {
+    Assert.assertEquals("Expected exactly one metric for name " + name,
+        1, captor.getAllValues().size());
   }
 
   /**
@@ -238,7 +291,8 @@ public class MetricsAsserts {
    */
   public static void assertCounterGt(String name, long greater,
                                      MetricsRecordBuilder rb) {
-    verify(rb).addCounter(eqName(info(name, "")), gt(greater));
+    Assert.assertThat("Bad value for metric " + name, getLongCounter(name, rb),
+        new GreaterThan<Long>(greater));
   }
 
   /**
@@ -260,7 +314,8 @@ public class MetricsAsserts {
    */
   public static void assertGaugeGt(String name, double greater,
                                    MetricsRecordBuilder rb) {
-    verify(rb).addGauge(eqName(info(name, "")), gt(greater));
+    Assert.assertThat("Bad value for metric " + name, getDoubleGauge(name, rb),
+        new GreaterThan<Double>(greater));
   }
 
   /**

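For readers of the new helpers: the assertions now capture the actual value passed to the mocked MetricsRecordBuilder and compare it with JUnit, so failures report the observed value instead of a generic Mockito verification error. A sketch of a test using them is below; getMetrics is assumed to be the existing MetricsAsserts helper that snapshots a registered source, and the source and metric names are hypothetical.

    import static org.apache.hadoop.test.MetricsAsserts.*;
    import static org.junit.Assert.assertTrue;

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;
    import org.junit.Test;

    public class ExampleMetricsTest {
      @Test
      public void testRpcMetrics() {
        MetricsRecordBuilder rb = getMetrics("RpcActivityForPort8020");
        assertCounter("RpcQueueTimeNumOps", 10L, rb);  // exact long-counter match
        long sent = getLongCounter("SentBytes", rb);   // raw value for custom checks
        assertTrue("should have sent some bytes", sent > 0);
      }
    }
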
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java Wed Nov  2 05:34:31 2011
@@ -114,6 +114,26 @@ public class TestDataChecksum {
       assertTrue(ce.getMessage().contains("fake file"));
     }
   }
+  
+  @Test
+  public void testEquality() {
+    assertEquals(
+        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512),
+        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512));
+    assertFalse(
+        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512).equals(
+        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 1024)));
+    assertFalse(
+        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512).equals(
+        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32C, 512)));
+  }
+  
+  @Test
+  public void testToString() {
+    assertEquals("DataChecksum(type=CRC32, chunkSize=512)",
+        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512)
+          .toString());
+  }
 
   private static void corruptBufferOffset(ByteBuffer buf, int offset) {
     buf.put(offset, (byte)(buf.get(offset) + 1));    

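A small sketch of the object the new tests describe, under the assumption that DataChecksum implements java.util.zip.Checksum (so update and getValue are available); the constructor arguments and the toString output in the comment mirror the tests above.

    import org.apache.hadoop.util.DataChecksum;

    public class DataChecksumSketch {
      public static void main(String[] args) {
        DataChecksum sum = DataChecksum.newDataChecksum(
            DataChecksum.CHECKSUM_CRC32, 512);
        byte[] data = "hello, world".getBytes();
        sum.update(data, 0, data.length);     // Checksum interface method
        long crc = sum.getValue();            // CRC of the bytes fed so far
        System.out.println(sum);              // DataChecksum(type=CRC32, chunkSize=512)
        System.out.println(Long.toHexString(crc));
      }
    }
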