hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1124466 [3/3] - in /hadoop/hdfs/branches/HDFS-1073: ./ bin/ conf/ src/c++/libhdfs/ src/contrib/ src/contrib/hdfsproxy/ src/contrib/thriftfs/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/...
Date: Wed, 18 May 2011 23:44:25 GMT
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java Wed May 18 23:44:23 2011
@@ -20,16 +20,22 @@ package org.apache.hadoop.hdfs;
 
 import org.junit.Test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
+import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 
 public class TestDFSUtil {
@@ -72,4 +78,159 @@ public class TestDFSUtil {
     bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
     assertEquals(0, bs.length);
   }
+
+  /** 
+   * Test for
+   * {@link DFSUtil#getNameServiceIds(Configuration)}
+   * {@link DFSUtil#getNameServiceId(Configuration)}
+   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
+   */
+  @Test
+  public void testMultipleNamenodes() throws IOException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    
+    // Test - The configured nameserviceIds are returned
+    Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
+    Iterator<String> it = nameserviceIds.iterator();
+    assertEquals(2, nameserviceIds.size());
+    assertEquals("nn1", it.next().toString());
+    assertEquals("nn2", it.next().toString());
+    
+    // Tests default nameserviceId is returned
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    assertEquals("nn1", DFSUtil.getNameServiceId(conf));
+    
+    // Test - configured list of namenodes are returned
+    final String NN1_ADDRESS = "localhost:9000";
+    final String NN2_ADDRESS = "localhost:9001";
+    final String NN3_ADDRESS = "localhost:9002";
+    conf.set(DFSUtil.getNameServiceIdKey(
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
+    conf.set(DFSUtil.getNameServiceIdKey(
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
+    
+    Collection<InetSocketAddress> nnAddresses = 
+      DFSUtil.getNNServiceRpcAddresses(conf);
+    assertEquals(2, nnAddresses.size());
+    Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
+    assertEquals(2, nameserviceIds.size());
+    InetSocketAddress addr = iterator.next();
+    assertEquals("localhost", addr.getHostName());
+    assertEquals(9000, addr.getPort());
+    addr = iterator.next();
+    assertEquals("localhost", addr.getHostName());
+    assertEquals(9001, addr.getPort());
+    
+    // Test - can look up nameservice ID from service address
+    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
+    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
+        conf, testAddress1,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals("nn1", nameserviceId);
+    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
+    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
+        conf, testAddress2,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals("nn2", nameserviceId);
+    InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
+    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
+        conf, testAddress3,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertNull(nameserviceId);
+  }
+  
+  /** 
+   * Test for
+   * {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
+   */
+  @Test
+  public void testSingleNamenode() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    final String DEFAULT_ADDRESS = "localhost:9000";
+    final String NN2_ADDRESS = "localhost:9001";
+    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
+    
+    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
+    boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertTrue(isDefault);
+    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
+    isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertFalse(isDefault);
+  }
+  
+  /** Tests to ensure default namenode is used as fallback */
+  @Test
+  public void testDefaultNamenode() throws IOException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    final String hdfs_default = "hdfs://localhost:9999/";
+    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
+    // If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that 
+    // default namenode address is returned.
+    List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
+    assertEquals(1, addrList.size());
+    assertEquals(9999, addrList.get(0).getPort());
+  }
+  
+  /**
+   * Test to ensure nameservice specific keys in the configuration are
+   * copied to generic keys when the namenode starts.
+   */
+  @Test
+  public void testConfModification() throws IOException {
+    final HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    final String nameserviceId = DFSUtil.getNameServiceId(conf);
+    
+    // Set the nameservice specific keys with nameserviceId in the config key
+    for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+      // Note: value is same as the key
+      conf.set(DFSUtil.getNameServiceIdKey(key, nameserviceId), key);
+    }
+    
+    // Initialize generic keys from specific keys
+    NameNode.initializeGenericKeys(conf);
+    
+    // Retrieve the keys without nameserviceId and ensure generic keys are set
+    // to the correct value
+    for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+      assertEquals(key, conf.get(key));
+    }
+  }
+  
+  /**
+   * Tests that, given an empty configuration, an IOException is thrown from
+   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
+   * {@link DFSUtil#getBackupNodeAddresses(Configuration)}
+   * {@link DFSUtil#getSecondaryNameNodeAddresses(Configuration)}
+   */
+  @Test
+  public void testEmptyConf() {
+    HdfsConfiguration conf = new HdfsConfiguration(false);
+    try {
+      DFSUtil.getNNServiceRpcAddresses(conf);
+      fail("Expected IOException is not thrown");
+    } catch (IOException expected) {
+    }
+
+    try {
+      DFSUtil.getBackupNodeAddresses(conf);
+      fail("Expected IOException is not thrown");
+    } catch (IOException expected) {
+    }
+
+    try {
+      DFSUtil.getSecondaryNameNodeAddresses(conf);
+      fail("Expected IOException is not thrown");
+    } catch (IOException expected) {
+    }
+  }
 }
\ No newline at end of file
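
The tests above rely on the federation convention of deriving a per-namenode key by suffixing a generic key with a nameservice ID. A minimal sketch of that convention, assuming getNameServiceIdKey simply appends "." plus the ID; the literal key strings below are assumptions matching the DFSConfigKeys constants used in the test:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;

public class NameserviceKeyExample {
  // Assumed behavior of DFSUtil.getNameServiceIdKey, as implied by the
  // tests above: append "." plus the nameservice ID to a generic key.
  static String getNameServiceIdKey(String key, String nameserviceId) {
    return key + "." + nameserviceId;   // e.g. dfs.namenode.rpc-address.nn1
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("dfs.federation.nameservices", "nn1,nn2");
    conf.set(getNameServiceIdKey("dfs.namenode.rpc-address", "nn1"),
        "localhost:9000");
    // Resolve the suffixed key back to a socket address
    InetSocketAddress addr = NetUtils.createSocketAddr(
        conf.get(getNameServiceIdKey("dfs.namenode.rpc-address", "nn1")));
    System.out.println(addr.getHostName() + ":" + addr.getPort());
  }
}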

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java Wed May 18 23:44:23 2011
@@ -26,7 +26,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /**
 * This test ensures that all types of data node report work correctly.
@@ -77,9 +77,7 @@ public class TestDatanodeReport extends 
                    NUM_OF_DATANODES);
 
       Thread.sleep(5000);
-      FSNamesystemMetrics fsMetrics = 
-                     cluster.getNamesystem().getFSNamesystemMetrics();
-      assertEquals(1,fsMetrics.numExpiredHeartbeats.getCurrentIntervalValue());
+      assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
     }finally {
       cluster.shutdown();
     }
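
This hunk is representative of the metrics2 migration applied throughout this commit: instead of reading a counter field off a metrics object, tests fetch a named record snapshot and assert against it. A minimal sketch of the pattern, using only the calls and metric names visible in these diffs:

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;

public class MetricsAssertsPattern {
  void checkNamesystemMetrics() {
    // Fetch one snapshot of the "FSNamesystem" source...
    MetricsRecordBuilder rb = getMetrics("FSNamesystem");
    // ...then assert several metrics against it. Both names below
    // appear in this commit's diffs.
    assertCounter("ExpiredHeartbeats", 1, rb);
    assertGauge("BlocksTotal", 0L, rb);
  }
}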

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Wed May 18 23:44:23 2011
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.namenode.BackupNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -52,6 +53,9 @@ public class TestHDFSServerPorts extends
   
   // reset default 0.0.0.0 addresses in order to avoid IPv6 problem
   static final String THIS_HOST = getFullHostName() + ":0";
+  static {
+    DefaultMetricsSystem.setMiniClusterMode(true);
+  }
 
   Configuration config;
   File hdfsDir;
@@ -104,7 +108,7 @@ public class TestHDFSServerPorts extends
       NameNode.setServiceAddress(config, THIS_HOST);      
     }
     config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
-    GenericTestUtils.formatNamenode(config);
+    DFSTestUtil.formatNameNode(config);
 
     String[] args = new String[] {};
     // NameNode will modify config with the ports it bound to
@@ -262,7 +266,7 @@ public class TestHDFSServerPorts extends
       Configuration conf2 = new HdfsConfiguration(config);
       conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
           fileAsURI(new File(hdfsDir, "name2")).toString());
-      GenericTestUtils.formatNamenode(conf2);
+      DFSTestUtil.formatNameNode(conf2);
       boolean started = canStartNameNode(conf2);
       assertFalse(started); // should fail
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Wed May 18 23:44:23 2011
@@ -84,6 +84,10 @@ public class UpgradeUtilities {
   private static long datanodeStorageChecksum;
   // A checksum of the contents in blockpool storage directory
   private static long blockPoolStorageChecksum;
+  // A checksum of the contents in blockpool finalize storage directory
+  private static long blockPoolFinalizedStorageChecksum;
+  // A checksum of the contents in blockpool rbw storage directory
+  private static long blockPoolRbwStorageChecksum;
 
   /**
    * Initialize the data structures used by this class.  
@@ -107,7 +111,7 @@ public class UpgradeUtilities {
       createEmptyDirs(new String[] {datanodeStorage.toString()});
       
       // format and start NameNode and start DataNode
-      GenericTestUtils.formatNamenode(config);
+      DFSTestUtil.formatNameNode(config);
       cluster =  new MiniDFSCluster.Builder(config)
                                    .numDataNodes(1)
                                    .startupOption(StartupOption.REGULAR)
@@ -157,6 +161,14 @@ public class UpgradeUtilities {
     File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
         "current");
     blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir);
+    
+    File bpCurFinalizeDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
+        "current/"+DataStorage.STORAGE_DIR_FINALIZED);
+    blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE, bpCurFinalizeDir);
+    
+    File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
+        "current/"+DataStorage.STORAGE_DIR_RBW);
+    blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir);
   }
   
   // Private helper method that writes a file to the given file system.
@@ -232,6 +244,22 @@ public class UpgradeUtilities {
   }
   
   /**
+   * Return the checksum for the singleton master storage directory
+   * for finalized dir under block pool.
+   */
+  public static long checksumMasterBlockPoolFinalizedContents() {
+    return blockPoolFinalizedStorageChecksum;
+  }
+  
+  /**
+   * Return the checksum for the singleton master storage directory
+   * for rbw dir under block pool.
+   */
+  public static long checksumMasterBlockPoolRbwContents() {
+    return blockPoolRbwStorageChecksum;
+  }
+  
+  /**
    * Compute the checksum of all the files in the specified directory.
    * The contents of subdirectories are not included. This method provides
    * an easy way to ensure equality between the contents of two directories.
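
The two new accessors expose reference checksums computed once against the master block pool directories, so an upgrade test can compare a post-upgrade directory's checksum against them. A hedged usage sketch; the surrounding test scaffolding is an assumption for illustration:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.UpgradeUtilities;

class BlockPoolChecksumCheck {
  // Hypothetical verification step: the 'actual*' values would come from
  // re-running checksumContents(...) on the directories under test.
  void verifyBlockPoolDirs(long actualFinalized, long actualRbw) {
    assertEquals(UpgradeUtilities.checksumMasterBlockPoolFinalizedContents(),
        actualFinalized);
    assertEquals(UpgradeUtilities.checksumMasterBlockPoolRbwContents(),
        actualRbw);
  }
}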

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Wed May 18 23:44:23 2011
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
@@ -909,18 +909,17 @@ public class SimulatedFSDataset  impleme
 
     try {
       bean = new StandardMBean(this,FSDatasetMBean.class);
-      mbeanName = MBeanUtil.registerMBean("DataNode",
-          "FSDatasetState-" + storageId, bean);
+      mbeanName = MBeans.register("DataNode", "FSDatasetState-"+
+                                  storageId, bean);
     } catch (NotCompliantMBeanException e) {
-      e.printStackTrace();
+      DataNode.LOG.warn("Error registering FSDatasetState MBean", e);
     }
  
-    DataNode.LOG.info("Registered FSDatasetStatusMBean");
+    DataNode.LOG.info("Registered FSDatasetState MBean");
   }
 
   public void shutdown() {
-    if (mbeanName != null)
-      MBeanUtil.unregisterMBean(mbeanName);
+    if (mbeanName != null) MBeans.unregister(mbeanName);
   }
 
   public String getStorageInfo() {
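
The MBeanUtil-to-MBeans switch above follows the usual metrics2 MBean lifecycle: register at startup, keep the returned ObjectName, unregister at shutdown. A self-contained sketch of that lifecycle; DemoMBean is a stand-in interface for illustration, not the real FSDatasetMBean:

import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;
import javax.management.StandardMBean;
import org.apache.hadoop.metrics2.util.MBeans;

public class MBeanLifecycle {
  // Stand-in management interface; FSDatasetMBean plays this role above.
  public interface DemoMBean { int getValue(); }

  private ObjectName mbeanName;

  void start(String storageId) {
    try {
      StandardMBean bean = new StandardMBean(new DemoMBean() {
        public int getValue() { return 42; }
      }, DemoMBean.class);
      // Registers as "Hadoop:service=DataNode,name=FSDatasetState-<id>"
      mbeanName = MBeans.register("DataNode", "FSDatasetState-" + storageId, bean);
    } catch (NotCompliantMBeanException e) {
      // log and continue, as the patched code above does
    }
  }

  void shutdown() {
    if (mbeanName != null) MBeans.unregister(mbeanName);
  }
}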

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java Wed May 18 23:44:23 2011
@@ -43,7 +43,8 @@ public class TestDataNodeMXBean {
       DataNode datanode = datanodes.get(0);
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
-      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=DataNode,name=DataNodeInfo");
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
       Assert.assertEquals(datanode.getClusterId(), clusterId);
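
The ObjectName change here (and the matching one in TestNameNodeMXBean below) reflects the metrics2 naming convention "Hadoop:service=<Service>,name=<Bean>" replacing the old "HadoopInfo:type=<Bean>". A minimal sketch of querying such a bean through the platform MBean server, usable only inside a JVM that is running the DataNode:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class MXBeanQuery {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // metrics2-style name, as used in the updated test above
    ObjectName name = new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
    String clusterId = (String) mbs.getAttribute(name, "ClusterId");
    System.out.println("ClusterId = " + clusterId);
  }
}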

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java Wed May 18 23:44:23 2011
@@ -24,8 +24,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import static org.apache.hadoop.test.MetricsAsserts.*;
+
 import junit.framework.TestCase;
 
 public class TestDataNodeMetrics extends TestCase {
@@ -42,8 +44,8 @@ public class TestDataNodeMetrics extends
       List<DataNode> datanodes = cluster.getDataNodes();
       assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
-      DataNodeMetrics metrics = datanode.getMetrics();
-      assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
+      MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
+      assertCounter("BytesWritten", LONG_FILE_LEN, rb);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java Wed May 18 23:44:23 2011
@@ -19,18 +19,17 @@ package org.apache.hadoop.hdfs.server.da
 
 import java.io.File;
 import java.util.ArrayList;
-import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -149,15 +148,12 @@ public class TestDataNodeVolumeFailureRe
     /*
      * The metrics should confirm the volume failures.
      */
-    DataNodeMetrics metrics1 = dns.get(0).getMetrics();
-    DataNodeMetrics metrics2 = dns.get(1).getMetrics();
-    DataNodeMetrics metrics3 = dns.get(2).getMetrics();
-    assertEquals("Vol1 should report 1 failure",
-        1, metrics1.volumeFailures.getCurrentIntervalValue());
-    assertEquals("Vol2 should report 1 failure",
-        1, metrics2.volumeFailures.getCurrentIntervalValue());
-    assertEquals("Vol3 should have no failures",
-        0, metrics3.volumeFailures.getCurrentIntervalValue());
+    assertCounter("VolumeFailures", 1L, 
+        getMetrics(dns.get(0).getMetrics().name()));
+    assertCounter("VolumeFailures", 1L, 
+        getMetrics(dns.get(1).getMetrics().name()));
+    assertCounter("VolumeFailures", 0L, 
+        getMetrics(dns.get(2).getMetrics().name()));
 
     // Ensure we wait a sufficient amount of time
     assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
@@ -175,8 +171,8 @@ public class TestDataNodeVolumeFailureRe
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)3);
     assertTrue("DN3 should still be up", dns.get(2).isDatanodeUp());
-    assertEquals("Vol3 should report 1 failure",
-        1, metrics3.volumeFailures.getCurrentIntervalValue());
+    assertCounter("VolumeFailures", 1L, 
+        getMetrics(dns.get(2).getMetrics().name()));
 
     ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
     ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
@@ -211,9 +207,8 @@ public class TestDataNodeVolumeFailureRe
     DFSTestUtil.waitForDatanodeDeath(dns.get(2));
 
     // And report two failed volumes
-    metrics3 = dns.get(2).getMetrics();
-    assertEquals("DN3 should report 2 vol failures",
-        2, metrics3.volumeFailures.getCurrentIntervalValue());
+    assertCounter("VolumeFailures", 2L, 
+        getMetrics(dns.get(2).getMetrics().name()));
 
     // The NN considers the DN dead
     DFSTestUtil.waitForDatanodeStatus(ns, 2, 1, 2, 

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed May 18 23:44:23 2011
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.na
 import junit.framework.TestCase;
 import java.io.*;
 import java.net.URI;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.concurrent.Callable;
@@ -40,11 +39,10 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-
 import org.apache.hadoop.util.StringUtils;
  
 import org.mockito.Mockito;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /**
  * This class tests the creation and validation of a checkpoint.
@@ -308,16 +306,13 @@ public class TestEditLog extends TestCas
 
       // Now ask to sync edit from A, which was already batched in - thus
       // it should increment the batch count metric
-      NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
-      metrics.transactionsBatchedInSync = Mockito.mock(MetricsTimeVaryingInt.class);
-
       doCallLogSync(threadA, editLog);
       assertEquals("logSync from first thread shouldn't change txid",
         2, editLog.getSyncTxId());
 
       //Should have incremented the batch count exactly once
-      Mockito.verify(metrics.transactionsBatchedInSync,
-                    Mockito.times(1)).inc();
+      assertCounter("TransactionsBatchedInSync", 1L, 
+        getMetrics("NameNodeActivity"));
     } finally {
       threadA.shutdown();
       threadB.shutdown();

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java Wed May 18 23:44:23 2011
@@ -18,30 +18,31 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DU;
-import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.Test;
 
 public class TestEditLogFileOutputStream {
 
   @Test
   public void testPreallocation() throws IOException {
-    Configuration conf = new Configuration();
-    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
-    conf.set("dfs.http.address", "127.0.0.1:0");
-    GenericTestUtils.formatNamenode(conf);
-    NameNode nn = new NameNode(conf);
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+        .build();
 
-    StorageDirectory sd = nn.getFSImage().getStorage().getStorageDir(0);
+    StorageDirectory sd = cluster.getNameNode().getFSImage()
+      .getStorage().getStorageDir(0);
     File editLog = NNStorage.getEditFile(sd);
 
     assertEquals("Edit log should only be 4 bytes long",
@@ -49,7 +50,8 @@ public class TestEditLogFileOutputStream
     assertEquals("Edit log disk space used should be one block",
         4096, new DU(editLog, conf).getUsed());
 
-    nn.mkdirs("/tmp", new FsPermission((short)777), false);
+    cluster.getFileSystem().mkdirs(new Path("/tmp"),
+        new FsPermission((short)777));
 
     assertEquals("Edit log should be 1MB + 4 bytes long",
         (1024 * 1024) + 4, editLog.length());

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Wed May 18 23:44:23 2011
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.permission.*;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
@@ -334,7 +335,7 @@ public class TestEditLogRace {
   public void testSaveImageWhileSyncInProgress() throws Exception {
     Configuration conf = getConf();
     NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
-    GenericTestUtils.formatNamenode(conf);
+    DFSTestUtil.formatNameNode(conf);
     final FSNamesystem namesystem = new FSNamesystem(conf);
 
     try {
@@ -427,7 +428,7 @@ public class TestEditLogRace {
   public void testSaveRightBeforeSync() throws Exception {
     Configuration conf = getConf();
     NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
-    GenericTestUtils.formatNamenode(conf);
+    DFSTestUtil.formatNameNode(conf);
     final FSNamesystem namesystem = new FSNamesystem(conf);
 
     try {

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java Wed May 18 23:44:23 2011
@@ -22,6 +22,7 @@ import java.util.Arrays;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
@@ -36,7 +37,7 @@ public class TestNNThroughputBenchmark {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-    GenericTestUtils.formatNamenode(conf);
+    DFSTestUtil.formatNameNode(conf);
     String[] args = new String[] {"-op", "all"};
     NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
   }

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Wed May 18 23:44:23 2011
@@ -45,7 +45,8 @@ public class TestNameNodeMXBean {
       FSNamesystem fsn = cluster.getNameNode().namesystem;
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=NameNodeInfo");
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=NameNode,name=NameNodeInfo");
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
       Assert.assertEquals(fsn.getClusterId(), clusterId);

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java Wed May 18 23:44:23 2011
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 import junit.framework.TestCase;
@@ -61,7 +62,7 @@ public class TestReplicationPolicy exten
     try {
       FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
       CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-      GenericTestUtils.formatNamenode(CONF);
+      DFSTestUtil.formatNameNode(CONF);
       namenode = new NameNode(CONF);
     } catch (IOException e) {
       e.printStackTrace();

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Wed May 18 23:44:23 2011
@@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.ser
 import static org.junit.Assert.*;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
@@ -37,14 +38,15 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.log4j.Level;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
+import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -106,7 +108,7 @@ public class TestSaveNamespace {
   private void saveNamespaceWithInjectedFault(Fault fault) throws IOException {
     Configuration conf = getConf();
     NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
-    GenericTestUtils.formatNamenode(conf);
+    DFSTestUtil.formatNameNode(conf);
     FSNamesystem fsn = new FSNamesystem(conf);
 
     // Replace the FSImage with a spy
@@ -183,7 +185,7 @@ public class TestSaveNamespace {
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
 
     NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
-    GenericTestUtils.formatNamenode(conf);
+    DFSTestUtil.formatNameNode(conf);
     FSNamesystem fsn = new FSNamesystem(conf);
 
     // Replace the FSImage with a spy
@@ -267,12 +269,101 @@ public class TestSaveNamespace {
   public void testCrashWhileMoveLastCheckpoint() throws Exception {
     saveNamespaceWithInjectedFault(Fault.MOVE_LAST_CHECKPOINT);
   }
+ 
+
+  /**
+   * Test case where saveNamespace fails in all directories
+   * and then the NN shuts down. Here we should recover from the
+   * failed checkpoint by moving the directories back on next
+   * NN start. This is a regression test for HDFS-1921.
+   */
+  @Test
+  public void testFailedSaveNamespace() throws Exception {
+    doTestFailedSaveNamespace(false);
+  }
+
+  /**
+   * Test case where saveNamespace fails in all directories, but then
+   * the operator restores the directories and calls it again.
+   * This should leave the NN in a clean state for next start.
+   */
+  @Test
+  public void testFailedSaveNamespaceWithRecovery() throws Exception {
+    doTestFailedSaveNamespace(true);
+  }
+
+  /**
+   * Injects a failure on all storage directories while saving namespace.
+   *
+   * @param restoreStorageAfterFailure if true, will try to save again after
+   *   clearing the failure injection
+   */
+  public void doTestFailedSaveNamespace(boolean restoreStorageAfterFailure)
+  throws Exception {
+    Configuration conf = getConf();
+    NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
+    DFSTestUtil.formatNameNode(conf);
+    FSNamesystem fsn = new FSNamesystem(conf);
+
+    // Replace the FSImage with a spy
+    final FSImage originalImage = fsn.dir.fsImage;
+    NNStorage storage = originalImage.getStorage();
+    storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
+
+    NNStorage spyStorage = spy(storage);
+    originalImage.storage = spyStorage;
+    FSImage spyImage = spy(originalImage);
+    fsn.dir.fsImage = spyImage;
+    spyImage.storage.setStorageDirectories(
+        FSNamesystem.getNamespaceDirs(conf), 
+        FSNamesystem.getNamespaceEditsDirs(conf));
+
+    doThrow(new IOException("Injected fault: saveFSImage")).
+      when(spyImage).saveFSImage((File)anyObject());
+
+    try {
+      doAnEdit(fsn, 1);
+
+      // Save namespace
+      fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      try {
+        fsn.saveNamespace();
+        fail("saveNamespace did not fail even when all directories failed!");
+      } catch (IOException ioe) {
+        LOG.info("Got expected exception", ioe);
+      }
+      
+      // Ensure that, if storage dirs come back online, things work again.
+      if (restoreStorageAfterFailure) {
+        Mockito.reset(spyImage);
+        spyStorage.setRestoreFailedStorage(true);
+        fsn.saveNamespace();
+        checkEditExists(fsn, 1);
+      }
+
+      // Now shut down and restart the NN
+      originalImage.close();
+      fsn.close();
+      fsn = null;
+
+      // Start a new namesystem, which should be able to recover
+      // the namespace from the previous incarnation.
+      fsn = new FSNamesystem(conf);
+
+      // Make sure the image loaded including our edits.
+      checkEditExists(fsn, 1);
+    } finally {
+      if (fsn != null) {
+        fsn.close();
+      }
+    }
+  }
 
   @Test
   public void testSaveWhileEditsRolled() throws Exception {
     Configuration conf = getConf();
     NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
-    GenericTestUtils.formatNamenode(conf);
+    DFSTestUtil.formatNameNode(conf);
     FSNamesystem fsn = new FSNamesystem(conf);
 
     try {
@@ -308,7 +399,7 @@ public class TestSaveNamespace {
   public void testTxIdPersistence() throws Exception {
     Configuration conf = getConf();
     NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
-    GenericTestUtils.formatNamenode(conf);
+    DFSTestUtil.formatNameNode(conf);
     FSNamesystem fsn = new FSNamesystem(conf);
 
     try {

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Wed May 18 23:44:23 2011
@@ -45,6 +45,7 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.fs.permission.PermissionStatus;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
@@ -374,7 +375,7 @@ public class TestStartup extends TestCas
     conf.set("dfs.name.dir", new File(base_dir, "name").getPath());
     conf.setBoolean("dfs.permissions", false);
 
-    GenericTestUtils.formatNamenode(conf);
+    DFSTestUtil.formatNameNode(conf);
 
     // create an uncompressed image
     LOG.info("Create an uncompressed fsimage");

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java Wed May 18 23:44:23 2011
@@ -28,15 +28,16 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /**
  * Test case for FilesInGetListingOps metric in Namenode
  */
 public class TestNNMetricFilesInGetListingOps extends TestCase {
   private static final Configuration CONF = new HdfsConfiguration();
+  private static final String NN_METRICS = "NameNodeActivity";
   static {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
@@ -45,7 +46,6 @@ public class TestNNMetricFilesInGetListi
   }
      
   private MiniDFSCluster cluster;
-  private NameNodeMetrics nnMetrics;
   private DistributedFileSystem fs;
   private Random rand = new Random();
 
@@ -54,7 +54,6 @@ public class TestNNMetricFilesInGetListi
     cluster = new MiniDFSCluster.Builder(CONF).build();
     cluster.waitActive();
     cluster.getNameNode();
-    nnMetrics = NameNode.getNameNodeMetrics();
     fs = (DistributedFileSystem) cluster.getFileSystem();
   }
 
@@ -76,9 +75,9 @@ public class TestNNMetricFilesInGetListi
     createFile("/tmp2/t1", 3200, (short)3);
     createFile("/tmp2/t2", 3200, (short)3);
     cluster.getNameNode().getListing("/tmp1", HdfsFileStatus.EMPTY_NAME, false);
-    assertEquals(2,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
-    cluster.getNameNode().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME, false) ;
-    assertEquals(4,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
+    assertCounter("FilesInGetListingOps", 2L, getMetrics(NN_METRICS));
+    cluster.getNameNode().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME, false);
+    assertCounter("FilesInGetListingOps", 4L, getMetrics(NN_METRICS));
   }
 }
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Wed May 18 23:44:23 2011
@@ -33,10 +33,11 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /**
  * Test for metrics published by the Namenode
@@ -46,6 +47,8 @@ public class TestNameNodeMetrics extends
   private static final int DFS_REPLICATION_INTERVAL = 1;
   private static final Path TEST_ROOT_DIR_PATH = 
     new Path(System.getProperty("test.build.data", "build/test/data"));
+  private static final String NN_METRICS = "NameNodeActivity";
+  private static final String NS_METRICS = "FSNamesystem";
   
   // Number of datanodes in the cluster
   private static final int DATANODE_COUNT = 3; 
@@ -59,11 +62,9 @@ public class TestNameNodeMetrics extends
   }
   
   private MiniDFSCluster cluster;
-  private FSNamesystemMetrics metrics;
   private DistributedFileSystem fs;
   private Random rand = new Random();
   private FSNamesystem namesystem;
-  private NameNodeMetrics nnMetrics;
 
   private static Path getTestPath(String fileName) {
     return new Path(TEST_ROOT_DIR_PATH, fileName);
@@ -75,8 +76,6 @@ public class TestNameNodeMetrics extends
     cluster.waitActive();
     namesystem = cluster.getNamesystem();
     fs = (DistributedFileSystem) cluster.getFileSystem();
-    metrics = namesystem.getFSNamesystemMetrics();
-    nnMetrics = NameNode.getNameNodeMetrics();
   }
   
   @Override
@@ -93,8 +92,6 @@ public class TestNameNodeMetrics extends
     // Wait for metrics update (corresponds to dfs.replication.interval
     // for some block related metrics to get updated)
     Thread.sleep(1000);
-    metrics.doUpdates(null);
-    nnMetrics.doUpdates(null);
   }
 
   private void readFile(FileSystem fileSys,Path name) throws IOException {
@@ -110,15 +107,16 @@ public class TestNameNodeMetrics extends
     // Add files with 100 blocks
     final Path file = getTestPath("testFileAdd");
     createFile(file, 3200, (short)3);
-    final int blockCount = 32;
+    final long blockCount = 32;
     int blockCapacity = namesystem.getBlockCapacity();
     updateMetrics();
-    assertEquals(blockCapacity, metrics.blockCapacity.get());
-    
+    assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));
+
+    MetricsRecordBuilder rb = getMetrics(NN_METRICS);
     // File create operations is 1
     // Number of files created is depth of <code>file</code> path
-    assertEquals(1, nnMetrics.numCreateFileOps.getPreviousIntervalValue());
-    assertEquals(file.depth(), nnMetrics.numFilesCreated.getPreviousIntervalValue());
+    assertCounter("CreateFileOps", 1L, rb);
+    assertCounter("FilesCreated", (long)file.depth(), rb);
 
     // Blocks are stored in a hashmap. Compute its capacity, which
     // doubles every time the number of entries reach the threshold.
@@ -127,10 +125,11 @@ public class TestNameNodeMetrics extends
       blockCapacity <<= 1;
     }
     updateMetrics();
-    int filesTotal = file.depth() + 1; // Add 1 for root
-    assertEquals(filesTotal, metrics.filesTotal.get());
-    assertEquals(blockCount, metrics.blocksTotal.get());
-    assertEquals(blockCapacity, metrics.blockCapacity.get());
+    long filesTotal = file.depth() + 1; // Add 1 for root
+    rb = getMetrics(NS_METRICS);
+    assertGauge("FilesTotal", filesTotal, rb);
+    assertGauge("BlocksTotal", blockCount, rb);
+    assertGauge("BlockCapacity", blockCapacity, rb);
     fs.delete(file, true);
     filesTotal--; // reduce the filecount for deleted file
     
@@ -138,13 +137,15 @@ public class TestNameNodeMetrics extends
     // the blocks pending deletion are sent for deletion to the datanodes.
     Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
     updateMetrics();
-    assertEquals(filesTotal, metrics.filesTotal.get());
-    assertEquals(0, metrics.blocksTotal.get());
-    assertEquals(0, metrics.pendingDeletionBlocks.get());
-    
+    rb = getMetrics(NS_METRICS);
+    assertGauge("FilesTotal", filesTotal, rb);
+    assertGauge("BlocksTotal", 0L, rb);
+    assertGauge("PendingDeletionBlocks", 0L, rb);
+
+    rb = getMetrics(NN_METRICS);
     // Delete file operations and number of files deleted must be 1
-    assertEquals(1, nnMetrics.numDeleteFileOps.getPreviousIntervalValue());
-    assertEquals(1, nnMetrics.numFilesDeleted.getPreviousIntervalValue());
+    assertCounter("DeleteFileOps", 1L, rb);
+    assertCounter("FilesDeleted", 1L, rb);
   }
   
   /** Corrupt a block and ensure metrics reflects it */
@@ -158,14 +159,16 @@ public class TestNameNodeMetrics extends
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
     namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
     updateMetrics();
-    assertEquals(1, metrics.corruptBlocks.get());
-    assertEquals(1, metrics.pendingReplicationBlocks.get());
-    assertEquals(1, metrics.scheduledReplicationBlocks.get());
+    MetricsRecordBuilder rb = getMetrics(NS_METRICS);
+    assertGauge("CorruptBlocks", 1L, rb);
+    assertGauge("PendingReplicationBlocks", 1L, rb);
+    assertGauge("ScheduledReplicationBlocks", 1L, rb);
     fs.delete(file, true);
     updateMetrics();
-    assertEquals(0, metrics.corruptBlocks.get());
-    assertEquals(0, metrics.pendingReplicationBlocks.get());
-    assertEquals(0, metrics.scheduledReplicationBlocks.get());
+    rb = getMetrics(NS_METRICS);
+    assertGauge("CorruptBlocks", 0L, rb);
+    assertGauge("PendingReplicationBlocks", 0L, rb);
+    assertGauge("ScheduledReplicationBlocks", 0L, rb);
   }
   
   /** Create excess blocks by reducing the replication factor for
@@ -174,10 +177,11 @@ public class TestNameNodeMetrics extends
   public void testExcessBlocks() throws Exception {
     Path file = getTestPath("testExcessBlocks");
     createFile(file, 100, (short)2);
-    int totalBlocks = 1;
+    long totalBlocks = 1;
     namesystem.setReplication(file.toString(), (short)1);
     updateMetrics();
-    assertEquals(totalBlocks, metrics.excessBlocks.get());
+    MetricsRecordBuilder rb = getMetrics(NS_METRICS);
+    assertGauge("ExcessBlocks", totalBlocks, rb);
     fs.delete(file, true);
   }
   
@@ -192,11 +196,12 @@ public class TestNameNodeMetrics extends
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
     namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
     updateMetrics();
-    assertEquals(1, metrics.underReplicatedBlocks.get());
-    assertEquals(1, metrics.missingBlocks.get());
+    MetricsRecordBuilder rb = getMetrics(NS_METRICS);
+    assertGauge("UnderReplicatedBlocks", 1L, rb);
+    assertGauge("MissingBlocks", 1L, rb);
     fs.delete(file, true);
     updateMetrics();
-    assertEquals(0, metrics.underReplicatedBlocks.get());
+    assertGauge("UnderReplicatedBlocks", 0L, getMetrics(NS_METRICS));
   }
   
   public void testRenameMetrics() throws Exception {
@@ -206,8 +211,9 @@ public class TestNameNodeMetrics extends
     createFile(target, 100, (short)1);
     fs.rename(src, target, Rename.OVERWRITE);
     updateMetrics();
-    assertEquals(1, nnMetrics.numFilesRenamed.getPreviousIntervalValue());
-    assertEquals(1, nnMetrics.numFilesDeleted.getPreviousIntervalValue());
+    MetricsRecordBuilder rb = getMetrics(NN_METRICS);
+    assertCounter("FilesRenamed", 1L, rb);
+    assertCounter("FilesDeleted", 1L, rb);
   }
   
   /**
@@ -226,13 +232,8 @@ public class TestNameNodeMetrics extends
     Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "file1.dat");
 
     // When cluster starts first time there are no file  (read,create,open)
-    // operations so metric numGetBlockLocations should be 0.
-    // Verify that numGetBlockLocations for current interval 
-    // and previous interval are 0
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
+    // operations so metric GetBlockLocations should be 0.
+    assertCounter("GetBlockLocations", 0L, getMetrics(NN_METRICS));
 
     //Perform create file operation
     createFile(file1_Path,100,(short)2);
@@ -240,36 +241,23 @@ public class TestNameNodeMetrics extends
   
     //Create file does not change numGetBlockLocations metric
     //expect numGetBlockLocations = 0 for previous and current interval 
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    // Verify numGetBlockLocations for current interval is 0
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
+    assertCounter("GetBlockLocations", 0L, getMetrics(NN_METRICS));
   
-    // Open and read file operation increments numGetBlockLocations
+    // Open and read file operation increments GetBlockLocations
     // Perform read file operation on earlier created file
     readFile(fs, file1_Path);
     updateMetrics();
     // Verify read file operation has incremented numGetBlockLocations by 1
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    1,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    // Verify numGetBlockLocations for current interval is 0
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
+    assertCounter("GetBlockLocations", 1L, getMetrics(NN_METRICS));
 
     // opening and reading file  twice will increment numGetBlockLocations by 2
     readFile(fs, file1_Path);
     readFile(fs, file1_Path);
     updateMetrics();
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    2,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    // Verify numGetBlockLocations for current interval is 0
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
+    assertCounter("GetBlockLocations", 3L, getMetrics(NN_METRICS));
   
     // Verify total load metrics, total load = Data Node started.
     updateMetrics();
-    assertEquals("Metrics TotalLoad is incorrect"
-    ,DATANODE_COUNT,metrics.totalLoad.get());
+    assertGauge("TotalLoad" ,DATANODE_COUNT, getMetrics(NS_METRICS));
   }
 }

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java Wed May 18 23:44:23 2011
@@ -34,17 +34,4 @@ public abstract class GenericTestUtils {
   public static String getMethodName() {
     return Thread.currentThread().getStackTrace()[2].getMethodName();
   }
-  
-  /**
-   * when formating a namenode - we must provide clusterid.
-   * @param conf
-   * @throws IOException
-   */
-  public static void formatNamenode(Configuration conf) throws IOException {
-    String clusterId = StartupOption.FORMAT.getClusterId();
-    if(clusterId == null || clusterId.isEmpty())
-      StartupOption.FORMAT.setClusterId("testClusterID");
-
-    NameNode.format(conf);
-  }
 }
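
formatNamenode is deleted here because this commit moves it, renamed formatNameNode, into DFSTestUtil, which every other file in this commit now calls. Presumably the moved helper keeps the same body; a sketch reconstructed from the lines deleted above (the import path for StartupOption is an assumption based on this branch's HdfsConstants):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class FormatHelper {
  /** Formatting a namenode requires a cluster ID; fall back to a test default. */
  public static void formatNameNode(Configuration conf) throws IOException {
    String clusterId = StartupOption.FORMAT.getClusterId();
    if (clusterId == null || clusterId.isEmpty()) {
      StartupOption.FORMAT.setClusterId("testClusterID");
    }
    NameNode.format(conf);
  }
}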

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java Wed May 18 23:44:23 2011
@@ -31,8 +31,8 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.JMXGet;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 
 /**
@@ -91,18 +91,18 @@ public class TestJMXGet extends TestCase
     writeFile(cluster.getFileSystem(), new Path("/test1"), 2);
 
     JMXGet jmx = new JMXGet();
-    jmx.init();
-
-
-    //get some data from different sources
-    int blocks_corrupted = NameNode.getNameNodeMetrics().
-    numBlocksCorrupted.get();
-    assertEquals(Integer.parseInt(
-        jmx.getValue("NumLiveDataNodes")), 2);
-    assertEquals(Integer.parseInt(
-        jmx.getValue("BlocksCorrupted")), blocks_corrupted);
-    assertEquals(Integer.parseInt(
-        jmx.getValue("NumOpenConnections")), 0);
+    //jmx.setService("*"); // list all hadoop services
+    //jmx.init();
+    //jmx = new JMXGet();
+    jmx.init(); // default lists namenode mbeans only
+
+    //get some data from different sources
+    assertEquals(numDatanodes, Integer.parseInt(
+        jmx.getValue("NumLiveDataNodes")));
+    assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")),
+                getMetrics("FSNamesystem"));
+    assertEquals(numDatanodes, Integer.parseInt(
+        jmx.getValue("NumOpenConnections")));
 
     cluster.shutdown();
   }
@@ -119,9 +119,12 @@ public class TestJMXGet extends TestCase
     writeFile(cluster.getFileSystem(), new Path("/test"), 2);
 
     JMXGet jmx = new JMXGet();
+    //jmx.setService("*"); // list all hadoop services
+    //jmx.init();
+    //jmx = new JMXGet();
     jmx.setService("DataNode");
     jmx.init();
-    assertEquals(Integer.parseInt(jmx.getValue("bytes_written")), 0);
+    assertEquals(fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
 
     cluster.shutdown();
   }
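
The updated test doubles as a usage example for the JMXGet tool: scope it to a service, call init(), then read attributes by their metrics2 CamelCase names (the old "bytes_written" becomes "BytesWritten"). A minimal sketch, assuming a DataNode is already running in the same process:

import org.apache.hadoop.hdfs.tools.JMXGet;

public class JmxGetExample {
  public static void main(String[] args) throws Exception {
    JMXGet jmx = new JMXGet();
    jmx.setService("DataNode"); // default lists namenode mbeans only
    jmx.init();
    // getValue returns the attribute as a String, as the test above shows
    int bytesWritten = Integer.parseInt(jmx.getValue("BytesWritten"));
    System.out.println("BytesWritten = " + bytesWritten);
  }
}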

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java Wed May 18 23:44:23 2011
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -75,7 +76,7 @@ public class TestNNLeaseRecovery {
 
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-    GenericTestUtils.formatNamenode(conf);
+    DFSTestUtil.formatNameNode(conf);
     fsn = spy(new FSNamesystem(conf));
   }
 

Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed May 18 23:44:23 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
-/hadoop/hdfs/trunk/src/webapps/datanode:1086482-1102504
+/hadoop/hdfs/trunk/src/webapps/datanode:1086482-1124460

Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed May 18 23:44:23 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
-/hadoop/hdfs/trunk/src/webapps/hdfs:1086482-1102504
+/hadoop/hdfs/trunk/src/webapps/hdfs:1086482-1124460

Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed May 18 23:44:23 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
-/hadoop/hdfs/trunk/src/webapps/secondary:1086482-1102504
+/hadoop/hdfs/trunk/src/webapps/secondary:1086482-1124460


