hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1306587 - in /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs/ hadoop-hdfs/src/contrib/bkjournal/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ hadoop-hdfs/sr...
Date: Wed, 28 Mar 2012 20:45:18 GMT
Author: todd
Date: Wed Mar 28 20:45:13 2012
New Revision: 1306587

URL: http://svn.apache.org/viewvc?rev=1306587&view=rev
Log:
Merge trunk into HDFS-3042 branch

Removed:
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java
Modified:
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/pom.xml

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1306184-1306586

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml Wed Mar 28 20:45:13 2012
@@ -19,12 +19,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop HttpFS</name>

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Mar 28 20:45:13 2012
@@ -109,7 +109,7 @@ Trunk (unreleased changes)
 
     HDFS-3116. Typo in fetchdt error message. (AOE Takashi via atm)
 
-Release 0.23.3 - UNRELEASED 
+Release 2.0.0 - UNRELEASED 
 
   INCOMPATIBLE CHANGES
 
@@ -277,6 +277,8 @@ Release 0.23.3 - UNRELEASED 
     HDFS-3129. NetworkTopology: add test that getLeaf should check for
     invalid topologies (Colin Patrick McCabe via eli)
 
+    HDFS-3155. Clean up FSDataset implementation related code.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -360,6 +362,10 @@ Release 0.23.3 - UNRELEASED 
 
     HDFS-3132. Fix findbugs warning on HDFS trunk. (todd)
 
+    HDFS-3156. TestDFSHAAdmin is failing post HADOOP-8202. (atm)
+
+    HDFS-3143. TestGetBlocks.testGetBlocks is failing. (Arpit Gupta via atm)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -869,6 +875,8 @@ Release 0.23.1 - 2012-02-17 
 
     HDFS-2868. Expose xceiver counts via the DataNode MXBean. (harsh)
 
+    HDFS-3139. Minor Datanode logging improvement. (eli)
+
   OPTIMIZATIONS
 
     HDFS-2130. Switch default checksum to CRC32C. (todd)

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/pom.xml Wed Mar 28 20:45:13 2012
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS</description>
   <name>Apache Hadoop HDFS</name>
   <packaging>jar</packaging>

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml Wed Mar 28 20:45:13 2012
@@ -17,13 +17,13 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../../../../hadoop-project</relativePath>
   </parent>
 
   <groupId>org.apache.hadoop.contrib</groupId>
   <artifactId>hadoop-hdfs-bkjournal</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS BookKeeper Journal</description>
   <name>Apache Hadoop HDFS BookKeeper Journal</name>
   <packaging>jar</packaging>

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1306184-1306586

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java Wed Mar 28 20:45:13 2012
@@ -28,20 +28,20 @@ import org.apache.hadoop.hdfs.Deprecated
 import org.apache.hadoop.io.WritableComparable;
 
 /**
- * DatanodeID is composed of the data node 
- * name (hostname:portNumber) and the data storage ID, 
- * which it currently represents.
- * 
+ * This class represents the primary identifier for a Datanode.
+ * Datanodes are identified by how they can be contacted (hostname
+ * and ports) and their storage ID, a unique number that associates
+ * the Datanodes blocks with a particular Datanode.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeID implements WritableComparable<DatanodeID> {
   public static final DatanodeID[] EMPTY_ARRAY = {}; 
 
-  public String name;      /// hostname:portNumber
-  public String storageID; /// unique per cluster storageID
-  protected int infoPort;     /// the port where the infoserver is running
-  public int ipcPort;     /// the port where the ipc server is running
+  public String name;       // hostname:port (data transfer port)
+  public String storageID;  // unique per cluster storageID
+  protected int infoPort;   // info server port
+  public int ipcPort;       // ipc server port
 
   /** Equivalent to DatanodeID(""). */
   public DatanodeID() {this("");}

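For context, the reworked javadoc frames a Datanode's identity as its contact
information plus a cluster-unique storage ID. A minimal sketch of that shape
(illustrative only; the real class also carries the WritableComparable
plumbing and constructors not shown here):

    // Sketch of the identity described above, not the actual HDFS class.
    class DatanodeIdentity {
      String name;      // hostname:port used for data transfer
      String storageID; // cluster-unique ID associating blocks with this node
      int infoPort;     // HTTP info server port
      int ipcPort;      // IPC server port
    }
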
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Wed Mar 28 20:45:13 2012
@@ -37,9 +37,9 @@ import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.StringUtils;
 
 /** 
- * DatanodeInfo represents the status of a DataNode.
- * This object is used for communication in the
- * Datanode Protocol and the Client Protocol.
+ * This class extends the primary identifier of a Datanode with ephemeral
+ * state, eg usage information, current administrative state, and the
+ * network location that is communicated to clients.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -52,12 +52,10 @@ public class DatanodeInfo extends Datano
   protected int xceiverCount;
   protected String location = NetworkTopology.DEFAULT_RACK;
 
-  /** HostName as supplied by the datanode during registration as its 
-   * name. Namenode uses datanode IP address as the name.
-   */
+  // The FQDN of the IP associated with the Datanode's hostname
   protected String hostName = null;
   
-  // administrative states of a datanode
+  // Datanode administrative states
   public enum AdminStates {
     NORMAL("In Service"), 
     DECOMMISSION_INPROGRESS("Decommission In Progress"), 
@@ -241,12 +239,14 @@ public class DatanodeInfo extends Datano
     long nonDFSUsed = getNonDfsUsed();
     float usedPercent = getDfsUsedPercent();
     float remainingPercent = getRemainingPercent();
-    String hostName = NetUtils.getHostNameOfIP(name);
+    String lookupName = NetUtils.getHostNameOfIP(name);
 
     buffer.append("Name: "+ name);
-    if(hostName != null)
-      buffer.append(" (" + hostName + ")");
+    if (lookupName != null) {
+      buffer.append(" (" + lookupName + ")");
+    }
     buffer.append("\n");
+    buffer.append("Hostname: " + getHostName() + "\n");
 
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append("Rack: "+location+"\n");

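With this change, a datanode's entry in the report gains an explicit Hostname
line alongside the reverse-DNS lookup of the name. Hypothetical output from
the modified code (values invented for illustration; the Rack line appears
only when a non-default rack is configured):

    Name: 127.0.0.1:50010 (localhost)
    Hostname: localhost.localdomain
    Rack: /rack1
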
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java Wed Mar 28 20:45:13 2012
@@ -34,16 +34,13 @@ import org.apache.hadoop.hdfs.util.Light
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
 
-/**************************************************
- * DatanodeDescriptor tracks stats on a given DataNode, such as
- * available storage capacity, last update time, etc., and maintains a
- * set of blocks stored on the datanode.
- *
- * This data structure is internal to the namenode. It is *not* sent
- * over-the-wire to the Client or the Datanodes. Neither is it stored
- * persistently in the fsImage.
- **************************************************/
+/**
+ * This class extends the DatanodeInfo class with ephemeral information (eg
+ * health, capacity, what blocks are associated with the Datanode) that is
+ * private to the Namenode, ie this class is not exposed to clients.
+ */
 @InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class DatanodeDescriptor extends DatanodeInfo {
   
   // Stores status of decommissioning.
@@ -586,14 +583,14 @@ public class DatanodeDescriptor extends 
   }
 
   /**
-   * @return Blanacer bandwidth in bytes per second for this datanode.
+   * @return balancer bandwidth in bytes per second for this datanode
    */
   public long getBalancerBandwidth() {
     return this.bandwidth;
   }
 
   /**
-   * @param bandwidth Blanacer bandwidth in bytes per second for this datanode.
+   * @param bandwidth balancer bandwidth in bytes per second for this datanode
    */
   public void setBalancerBandwidth(long bandwidth) {
     this.bandwidth = bandwidth;

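Taken together, the three reworked javadocs describe a layered model of
Datanode state, summarized here as a sketch:

    // DatanodeID         - stable identity: name (host:port), storageID, ports
    // DatanodeInfo       - adds ephemeral state shared with clients
    //                      (usage, admin state, network location)
    // DatanodeDescriptor - adds Namenode-private state (e.g. block lists);
    //                      never sent to clients or persisted in the fsimage
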
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Wed Mar 28 20:45:13 2012
@@ -358,9 +358,8 @@ class BlockReceiver implements Closeable
    * This does not verify the original checksums, under the assumption
    * that they have already been validated.
    */
-  private void translateChunks( byte[] dataBuf, int dataOff, int len, 
-                             byte[] checksumBuf, int checksumOff ) 
-                             throws IOException {
+  private void translateChunks( byte[] dataBuf, int dataOff, int len,
+      byte[] checksumBuf, int checksumOff ) {
     if (len == 0) return;
     
     int numChunks = (len - 1)/bytesPerChecksum + 1;
@@ -702,7 +701,7 @@ class BlockReceiver implements Closeable
     return lastPacketInBlock?-1:len;
   }
 
-  private void dropOsCacheBehindWriter(long offsetInBlock) throws IOException {
+  private void dropOsCacheBehindWriter(long offsetInBlock) {
     try {
       if (outFd != null &&
           offsetInBlock > lastCacheDropOffset + CACHE_DROP_LAG_BYTES) {

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Wed Mar 28 20:45:13 2012
@@ -111,10 +111,6 @@ class BlockSender implements java.io.Clo
   
   /** the block to read from */
   private final ExtendedBlock block;
-  /** the replica to read from */
-  private final Replica replica;
-  /** The visible length of a replica. */
-  private final long replicaVisibleLength;
   /** Stream to read block data from */
   private InputStream blockIn;
   /** updated while using transferTo() */
@@ -189,17 +185,18 @@ class BlockSender implements java.io.Clo
       this.readaheadLength = datanode.getDnConf().readaheadLength;
       this.shouldDropCacheBehindRead = datanode.getDnConf().dropCacheBehindReads;
       
+      final Replica replica;
+      final long replicaVisibleLength;
       synchronized(datanode.data) { 
-        this.replica = getReplica(block, datanode);
-        this.replicaVisibleLength = replica.getVisibleLength();
+        replica = getReplica(block, datanode);
+        replicaVisibleLength = replica.getVisibleLength();
       }
       // if there is a write in progress
       ChunkChecksum chunkChecksum = null;
       if (replica instanceof ReplicaBeingWritten) {
-        long minEndOffset = startOffset + length;
-        waitForMinLength((ReplicaBeingWritten)replica, minEndOffset);
-        ReplicaInPipeline rip = (ReplicaInPipeline) replica;
-        chunkChecksum = rip.getLastChecksumAndDataLen();
+        final ReplicaBeingWritten rbw = (ReplicaBeingWritten)replica;
+        waitForMinLength(rbw, startOffset + length);
+        chunkChecksum = rbw.getLastChecksumAndDataLen();
       }
 
       if (replica.getGenerationStamp() < block.getGenerationStamp()) {

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Mar 28 20:45:13 2012
@@ -330,9 +330,7 @@ public class DataNode extends Configured
        : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0,
            conf, new AccessControlList(conf.get(DFS_ADMIN, " ")),
            secureResources.getListener());
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Datanode listening on " + infoHost + ":" + tmpInfoPort);
-    }
+    LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
     if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
       boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                                                DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
@@ -398,7 +396,8 @@ public class DataNode extends Configured
         .newReflectiveBlockingService(interDatanodeProtocolXlator);
     DFSUtil.addPBProtocol(conf, InterDatanodeProtocolPB.class, service,
         ipcServer);
-    
+    LOG.info("Opened IPC server at " + ipcServer.getListenerAddress());
+
     // set service-level authorization security policy
     if (conf.getBoolean(
         CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
@@ -486,14 +485,14 @@ public class DataNode extends Configured
   }
   
   private void initDataXceiver(Configuration conf) throws IOException {
-    InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
+    InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
 
     // find free port or use privileged port provided
     ServerSocket ss;
     if(secureResources == null) {
       ss = (dnConf.socketWriteTimeout > 0) ? 
           ServerSocketChannel.open().socket() : new ServerSocket();
-          Server.bind(ss, socAddr, 0);
+          Server.bind(ss, streamingAddr, 0);
     } else {
       ss = secureResources.getStreamingSocket();
     }
@@ -502,8 +501,7 @@ public class DataNode extends Configured
     int tmpPort = ss.getLocalPort();
     selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
                                      tmpPort);
-    LOG.info("Opened info server at " + tmpPort);
-      
+    LOG.info("Opened streaming server at " + selfAddr);
     this.threadGroup = new ThreadGroup("dataXceiverServer");
     this.dataXceiverServer = new Daemon(threadGroup, 
         new DataXceiverServer(ss, conf, this));

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Wed Mar 28 20:45:13 2012
@@ -760,8 +760,8 @@ public class DataStorage extends Storage
   /**
    * Add bpStorage into bpStorageMap
    */
-  private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage)
-      throws IOException {
+  private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage
+      ) {
     if (!this.bpStorageMap.containsKey(bpID)) {
       this.bpStorageMap.put(bpID, bpStorage);
     }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java Wed Mar 28 20:45:13 2012
@@ -32,7 +32,7 @@ class DatanodeUtil {
 
   static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
 
-  private final static String DISK_ERROR = "Possible disk error on file creation: ";
+  private static final String DISK_ERROR = "Possible disk error: ";
 
   /** Get the cause of an I/O exception if caused by a possible disk error
    * @param ioe an I/O exception

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Wed Mar 28 20:45:13 2012
@@ -1800,7 +1800,7 @@ public class FSDataset implements FsData
       ReplicaInfo replicaInfo) throws IOException {
     FinalizedReplica newReplicaInfo = null;
     if (replicaInfo.getState() == ReplicaState.RUR &&
-       ((ReplicaUnderRecovery)replicaInfo).getOrignalReplicaState() == 
+       ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica().getState() == 
          ReplicaState.FINALIZED) {
       newReplicaInfo = (FinalizedReplica)
              ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
@@ -2036,7 +2036,7 @@ public class FSDataset implements FsData
         ReplicaState replicaState = dinfo.getState();
         if (replicaState == ReplicaState.FINALIZED || 
             (replicaState == ReplicaState.RUR && 
-                ((ReplicaUnderRecovery)dinfo).getOrignalReplicaState() == 
+                ((ReplicaUnderRecovery)dinfo).getOriginalReplica().getState() == 
                   ReplicaState.FINALIZED)) {
           v.clearPath(bpid, parent);
         }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java Wed Mar 28 20:45:13 2012
@@ -86,14 +86,6 @@ class ReplicaUnderRecovery extends Repli
   ReplicaInfo getOriginalReplica() {
     return original;
   }
-  
-  /**
-   * Get the original replica's state
-   * @return the original replica's state
-   */
-  ReplicaState getOrignalReplicaState() {
-    return original.getState();
-  }
 
   @Override //ReplicaInfo
   boolean isUnlinked() {
@@ -170,6 +162,6 @@ class ReplicaUnderRecovery extends Repli
   ReplicaRecoveryInfo createInfo() {
     return new ReplicaRecoveryInfo(original.getBlockId(), 
         original.getBytesOnDisk(), original.getGenerationStamp(),
-        getOrignalReplicaState()); 
+        original.getState()); 
   }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java Wed Mar 28 20:45:13 2012
@@ -69,18 +69,19 @@ public class SecureDataNodeStarter imple
     args = context.getArguments();
     
     // Obtain secure port for data streaming to datanode
-    InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
+    InetSocketAddress streamingAddr  = DataNode.getStreamingAddr(conf);
     int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
         HdfsServerConstants.WRITE_TIMEOUT);
     
     ServerSocket ss = (socketWriteTimeout > 0) ? 
         ServerSocketChannel.open().socket() : new ServerSocket();
-    ss.bind(socAddr, 0);
+    ss.bind(streamingAddr, 0);
     
     // Check that we got the port we need
-    if(ss.getLocalPort() != socAddr.getPort())
+    if (ss.getLocalPort() != streamingAddr.getPort()) {
       throw new RuntimeException("Unable to bind on specified streaming port in secure " +
-      		"context. Needed " + socAddr.getPort() + ", got " + ss.getLocalPort());
+      		"context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
+    }
 
     // Obtain secure listener for web server
     SelectChannelConnector listener = 
@@ -90,15 +91,18 @@ public class SecureDataNodeStarter imple
     listener.setPort(infoSocAddr.getPort());
     // Open listener here in order to bind to port as root
     listener.open(); 
-    if(listener.getPort() != infoSocAddr.getPort())
+    if (listener.getPort() != infoSocAddr.getPort()) {
       throw new RuntimeException("Unable to bind on specified info port in secure " +
-          "context. Needed " + socAddr.getPort() + ", got " + ss.getLocalPort());
+          "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
+    }
     System.err.println("Successfully obtained privileged resources (streaming port = "
         + ss + " ) (http listener port = " + listener.getConnection() +")");
     
-    if(ss.getLocalPort() >= 1023 || listener.getPort() >= 1023)
+    if (ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) {
       throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
-    
+    }
+    System.err.println("Opened streaming server at " + streamingAddr);
+    System.err.println("Opened info server at " + infoSocAddr);
     resources = new SecureResources(ss, listener);
   }
 

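The bind-then-verify pattern used here can be sketched in isolation: bind to
a fixed privileged port while still running as root, then confirm the kernel
granted exactly the requested port. A minimal, self-contained example (the
port value is hypothetical, and binding below 1024 requires root on Unix):

    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class PrivilegedBindCheck {
      public static void main(String[] args) throws Exception {
        InetSocketAddress addr = new InetSocketAddress(1004); // hypothetical port
        ServerSocket ss = new ServerSocket();
        ss.bind(addr, 0);
        // Guard against silently receiving an ephemeral port instead of
        // the privileged one we asked for.
        if (ss.getLocalPort() != addr.getPort()) {
          throw new RuntimeException("Needed " + addr.getPort()
              + ", got " + ss.getLocalPort());
        }
        System.err.println("Bound privileged port " + ss.getLocalPort());
        ss.close();
      }
    }
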
Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1306184-1306586

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1306184-1306586

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1306184-1306586

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1306184-1306586

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml Wed Mar 28 20:45:13 2012
@@ -21,7 +21,7 @@
   <property name="aspectversion" value="1.6.5"/>
   <!-- TODO this has to be changed synchronously with build.xml version prop.-->
   <!-- this workarounds of test-patch setting its own 'version' -->
-  <property name="project.version" value="0.24.0-SNAPSHOT"/>
+  <property name="project.version" value="3.0.0-SNAPSHOT"/>
 
   <!-- Properties common for all fault injections -->
   <property name="build-fi.dir" value="${basedir}/build-fi"/>

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1306184-1306586

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Mar 28 20:45:13 2012
@@ -80,7 +80,6 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -1664,7 +1663,7 @@ public class MiniDFSCluster {
   public void triggerBlockReports()
       throws IOException {
     for (DataNode dn : getDataNodes()) {
-      DataNodeAdapter.triggerBlockReport(dn);
+      DataNodeTestUtils.triggerBlockReport(dn);
     }
   }
 
@@ -1672,14 +1671,14 @@ public class MiniDFSCluster {
   public void triggerDeletionReports()
       throws IOException {
     for (DataNode dn : getDataNodes()) {
-      DataNodeAdapter.triggerDeletionReport(dn);
+      DataNodeTestUtils.triggerDeletionReport(dn);
     }
   }
 
   public void triggerHeartbeats()
       throws IOException {
     for (DataNode dn : getDataNodes()) {
-      DataNodeAdapter.triggerHeartbeat(dn);
+      DataNodeTestUtils.triggerHeartbeat(dn);
     }
   }
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java Wed Mar 28 20:45:13 2012
@@ -52,7 +52,7 @@ public class TestDFSAddressConfig extend
 
     String selfSocketAddr = dn.getSelfAddr().toString();
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
-    assertTrue(selfSocketAddr.startsWith("/127.0.0.1:"));
+    assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
 
     /*-------------------------------------------------------------------------
      * Shut down the datanodes, reconfigure, and bring them back up.
@@ -78,7 +78,7 @@ public class TestDFSAddressConfig extend
     selfSocketAddr = dn.getSelfAddr().toString();
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 127.0.0.1
-    assertTrue(selfSocketAddr.startsWith("/127.0.0.1:"));
+    assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
 
     /*-------------------------------------------------------------------------
      * Shut down the datanodes, reconfigure, and bring them back up.
@@ -103,7 +103,7 @@ public class TestDFSAddressConfig extend
     selfSocketAddr = dn.getSelfAddr().toString();
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 0.0.0.0
-    assertTrue(selfSocketAddr.startsWith("/0.0.0.0:"));
+    assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
 
     cluster.shutdown();
   }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Wed Mar 28 20:45:13 2012
@@ -135,7 +135,7 @@ public class TestGetBlocks extends TestC
         namenode.getBlocks(new DatanodeInfo(), 2);
     } catch(RemoteException e) {
       getException = true;
-      assertTrue(e.getMessage().contains("IllegalArgumentException"));
+      assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));
     }
     assertTrue(getException);
   }

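The fix above is worth noting as an API pattern: RemoteException preserves
the class name of the server-side exception, so asserting on getClassName()
is more robust than substring-matching getMessage(), whose wording can
change between releases. A sketch of the distinction, assuming a caught
RemoteException e:

    // Brittle: depends on the exact wording of the server-side message.
    // assertTrue(e.getMessage().contains("IllegalArgumentException"));
    // Robust: depends only on the exception class thrown by the server.
    assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));
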
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Wed Mar 28 20:45:13 2012
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
@@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -454,7 +454,7 @@ public class TestLeaseRecovery2 {
     // Make sure the DNs don't send a heartbeat for a while, so the blocks
     // won't actually get completed during lease recovery.
     for (DataNode dn : cluster.getDataNodes()) {
-      DataNodeAdapter.setHeartbeatsDisabledForTests(dn, true);
+      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
     }
     
     // set the hard limit to be 1 second 
@@ -474,7 +474,7 @@ public class TestLeaseRecovery2 {
     
     // Let the DNs send heartbeats again.
     for (DataNode dn : cluster.getDataNodes()) {
-      DataNodeAdapter.setHeartbeatsDisabledForTests(dn, false);
+      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
     }
 
     cluster.waitActive();

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java Wed Mar 28 20:45:13 2012
@@ -17,6 +17,13 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -26,22 +33,16 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.util.List;
-import java.util.Random;
-
 public class TestPipelines {
   public static final Log LOG = LogFactory.getLog(TestPipelines.class);
 
@@ -105,7 +106,7 @@ public class TestPipelines {
 
     String bpid = cluster.getNamesystem().getBlockPoolId();
     for (DataNode dn : cluster.getDataNodes()) {
-      Replica r = DataNodeAdapter.fetchReplicaInfo(dn, bpid, lb.get(0)
+      Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
           .getBlock().getBlockId());
 
       assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Wed Mar 28 20:45:13 2012
@@ -24,8 +24,13 @@ import java.io.IOException;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.mockito.Mockito;
+
+import com.google.common.base.Preconditions;
 
 /**
  * Utility class for accessing package-private DataNode information during tests.
@@ -42,6 +47,64 @@ public class DataNodeTestUtils {
     return dn.getDNRegistrationForBP(bpid);
   }
 
+  public static void setHeartbeatsDisabledForTests(DataNode dn,
+      boolean heartbeatsDisabledForTests) {
+    dn.setHeartbeatsDisabledForTests(heartbeatsDisabledForTests);
+  }
+
+  public static void triggerDeletionReport(DataNode dn) throws IOException {
+    for (BPOfferService bpos : dn.getAllBpOs()) {
+      bpos.triggerDeletionReportForTests();
+    }
+  }
+
+  public static void triggerHeartbeat(DataNode dn) throws IOException {
+    for (BPOfferService bpos : dn.getAllBpOs()) {
+      bpos.triggerHeartbeatForTests();
+    }
+  }
+  
+  public static void triggerBlockReport(DataNode dn) throws IOException {
+    for (BPOfferService bpos : dn.getAllBpOs()) {
+      bpos.triggerBlockReportForTests();
+    }
+  }
+  
+  /**
+   * Insert a Mockito spy object between the given DataNode and
+   * the given NameNode. This can be used to delay or wait for
+   * RPC calls on the datanode->NN path.
+   */
+  public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN(
+      DataNode dn, NameNode nn) {
+    String bpid = nn.getNamesystem().getBlockPoolId();
+    
+    BPOfferService bpos = null;
+    for (BPOfferService thisBpos : dn.getAllBpOs()) {
+      if (thisBpos.getBlockPoolId().equals(bpid)) {
+        bpos = thisBpos;
+        break;
+      }
+    }
+    Preconditions.checkArgument(bpos != null,
+        "No such bpid: %s", bpid);
+    
+    BPServiceActor bpsa = null;
+    for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) {
+      if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) {
+        bpsa = thisBpsa;
+        break;
+      }
+    }
+    Preconditions.checkArgument(bpsa != null,
+      "No service actor to NN at %s", nn.getServiceRpcAddress());
+
+    DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy();
+    DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN);
+    bpsa.setNameNode(spy);
+    return spy;
+  }
+
   /**
    * This method is used for testing. 
    * Examples are adding and deleting blocks directly.
@@ -53,18 +116,37 @@ public class DataNodeTestUtils {
     return dn.getFSDataset();
   }
 
+  public static FSDataset getFsDatasetImpl(DataNode dn) {
+    return (FSDataset)dn.getFSDataset();
+  }
+
   public static File getFile(DataNode dn, String bpid, long bid) {
-    return ((FSDataset)dn.getFSDataset()).getFile(bpid, bid);
+    return getFsDatasetImpl(dn).getFile(bpid, bid);
   }
 
   public static File getBlockFile(DataNode dn, String bpid, Block b
       ) throws IOException {
-    return ((FSDataset)dn.getFSDataset()).getBlockFile(bpid, b);
+    return getFsDatasetImpl(dn).getBlockFile(bpid, b);
   }
 
   public static boolean unlinkBlock(DataNode dn, ExtendedBlock block, int numLinks
       ) throws IOException {
-    ReplicaInfo info = ((FSDataset)dn.getFSDataset()).getReplicaInfo(block);
-    return info.unlinkBlock(numLinks);
+    return getFsDatasetImpl(dn).getReplicaInfo(block).unlinkBlock(numLinks);
+  }
+
+  public static long getPendingAsyncDeletions(DataNode dn) {
+    return getFsDatasetImpl(dn).asyncDiskService.countPendingDeletions();
+  }
+
+  /**
+   * Fetch a copy of ReplicaInfo from a datanode by block id
+   * @param dn datanode to retrieve a replicainfo object from
+   * @param bpid Block pool Id
+   * @param blkId id of the replica's block
+   * @return copy of ReplicaInfo object @link{FSDataset#fetchReplicaInfo}
+   */
+  public static ReplicaInfo fetchReplicaInfo(final DataNode dn,
+      final String bpid, final long blkId) {
+    return getFsDatasetImpl(dn).fetchReplicaInfo(bpid, blkId);
   }
 }

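A typical use of the new spy hook appears in the TestBlockReport change
below: obtain the spy, then attach a Mockito answer to delay or observe the
datanode-to-namenode RPC. Sketched here assuming a running MiniDFSCluster
(argument matchers abbreviated; the real blockReport() call takes a
registration, a block pool ID, and storage block reports):

    // Sketch: delay the next block report from the first datanode.
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy =
        DataNodeTestUtils.spyOnBposToNN(dn, nn);
    GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spy).blockReport(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageBlockReport[]>anyObject());
    // ... trigger a block report, then delayer.waitForCall() / delayer.proceed().
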
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Wed Mar 28 20:45:13 2012
@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Wed Mar 28 20:45:13 2012
@@ -17,6 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -40,27 +51,17 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.log4j.Level;
 import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-
 /**
  * This test simulates a variety of situations when blocks are being
 * intentionally corrupted, unexpectedly modified, and so on before a block
@@ -561,7 +562,7 @@ public class TestBlockReport {
       // from this node.
       DataNode dn = cluster.getDataNodes().get(0);
       DatanodeProtocolClientSideTranslatorPB spy =
-        DataNodeAdapter.spyOnBposToNN(dn, nn);
+        DataNodeTestUtils.spyOnBposToNN(dn, nn);
       
       Mockito.doAnswer(delayer)
         .when(spy).blockReport(

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java Wed Mar 28 20:45:13 2012
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
@@ -83,8 +83,8 @@ public class HAStressTestHarness {
       @Override
       public void doAnAction() throws Exception {
         for (DataNode dn : cluster.getDataNodes()) {
-          DataNodeAdapter.triggerDeletionReport(dn);
-          DataNodeAdapter.triggerHeartbeat(dn);
+          DataNodeTestUtils.triggerDeletionReport(dn);
+          DataNodeTestUtils.triggerHeartbeat(dn);
         }
         for (int i = 0; i < 2; i++) {
           NameNode nn = cluster.getNameNode(i);

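The two trigger calls exist so the stress loop does not have to wait out the normal heartbeat and deletion-report intervals. A minimal sketch of the same idea, with flushDatanodeState as a hypothetical helper name:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;

    // Push each DN's pending deletions and a fresh heartbeat to the
    // NameNodes immediately instead of waiting for the periodic timers.
    static void flushDatanodeState(MiniDFSCluster cluster) throws IOException {
      for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.triggerDeletionReport(dn);
        DataNodeTestUtils.triggerHeartbeat(dn);
      }
    }
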
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java Wed Mar 28 20:45:13 2012
@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -96,7 +96,7 @@ public abstract class HATestUtil {
       @Override
       public Boolean get() {
         for (DataNode dn : cluster.getDataNodes()) {
-          if (DataNodeAdapter.getPendingAsyncDeletions(dn) > 0) {
+          if (DataNodeTestUtils.getPendingAsyncDeletions(dn) > 0) {
             return false;
           }
         }

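The surrounding waitFor polls until every DN's asynchronous-delete queue has drained; the idiom, with the poll interval and timeout shown as illustrative values (GenericTestUtils.waitFor throws TimeoutException and InterruptedException, which the enclosing method must declare):

    import com.google.common.base.Supplier;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
    import org.apache.hadoop.test.GenericTestUtils;

    // Poll every second, for up to ten seconds, until no DN still has
    // queued asynchronous block deletions.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        for (DataNode dn : cluster.getDataNodes()) {
          if (DataNodeTestUtils.getPendingAsyncDeletions(dn) > 0) {
            return false;
          }
        }
        return true;
      }
    }, 1000, 10000);
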
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java Wed Mar 28 20:45:13 2012
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
 import java.io.PrintWriter;
@@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -61,7 +60,6 @@ import org.apache.hadoop.test.GenericTes
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -72,9 +70,7 @@ import com.google.common.collect.Lists;
 
 public class TestDNFencing {
   
-  protected static final Log LOG = LogFactory.getLog(
-      TestDNFencing.class);
-  private static final String TEST_FILE_DATA = "hello highly available world";
+  protected static final Log LOG = LogFactory.getLog(TestDNFencing.class);
   private static final String TEST_FILE = "/testStandbyIsHot";
   private static final Path TEST_FILE_PATH = new Path(TEST_FILE);
   private static final int SMALL_BLOCK = 1024;
@@ -497,7 +493,7 @@ public class TestDNFencing {
 
       DataNode dn = cluster.getDataNodes().get(0);
       DatanodeProtocolClientSideTranslatorPB spy =
-        DataNodeAdapter.spyOnBposToNN(dn, nn2);
+        DataNodeTestUtils.spyOnBposToNN(dn, nn2);
       
       Mockito.doAnswer(delayer)
         .when(spy).blockReport(

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java Wed Mar 28 20:45:13 2012
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -45,7 +48,7 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
@@ -54,9 +57,7 @@ import org.apache.hadoop.test.GenericTes
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
-
 import org.apache.log4j.Level;
-
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -297,7 +298,7 @@ public class TestPipelinesFailover {
       // active.
       DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
       DatanodeProtocolClientSideTranslatorPB nnSpy =
-          DataNodeAdapter.spyOnBposToNN(primaryDN, nn0);
+          DataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
       
       // Delay the commitBlockSynchronization call
       DelayAnswer delayer = new DelayAnswer(LOG);

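The same DelayAnswer machinery is aimed here at commitBlockSynchronization rather than blockReport, so the test can inject a failover while the recovery primary's call is in flight. A sketch, assuming nnSpy and LOG as in the hunk above and a six-argument commitBlockSynchronization signature; the matcher list is illustrative:

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
    import org.mockito.Mockito;

    // Hold the primary DN's commitBlockSynchronization RPC on a latch.
    DelayAnswer delayer = new DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
        Mockito.<ExtendedBlock>anyObject(),  // block under recovery
        Mockito.anyLong(),                   // new generation stamp
        Mockito.anyLong(),                   // new length
        Mockito.anyBoolean(),                // close file?
        Mockito.anyBoolean(),                // delete block?
        Mockito.<DatanodeID[]>anyObject());  // new replica targets
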
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java Wed Mar 28 20:45:13 2012
@@ -35,14 +35,14 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -225,7 +225,7 @@ public class TestStandbyIsHot {
           LOG.info("Got " + numReplicas + " locs: " + locs);
           if (numReplicas > expectedReplicas) {
             for (DataNode dn : cluster.getDataNodes()) {
-              DataNodeAdapter.triggerDeletionReport(dn);
+              DataNodeTestUtils.triggerDeletionReport(dn);
             }
           }
           return numReplicas == expectedReplicas;

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java Wed Mar 28 20:45:13 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.ha.HAServiceSta
 import org.apache.hadoop.ha.HAServiceTarget;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.NodeFencer;
+import org.apache.hadoop.test.MockitoUtil;
 
 import org.junit.Before;
 import org.junit.Test;
@@ -79,7 +80,7 @@ public class TestDFSHAAdmin {
 
   @Before
   public void setup() throws IOException {
-    mockProtocol = Mockito.mock(HAServiceProtocol.class);
+    mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class);
     tool = new DFSHAAdmin() {
 
       @Override

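Unlike a plain Mockito.mock, MockitoUtil.mockProtocol must produce a mock that code under test can dispose of the way it disposes of a real RPC proxy (e.g. via RPC.stopProxy). The helper's body is not part of this diff, so the following reconstruction is an assumption:

    import java.io.Closeable;
    import org.mockito.Mockito;

    public class MockitoUtil {
      // Assumed implementation: mock the protocol interface but have the
      // mock also implement Closeable, mirroring a real RPC proxy.
      public static <T> T mockProtocol(Class<T> clazz) {
        return Mockito.mock(clazz,
            Mockito.withSettings().extraInterfaces(Closeable.class));
      }
    }
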
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/pom.xml?rev=1306587&r1=1306586&r2=1306587&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/pom.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/pom.xml Wed Mar 28 20:45:13 2012
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs-project</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Project</description>
   <name>Apache Hadoop HDFS Project</name>
   <packaging>pom</packaging>


