hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1125583 - in /hadoop/hdfs/branches/yahoo-merge: ./ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/ src/webapps/datanode/ src/webapps/hdfs/ src/webapps/secondary/
Date: Fri, 20 May 2011 22:44:58 GMT
Author: szetszwo
Date: Fri May 20 22:44:57 2011
New Revision: 1125583

URL: http://svn.apache.org/viewvc?rev=1125583&view=rev
Log:
svn merge -c 1085509 from trunk for HDFS-1785.
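("svn merge -c 1085509" cherry-picks the single trunk revision r1085509, which contains the HDFS-1785 change, into this branch; the svn:mergeinfo property changes recorded below are Subversion's bookkeeping for that cherry-pick, which is why 1085509 is spliced into each mergeinfo revision list.)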

Modified:
    hadoop/hdfs/branches/yahoo-merge/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/CHANGES.txt
    hadoop/hdfs/branches/yahoo-merge/build.xml   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/c++/libhdfs/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/contrib/hdfsproxy/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/java/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/webapps/datanode/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/webapps/hdfs/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/webapps/secondary/   (props changed)

Propchange: hadoop/hdfs/branches/yahoo-merge/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:44:57 2011
@@ -2,4 +2,4 @@
 /hadoop/hdfs/branches/HDFS-1052:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265:796829-820463
 /hadoop/hdfs/branches/branch-0.21:820487
-/hadoop/hdfs/trunk:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1036738,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1036738,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Modified: hadoop/hdfs/branches/yahoo-merge/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/CHANGES.txt?rev=1125583&r1=1125582&r2=1125583&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/CHANGES.txt (original)
+++ hadoop/hdfs/branches/yahoo-merge/CHANGES.txt Fri May 20 22:44:57 2011
@@ -277,6 +277,10 @@ Trunk (unreleased changes)
     HDFS-1736. Remove the dependency from DatanodeJspHelper to FsShell.
     (Daryn Sharp via szetszwo)
 
+    HDFS-1785. In BlockReceiver and DataXceiver, clientName.length() is used
+    multiple times for determining whether the source is a client or a
+    datanode.  (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
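
The HDFS-1785 entry above describes a small readability refactoring: instead of re-evaluating clientName.length() at every decision point, the sender's role is computed once and kept in final booleans. A minimal sketch of the pattern follows; the class name TransferContext and the receive() method are hypothetical, invented here for illustration (the real changes are in the BlockReceiver and DataXceiver diffs below):

    /**
     * Sketch of the HDFS-1785 pattern: decide once whether the peer is a
     * client or a datanode, then branch on final booleans thereafter.
     * TransferContext and receive() are illustrative names only.
     */
    class TransferContext {
      /** Empty if a datanode is the sender; kept for log messages,
       *  as in BlockReceiver. */
      private final String clientname;
      private final boolean isDatanode;
      private final boolean isClient;

      TransferContext(final String clientname) {
        this.clientname = clientname;
        // Evaluate the client-vs-datanode test exactly once.
        this.isDatanode = clientname.length() == 0;
        this.isClient = !this.isDatanode;
      }

      void receive() {
        if (isDatanode) {
          // Replication or move: verify checksums and finalize locally.
        } else {
          // Client write: acks flow back through a responder thread.
        }
      }
    }

In the diffs below, BlockReceiver stores the two flags as final fields and DataXceiver.opWriteBlock computes them as locals; each '-'/'+' pair then replaces a clientName.length() comparison with the corresponding flag.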

Propchange: hadoop/hdfs/branches/yahoo-merge/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:44:57 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/build.xml:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/build.xml:796829-820463
 /hadoop/hdfs/branches/branch-0.21/build.xml:820487
-/hadoop/hdfs/trunk/build.xml:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/build.xml:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:44:57 2011
@@ -1,4 +1,4 @@
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
-/hadoop/hdfs/trunk/src/c++/libhdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/c++/libhdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:44:57 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy:820487
-/hadoop/hdfs/trunk/src/contrib/hdfsproxy:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/contrib/hdfsproxy:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:44:57 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/java:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java:820487
-/hadoop/hdfs/trunk/src/java:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/java:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1125583&r1=1125582&r2=1125583&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Fri May 20 22:44:57 2011
@@ -35,14 +35,14 @@ import java.util.zip.Checksum;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.FSOutputSummer;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
@@ -60,7 +60,6 @@ class BlockReceiver implements java.io.C
   public static final Log LOG = DataNode.LOG;
   static final Log ClientTraceLog = DataNode.ClientTraceLog;
   
-  private ExtendedBlock block; // the block to receive
   private DataInputStream in = null; // from where data are read
   private DataChecksum checksum; // from where chunks of a block can be read
   private OutputStream out = null; // to block file at local disk
@@ -77,28 +76,41 @@ class BlockReceiver implements java.io.C
   private Daemon responder = null;
   private DataTransferThrottler throttler;
   private FSDataset.BlockWriteStreams streams;
-  private String clientName;
-  DatanodeInfo srcDataNode = null;
+  private DatanodeInfo srcDataNode = null;
   private Checksum partialCrc = null;
   private final DataNode datanode;
-  private final BlockConstructionStage initialStage;
-  final private ReplicaInPipelineInterface replicaInfo;
   volatile private boolean mirrorError;
 
-  BlockReceiver(ExtendedBlock block, DataInputStream in, String inAddr,
-                String myAddr, BlockConstructionStage stage, 
-                long newGs, long minBytesRcvd, long maxBytesRcvd, 
-                String clientName, DatanodeInfo srcDataNode, DataNode datanode)
-                throws IOException {
+  /** The client name.  It is empty if a datanode is the client */
+  private final String clientname;
+  private final boolean isClient; 
+  private final boolean isDatanode; 
+
+  /** the block to receive */
+  private final ExtendedBlock block; 
+  /** the replica to write */
+  private final ReplicaInPipelineInterface replicaInfo;
+  /** pipeline stage */
+  private final BlockConstructionStage initialStage;
+
+  BlockReceiver(final ExtendedBlock block, final DataInputStream in,
+      final String inAddr, final String myAddr,
+      final BlockConstructionStage stage, 
+      final long newGs, final long minBytesRcvd, final long maxBytesRcvd, 
+      final String clientname, final DatanodeInfo srcDataNode,
+      final DataNode datanode) throws IOException {
     try{
       this.block = block;
       this.in = in;
       this.inAddr = inAddr;
       this.myAddr = myAddr;
-      this.clientName = clientName;
       this.srcDataNode = srcDataNode;
       this.datanode = datanode;
-      
+
+      this.clientname = clientname;
+      this.isDatanode = clientname.length() == 0;
+      this.isClient = !this.isDatanode;
+
       //for datanode, we have
       //1: clientName.length() == 0, and
       //2: stage == null, PIPELINE_SETUP_CREATE or TRANSFER_RBW
@@ -106,7 +118,7 @@ class BlockReceiver implements java.io.C
       //
       // Open local disk out
       //
-      if (clientName.length() == 0) { //replication or move
+      if (isDatanode) { //replication or move
         replicaInfo = datanode.data.createTemporary(block);
       } else {
         switch (stage) {
@@ -143,8 +155,8 @@ class BlockReceiver implements java.io.C
       this.bytesPerChecksum = checksum.getBytesPerChecksum();
       this.checksumSize = checksum.getChecksumSize();
       
-      boolean isCreate = stage == BlockConstructionStage.PIPELINE_SETUP_CREATE 
-      || clientName.length() == 0;
+      final boolean isCreate = isDatanode 
+          || stage == BlockConstructionStage.PIPELINE_SETUP_CREATE;
       streams = replicaInfo.createStreams(isCreate,
           this.bytesPerChecksum, this.checksumSize);
       if (streams != null) {
@@ -525,7 +537,7 @@ class BlockReceiver implements java.io.C
        * protocol includes acks and only the last datanode needs to verify 
        * checksum.
        */
-      if (mirrorOut == null || clientName.length() == 0) {
+      if (mirrorOut == null || isDatanode) {
         verifyChunks(pktBuf, dataOff, len, pktBuf, checksumOff);
       }
 
@@ -624,7 +636,7 @@ class BlockReceiver implements java.io.C
       throttler = throttlerArg;
 
     try {
-      if (clientName.length() > 0) {
+      if (isClient) {
         responder = new Daemon(datanode.threadGroup, 
             new PacketResponder(this, block, mirrIn, replyOut, 
                                 numTargets, Thread.currentThread()));
@@ -647,7 +659,7 @@ class BlockReceiver implements java.io.C
       // if this write is for a replication request (and not
       // from a client), then finalize block. For client-writes, 
       // the block is finalized in the PacketResponder.
-      if (clientName.length() == 0) {
+      if (isDatanode) {
         // close the block/crc files
         close();
 
@@ -686,7 +698,7 @@ class BlockReceiver implements java.io.C
    * if this write is for a replication request (and not from a client)
    */
   private void cleanupBlock() throws IOException {
-    if (clientName.length() == 0
+    if (isDatanode
         && initialStage != BlockConstructionStage.TRANSFER_RBW) {
       datanode.data.unfinalizeBlock(block);
     }
@@ -912,14 +924,13 @@ class BlockReceiver implements java.io.C
               block.setNumBytes(replicaInfo.getNumBytes());
               datanode.data.finalizeBlock(block);
               datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT);
-              if (ClientTraceLog.isInfoEnabled() &&
-                  receiver.clientName.length() > 0) {
+              if (ClientTraceLog.isInfoEnabled() && isClient) {
                 long offset = 0;
                 DatanodeRegistration dnR = 
                   datanode.getDNRegistrationForBP(block.getBlockPoolId());
                 ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                       receiver.inAddr, receiver.myAddr, block.getNumBytes(),
-                      "HDFS_WRITE", receiver.clientName, offset,
+                      "HDFS_WRITE", receiver.clientname, offset,
                       dnR.getStorageID(), block, endTime-startTime));
               } else {
                 LOG.info("Received block " + block + 

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1125583&r1=1125582&r2=1125583&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Fri May 20 22:44:57 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.da
 import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.ERROR;
 import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.ERROR_ACCESS_TOKEN;
 import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.SUCCESS;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT;
 
 import java.io.BufferedInputStream;
@@ -34,14 +35,13 @@ import java.net.SocketException;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.io.IOUtils;
@@ -50,8 +50,8 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 
@@ -206,11 +206,14 @@ class DataXceiver extends DataTransferPr
    * Write a block to disk.
    */
   @Override
-  protected void opWriteBlock(DataInputStream in, ExtendedBlock block, 
-      int pipelineSize, BlockConstructionStage stage,
-      long newGs, long minBytesRcvd, long maxBytesRcvd,
-      String client, DatanodeInfo srcDataNode, DatanodeInfo[] targets,
-      Token<BlockTokenIdentifier> blockToken) throws IOException {
+  protected void opWriteBlock(final DataInputStream in, final ExtendedBlock block, 
+      final int pipelineSize, final BlockConstructionStage stage,
+      final long newGs, final long minBytesRcvd, final long maxBytesRcvd,
+      final String clientname, final DatanodeInfo srcDataNode,
+      final DatanodeInfo[] targets, final Token<BlockTokenIdentifier> blockToken
+      ) throws IOException {
+    final boolean isDatanode = clientname.length() == 0;
+    final boolean isClient = !isDatanode;
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() +
@@ -238,7 +241,7 @@ class DataXceiver extends DataTransferPr
             BlockTokenSecretManager.AccessMode.WRITE);
       } catch (InvalidToken e) {
         try {
-          if (client.length() != 0) {
+          if (isClient) {
             ERROR_ACCESS_TOKEN.write(replyOut);
             Text.writeString(replyOut, dnR.getName());
             replyOut.flush();
@@ -261,14 +264,14 @@ class DataXceiver extends DataTransferPr
     String firstBadLink = "";           // first datanode that failed in connection setup
     DataTransferProtocol.Status mirrorInStatus = SUCCESS;
     try {
-      if (client.length() == 0 || 
+      if (isDatanode || 
           stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
         // open a block receiver
         blockReceiver = new BlockReceiver(block, in, 
             s.getRemoteSocketAddress().toString(),
             s.getLocalSocketAddress().toString(),
             stage, newGs, minBytesRcvd, maxBytesRcvd,
-            client, srcDataNode, datanode);
+            clientname, srcDataNode, datanode);
       } else {
         datanode.data.recoverClose(block, newGs, minBytesRcvd);
       }
@@ -299,7 +302,7 @@ class DataXceiver extends DataTransferPr
 
           // Write header: Copied from DFSClient.java!
           DataTransferProtocol.Sender.opWriteBlock(mirrorOut, originalBlock,
-              pipelineSize, stage, newGs, minBytesRcvd, maxBytesRcvd, client,
+              pipelineSize, stage, newGs, minBytesRcvd, maxBytesRcvd, clientname,
               srcDataNode, targets, blockToken);
 
           if (blockReceiver != null) { // send checksum header
@@ -308,7 +311,7 @@ class DataXceiver extends DataTransferPr
           mirrorOut.flush();
 
           // read connect ack (only for clients, not for replication req)
-          if (client.length() != 0) {
+          if (isClient) {
             mirrorInStatus = DataTransferProtocol.Status.read(mirrorIn);
             firstBadLink = Text.readString(mirrorIn);
             if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
@@ -320,7 +323,7 @@ class DataXceiver extends DataTransferPr
           }
 
         } catch (IOException e) {
-          if (client.length() != 0) {
+          if (isClient) {
             ERROR.write(replyOut);
             Text.writeString(replyOut, mirrorNode);
             replyOut.flush();
@@ -331,7 +334,7 @@ class DataXceiver extends DataTransferPr
           mirrorIn = null;
           IOUtils.closeSocket(mirrorSock);
           mirrorSock = null;
-          if (client.length() > 0) {
+          if (isClient) {
             throw e;
           } else {
             LOG.info(dnR + ":Exception transfering block " +
@@ -343,7 +346,7 @@ class DataXceiver extends DataTransferPr
       }
 
       // send connect ack back to source (only for clients)
-      if (client.length() != 0) {
+      if (isClient) {
         if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
           LOG.info("Datanode " + targets.length +
                    " forwarding connect ack to upstream firstbadlink is " +
@@ -362,7 +365,7 @@ class DataXceiver extends DataTransferPr
       }
 
       // update its generation stamp
-      if (client.length() != 0 && 
+      if (isClient && 
           stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
         block.setGenerationStamp(newGs);
         block.setNumBytes(minBytesRcvd);
@@ -371,7 +374,7 @@ class DataXceiver extends DataTransferPr
       // if this write is for a replication request or recovering
       // a failed close for client, then confirm block. For other client-writes,
       // the block is finalized in the PacketResponder.
-      if ((client.length() == 0 && stage != BlockConstructionStage.TRANSFER_RBW)
+      if ((isDatanode  && stage != BlockConstructionStage.TRANSFER_RBW)
           ||
           stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
         datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT);

Propchange: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:44:57 2011
@@ -5,4 +5,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:820487
-/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:44:57 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/test/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/test/hdfs:820487
-/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:44:57 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
-/hadoop/hdfs/trunk/src/webapps/datanode:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/webapps/datanode:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:44:57 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
-/hadoop/hdfs/trunk/src/webapps/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/webapps/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:44:57 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
-/hadoop/hdfs/trunk/src/webapps/secondary:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/webapps/secondary:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576


