hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1125575 - in /hadoop/hdfs/branches/yahoo-merge: ./ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/ src/test/hdfs/org/apache/hadoop/...
Date: Fri, 20 May 2011 22:25:28 GMT
Author: szetszwo
Date: Fri May 20 22:25:27 2011
New Revision: 1125575

URL: http://svn.apache.org/viewvc?rev=1125575&view=rev
Log:
svn merge -c 1081580 from trunk for HDFS-1675.

Added:
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
      - copied unchanged from r1081580, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
Modified:
    hadoop/hdfs/branches/yahoo-merge/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/CHANGES.txt
    hadoop/hdfs/branches/yahoo-merge/build.xml   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/c++/libhdfs/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/contrib/hdfsproxy/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/java/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/hdfs/branches/yahoo-merge/src/webapps/datanode/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/webapps/hdfs/   (props changed)
    hadoop/hdfs/branches/yahoo-merge/src/webapps/secondary/   (props changed)

Propchange: hadoop/hdfs/branches/yahoo-merge/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:25:27 2011
@@ -2,4 +2,4 @@
 /hadoop/hdfs/branches/HDFS-1052:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265:796829-820463
 /hadoop/hdfs/branches/branch-0.21:820487
-/hadoop/hdfs/trunk:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1036738,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1036738,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Modified: hadoop/hdfs/branches/yahoo-merge/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/CHANGES.txt?rev=1125575&r1=1125574&r2=1125575&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/CHANGES.txt (original)
+++ hadoop/hdfs/branches/yahoo-merge/CHANGES.txt Fri May 20 22:25:27 2011
@@ -230,6 +230,8 @@ Trunk (unreleased changes)
     HDFS-1755. Federation: The BPOfferService must always connect to namenode as 
     the login user. (jitendra)
 
+    HDFS-1675. Support transferring RBW between datanodes. (szetszwo)
+
     HDFS-1791. Federation: Add command to delete block pool directories 
     from a datanode. (jitendra)
 

Propchange: hadoop/hdfs/branches/yahoo-merge/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:25:27 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/build.xml:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/build.xml:796829-820463
 /hadoop/hdfs/branches/branch-0.21/build.xml:820487
-/hadoop/hdfs/trunk/build.xml:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/build.xml:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:25:27 2011
@@ -1,4 +1,4 @@
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
-/hadoop/hdfs/trunk/src/c++/libhdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/c++/libhdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:25:27 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy:820487
-/hadoop/hdfs/trunk/src/contrib/hdfsproxy:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/contrib/hdfsproxy:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:25:27 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/java:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java:820487
-/hadoop/hdfs/trunk/src/java:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/java:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java?rev=1125575&r1=1125574&r2=1125575&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java Fri May 20 22:25:27 2011
@@ -46,11 +46,11 @@ public interface DataTransferProtocol {
    * when protocol changes. It is not very obvious. 
    */
   /*
-   * Version 20:
+   * Version 21:
    *    Changed the protocol methods to use ExtendedBlock instead
    *    of Block.
    */
-  public static final int DATA_TRANSFER_VERSION = 19;
+  public static final int DATA_TRANSFER_VERSION = 21;
 
   /** Operation */
   public enum Op {
@@ -144,7 +144,9 @@ public interface DataTransferProtocol {
     // Recover a failed PIPELINE_CLOSE
     PIPELINE_CLOSE_RECOVERY,
     // pipeline set up for block creation
-    PIPELINE_SETUP_CREATE;
+    PIPELINE_SETUP_CREATE,
+    // similar to replication but transferring rbw instead of finalized
+    TRANSFER_RBW;
     
     final static private byte RECOVERY_BIT = (byte)1;
     

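The new TRANSFER_RBW stage travels on the wire like the existing BlockConstructionStage values, which is why DATA_TRANSFER_VERSION is bumped in the same change: an older peer would not recognize the extra constant. Below is a minimal, self-contained sketch of that idea in plain Java; the enum, its ordinals and the byte encoding are illustrative only and show just the stages visible in this diff, not the actual Hadoop class.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/** Simplified stand-in for the stage enum; only the constants visible in
 *  this diff are shown, and the ordinals/encoding are illustrative. */
enum StageSketch {
  PIPELINE_CLOSE_RECOVERY,
  PIPELINE_SETUP_CREATE,
  TRANSFER_RBW;   // new: push an in-progress (RBW) replica to another datanode

  /** Write the stage as a single byte, as a wire protocol typically would. */
  void write(DataOutput out) throws IOException {
    out.writeByte(ordinal());
  }

  /** Read a stage back; a peer built before TRANSFER_RBW existed would
   *  reject the new byte here, hence the DATA_TRANSFER_VERSION bump. */
  static StageSketch read(DataInput in) throws IOException {
    int b = in.readByte();
    StageSketch[] v = values();
    if (b < 0 || b >= v.length) {
      throw new IOException("Unknown stage: " + b);
    }
    return v[b];
  }
}
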
Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1125575&r1=1125574&r2=1125575&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Fri May 20 22:25:27 2011
@@ -81,6 +81,7 @@ class BlockReceiver implements java.io.C
   DatanodeInfo srcDataNode = null;
   private Checksum partialCrc = null;
   private final DataNode datanode;
+  private final BlockConstructionStage initialStage;
   final private ReplicaInPipelineInterface replicaInfo;
   volatile private boolean mirrorError;
 
@@ -97,6 +98,11 @@ class BlockReceiver implements java.io.C
       this.clientName = clientName;
       this.srcDataNode = srcDataNode;
       this.datanode = datanode;
+      
+      //for datanode, we have
+      //1: clientName.length() == 0, and
+      //2: stage == null, PIPELINE_SETUP_CREATE or TRANSFER_RBW
+      this.initialStage = stage;
       //
       // Open local disk out
       //
@@ -645,9 +651,11 @@ class BlockReceiver implements java.io.C
         // close the block/crc files
         close();
 
-        // Finalize the block. Does this fsync()?
-        block.setNumBytes(replicaInfo.getNumBytes());
-        datanode.data.finalizeBlock(block);
+        if (initialStage != BlockConstructionStage.TRANSFER_RBW) {
+          // Finalize the block. Does this fsync()?
+          block.setNumBytes(replicaInfo.getNumBytes());
+          datanode.data.finalizeBlock(block);
+        }
         datanode.metrics.incrBlocksWritten();
       }
 
@@ -678,7 +686,8 @@ class BlockReceiver implements java.io.C
    * if this write is for a replication request (and not from a client)
    */
   private void cleanupBlock() throws IOException {
-    if (clientName.length() == 0) { // not client write
+    if (clientName.length() == 0
+        && initialStage != BlockConstructionStage.TRANSFER_RBW) {
       datanode.data.unfinalizeBlock(block);
     }
   }

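Taken together, the two BlockReceiver changes mean a replica received with TRANSFER_RBW is neither finalized on a clean close nor deleted by cleanupBlock on error, so it remains in the RBW state for pipeline recovery. A standalone sketch of those two decisions, with booleans standing in for the clientName/stage checks above (not the Hadoop class):

/** Standalone sketch of the two decisions BlockReceiver now bases on the
 *  stage it was constructed with. */
final class BlockReceiverSketch {
  /** On a clean close, every write except TRANSFER_RBW finalizes the replica;
   *  a transferred RBW must stay in the RBW state for pipeline recovery. */
  static boolean finalizeOnClose(boolean isTransferRbw) {
    return !isTransferRbw;
  }

  /** On failure, a datanode-to-datanode write (empty client name) is normally
   *  rolled back with unfinalizeBlock, but a transferred RBW is kept. */
  static boolean unfinalizeOnError(boolean isDatanodeWrite, boolean isTransferRbw) {
    return isDatanodeWrite && !isTransferRbw;
  }
}
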
Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1125575&r1=1125574&r2=1125575&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri May 20 22:25:27 2011
@@ -1781,7 +1781,8 @@ public class DataNode extends Configured
                  block + " to " + xfersBuilder);                       
       }
 
-      new Daemon(new DataTransfer(xferTargets, block, this)).start();
+      new Daemon(new DataTransfer(xferTargets, block,
+          BlockConstructionStage.PIPELINE_SETUP_CREATE)).start();
     }
   }
 
@@ -1882,21 +1883,21 @@ public class DataNode extends Configured
    * Used for transferring a block of data.  This class
    * sends a piece of data to another DataNode.
    */
-  class DataTransfer implements Runnable {
-    DatanodeInfo targets[];
-    ExtendedBlock b;
-    DataNode datanode;
+  private class DataTransfer implements Runnable {
+    final DatanodeInfo[] targets;
+    final ExtendedBlock b;
+    final BlockConstructionStage stage;
     final private DatanodeRegistration bpReg;
 
     /**
      * Connect to the first item in the target list.  Pass along the 
      * entire target list, the block, and the data.
      */
-    public DataTransfer(DatanodeInfo targets[], ExtendedBlock b,
-        DataNode datanode) throws IOException {
+    DataTransfer(DatanodeInfo targets[], ExtendedBlock b, BlockConstructionStage stage
+        ) throws IOException {
       this.targets = targets;
       this.b = b;
-      this.datanode = datanode;
+      this.stage = stage;
       BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
       bpReg = bpos.bpRegistration;
     }
@@ -1924,7 +1925,7 @@ public class DataNode extends Configured
                                                             SMALL_BUFFER_SIZE));
 
         blockSender = new BlockSender(b, 0, b.getNumBytes(), 
-            false, false, false, datanode);
+            false, false, false, DataNode.this);
         DatanodeInfo srcNode = new DatanodeInfo(bpReg);
 
         //
@@ -1935,9 +1936,9 @@ public class DataNode extends Configured
           accessToken = blockPoolTokenSecretManager.generateToken(b, 
               EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE));
         }
+
         DataTransferProtocol.Sender.opWriteBlock(out,
-            b, 0, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0, 0, "",
-            srcNode, targets, accessToken);
+            b, 0, stage, 0, 0, 0, "", srcNode, targets, accessToken);
 
         // send data & checksum
         blockSender.sendBlock(out, baseStream, null);
@@ -1949,7 +1950,7 @@ public class DataNode extends Configured
         LOG.warn(bpReg + ":Failed to transfer " + b + " to " + targets[0].getName()
             + " got " + StringUtils.stringifyException(ie));
         // check if there are any disk problem
-        datanode.checkDiskError();
+        checkDiskError();
         
       } finally {
         xmitsInProgress.getAndDecrement();
@@ -2492,13 +2493,17 @@ public class DataNode extends Configured
   /** {@inheritDoc} */
   @Override // ClientDataNodeProtocol
   public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException {
+    checkWriteAccess(block);
+    return data.getReplicaVisibleLength(block);
+  }
+
+  private void checkWriteAccess(final ExtendedBlock block) throws IOException {
     if (isBlockTokenEnabled) {
       Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
           .getTokenIdentifiers();
       if (tokenIds.size() != 1) {
-        throw new IOException("Can't continue with getReplicaVisibleLength() "
-            + "authorization since none or more than one BlockTokenIdentifier "
-            + "is found.");
+        throw new IOException("Can't continue since none or more than one "
+            + "BlockTokenIdentifier is found.");
       }
       for (TokenIdentifier tokenId : tokenIds) {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
@@ -2509,10 +2514,53 @@ public class DataNode extends Configured
             BlockTokenSecretManager.AccessMode.READ);
       }
     }
+  }
 
-    return data.getReplicaVisibleLength(block);
+  /**
+   * Transfer a block to the datanode targets.
+   * @return rbw's visible length
+   */
+  long transferBlockForPipelineRecovery(final ExtendedBlock b,
+      final DatanodeInfo[] targets) throws IOException {
+    checkWriteAccess(b);
+    final Block stored;
+    final boolean isRbw;
+    final long visible;
+
+    //get replica information
+    synchronized(data) {
+      stored = data.getStoredBlock(b.getBlockPoolId(), b.getBlockId());
+      if (stored.getGenerationStamp() < b.getGenerationStamp()) {
+        throw new IOException(
+            "stored.getGenerationStamp() < b.getGenerationStamp(), stored="
+            + stored + ", b=" + b);        
+      }
+      isRbw = data.isValidRbw(b);
+      visible = data.getReplicaVisibleLength(b);
+    }
+
+    if (targets.length > 0) {
+      if (isRbw) {
+        //transfer rbw
+        new DataTransfer(targets, b, BlockConstructionStage.TRANSFER_RBW).run();
+      } else {
+        //transfer finalized replica
+        transferBlock(new ExtendedBlock(b.getBlockPoolId(), stored), targets);
+      }
+    }
+    //TODO: should return: visible + storedGS + isRbw
+    return visible;
   }
-  
+
+  /**
+   * Convert an existing temporary replica to an rbw.
+   * @param temporary specifies id, gs and visible bytes.
+   * @throws IOException
+   */
+  void convertTemporaryToRbw(final ExtendedBlock temporary) throws IOException {
+    data.convertTemporaryToRbw(temporary);
+  }
+
   // Determine a Datanode's streaming address
   public static InetSocketAddress getStreamingAddr(Configuration conf) {
     return NetUtils.createSocketAddr(

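The new transferBlockForPipelineRecovery first checks write access, then snapshots the replica state under the dataset lock, and finally chooses between the new TRANSFER_RBW path and the ordinary replication transfer. A simplified, self-contained model of that flow follows; the Dataset interface and method names are illustrative, not the Hadoop classes.

/** Simplified model of the flow added to DataNode for pipeline recovery. */
final class RecoveryTransferSketch {
  interface Dataset {
    boolean isValidRbw(long blockId);
    long visibleLength(long blockId);
  }

  /** Snapshot the replica state under the dataset lock, then either push the
   *  RBW with TRANSFER_RBW or fall back to ordinary replication; the visible
   *  length is returned to the caller, mirroring the method above. */
  static long transferForRecovery(Dataset data, long blockId, int targetCount,
      Runnable transferRbw, Runnable transferFinalized) {
    final boolean isRbw;
    final long visible;
    synchronized (data) {               // consistent snapshot of replica state
      isRbw = data.isValidRbw(blockId);
      visible = data.visibleLength(blockId);
    }
    if (targetCount > 0) {
      if (isRbw) {
        transferRbw.run();              // DataTransfer with TRANSFER_RBW
      } else {
        transferFinalized.run();        // existing replication transfer path
      }
    }
    return visible;
  }
}
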
Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1125575&r1=1125574&r2=1125575&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Fri May 20 22:25:27 2011
@@ -371,7 +371,8 @@ class DataXceiver extends DataTransferPr
       // if this write is for a replication request or recovering
       // a failed close for client, then confirm block. For other client-writes,
       // the block is finalized in the PacketResponder.
-      if (client.length() == 0 || 
+      if ((client.length() == 0 && stage != BlockConstructionStage.TRANSFER_RBW)
+          ||
           stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
         datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT);
         LOG.info("Received block " + block + 

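The DataXceiver change mirrors BlockReceiver: a datanode-to-datanode write is still confirmed to the namenode via closeBlock, except when the stage is TRANSFER_RBW, since that replica deliberately stays unfinalized. A compact restatement of the predicate, with booleans standing in for the checks above:

/** Sketch of when DataXceiver now reports a finished write to the namenode. */
final class DataXceiverSketch {
  static boolean confirmBlockToNamenode(boolean isDatanodeWrite,
      boolean isTransferRbw, boolean isPipelineCloseRecovery) {
    // A TRANSFER_RBW deliberately leaves the replica unfinalized, so it is
    // not reported; everything else keeps the pre-change behaviour.
    return (isDatanodeWrite && !isTransferRbw) || isPipelineCloseRecovery;
  }
}
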
Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1125575&r1=1125574&r2=1125575&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Fri May 20 22:25:27 2011
@@ -130,21 +130,7 @@ public class FSDataset implements FSCons
     private File addBlock(Block b, File src, boolean createOk, 
                           boolean resetIdx) throws IOException {
       if (numBlocks < maxBlocksPerDir) {
-        File dest = new File(dir, b.getBlockName());
-        File metaData = getMetaFile(src, b.getGenerationStamp());
-        File newmeta = getMetaFile(dest, b.getGenerationStamp());
-        if ( ! metaData.renameTo( newmeta ) ||
-            ! src.renameTo( dest ) ) {
-          throw new IOException( "could not move files for " + b +
-                                 " from " + src + " to " + 
-                                 dest.getAbsolutePath() + " or from"
-                                 + metaData + " to " + newmeta);
-        }
-        if (DataNode.LOG.isDebugEnabled()) {
-          DataNode.LOG.debug("addBlock: Moved " + metaData + " to " + newmeta);
-          DataNode.LOG.debug("addBlock: Moved " + src + " to " + dest);
-        }
-
+        final File dest = moveBlockFiles(b, src, dir);
         numBlocks += 1;
         return dest;
       }
@@ -1364,6 +1350,26 @@ public class FSDataset implements FSCons
     return info.unlinkBlock(numLinks);
   }
 
+  private static File moveBlockFiles(Block b, File srcfile, File destdir
+      ) throws IOException {
+    final File dstfile = new File(destdir, b.getBlockName());
+    final File srcmeta = getMetaFile(srcfile, b.getGenerationStamp());
+    final File dstmeta = getMetaFile(dstfile, b.getGenerationStamp());
+    if (!srcmeta.renameTo(dstmeta)) {
+      throw new IOException("Failed to move meta file for " + b
+          + " from " + srcmeta + " to " + dstmeta);
+    }
+    if (!srcfile.renameTo(dstfile)) {
+      throw new IOException("Failed to move block file for " + b
+          + " from " + srcfile + " to " + dstfile.getAbsolutePath());
+    }
+    if (DataNode.LOG.isDebugEnabled()) {
+      DataNode.LOG.debug("addBlock: Moved " + srcmeta + " to " + dstmeta);
+      DataNode.LOG.debug("addBlock: Moved " + srcfile + " to " + dstfile);
+    }
+    return dstfile;
+  }
+
   static private void truncateBlock(File blockFile, File metaFile,
       long oldlen, long newlen) throws IOException {
     DataNode.LOG.info("truncateBlock: blockFile=" + blockFile
@@ -1695,6 +1701,57 @@ public class FSDataset implements FSCons
   }
   
   @Override // FSDatasetInterface
+  public synchronized ReplicaInPipelineInterface convertTemporaryToRbw(
+      final ExtendedBlock b) throws IOException {
+    final long blockId = b.getBlockId();
+    final long expectedGs = b.getGenerationStamp();
+    final long visible = b.getNumBytes();
+    DataNode.LOG.info("Convert the temporary replica " + b
+        + " to RBW, visible length is " + visible);
+
+    // get replica
+    final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
+    if (r == null) {
+      throw new ReplicaNotFoundException(
+          ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
+    }
+    // check the replica's state
+    if (r.getState() != ReplicaState.TEMPORARY) {
+      throw new ReplicaNotFoundException(
+          "r.getState() != ReplicaState.TEMPORARY, r=" + r);
+    }
+    // check generation stamp
+    if (r.getGenerationStamp() != expectedGs) {
+      throw new ReplicaNotFoundException(
+          "r.getGenerationStamp() != expectedGs = " + expectedGs + ", r=" + r);
+    }
+    // check length
+    final long numBytes = r.getNumBytes();
+    if (numBytes < visible) {
+      throw new ReplicaNotFoundException(numBytes + " = numBytes < visible = "
+          + visible + ", r=" + r);
+    }
+    // check volume
+    final FSVolume v = r.getVolume();
+    if (v == null) {
+      throw new IOException("r.getVolume() = null, temp="  + r);
+    }
+    
+    // move block files to the rbw directory
+    BlockPoolSlice bpslice = v.getBlockPoolSlice(b.getBlockPoolId());
+    final File dest = moveBlockFiles(b.getLocalBlock(), r.getBlockFile(),
+        bpslice.getRbwDir());
+    // create RBW
+    final ReplicaBeingWritten rbw = new ReplicaBeingWritten(
+        blockId, numBytes, expectedGs,
+        v, dest.getParentFile(), Thread.currentThread());
+    rbw.setBytesAcked(visible);
+    // overwrite the RBW in the volume map
+    volumeMap.add(b.getBlockPoolId(), rbw);
+    return rbw;
+  }
+
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b)
       throws IOException {
     ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
@@ -1888,14 +1945,24 @@ public class FSDataset implements FSCons
    */
   @Override // FSDatasetInterface
   public boolean isValidBlock(ExtendedBlock b) {
-    ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
+    return isValid(b, ReplicaState.FINALIZED);
+  }
+
+  /**
+   * Check whether the given block is a valid RBW.
+   */
+  @Override // {@link FSDatasetInterface}
+  public boolean isValidRbw(final ExtendedBlock b) {
+    return isValid(b, ReplicaState.RBW);
+  }
+
+  /** Does the block exist and have the given state? */
+  private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
+    final ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
         b.getLocalBlock());
-    
-    if (replicaInfo == null || 
-        replicaInfo.getState() != ReplicaState.FINALIZED) {
-      return false;
-    }
-    return replicaInfo.getBlockFile().exists();
+    return replicaInfo != null
+        && replicaInfo.getState() == state
+        && replicaInfo.getBlockFile().exists();
   }
 
   /**

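moveBlockFiles factors the rename logic out of addBlock so that convertTemporaryToRbw can reuse it when relocating a temporary replica into the rbw directory: the meta file is moved first, then the block file, and either failure raises an IOException. A standalone sketch of the same pattern using java.nio.file; the paths and main() layout here are hypothetical, since the real code derives them from the Block, its generation stamp and the FSVolume.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

/** Standalone sketch of the moveBlockFiles pattern: rename the meta file
 *  first, then the block file, failing loudly if either move does not
 *  happen.  Paths are hypothetical, not the real datanode layout. */
final class MoveBlockFilesSketch {
  static Path moveBlockFiles(Path blockFile, Path metaFile, Path destDir)
      throws IOException {
    final Path dstBlock = destDir.resolve(blockFile.getFileName());
    final Path dstMeta = destDir.resolve(metaFile.getFileName());
    Files.move(metaFile, dstMeta);   // meta first, as in the diff
    Files.move(blockFile, dstBlock); // then the block data itself
    return dstBlock;
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical layout mirroring a temporary-to-RBW conversion.
    Path tmp = Files.createTempDirectory("tmp");
    Path rbw = Files.createTempDirectory("rbw");
    Path block = Files.createFile(tmp.resolve("blk_1"));
    Path meta = Files.createFile(tmp.resolve("blk_1_1001.meta"));
    System.out.println("moved to " + moveBlockFiles(block, meta, rbw));
  }
}
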
Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java?rev=1125575&r1=1125574&r2=1125575&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java Fri May 20 22:25:27 2011
@@ -218,6 +218,14 @@ public interface FSDatasetInterface exte
   throws IOException;
 
   /**
+   * Convert a temporary replica to an RBW.
+   * @param temporary the temporary replica being converted
+   * @return the resulting RBW
+   */
+  public ReplicaInPipelineInterface convertTemporaryToRbw(
+      ExtendedBlock temporary) throws IOException;
+
+  /**
    * Append to a finalized replica and returns the meta info of the replica
    * 
    * @param b block
@@ -287,6 +295,13 @@ public interface FSDatasetInterface exte
   public boolean isValidBlock(ExtendedBlock b);
 
   /**
+   * Is the block a valid RBW?
+   * @param b
+   * @return - true if the specified block is a valid RBW
+   */
+  public boolean isValidRbw(ExtendedBlock b);
+
+  /**
    * Invalidates the specified blocks
    * @param bpid Block pool Id
    * @param invalidBlks - the blocks to be invalidated

Propchange: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:25:27 2011
@@ -5,4 +5,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:820487
-/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:25:27 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/test/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/test/hdfs:820487
-/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1125575&r1=1125574&r2=1125575&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Fri May 20 22:25:27 2011
@@ -431,7 +431,7 @@ public class SimulatedFSDataset  impleme
 
   @Override // FSDatasetInterface
   public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
-    if (isBeingWritten(b)) {
+    if (isValidRbw(b)) {
       blockMap.remove(b.getLocalBlock());
     }
   }
@@ -549,8 +549,8 @@ public class SimulatedFSDataset  impleme
     return binfo.isFinalized();
   }
 
-  /* check if a block is created but not finalized */
-  private synchronized boolean isBeingWritten(ExtendedBlock b) {
+  @Override
+  public synchronized boolean isValidRbw(Block b) {
     final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
     if (map == null) {
       return false;
@@ -561,7 +561,8 @@ public class SimulatedFSDataset  impleme
     }
     return !binfo.isFinalized();  
   }
-  
+
+  @Override
   public String toString() {
     return getStorageInfo();
   }
@@ -646,7 +647,7 @@ public class SimulatedFSDataset  impleme
           throw new ReplicaAlreadyExistsException("Block " + b + 
               " is valid, and cannot be written to.");
       }
-    if (isBeingWritten(b)) {
+    if (isValidRbw(b)) {
         throw new ReplicaAlreadyExistsException("Block " + b + 
             " is being written, and cannot be written to.");
     }
@@ -957,4 +958,17 @@ public class SimulatedFSDataset  impleme
   public void deleteBlockPool(String bpid, boolean force) {
      return;
   }
+
+  @Override
+  public ReplicaInPipelineInterface convertTemporaryToRbw(Block temporary)
+      throws IOException {
+    final BInfo r = blockMap.get(temporary);
+    if (r == null) {
+      throw new IOException("Block not found, temporary=" + temporary);
+    } else if (r.isFinalized()) {
+      throw new IOException("Replica already finalized, temporary="
+          + temporary + ", r=" + r);
+    }
+    return r;
+  }
 }

Propchange: hadoop/hdfs/branches/yahoo-merge/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:25:27 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
-/hadoop/hdfs/trunk/src/webapps/datanode:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/webapps/datanode:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:25:27 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
-/hadoop/hdfs/trunk/src/webapps/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/webapps/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 20 22:25:27 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary:1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
-/hadoop/hdfs/trunk/src/webapps/secondary:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/webapps/secondary:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035508,1039957,1040005,1052823,1060619,1061067,1062020,1062045,1062052,1071518,1074282,1080095,1080380,1080836,1081580,1083951,1087080,1091619,1092584,1095245,1095789,1096846,1097648,1097969,1098867,1099640,1101324,1101753,1104395,1104407,1124576


