hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1301663 - in /hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/server/datanode/ src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ src/test/java/org/apa...
Date: Fri, 16 Mar 2012 17:36:58 GMT
Author: szetszwo
Date: Fri Mar 16 17:36:57 2012
New Revision: 1301663

URL: http://svn.apache.org/viewvc?rev=1301663&view=rev
Log:
svn merge -c 1301661 from trunk for HDFS-3088.

Added:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/
      - copied from r1301661, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
      - copied unchanged from r1301661, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/LengthInputStream.java
      - copied unchanged from r1301661, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/LengthInputStream.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java
      - copied unchanged from r1301661, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaOutputStreams.java
      - copied unchanged from r1301661, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaOutputStreams.java
Modified:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
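
HDFS-3088 moves the FSDatasetInterface inner classes into the new org.apache.hadoop.hdfs.server.datanode.fsdataset package. The added files are copied unchanged from trunk, so their bodies do not appear in this diff; as a rough guide, FsVolumeSpi presumably mirrors the removed FSDatasetInterface.FSVolumeInterface (see the FSDatasetInterface.java hunk below). A minimal sketch, assuming the method set carries over unchanged:

package org.apache.hadoop.hdfs.server.datanode.fsdataset;

import java.io.File;
import java.io.IOException;

/**
 * Sketch only: assumes the methods of the removed
 * FSDatasetInterface.FSVolumeInterface carry over unchanged.
 */
public interface FsVolumeSpi {
  /** @return a list of block pools. */
  public String[] getBlockPoolList();

  /** @return the available storage space in bytes. */
  public long getAvailable() throws IOException;

  /** @return the path to the volume. */
  public String getPath(String bpid) throws IOException;

  /** @return the directory for the finalized blocks in the block pool. */
  public File getFinalizedDir(String bpid) throws IOException;
}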

Propchange: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar 16 17:36:57 2012
@@ -1,5 +1,5 @@
 /hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs:1227776-1294021
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1204709,1204825,1205146,1205260,1205626,1205697,1206178,1206786,1206830,1207585,
 1207694,1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519,1242087,1242891,1243065,1243104,1243654,1243690,1244766,1245751,1245762,1292419,1292626,1293419,1293487,1295061,1295227,1295929,1297328,1298044,1298066,1298696,1298700,1299045,1299139,1299144,1299434,1299963,1300392,1301127,1301287,1301295,1301308,1301312
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1204709,1204825,1205146,1205260,1205626,1205697,1206178,1206786,1206830,1207585,
 1207694,1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519,1242087,1242891,1243065,1243104,1243654,1243690,1244766,1245751,1245762,1292419,1292626,1293419,1293487,1295061,1295227,1295929,1297328,1298044,1298066,1298696,1298700,1299045,1299139,1299144,1299434,1299963,1300392,1301127,1301287,1301295,1301308,1301312,1301661
 /hadoop/core/branches/branch-0.19/hdfs:713112
 /hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Mar 16 17:36:57 2012
@@ -141,6 +141,8 @@ Release 0.23.3 - UNRELEASED
     HDFS-3057. httpfs and hdfs launcher scripts should honor CATALINA_HOME 
     and HADOOP_LIBEXEC_DIR (rvs via tucu)
 
+    HDFS-3088. Move FSDatasetInterface inner classes to a package.  (szetszwo)
+
   OPTIMIZATIONS
     HDFS-2477. Optimize computing the diff between a block report and the
                namenode state. (Tomasz Nykiel via hairong)

Propchange: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar 16 17:36:57 2012
@@ -1,5 +1,5 @@
 /hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1227776-1294021
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1205146,1205260,1205697,1206786,1206830,1207694,1208140,1208153,12
 08313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519,1242087,1242891,1243065,1243104,1243654,1244766,1245751,1245762,1292419,1293419,1293487,1295061,1295227,1295929,1297328,1298044,1298696,1298700,1299045,1299139,1299144,1299434,1299963,1300392,1301127,1301287,1301308,1301312
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1205146,1205260,1205697,1206786,1206830,1207694,1208140,1208153,12
 08313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519,1242087,1242891,1243065,1243104,1243654,1244766,1245751,1245762,1292419,1293419,1293487,1295061,1295227,1295929,1297328,1298044,1298696,1298700,1299045,1299139,1299144,1299434,1299963,1300392,1301127,1301287,1301308,1301312,1301661
 /hadoop/core/branches/branch-0.19/hdfs/src/java:713112
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
 /hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Fri Mar 16 17:36:57 2012
@@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 
@@ -72,7 +72,7 @@ class BlockPoolSliceScanner {
   private final AtomicLong lastScanTime = new AtomicLong();
 
   private final DataNode datanode;
-  private final FSDatasetInterface<? extends FSVolumeInterface> dataset;
+  private final FSDatasetInterface<? extends FsVolumeSpi> dataset;
   
   private final SortedSet<BlockScanInfo> blockInfoSet
       = new TreeSet<BlockScanInfo>();
@@ -134,7 +134,7 @@ class BlockPoolSliceScanner {
   }
   
   BlockPoolSliceScanner(String bpid, DataNode datanode,
-      FSDatasetInterface<? extends FSVolumeInterface> dataset,
+      FSDatasetInterface<? extends FsVolumeSpi> dataset,
       Configuration conf) {
     this.datanode = datanode;
     this.dataset = dataset;

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Fri Mar 16 17:36:57 2012
@@ -38,12 +38,12 @@ import org.apache.hadoop.fs.FSOutputSumm
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
@@ -86,7 +86,7 @@ class BlockReceiver implements Closeable
   private DataOutputStream mirrorOut;
   private Daemon responder = null;
   private DataTransferThrottler throttler;
-  private FSDataset.BlockWriteStreams streams;
+  private ReplicaOutputStreams streams;
   private DatanodeInfo srcDataNode = null;
   private Checksum partialCrc = null;
   private final DataNode datanode;
@@ -202,16 +202,16 @@ class BlockReceiver implements Closeable
       this.bytesPerChecksum = diskChecksum.getBytesPerChecksum();
       this.checksumSize = diskChecksum.getChecksumSize();
 
-      this.out = streams.dataOut;
+      this.out = streams.getDataOut();
       if (out instanceof FileOutputStream) {
         this.outFd = ((FileOutputStream)out).getFD();
       } else {
         LOG.warn("Could not get file descriptor for outputstream of class " +
             out.getClass());
       }
-      this.cout = streams.checksumOut;
+      this.cout = streams.getChecksumOut();
       this.checksumOut = new DataOutputStream(new BufferedOutputStream(
-          streams.checksumOut, HdfsConstants.SMALL_BUFFER_SIZE));
+          cout, HdfsConstants.SMALL_BUFFER_SIZE));
       // write data chunk header if creating a new replica
       if (isCreate) {
         BlockMetadataHeader.writeHeader(checksumOut, diskChecksum);
@@ -856,13 +856,13 @@ class BlockReceiver implements Closeable
     //
     byte[] buf = new byte[sizePartialChunk];
     byte[] crcbuf = new byte[checksumSize];
-    FSDataset.BlockInputStreams instr = null;
+    ReplicaInputStreams instr = null;
     try { 
       instr = datanode.data.getTmpInputStreams(block, blkoff, ckoff);
-      IOUtils.readFully(instr.dataIn, buf, 0, sizePartialChunk);
+      IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);
 
       // open meta file and read in crc value computer earlier
-      IOUtils.readFully(instr.checksumIn, crcbuf, 0, crcbuf.length);
+      IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
     } finally {
       IOUtils.closeStream(instr);
     }
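
Note the switch above from direct field access (streams.dataOut, instr.dataIn) to accessors (getDataOut(), getDataIn(), and so on). The new ReplicaOutputStreams and ReplicaInputStreams are copied unchanged from trunk and not shown here; a minimal sketch, assuming they keep the behaviour of the removed BlockWriteStreams and BlockInputStreams inner classes (see the FSDatasetInterface.java hunk further down) and merely hide the streams behind getters:

package org.apache.hadoop.hdfs.server.datanode.fsdataset;

import java.io.Closeable;
import java.io.OutputStream;

import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;

/** Sketch only: data and checksum output streams of a replica,
 *  assumed equivalent to the removed BlockWriteStreams plus getters. */
public class ReplicaOutputStreams implements Closeable {
  private final OutputStream dataOut;
  private final OutputStream checksumOut;
  private final DataChecksum checksum;

  public ReplicaOutputStreams(OutputStream dataOut, OutputStream checksumOut,
      DataChecksum checksum) {
    this.dataOut = dataOut;
    this.checksumOut = checksumOut;
    this.checksum = checksum;
  }

  public OutputStream getDataOut() { return dataOut; }
  public OutputStream getChecksumOut() { return checksumOut; }
  public DataChecksum getChecksum() { return checksum; }

  @Override
  public void close() {
    IOUtils.closeStream(dataOut);
    IOUtils.closeStream(checksumOut);
  }
}

ReplicaInputStreams is presumably the analogous wrapper over the two input streams, exposing getDataIn() and getChecksumIn() and closing both in close(), matching the calls in the hunk above.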

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java Fri Mar 16 17:36:57 2012
@@ -21,7 +21,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 
 /**************************************************
  * BlockVolumeChoosingPolicy allows a DataNode to
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.server.dat
  *
  ***************************************************/
 @InterfaceAudience.Private
-public interface BlockVolumeChoosingPolicy<V extends FSVolumeInterface> {
+public interface BlockVolumeChoosingPolicy<V extends FsVolumeSpi> {
 
   /**
    * Returns a specific FSVolume after applying a suitable choice algorithm

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Fri Mar 16 17:36:57 2012
@@ -31,7 +31,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 
 /**
  * DataBlockScanner manages block scanning for all the block pools. For each
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.dat
 public class DataBlockScanner implements Runnable {
   public static final Log LOG = LogFactory.getLog(DataBlockScanner.class);
   private final DataNode datanode;
-  private final FSDatasetInterface<? extends FSVolumeInterface> dataset;
+  private final FSDatasetInterface<? extends FsVolumeSpi> dataset;
   private final Configuration conf;
   
   /**
@@ -55,7 +55,7 @@ public class DataBlockScanner implements
   Thread blockScannerThread = null;
   
   DataBlockScanner(DataNode datanode,
-      FSDatasetInterface<? extends FSVolumeInterface> dataset,
+      FSDatasetInterface<? extends FsVolumeSpi> dataset,
       Configuration conf) {
     this.datanode = datanode;
     this.dataset = dataset;

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Mar 16 17:36:57 2012
@@ -123,8 +123,8 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets;
@@ -235,7 +235,7 @@ public class DataNode extends Configured
   
   volatile boolean shouldRun = true;
   private BlockPoolManager blockPoolManager;
-  volatile FSDatasetInterface<? extends FSVolumeInterface> data = null;
+  volatile FSDatasetInterface<? extends FsVolumeSpi> data = null;
   private String clusterId = null;
 
   public final static String EMPTY_DEL_HINT = "";

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Fri Mar 16 17:36:57 2012
@@ -55,7 +55,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
@@ -511,7 +511,7 @@ class DataXceiver extends Receiver imple
     checkAccess(out, true, block, blockToken,
         Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
     updateCurrentThreadName("Reading metadata for block " + block);
-    final MetaDataInputStream metadataIn = 
+    final LengthInputStream metadataIn = 
       datanode.data.getMetaDataInputStream(block);
     final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
         metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
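
MetaDataInputStream is replaced here by LengthInputStream from the new fsdataset package. Judging from the removed inner class (see the FSDatasetInterface.java hunk below) and the constructor call in FSDataset (new LengthInputStream(new FileInputStream(meta), meta.length())), it is presumably a thin wrapper along these lines (sketch, not the trunk source):

package org.apache.hadoop.hdfs.server.datanode.fsdataset;

import java.io.FilterInputStream;
import java.io.InputStream;

/** Sketch only: an input stream that also carries its length,
 *  assumed equivalent to the removed MetaDataInputStream. */
public class LengthInputStream extends FilterInputStream {
  private final long length;

  public LengthInputStream(InputStream stream, long len) {
    super(stream);
    this.length = len;
  }

  /** @return the length of the wrapped stream. */
  public long getLength() {
    return length;
  }
}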

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java Fri Mar 16 17:36:57 2012
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.util.Daemon;
 
 /**
@@ -157,13 +157,13 @@ public class DirectoryScanner implements
     private final long blockId;
     private final File metaFile;
     private final File blockFile;
-    private final FSVolumeInterface volume;
+    private final FsVolumeSpi volume;
 
     ScanInfo(long blockId) {
       this(blockId, null, null, null);
     }
 
-    ScanInfo(long blockId, File blockFile, File metaFile, FSVolumeInterface vol) {
+    ScanInfo(long blockId, File blockFile, File metaFile, FsVolumeSpi vol) {
       this.blockId = blockId;
       this.metaFile = metaFile;
       this.blockFile = blockFile;
@@ -182,7 +182,7 @@ public class DirectoryScanner implements
       return blockId;
     }
 
-    FSVolumeInterface getVolume() {
+    FsVolumeSpi getVolume() {
       return volume;
     }
 
@@ -412,8 +412,8 @@ public class DirectoryScanner implements
 
   /** Is the given volume still valid in the dataset? */
   private static boolean isValid(final FSDatasetInterface<?> dataset,
-      final FSVolumeInterface volume) {
-    for (FSVolumeInterface vol : dataset.getVolumes()) {
+      final FsVolumeSpi volume) {
+    for (FsVolumeSpi vol : dataset.getVolumes()) {
       if (vol == volume) {
         return true;
       }
@@ -424,7 +424,7 @@ public class DirectoryScanner implements
   /** Get lists of blocks on the disk sorted by blockId, per blockpool */
   private Map<String, ScanInfo[]> getDiskReport() {
     // First get list of data directories
-    final List<? extends FSVolumeInterface> volumes = dataset.getVolumes();
+    final List<? extends FsVolumeSpi> volumes = dataset.getVolumes();
     ArrayList<ScanInfoPerBlockPool> dirReports =
       new ArrayList<ScanInfoPerBlockPool>(volumes.size());
     
@@ -473,9 +473,9 @@ public class DirectoryScanner implements
 
   private static class ReportCompiler 
   implements Callable<ScanInfoPerBlockPool> {
-    private FSVolumeInterface volume;
+    private FsVolumeSpi volume;
 
-    public ReportCompiler(FSVolumeInterface volume) {
+    public ReportCompiler(FsVolumeSpi volume) {
       this.volume = volume;
     }
 
@@ -492,7 +492,7 @@ public class DirectoryScanner implements
     }
 
     /** Compile list {@link ScanInfo} for the blocks in the directory <dir> */
-    private LinkedList<ScanInfo> compileReport(FSVolumeInterface vol, File dir,
+    private LinkedList<ScanInfo> compileReport(FsVolumeSpi vol, File dir,
         LinkedList<ScanInfo> report) {
       File[] files;
       try {

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Fri Mar 16 17:36:57 2012
@@ -61,7 +61,10 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
@@ -548,7 +551,7 @@ class FSDataset implements FSDatasetInte
    * 
    * It uses the {@link FSDataset} object for synchronization.
    */
-  static class FSVolume implements FSVolumeInterface {
+  static class FSVolume implements FsVolumeSpi {
     private final FSDataset dataset;
     private final Map<String, BlockPoolSlice> map = new HashMap<String, BlockPoolSlice>();
     private final File currentDir;    // <StorageDirectory>/current
@@ -865,7 +868,7 @@ class FSDataset implements FSDatasetInte
       
     private long getRemaining() throws IOException {
       long remaining = 0L;
-      for (FSVolumeInterface vol : volumes) {
+      for (FsVolumeSpi vol : volumes) {
         remaining += vol.getAvailable();
       }
       return remaining;
@@ -1052,13 +1055,13 @@ class FSDataset implements FSDatasetInte
   }
 
   @Override // FSDatasetInterface
-  public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b)
+  public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
       throws IOException {
     final File meta = getMetaFile(b);
     if (meta == null || !meta.exists()) {
       return null;
     }
-    return new MetaDataInputStream(new FileInputStream(meta), meta.length());
+    return new LengthInputStream(new FileInputStream(meta), meta.length());
   }
     
   private final DataNode datanode;
@@ -1287,7 +1290,7 @@ class FSDataset implements FSDatasetInte
    * Returns handles to the block file and its metadata file
    */
   @Override // FSDatasetInterface
-  public synchronized BlockInputStreams getTmpInputStreams(ExtendedBlock b, 
+  public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, 
                           long blkOffset, long ckoff) throws IOException {
     ReplicaInfo info = getReplicaInfo(b);
     File blockFile = info.getBlockFile();
@@ -1300,7 +1303,7 @@ class FSDataset implements FSDatasetInte
     if (ckoff > 0) {
       metaInFile.seek(ckoff);
     }
-    return new BlockInputStreams(new FileInputStream(blockInFile.getFD()),
+    return new ReplicaInputStreams(new FileInputStream(blockInFile.getFD()),
                                 new FileInputStream(metaInFile.getFD()));
   }
     
@@ -1742,9 +1745,9 @@ class FSDataset implements FSDatasetInte
    * last checksum will be overwritten.
    */
   @Override // FSDatasetInterface
-  public void adjustCrcChannelPosition(ExtendedBlock b, BlockWriteStreams streams, 
+  public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams, 
       int checksumSize) throws IOException {
-    FileOutputStream file = (FileOutputStream) streams.checksumOut;
+    FileOutputStream file = (FileOutputStream) streams.getChecksumOut();
     FileChannel channel = file.getChannel();
     long oldPos = channel.position();
     long newPos = oldPos - checksumSize;
@@ -2195,7 +2198,7 @@ class FSDataset implements FSDatasetInte
    */
   @Override
   public void checkAndUpdate(String bpid, long blockId, File diskFile,
-      File diskMetaFile, FSVolumeInterface vol) {
+      File diskMetaFile, FsVolumeSpi vol) {
     Block corruptBlock = null;
     ReplicaInfo memBlockInfo;
     synchronized (this) {

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java Fri Mar 16 17:36:57 2012
@@ -18,12 +18,9 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 
-import java.io.Closeable;
 import java.io.File;
-import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.OutputStream;
 import java.util.List;
 import java.util.Map;
 
@@ -34,11 +31,13 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.ReflectionUtils;
 
@@ -50,7 +49,7 @@ import org.apache.hadoop.util.Reflection
  *
  */
 @InterfaceAudience.Private
-public interface FSDatasetInterface<V extends FSDatasetInterface.FSVolumeInterface>
+public interface FSDatasetInterface<V extends FsVolumeSpi>
     extends FSDatasetMBean {
   /**
    * A factory for creating FSDatasetInterface objects.
@@ -78,24 +77,6 @@ public interface FSDatasetInterface<V ex
   }
 
   /**
-   * This is an interface for the underlying volume.
-   * @see org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume
-   */
-  interface FSVolumeInterface {
-    /** @return a list of block pools. */
-    public String[] getBlockPoolList();
-
-    /** @return the available storage space in bytes. */
-    public long getAvailable() throws IOException;
-
-    /** @return the path to the volume */
-    public String getPath(String bpid) throws IOException;
-
-    /** @return the directory for the finalized blocks in the block pool. */
-    public File getFinalizedDir(String bpid) throws IOException;
-  }
-
-  /**
    * Create rolling logs.
    * 
    * @param prefix the prefix of the log names.
@@ -121,32 +102,15 @@ public interface FSDatasetInterface<V ex
    * as corrupted.
    */
   public void checkAndUpdate(String bpid, long blockId, File diskFile,
-      File diskMetaFile, FSVolumeInterface vol);
+      File diskMetaFile, FsVolumeSpi vol);
 
   /**
-   * This class provides the input stream and length of the metadata
-   * of a block
-   *
-   */
-  static class MetaDataInputStream extends FilterInputStream {
-    MetaDataInputStream(InputStream stream, long len) {
-      super(stream);
-      length = len;
-    }
-    private long length;
-    
-    public long getLength() {
-      return length;
-    }
-  }
-  
-  /**
    * @param b - the block
    * @return a stream if the meta-data of the block exists;
    *         otherwise, return null.
    * @throws IOException
    */
-  public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b
+  public LengthInputStream getMetaDataInputStream(ExtendedBlock b
       ) throws IOException;
 
   /**
@@ -197,58 +161,10 @@ public interface FSDatasetInterface<V ex
    *  starting at the offset
    * @throws IOException
    */
-  public BlockInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
+  public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
       long ckoff) throws IOException;
 
      /**
-      * 
-      * This class contains the output streams for the data and checksum
-      * of a block
-      *
-      */
-     static class BlockWriteStreams {
-      OutputStream dataOut;
-      OutputStream checksumOut;
-      DataChecksum checksum;
-      
-      BlockWriteStreams(OutputStream dOut, OutputStream cOut,
-          DataChecksum checksum) {
-        dataOut = dOut;
-        checksumOut = cOut;
-        this.checksum = checksum;
-      }
-      
-      void close() {
-        IOUtils.closeStream(dataOut);
-        IOUtils.closeStream(checksumOut);
-      }
-      
-      DataChecksum getChecksum() {
-        return checksum;
-      }
-    }
-
-  /**
-   * This class contains the input streams for the data and checksum
-   * of a block
-   */
-  static class BlockInputStreams implements Closeable {
-    final InputStream dataIn;
-    final InputStream checksumIn;
-
-    BlockInputStreams(InputStream dataIn, InputStream checksumIn) {
-      this.dataIn = dataIn;
-      this.checksumIn = checksumIn;
-    }
-
-    @Override
-    public void close() {
-      IOUtils.closeStream(dataIn);
-      IOUtils.closeStream(checksumIn);
-    }
-  }
-    
-  /**
    * Creates a temporary replica and returns the meta information of the replica
    * 
    * @param b block
@@ -395,7 +311,7 @@ public interface FSDatasetInterface<V ex
    * @param checksumSize number of bytes each checksum has
    * @throws IOException
    */
-  public void adjustCrcChannelPosition(ExtendedBlock b, BlockWriteStreams stream, 
+  public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams stream, 
       int checksumSize) throws IOException;
 
   /**

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java Fri Mar 16 17:36:57 2012
@@ -21,7 +21,7 @@ import java.io.File;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 
 /**
  * This class describes a replica that has been finalized.
@@ -38,7 +38,7 @@ class FinalizedReplica extends ReplicaIn
    * @param dir directory path where block and meta files are located
    */
   FinalizedReplica(long blockId, long len, long genStamp,
-      FSVolumeInterface vol, File dir) {
+      FsVolumeSpi vol, File dir) {
     super(blockId, len, genStamp, vol, dir);
   }
   
@@ -48,7 +48,7 @@ class FinalizedReplica extends ReplicaIn
    * @param vol volume where replica is located
    * @param dir directory path where block and meta files are located
    */
-  FinalizedReplica(Block block, FSVolumeInterface vol, File dir) {
+  FinalizedReplica(Block block, FsVolumeSpi vol, File dir) {
     super(block, vol, dir);
   }
 

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java Fri Mar 16 17:36:57 2012
@@ -21,7 +21,7 @@ import java.io.File;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 
 /** This class represents replicas being written. 
  * Those are the replicas that
@@ -36,7 +36,7 @@ class ReplicaBeingWritten extends Replic
    * @param dir directory path where block and meta files are located
    */
   ReplicaBeingWritten(long blockId, long genStamp, 
-        FSVolumeInterface vol, File dir) {
+        FsVolumeSpi vol, File dir) {
     super( blockId, genStamp, vol, dir);
   }
   
@@ -48,7 +48,7 @@ class ReplicaBeingWritten extends Replic
    * @param writer a thread that is writing to this replica
    */
   ReplicaBeingWritten(Block block, 
-      FSVolumeInterface vol, File dir, Thread writer) {
+      FsVolumeSpi vol, File dir, Thread writer) {
     super( block, vol, dir, writer);
   }
 
@@ -62,7 +62,7 @@ class ReplicaBeingWritten extends Replic
    * @param writer a thread that is writing to this replica
    */
   ReplicaBeingWritten(long blockId, long len, long genStamp,
-      FSVolumeInterface vol, File dir, Thread writer ) {
+      FsVolumeSpi vol, File dir, Thread writer ) {
     super( blockId, len, genStamp, vol, dir, writer);
   }
 

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java Fri Mar 16 17:36:57 2012
@@ -24,8 +24,8 @@ import java.io.RandomAccessFile;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DataChecksum;
 
@@ -53,7 +53,7 @@ class ReplicaInPipeline extends ReplicaI
    * @param state replica state
    */
     ReplicaInPipeline(long blockId, long genStamp, 
-        FSVolumeInterface vol, File dir) {
+        FsVolumeSpi vol, File dir) {
     this( blockId, 0L, genStamp, vol, dir, Thread.currentThread());
   }
 
@@ -65,7 +65,7 @@ class ReplicaInPipeline extends ReplicaI
    * @param writer a thread that is writing to this replica
    */
   ReplicaInPipeline(Block block, 
-      FSVolumeInterface vol, File dir, Thread writer) {
+      FsVolumeSpi vol, File dir, Thread writer) {
     this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
         vol, dir, writer);
   }
@@ -80,7 +80,7 @@ class ReplicaInPipeline extends ReplicaI
    * @param writer a thread that is writing to this replica
    */
   ReplicaInPipeline(long blockId, long len, long genStamp,
-      FSVolumeInterface vol, File dir, Thread writer ) {
+      FsVolumeSpi vol, File dir, Thread writer ) {
     super( blockId, len, genStamp, vol, dir);
     this.bytesAcked = len;
     this.bytesOnDisk = len;
@@ -168,7 +168,7 @@ class ReplicaInPipeline extends ReplicaI
   }
   
   @Override // ReplicaInPipelineInterface
-  public BlockWriteStreams createStreams(boolean isCreate, 
+  public ReplicaOutputStreams createStreams(boolean isCreate, 
       DataChecksum requestedChecksum) throws IOException {
     File blockFile = getBlockFile();
     File metaFile = getMetaFile();
@@ -234,7 +234,7 @@ class ReplicaInPipeline extends ReplicaI
         blockOut.getChannel().position(blockDiskSize);
         crcOut.getChannel().position(crcDiskSize);
       }
-      return new BlockWriteStreams(blockOut, crcOut, checksum);
+      return new ReplicaOutputStreams(blockOut, crcOut, checksum);
     } catch (IOException e) {
       IOUtils.closeStream(blockOut);
       IOUtils.closeStream(metaRAF);

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java Fri Mar 16 17:36:57 2012
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.da
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.util.DataChecksum;
 
 /** 
@@ -66,6 +66,6 @@ interface ReplicaInPipelineInterface ext
    * @return output streams for writing
    * @throws IOException if any error occurs
    */
-  public BlockWriteStreams createStreams(boolean isCreate,
+  public ReplicaOutputStreams createStreams(boolean isCreate,
       DataChecksum requestedChecksum) throws IOException;
 }

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java Fri Mar 16 17:36:57 2012
@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.io.IOUtils;
 
 /**
@@ -36,7 +36,7 @@ import org.apache.hadoop.io.IOUtils;
 @InterfaceAudience.Private
 abstract public class ReplicaInfo extends Block implements Replica {
   /** volume where the replica belongs */
-  private FSVolumeInterface volume;
+  private FsVolumeSpi volume;
   /** directory where block & meta files belong */
   private File dir;
 
@@ -47,7 +47,7 @@ abstract public class ReplicaInfo extend
    * @param vol volume where replica is located
    * @param dir directory path where block and meta files are located
    */
-  ReplicaInfo(long blockId, long genStamp, FSVolumeInterface vol, File dir) {
+  ReplicaInfo(long blockId, long genStamp, FsVolumeSpi vol, File dir) {
     this( blockId, 0L, genStamp, vol, dir);
   }
   
@@ -57,7 +57,7 @@ abstract public class ReplicaInfo extend
    * @param vol volume where replica is located
    * @param dir directory path where block and meta files are located
    */
-  ReplicaInfo(Block block, FSVolumeInterface vol, File dir) {
+  ReplicaInfo(Block block, FsVolumeSpi vol, File dir) {
     this(block.getBlockId(), block.getNumBytes(), 
         block.getGenerationStamp(), vol, dir);
   }
@@ -71,7 +71,7 @@ abstract public class ReplicaInfo extend
    * @param dir directory path where block and meta files are located
    */
   ReplicaInfo(long blockId, long len, long genStamp,
-      FSVolumeInterface vol, File dir) {
+      FsVolumeSpi vol, File dir) {
     super(blockId, len, genStamp);
     this.volume = vol;
     this.dir = dir;
@@ -113,14 +113,14 @@ abstract public class ReplicaInfo extend
    * Get the volume where this replica is located on disk
    * @return the volume where this replica is located on disk
    */
-  FSVolumeInterface getVolume() {
+  FsVolumeSpi getVolume() {
     return volume;
   }
   
   /**
    * Set the volume where this replica is located on disk
    */
-  void setVolume(FSVolumeInterface vol) {
+  void setVolume(FsVolumeSpi vol) {
     this.volume = vol;
   }
   

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java Fri Mar 16 17:36:57 2012
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.da
 import java.io.File;
 
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 
 /**
@@ -145,7 +145,7 @@ class ReplicaUnderRecovery extends Repli
   }
   
   @Override //ReplicaInfo
-  void setVolume(FSVolumeInterface vol) {
+  void setVolume(FsVolumeSpi vol) {
     super.setVolume(vol);
     original.setVolume(vol);
   }

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java Fri Mar 16 17:36:57 2012
@@ -21,7 +21,7 @@ import java.io.File;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 
 /**
  * This class represents a replica that is waiting to be recovered.
@@ -44,7 +44,7 @@ class ReplicaWaitingToBeRecovered extend
    * @param dir directory path where block and meta files are located
    */
   ReplicaWaitingToBeRecovered(long blockId, long len, long genStamp,
-      FSVolumeInterface vol, File dir) {
+      FsVolumeSpi vol, File dir) {
     super(blockId, len, genStamp, vol, dir);
   }
   
@@ -54,7 +54,7 @@ class ReplicaWaitingToBeRecovered extend
    * @param vol volume where replica is located
    * @param dir directory path where block and meta files are located
    */
-  ReplicaWaitingToBeRecovered(Block block, FSVolumeInterface vol, File dir) {
+  ReplicaWaitingToBeRecovered(Block block, FsVolumeSpi vol, File dir) {
     super(block, vol, dir);
   }
   

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java Fri Mar 16 17:36:57 2012
@@ -20,10 +20,10 @@ package org.apache.hadoop.hdfs.server.da
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 
-public class RoundRobinVolumesPolicy<V extends FSVolumeInterface>
+public class RoundRobinVolumesPolicy<V extends FsVolumeSpi>
     implements BlockVolumeChoosingPolicy<V> {
 
   private int curVolume = 0;

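[Note on the RoundRobinVolumesPolicy hunk above] The hunk only loosens the generic bound: the policy is now parameterized over any volume type implementing FsVolumeSpi rather than the nested FSVolumeInterface. The sketch below shows, under the assumption that a custom dataset supplies its own volume type (the names are hypothetical), how the policy can then be reused; the chooseVolume(volumes, blockSize) call mirrors its use in TestRoundRobinVolumesPolicy further down.

    package org.apache.hadoop.hdfs.server.datanode;

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

    // Illustrative only: the volume type and helper are hypothetical.
    class CustomPolicySketch {
      /** A dataset-specific volume type only needs to implement the new SPI. */
      interface CustomVolume extends FsVolumeSpi { }

      static CustomVolume pick(List<CustomVolume> volumes, int blockSize)
          throws IOException {
        RoundRobinVolumesPolicy<CustomVolume> policy =
            new RoundRobinVolumesPolicy<CustomVolume>();
        // Same call shape as in TestRoundRobinVolumesPolicy.
        return policy.chooseVolume(volumes, blockSize);
      }
    }
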
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Fri Mar 16 17:36:57 2012
@@ -39,6 +39,10 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
@@ -61,8 +65,7 @@ import org.apache.hadoop.util.DiskChecke
  * 
  * Note the synchronization is coarse grained - it is at each method. 
  */
-public class SimulatedFSDataset
-    implements FSDatasetInterface<FSDatasetInterface.FSVolumeInterface> {
+public class SimulatedFSDataset implements FSDatasetInterface<FsVolumeSpi> {
   static class Factory extends FSDatasetInterface.Factory<SimulatedFSDataset> {
     @Override
     public SimulatedFSDataset createFSDatasetInterface(DataNode datanode,
@@ -215,14 +218,14 @@ public class SimulatedFSDataset
     }
 
     @Override
-    synchronized public BlockWriteStreams createStreams(boolean isCreate, 
+    synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
         DataChecksum requestedChecksum) throws IOException {
       if (finalized) {
         throw new IOException("Trying to write to a finalized replica "
             + theBlock);
       } else {
         SimulatedOutputStream crcStream = new SimulatedOutputStream();
-        return new BlockWriteStreams(oStream, crcStream, requestedChecksum);
+        return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum);
       }
     }
 
@@ -688,13 +691,13 @@ public class SimulatedFSDataset
 
   /** Not supported */
   @Override // FSDatasetInterface
-  public BlockInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
+  public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
       long ckoff) throws IOException {
     throw new IOException("Not supported");
   }
 
   @Override // FSDatasetInterface
-  public synchronized MetaDataInputStream getMetaDataInputStream(ExtendedBlock b
+  public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
       ) throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
@@ -706,7 +709,7 @@ public class SimulatedFSDataset
           " is being written, its meta cannot be read");
     }
     final SimulatedInputStream sin = binfo.getMetaIStream();
-    return new MetaDataInputStream(sin, sin.getLength());
+    return new LengthInputStream(sin, sin.getLength());
   }
 
   @Override
@@ -716,7 +719,7 @@ public class SimulatedFSDataset
 
   @Override // FSDatasetInterface
   public synchronized void adjustCrcChannelPosition(ExtendedBlock b,
-                                              BlockWriteStreams stream, 
+                                              ReplicaOutputStreams stream, 
                                               int checksumSize)
                                               throws IOException {
   }
@@ -959,12 +962,12 @@ public class SimulatedFSDataset
 
   @Override
   public void checkAndUpdate(String bpid, long blockId, File diskFile,
-      File diskMetaFile, FSVolumeInterface vol) {
+      File diskMetaFile, FsVolumeSpi vol) {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  public List<FSVolumeInterface> getVolumes() {
+  public List<FsVolumeSpi> getVolumes() {
     throw new UnsupportedOperationException();
   }
 

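[Note on the SimulatedFSDataset hunks above] These are mechanical renames: BlockWriteStreams becomes ReplicaOutputStreams, BlockInputStreams becomes ReplicaInputStreams, and MetaDataInputStream becomes LengthInputStream, all now in the fsdataset package, with the public fields replaced by accessors. A small sketch of the two wrappers as constructed in the hunks; the byte-array streams stand in for SimulatedFSDataset's own stream types, and the getLength() accessor on LengthInputStream is assumed.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
    import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
    import org.apache.hadoop.util.DataChecksum;

    // Illustrative sketch only; not part of the patch.
    class StreamWrapperSketch {
      static void sketch() throws IOException {
        // Stand-ins for the data and checksum sinks of a replica being written.
        OutputStream dataOut = new ByteArrayOutputStream();
        OutputStream checksumOut = new ByteArrayOutputStream();
        DataChecksum checksum =
            DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);

        // Replaces FSDatasetInterface.BlockWriteStreams; fields become accessors.
        ReplicaOutputStreams out =
            new ReplicaOutputStreams(dataOut, checksumOut, checksum);
        out.getDataOut().write(1);        // was out.dataOut
        out.getChecksumOut().write('a');  // was out.checksumOut

        // Replaces FSDatasetInterface.MetaDataInputStream: an InputStream plus
        // its known length.
        byte[] meta = new byte[16];
        LengthInputStream in =
            new LengthInputStream(new ByteArrayInputStream(meta), meta.length);
        long len = in.getLength();        // assumed accessor
      }
    }
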
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Fri Mar 16 17:36:57 2012
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.protocolPB
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.BlockRecord;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -535,11 +535,11 @@ public class TestBlockRecovery {
       LOG.debug("Running " + GenericTestUtils.getMethodName());
     }
     ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
-    BlockWriteStreams streams = null;
+    ReplicaOutputStreams streams = null;
     try {
       streams = replicaInfo.createStreams(true,
           DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512));
-      streams.checksumOut.write('a');
+      streams.getChecksumOut().write('a');
       dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
       try {
         dn.syncBlock(rBlock, initBlockRecords(dn));

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java Fri Mar 16 17:36:57 2012
@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Assert;
 import org.junit.Test;
@@ -98,7 +98,7 @@ public class TestDatanodeRestart {
       out.write(writeBuf);
       out.hflush();
       DataNode dn = cluster.getDataNodes().get(0);
-      for (FSVolumeInterface v : dn.data.getVolumes()) {
+      for (FsVolumeSpi v : dn.data.getVolumes()) {
         FSVolume volume = (FSVolume)v;
         File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
         File rbwDir = new File(currentDir, "rbw");

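[Note on the TestDatanodeRestart hunk above] The only change is the declared element type of the loop: volumes are exposed as FsVolumeSpi, and the test casts back to the concrete FSDataset.FSVolume where it needs getCurrentDir(). The sketch below shows that pattern in isolation; the helper is illustrative and, like the test, assumes it runs in the datanode package against an FSDataset-backed DataNode.

    package org.apache.hadoop.hdfs.server.datanode;

    import java.io.File;
    import java.util.List;

    import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
    import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

    // Illustrative helper only; not part of the patch.
    class RbwDirSketch {
      /** Resolve each volume's rbw directory, as the restart test does above. */
      static void printRbwDirs(List<FsVolumeSpi> volumes) {
        for (FsVolumeSpi v : volumes) {
          // The test still casts to the concrete FSVolume to reach getCurrentDir().
          FSVolume volume = (FSVolume) v;
          File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
          File rbwDir = new File(currentDir, "rbw");
          System.out.println(rbwDir);
        }
      }
    }
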
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java Fri Mar 16 17:36:57 2012
@@ -21,7 +21,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.junit.Assert;
@@ -33,19 +33,19 @@ public class TestRoundRobinVolumesPolicy
   // Test the Round-Robin block-volume choosing algorithm.
   @Test
   public void testRR() throws Exception {
-    final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
+    final List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
 
     // First volume, with 100 bytes of space.
-    volumes.add(Mockito.mock(FSVolumeInterface.class));
+    volumes.add(Mockito.mock(FsVolumeSpi.class));
     Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
 
     // Second volume, with 200 bytes of space.
-    volumes.add(Mockito.mock(FSVolumeInterface.class));
+    volumes.add(Mockito.mock(FsVolumeSpi.class));
     Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
 
     @SuppressWarnings("unchecked")
-    final RoundRobinVolumesPolicy<FSVolumeInterface> policy = 
-        (RoundRobinVolumesPolicy<FSVolumeInterface>)ReflectionUtils.newInstance(
+    final RoundRobinVolumesPolicy<FsVolumeSpi> policy = 
+        (RoundRobinVolumesPolicy<FsVolumeSpi>)ReflectionUtils.newInstance(
             RoundRobinVolumesPolicy.class, null);
     
     // Test two rounds of round-robin choosing
@@ -71,18 +71,18 @@ public class TestRoundRobinVolumesPolicy
   @Test
   public void testRRPolicyExceptionMessage()
       throws Exception {
-    final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
+    final List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
 
     // First volume, with 500 bytes of space.
-    volumes.add(Mockito.mock(FSVolumeInterface.class));
+    volumes.add(Mockito.mock(FsVolumeSpi.class));
     Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);
 
     // Second volume, with 600 bytes of space.
-    volumes.add(Mockito.mock(FSVolumeInterface.class));
+    volumes.add(Mockito.mock(FsVolumeSpi.class));
     Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
 
-    final RoundRobinVolumesPolicy<FSVolumeInterface> policy
-        = new RoundRobinVolumesPolicy<FSVolumeInterface>();
+    final RoundRobinVolumesPolicy<FsVolumeSpi> policy
+        = new RoundRobinVolumesPolicy<FsVolumeSpi>();
     int blockSize = 700;
     try {
       policy.chooseVolume(volumes, blockSize);

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1301663&r1=1301662&r2=1301663&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Fri Mar 16 17:36:57 2012
@@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.util.DataChecksum;
 
 /**
@@ -63,10 +63,10 @@ public class TestSimulatedFSDataset exte
       // we pass expected len as zero, - fsdataset should use the sizeof actual
       // data written
       ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
-      BlockWriteStreams out = bInfo.createStreams(true,
+      ReplicaOutputStreams out = bInfo.createStreams(true,
           DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512));
       try {
-        OutputStream dataOut  = out.dataOut;
+        OutputStream dataOut  = out.getDataOut();
         assertEquals(0, fsdataset.getLength(b));
         for (int j=1; j <= blockIdToLen(i); ++j) {
           dataOut.write(j);


