hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1242642 [2/2] - in /hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdf...
Date: Fri, 10 Feb 2012 03:04:12 GMT
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java Fri Feb 10 03:04:05 2012
@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.io.IOUtils;
 
 /**
@@ -35,8 +35,10 @@ import org.apache.hadoop.io.IOUtils;
  */
 @InterfaceAudience.Private
 abstract public class ReplicaInfo extends Block implements Replica {
-  private FSVolume volume;      // volume where the replica belongs
-  private File     dir;         // directory where block & meta files belong
+  /** volume where the replica belongs */
+  private FSVolumeInterface volume;
+  /** directory where block & meta files belong */
+  private File dir;
 
   /**
    * Constructor for a zero length replica
@@ -45,7 +47,7 @@ abstract public class ReplicaInfo extend
    * @param vol volume where replica is located
    * @param dir directory path where block and meta files are located
    */
-  ReplicaInfo(long blockId, long genStamp, FSVolume vol, File dir) {
+  ReplicaInfo(long blockId, long genStamp, FSVolumeInterface vol, File dir) {
     this( blockId, 0L, genStamp, vol, dir);
   }
   
@@ -55,7 +57,7 @@ abstract public class ReplicaInfo extend
    * @param vol volume where replica is located
    * @param dir directory path where block and meta files are located
    */
-  ReplicaInfo(Block block, FSVolume vol, File dir) {
+  ReplicaInfo(Block block, FSVolumeInterface vol, File dir) {
     this(block.getBlockId(), block.getNumBytes(), 
         block.getGenerationStamp(), vol, dir);
   }
@@ -69,7 +71,7 @@ abstract public class ReplicaInfo extend
    * @param dir directory path where block and meta files are located
    */
   ReplicaInfo(long blockId, long len, long genStamp,
-      FSVolume vol, File dir) {
+      FSVolumeInterface vol, File dir) {
     super(blockId, len, genStamp);
     this.volume = vol;
     this.dir = dir;
@@ -111,14 +113,14 @@ abstract public class ReplicaInfo extend
    * Get the volume where this replica is located on disk
    * @return the volume where this replica is located on disk
    */
-  FSVolume getVolume() {
+  FSVolumeInterface getVolume() {
     return volume;
   }
   
   /**
    * Set the volume where this replica is located on disk
    */
-  void setVolume(FSVolume vol) {
+  void setVolume(FSVolumeInterface vol) {
     this.volume = vol;
   }
   
@@ -162,7 +164,7 @@ abstract public class ReplicaInfo extend
    * be recovered (especially on Windows) on datanode restart.
    */
   private void unlinkFile(File file, Block b) throws IOException {
-    File tmpFile = DatanodeUtil.createTmpFile(b, FSDataset.getUnlinkTmpFile(file));
+    File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
     try {
       FileInputStream in = new FileInputStream(file);
       try {

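The hunks above replace the concrete FSDataset.FSVolume with the FSDatasetInterface.FSVolumeInterface abstraction, so ReplicaInfo no longer depends on one particular dataset implementation. A minimal illustrative sketch of that shape (stand-in types only, not the Hadoop sources; getAvailable() and getFinalizedDir(bpid) are assumed from usages visible elsewhere in this diff):

import java.io.File;

// Stand-in for the FSVolumeInterface abstraction this patch introduces.
interface VolumeSketch {
    long getAvailable();                 // free space on the volume
    File getFinalizedDir(String bpid);   // finalized-block dir for a block pool
}

// Like ReplicaInfo after this patch, the holder depends only on the
// interface, so any implementation (a real volume or a test mock) plugs in.
class ReplicaSketch {
    private VolumeSketch volume;

    ReplicaSketch(VolumeSketch volume) {
        this.volume = volume;
    }

    VolumeSketch getVolume() {
        return volume;
    }

    void setVolume(VolumeSketch vol) {
        this.volume = vol;
    }
}
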
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java Fri Feb 10 03:04:05 2012
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.da
 import java.io.File;
 
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 
 /**
@@ -145,7 +145,7 @@ class ReplicaUnderRecovery extends Repli
   }
   
   @Override //ReplicaInfo
-  void setVolume(FSVolume vol) {
+  void setVolume(FSVolumeInterface vol) {
     super.setVolume(vol);
     original.setVolume(vol);
   }

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java Fri Feb 10 03:04:05 2012
@@ -21,7 +21,7 @@ import java.io.File;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 
 /**
  * This class represents a replica that is waiting to be recovered.
@@ -44,7 +44,7 @@ class ReplicaWaitingToBeRecovered extend
    * @param dir directory path where block and meta files are located
    */
   ReplicaWaitingToBeRecovered(long blockId, long len, long genStamp,
-      FSVolume vol, File dir) {
+      FSVolumeInterface vol, File dir) {
     super(blockId, len, genStamp, vol, dir);
   }
   
@@ -54,7 +54,7 @@ class ReplicaWaitingToBeRecovered extend
    * @param vol volume where replica is located
    * @param dir directory path where block and meta files are located
    */
-  ReplicaWaitingToBeRecovered(Block block, FSVolume vol, File dir) {
+  ReplicaWaitingToBeRecovered(Block block, FSVolumeInterface vol, File dir) {
     super(block, vol, dir);
   }
   

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java Fri Feb 10 03:04:05 2012
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.da
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 
 public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
@@ -28,8 +28,8 @@ public class RoundRobinVolumesPolicy imp
   private int curVolume = 0;
 
   @Override
-  public synchronized FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
-      throws IOException {
+  public synchronized FSVolumeInterface chooseVolume(
+      List<FSVolumeInterface> volumes, long blockSize) throws IOException {
     if(volumes.size() < 1) {
       throw new DiskOutOfSpaceException("No more available volumes");
     }
@@ -44,7 +44,7 @@ public class RoundRobinVolumesPolicy imp
     long maxAvailable = 0;
     
     while (true) {
-      FSVolume volume = volumes.get(curVolume);
+      FSVolumeInterface volume = volumes.get(curVolume);
       curVolume = (curVolume + 1) % volumes.size();
       long availableVolumeSize = volume.getAvailable();
       if (availableVolumeSize > blockSize) { return volume; }

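For reference, chooseVolume cycles through the volume list starting where the previous call stopped and returns the first volume with more free space than the block needs. A self-contained sketch of that scan (the real method throws DiskOutOfSpaceException, and the full-cycle bookkeeping shown here paraphrases the part of the method outside this hunk):

import java.io.IOException;
import java.util.List;

class RoundRobinSketch {
    interface Volume { long getAvailable(); }

    private int curVolume = 0;

    synchronized Volume chooseVolume(List<Volume> volumes, long blockSize)
            throws IOException {
        if (volumes.size() < 1) {
            throw new IOException("No more available volumes");
        }
        int startVolume = curVolume;   // remember where this scan began
        long maxAvailable = 0;
        while (true) {
            Volume volume = volumes.get(curVolume);
            curVolume = (curVolume + 1) % volumes.size();
            long available = volume.getAvailable();
            if (available > blockSize) {
                return volume;         // first volume with enough room wins
            }
            maxAvailable = Math.max(maxAvailable, available);
            if (curVolume == startVolume) {
                // one full cycle found no fit: report the best we saw
                throw new IOException("Out of space: the largest volume has "
                    + maxAvailable + " bytes free, block size is " + blockSize);
            }
        }
    }
}
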
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Fri Feb 10 03:04:05 2012
@@ -153,8 +153,8 @@ class NameNodeRpcServer implements Namen
     this.metrics = NameNode.getNameNodeMetrics();
     
     int handlerCount = 
-      conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, 
-                  DFS_DATANODE_HANDLER_COUNT_DEFAULT);
+      conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY, 
+                  DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
     InetSocketAddress socAddr = nn.getRpcServerAddress(conf);
 		RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
          ProtobufRpcEngine.class);

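The hunk above fixes a config-key mix-up: the NameNode RPC server was sizing its handler pool from the DataNode handler-count key. A small sketch of the corrected lookup (the key string is written out here, while the patch uses the DFSConfigKeys constants; the fallback value 10 is this sketch's assumption for the _DEFAULT constant):

import org.apache.hadoop.conf.Configuration;

public class HandlerCountSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Read the NameNode key (the bug read the DataNode one).
        int handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
        System.out.println("RPC handler threads: " + handlerCount);
    }
}
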
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri Feb 10 03:04:05 2012
@@ -1127,7 +1127,7 @@ public class DFSAdmin extends FsShell {
 
   private ClientDatanodeProtocol getDataNodeProxy(String datanode)
       throws IOException {
-    InetSocketAddress datanodeAddr = DFSUtil.getSocketAddress(datanode);
+    InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
     // Get the current configuration
     Configuration conf = getConf();
 

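DFSAdmin now parses the datanode address with the common NetUtils helper instead of DFSUtil. A usage sketch (the host:port literal is a made-up example value):

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class SocketAddrSketch {
    public static void main(String[] args) {
        // Parse a host:port string the way the patched DFSAdmin does.
        InetSocketAddress addr = NetUtils.createSocketAddr("datanode1.example.com:50020");
        System.out.println(addr.getHostName() + " port " + addr.getPort());
    }
}
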
Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 03:04:05 2012
@@ -1,4 +1,4 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1227776-1241553
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1227776-1242605
 /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189613,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779,1239752,1240897,1240928
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663

Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 03:04:05 2012
@@ -1,4 +1,4 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1227776-1241553
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1227776-1242605
 /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189613,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779,1239752,1240897,1240928
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663

Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 03:04:05 2012
@@ -1,4 +1,4 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1227776-1241553
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1227776-1242605
 /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189613,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779,1239752,1240897,1240928
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663

Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 03:04:05 2012
@@ -1,4 +1,4 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1227776-1241553
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1227776-1242605
 /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189613,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779,1239752,1240897,1240928
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml Fri Feb 10 03:04:05 2012
@@ -21,7 +21,7 @@
   <property name="aspectversion" value="1.6.5"/>
   <!-- TODO this has to be changed synchronously with build.xml version prop.-->
  <!-- this works around test-patch setting its own 'version' -->
-  <property name="project.version" value="0.23.0-SNAPSHOT"/>
+  <property name="project.version" value="0.23.2-SNAPSHOT"/>
 
   <!-- Properties common for all fault injections -->
   <property name="build-fi.dir" value="${basedir}/build-fi"/>

Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 03:04:05 2012
@@ -1,4 +1,4 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1227776-1241553
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1227776-1242605
 /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189613,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779,1239752,1240897,1240928
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Fri Feb 10 03:04:05 2012
@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
@@ -38,11 +40,10 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.BlockPoolSlice;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
-import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -988,8 +989,33 @@ public class SimulatedFSDataset  impleme
   }
 
   @Override
-  public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b)
-      throws IOException {
-    throw new IOException("getBlockLocalPathInfo not supported.");
+  public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public String[] getBlockPoolList() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void checkAndUpdate(String bpid, long blockId, File diskFile,
+      File diskMetaFile, FSVolumeInterface vol) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<FSVolumeInterface> getVolumes() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<Block> getFinalizedBlocks(String bpid) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Map<String, Object> getVolumeInfoMap() {
+    throw new UnsupportedOperationException();
   }
 }

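The new SimulatedFSDataset methods stub out the parts of the widened interface that no test exercises; throwing the unchecked UnsupportedOperationException keeps the override signatures free of throws clauses and makes an accidental call fail loudly. The pattern, with stand-in types (the real interface is FSDatasetInterface, with many more methods):

interface DatasetSketch {
    String[] getBlockPoolList();
    long getLength(String blockId);
}

class SimulatedDatasetSketch implements DatasetSketch {
    @Override
    public String[] getBlockPoolList() {
        // Unchecked exception, so no throws clause is needed here.
        throw new UnsupportedOperationException();
    }

    @Override
    public long getLength(String blockId) {
        return 0; // only methods a test exercises get real bodies
    }
}
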
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java Fri Feb 10 03:04:05 2012
@@ -23,7 +23,7 @@ import static org.junit.Assert.assertNot
 import static org.junit.Assert.assertNotSame;
 
 import java.io.IOException;
-import java.util.Collection;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,7 +31,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.junit.Assert;
@@ -81,11 +80,11 @@ public class TestDataNodeMultipleRegistr
 
       // check number of volumes in fsdataset
       DataNode dn = cluster.getDataNodes().get(0);
-      Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
-      assertNotNull("No volumes in the fsdataset", volInfos);
+      final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
+      Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
       int i = 0;
-      for (VolumeInfo vi : volInfos) {
-        LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
+      for (Map.Entry<String, Object> e : volInfos.entrySet()) {
+        LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
       }
       // number of volumes should be 2 - [data1, data2]
       assertEquals("number of volumes is wrong", 2, volInfos.size());
@@ -143,11 +142,11 @@ public class TestDataNodeMultipleRegistr
 
      // check number of volumes in fsdataset
       DataNode dn = cluster.getDataNodes().get(0);
-      Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
-      assertNotNull("No volumes in the fsdataset", volInfos);
+      final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
+      Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
       int i = 0;
-      for (VolumeInfo vi : volInfos) {
-        LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
+      for (Map.Entry<String, Object> e : volInfos.entrySet()) {
+        LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
       }
       // number of volumes should be 2 - [data1, data2]
       assertEquals("number of volumes is wrong", 2, volInfos.size());

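The test now fetches volume info through the interface as a Map<String, Object> keyed by directory, rather than a Collection of FSDataset.VolumeInfo. A sketch of the updated logging loop (keys and values here are invented for illustration):

import java.util.LinkedHashMap;
import java.util.Map;

public class VolumeInfoMapSketch {
    public static void main(String[] args) {
        // Shape of the data only: directory -> per-volume info object.
        Map<String, Object> volInfos = new LinkedHashMap<String, Object>();
        volInfos.put("/data1/current", "freeSpace=1024");
        volInfos.put("/data2/current", "freeSpace=2048");

        int i = 0;
        for (Map.Entry<String, Object> e : volInfos.entrySet()) {
            System.out.println("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
        }
    }
}
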
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Fri Feb 10 03:04:05 2012
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
@@ -29,8 +32,8 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -43,13 +46,10 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.net.NetUtils;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 /**
  * Fine-grain testing of block files and locations after volume failure.
@@ -274,8 +274,7 @@ public class TestDataNodeVolumeFailure {
     String file = BlockReaderFactory.getFileName(targetAddr, 
         "test-blockpoolid",
         block.getBlockId());
-    BlockReader blockReader = 
-      BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
+    BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
         .getBlockToken(), 0, -1);
 
    // nothing - if it fails - it will throw an exception
@@ -372,7 +371,7 @@ public class TestDataNodeVolumeFailure {
         new FilenameFilter() {
           public boolean accept(File dir, String name) {
             return name.startsWith("blk_") &&
-            name.endsWith(FSDataset.METADATA_EXTENSION);
+            name.endsWith(DatanodeUtil.METADATA_EXTENSION);
           }
         }
     );

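The filter hunk reflects the metadata-suffix constant moving from FSDataset to DatanodeUtil. A tiny sketch of the same filename filter (".meta" is the conventional HDFS metadata suffix, treated here as an assumption about the constant's value):

import java.io.File;
import java.io.FilenameFilter;

public class MetaFileFilterSketch {
    // Stands in for DatanodeUtil.METADATA_EXTENSION after this patch.
    static final String METADATA_EXTENSION = ".meta";

    public static void main(String[] args) {
        FilenameFilter filter = new FilenameFilter() {
            public boolean accept(File dir, String name) {
                return name.startsWith("blk_")
                    && name.endsWith(METADATA_EXTENSION);
            }
        };
        System.out.println(filter.accept(new File("."), "blk_42_1001.meta")); // true
    }
}
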
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java Fri Feb 10 03:04:05 2012
@@ -30,17 +30,17 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-
-import org.junit.Test;
 import org.junit.Assert;
+import org.junit.Test;
 
 /** Test if a datanode can correctly upgrade itself */
 public class TestDatanodeRestart {
@@ -98,8 +98,9 @@ public class TestDatanodeRestart {
       out.write(writeBuf);
       out.hflush();
       DataNode dn = cluster.getDataNodes().get(0);
-      for (FSVolume volume : ((FSDataset)dn.data).volumes.getVolumes()) {
-        File currentDir = volume.getDir().getParentFile();
+      for (FSVolumeInterface v : dn.data.getVolumes()) {
+        FSVolume volume = (FSVolume)v;
+        File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
         File rbwDir = new File(currentDir, "rbw");
         for (File file : rbwDir.listFiles()) {
           if (isCorrupt && Block.isBlockFilename(file)) {
@@ -188,7 +189,7 @@ public class TestDatanodeRestart {
     } else {
       src = replicaInfo.getMetaFile();
     }
-    File dst = FSDataset.getUnlinkTmpFile(src);
+    File dst = DatanodeUtil.getUnlinkTmpFile(src);
     if (isRename) {
       src.renameTo(dst);
     } else {

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Fri Feb 10 03:04:05 2012
@@ -25,20 +25,20 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
 
+import junit.framework.TestCase;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
-
-import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 
 /**
  * Tests {@link DirectoryScanner} handling of differences
@@ -142,10 +142,10 @@ public class TestDirectoryScanner extend
 
   /** Create a block file in a random volume*/
   private long createBlockFile() throws IOException {
-    List<FSVolume> volumes = fds.volumes.getVolumes();
+    List<FSVolumeInterface> volumes = fds.getVolumes();
     int index = rand.nextInt(volumes.size() - 1);
     long id = getFreeBlockId();
-    File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+    File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
     File file = new File(finalizedDir, getBlockFile(id));
     if (file.createNewFile()) {
       LOG.info("Created block file " + file.getName());
@@ -155,10 +155,10 @@ public class TestDirectoryScanner extend
 
   /** Create a metafile in a random volume*/
   private long createMetaFile() throws IOException {
-    List<FSVolume> volumes = fds.volumes.getVolumes();
+    List<FSVolumeInterface> volumes = fds.getVolumes();
     int index = rand.nextInt(volumes.size() - 1);
     long id = getFreeBlockId();
-    File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+    File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
     File file = new File(finalizedDir, getMetaFile(id));
     if (file.createNewFile()) {
       LOG.info("Created metafile " + file.getName());
@@ -168,10 +168,10 @@ public class TestDirectoryScanner extend
 
  /** Create block file and corresponding metafile in a random volume */
   private long createBlockMetaFile() throws IOException {
-    List<FSVolume> volumes = fds.volumes.getVolumes();
+    List<FSVolumeInterface> volumes = fds.getVolumes();
     int index = rand.nextInt(volumes.size() - 1);
     long id = getFreeBlockId();
-    File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+    File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
     File file = new File(finalizedDir, getBlockFile(id));
     if (file.createNewFile()) {
       LOG.info("Created block file " + file.getName());

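These hunks also collapse a two-hop call: volume.getBlockPoolSlice(bpid).getFinalizedDir() becomes volume.getFinalizedDir(bpid), so tests stop reaching into FSDataset internals. The before/after shape, with stand-in types:

import java.io.File;

class FinalizedDirSketch {
    interface Slice { File getFinalizedDir(); }

    interface VolumeBefore { Slice getBlockPoolSlice(String bpid); }

    // After the patch the volume answers directly for a block pool.
    interface VolumeAfter { File getFinalizedDir(String bpid); }

    static File before(VolumeBefore v, String bpid) {
        return v.getBlockPoolSlice(bpid).getFinalizedDir();
    }

    static File after(VolumeAfter v, String bpid) {
        return v.getFinalizedDir(bpid);
    }
}
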
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java Fri Feb 10 03:04:05 2012
@@ -21,10 +21,10 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import junit.framework.Assert;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -33,14 +33,14 @@ public class TestRoundRobinVolumesPolicy
   // Test the Round-Robin block-volume choosing algorithm.
   @Test
   public void testRR() throws Exception {
-    final List<FSVolume> volumes = new ArrayList<FSVolume>();
+    final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
 
     // First volume, with 100 bytes of space.
-    volumes.add(Mockito.mock(FSVolume.class));
+    volumes.add(Mockito.mock(FSVolumeInterface.class));
     Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
 
     // Second volume, with 200 bytes of space.
-    volumes.add(Mockito.mock(FSVolume.class));
+    volumes.add(Mockito.mock(FSVolumeInterface.class));
     Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
 
     RoundRobinVolumesPolicy policy = ReflectionUtils.newInstance(
@@ -69,14 +69,14 @@ public class TestRoundRobinVolumesPolicy
   @Test
   public void testRRPolicyExceptionMessage()
       throws Exception {
-    final List<FSVolume> volumes = new ArrayList<FSVolume>();
+    final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
 
     // First volume, with 500 bytes of space.
-    volumes.add(Mockito.mock(FSVolume.class));
+    volumes.add(Mockito.mock(FSVolumeInterface.class));
     Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);
 
     // Second volume, with 600 bytes of space.
-    volumes.add(Mockito.mock(FSVolume.class));
+    volumes.add(Mockito.mock(FSVolumeInterface.class));
     Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
 
     RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy();

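One payoff of the interface extraction shows up here: Mockito can mock FSVolumeInterface directly and stub per-volume free space without touching real disks. A self-contained sketch of the same pattern (VolumeSketch stands in for FSVolumeInterface):

import static org.junit.Assert.assertEquals;

import org.junit.Test;
import org.mockito.Mockito;

public class MockVolumeSketchTest {
    interface VolumeSketch { long getAvailable(); }

    @Test
    public void stubbedVolumeReportsConfiguredSpace() {
        // Mock the interface and stub its free-space report.
        VolumeSketch vol = Mockito.mock(VolumeSketch.class);
        Mockito.when(vol.getAvailable()).thenReturn(100L);
        assertEquals(100L, vol.getAvailable());
    }
}
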
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java Fri Feb 10 03:04:05 2012
@@ -140,7 +140,7 @@ public class TestWriteToReplica {
     ReplicasMap replicasMap = dataSet.volumeMap;
     FSVolume vol = dataSet.volumes.getNextVolume(0);
     ReplicaInfo replicaInfo = new FinalizedReplica(
-        blocks[FINALIZED].getLocalBlock(), vol, vol.getDir());
+        blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
     replicasMap.add(bpid, replicaInfo);
     replicaInfo.getBlockFile().createNewFile();
     replicaInfo.getMetaFile().createNewFile();
@@ -160,15 +160,15 @@ public class TestWriteToReplica {
         blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
             blocks[RWR].getLocalBlock()).getParentFile()));
     replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
-        .getLocalBlock(), vol, vol.getDir()), 2007));    
+        .getLocalBlock(), vol, vol.getCurrentDir().getParentFile()), 2007));    
     
     return blocks;
   }
   
  private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
     long newGS = blocks[FINALIZED].getGenerationStamp()+1;
-    FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock())
-        .getVolume();
+    final FSVolume v = (FSVolume)dataSet.volumeMap.get(
+        bpid, blocks[FINALIZED].getLocalBlock()).getVolume();
     long available = v.getCapacity()-v.getDfsUsed();
     long expectedLen = blocks[FINALIZED].getNumBytes();
     try {

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Fri Feb 10 03:04:05 2012
@@ -34,6 +34,7 @@ import java.util.Properties;
 import java.util.Set;
 
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -59,6 +60,8 @@ import static org.mockito.Mockito.mock;
  */
 public abstract class FSImageTestUtil {
   
+  public static final Log LOG = LogFactory.getLog(FSImageTestUtil.class);
+
   /**
    * The position in the fsimage header where the txid is
    * written.
@@ -369,6 +372,8 @@ public abstract class FSImageTestUtil {
       List<Integer> txids) {
 
     for (File nameDir : getNameNodeCurrentDirs(cluster)) {
+      LOG.info("examining name dir with files: " +
+          Joiner.on(",").join(nameDir.listFiles()));
       // Should have fsimage_N for the three checkpoints
       for (long checkpointTxId : txids) {
         File image = new File(nameDir,

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Fri Feb 10 03:04:05 2012
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.*;
+
 import java.io.File;
 import java.io.IOException;
 import java.util.Collections;
@@ -38,15 +40,15 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
+import org.junit.Before;
+import org.junit.Test;
 
 import com.google.common.base.Supplier;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
-import junit.framework.TestCase;
-
-public class TestBackupNode extends TestCase {
+public class TestBackupNode {
   public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
 
   
@@ -57,8 +59,8 @@ public class TestBackupNode extends Test
   
   static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
 
-  protected void setUp() throws Exception {
-    super.setUp();
+  @Before
+  public void setUp() throws Exception {
     File baseDir = new File(BASE_DIR);
     if(baseDir.exists())
       if(!(FileUtil.fullyDelete(baseDir)))
@@ -89,8 +91,7 @@ public class TestBackupNode extends Test
     return (BackupNode)NameNode.createNameNode(new String[]{startupOpt.getName()}, c);
   }
 
-  void waitCheckpointDone(
-      MiniDFSCluster cluster, BackupNode backup, long txid) {
+  void waitCheckpointDone(MiniDFSCluster cluster, long txid) {
     long thisCheckpointTxId;
     do {
       try {
@@ -98,9 +99,8 @@ public class TestBackupNode extends Test
             "checkpoint txid should increase above " + txid);
         Thread.sleep(1000);
       } catch (Exception e) {}
-      thisCheckpointTxId = backup.getFSImage().getStorage()
+      thisCheckpointTxId = cluster.getNameNode().getFSImage().getStorage()
         .getMostRecentCheckpointTxId();
-
     } while (thisCheckpointTxId < txid);
     
     // Check that the checkpoint got uploaded to NN successfully
@@ -108,6 +108,7 @@ public class TestBackupNode extends Test
         Collections.singletonList((int)thisCheckpointTxId));
   }
 
+  @Test
   public void testCheckpointNode() throws Exception {
     testCheckpoint(StartupOption.CHECKPOINT);
   }
@@ -117,6 +118,7 @@ public class TestBackupNode extends Test
    * and keep in sync, even while the NN rolls, checkpoints
    * occur, etc.
    */
+  @Test
   public void testBackupNodeTailsEdits() throws Exception {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
@@ -234,6 +236,7 @@ public class TestBackupNode extends Test
     FSImageTestUtil.assertParallelFilesAreIdentical(dirs, ImmutableSet.of("VERSION"));
   }
   
+  @Test
   public void testBackupNode() throws Exception {
     testCheckpoint(StartupOption.BACKUP);
   }  
@@ -270,7 +273,7 @@ public class TestBackupNode extends Test
       //
       long txid = cluster.getNameNodeRpc().getTransactionID();
       backup = startBackupNode(conf, op, 1);
-      waitCheckpointDone(cluster, backup, txid);
+      waitCheckpointDone(cluster, txid);
     } catch(IOException e) {
       LOG.error("Error in TestBackupNode:", e);
       assertTrue(e.getLocalizedMessage(), false);
@@ -305,7 +308,7 @@ public class TestBackupNode extends Test
       //
       backup = startBackupNode(conf, op, 1);
       long txid = cluster.getNameNodeRpc().getTransactionID();
-      waitCheckpointDone(cluster, backup, txid);
+      waitCheckpointDone(cluster, txid);
 
       for (int i = 0; i < 10; i++) {
         fileSys.mkdirs(new Path("file_" + i));
@@ -313,11 +316,11 @@ public class TestBackupNode extends Test
 
       txid = cluster.getNameNodeRpc().getTransactionID();
       backup.doCheckpoint();
-      waitCheckpointDone(cluster, backup, txid);
+      waitCheckpointDone(cluster, txid);
 
       txid = cluster.getNameNodeRpc().getTransactionID();
       backup.doCheckpoint();
-      waitCheckpointDone(cluster, backup, txid);
+      waitCheckpointDone(cluster, txid);
 
     } catch(IOException e) {
       LOG.error("Error in TestBackupNode:", e);

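TestBackupNode migrates from JUnit 3 (extends TestCase, overridden setUp) to JUnit 4, where fixtures use @Before and each test method carries an explicit @Test annotation. A minimal sketch of the same migration:

import static org.junit.Assert.assertTrue;

import org.junit.Before;
import org.junit.Test;

public class MigrationSketchTest {
    private boolean ready;

    @Before
    public void setUp() {
        // stands in for the base-directory cleanup done in the real setUp
        ready = true;
    }

    @Test // without the annotation, JUnit 4 would not run this method
    public void testSomething() {
        assertTrue(ready);
    }
}
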
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/pom.xml?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/pom.xml (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/pom.xml Fri Feb 10 03:04:05 2012
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.23.1-SNAPSHOT</version>
+    <version>0.23.2-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs-project</artifactId>
-  <version>0.23.1-SNAPSHOT</version>
+  <version>0.23.2-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Project</description>
   <name>Apache Hadoop HDFS Project</name>
   <packaging>pom</packaging>


