hadoop-hdfs-commits mailing list archives

From: sur...@apache.org
Subject: svn commit: r1126941 [3/3] - in /hadoop/hdfs/branches/yahoo-merge: ./ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/server/common/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/serv...
Date: Tue, 24 May 2011 09:03:15 GMT
Propchange: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue May 24 09:03:13 2011
@@ -1,6 +1,6 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
-/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:1078871,1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
+/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:1078871,1078924,1078943,1079607,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/test/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/test/hdfs:820487
-/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035386,1035508,1035841-1035842,1036213,1038001,1038859,1039957,1040005,1040411-1040412,1042017,1042925,1049193,1051334,1052823,1053214,1060619,1061067,1062020,1062045,1062052,1067288,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1086479,1086654,1087080,1087115,1087437,1090114,1090357,1091515,1091619,1091874,1092432,1092507,1092524,1092584,1094748,1095245,1095461,1095789,1096846,1097648,1097969,1098781,1098867,1099285,1099640-1099641,1101137,1101282,1101293,1101324,1101675,1101753,1102005,1102459,1102511,1102833,1102947,1103957-1103958,1103970,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035386,1035508,1035841-1035842,1036213,1038001,1038859,1039957,1040005,1040411-1040412,1042017,1042925,1049193,1051334,1052823,1053214,1060619,1061067,1062020,1062045,1062052,1066305,1067288,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1086479,1086654,1087080,1087115,1087437,1090114,1090357,1091515,1091619,1091874,1092432,1092507,1092524,1092584,1094748,1095245,1095461,1095789,1096846,1097648,1097969,1098781,1098867,1099285,1099640-1099641,1101137,1101282,1101293,1101324,1101675,1101753,1102005,1102459,1102511,1102833,1102947,1103957-1103958,1103970,1104395,1104407,1124576

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java Tue May 24 09:03:13 2011
@@ -267,7 +267,7 @@ public class TestDFSRollback extends Tes
           UpgradeUtilities.getCurrentClusterID(null),
           UpgradeUtilities.getCurrentFsscTime(null));
       
-      UpgradeUtilities.createNameNodeVersionFile(baseDirs,
+      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs,
           storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java Tue May 24 09:03:13 2011
@@ -271,7 +271,7 @@ public class TestDFSUpgrade {
           UpgradeUtilities.getCurrentClusterID(null),
           UpgradeUtilities.getCurrentFsscTime(null));
       
-      UpgradeUtilities.createNameNodeVersionFile(baseDirs, storageInfo,
+      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
       
       startNameNodeShouldFail(StartupOption.UPGRADE);
@@ -284,7 +284,7 @@ public class TestDFSUpgrade {
           UpgradeUtilities.getCurrentClusterID(null),
           UpgradeUtilities.getCurrentFsscTime(null));
       
-      UpgradeUtilities.createNameNodeVersionFile(baseDirs, storageInfo,
+      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
       
       startNameNodeShouldFail(StartupOption.UPGRADE);

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Tue May 24 09:03:13 2011
@@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.hdfs.server.namenode.FSImage;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -400,14 +400,15 @@ public class UpgradeUtilities {
    *
    * @return the created version file
    */
-  public static File[] createNameNodeVersionFile(File[] parent,
-      StorageInfo version, String bpid) throws IOException {
-    FSImage storage = null;
+  public static File[] createNameNodeVersionFile(Configuration conf,
+      File[] parent, StorageInfo version, String bpid) throws IOException {
+    Storage storage = null;
     File[] versionFiles = new File[parent.length];
     for (int i = 0; i < parent.length; i++) {
       File versionFile = new File(parent[i], "VERSION");
       FileUtil.fullyDelete(versionFile);
-      storage = new FSImage(version, bpid);
+      storage = new NNStorage(conf);
+      storage.setStorageInfo(version);
       StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
       sd.write(versionFile);
       versionFiles[i] = versionFile;
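
For reference, a rough standalone sketch of the VERSION-file creation path the rewritten helper above now follows: an NNStorage is built from the test Configuration, the desired StorageInfo is set on it, and the file is written through a StorageDirectory. Variable names (conf, parent, version) are assumed from the surrounding method; this is illustrative, not part of the commit.

    // Illustrative sketch only; mirrors the hunk above.
    File versionFile = new File(parent, "VERSION");   // parent = a "current" dir under a name dir
    FileUtil.fullyDelete(versionFile);
    Storage storage = new NNStorage(conf);            // replaces the old "new FSImage(version, bpid)"
    storage.setStorageInfo(version);                  // StorageInfo built by the test (layout version, IDs, cTime)
    StorageDirectory sd = storage.new StorageDirectory(parent.getParentFile());
    sd.write(versionFile);                            // writes the VERSION properties to disk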

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java Tue May 24 09:03:13 2011
@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -72,8 +71,8 @@ public class TestDataNodeMultipleRegistr
       String cid2 = nn2.getFSImage().getClusterID();
       int lv1 = nn1.getFSImage().getLayoutVersion();
       int lv2 = nn2.getFSImage().getLayoutVersion();
-      int ns1 = nn1.getFSImage().namespaceID;
-      int ns2 = nn2.getFSImage().namespaceID;
+      int ns1 = nn1.getFSImage().getNamespaceID();
+      int ns2 = nn2.getFSImage().getNamespaceID();
       assertNotSame("namespace ids should be different", ns1, ns2);
       LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
           + nn1.getNameNodeAddress());

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Tue May 24 09:03:13 2011
@@ -193,7 +193,7 @@ public class CreateEditsLog {
     FileNameGenerator nameGenerator = new FileNameGenerator(BASE_PATH, 100);
 
     FSEditLog editLog = fsImage.getEditLog();
-    editLog.createEditLogFile(fsImage.getFsEditName());
+    editLog.createEditLogFile(fsImage.getStorage().getFsEditName());
     editLog.open();
     addFiles(editLog, numFiles, replication, numBlocksPerFile, startingBlockId,
              nameGenerator);

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Tue May 24 09:03:13 2011
@@ -33,7 +33,8 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
@@ -83,9 +84,10 @@ public class OfflineEditsViewerHelper {
   private String getEditsFilename() throws IOException {
     FSImage image = cluster.getNameNode().getFSImage();
     // it was set up to only have ONE StorageDirectory
-    Iterator<StorageDirectory> it = image.dirIterator(NameNodeDirType.EDITS);
+    Iterator<StorageDirectory> it
+      = image.getStorage().dirIterator(NameNodeDirType.EDITS);
     StorageDirectory sd = it.next();
-    return image.getEditFile(sd).getAbsolutePath();
+    return image.getStorage().getEditFile(sd).getAbsolutePath();
   }
 
   /**
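
The change above illustrates the pattern used throughout this commit: file and directory lookups that used to live on FSImage now go through the FSImage's NNStorage. Roughly, for a test that needs the single edits file of a running MiniDFSCluster (names taken from the hunk):

    FSImage image = cluster.getNameNode().getFSImage();
    // dirIterator() and getEditFile() moved from FSImage to NNStorage
    Iterator<StorageDirectory> it =
        image.getStorage().dirIterator(NameNodeDirType.EDITS);
    StorageDirectory sd = it.next();   // helper is set up with exactly one EDITS directory
    String editsPath = image.getStorage().getEditFile(sd).getAbsolutePath();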

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Tue May 24 09:03:13 2011
@@ -33,11 +33,11 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -189,15 +189,15 @@ public class TestCheckpoint extends Test
     // and that temporary checkpoint files are gone.
     FSImage image = cluster.getNameNode().getFSImage();
     for (Iterator<StorageDirectory> it = 
-             image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
+           image.getStorage().dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
       StorageDirectory sd = it.next();
-      assertFalse(FSImage.getImageFile(sd, NameNodeFile.IMAGE_NEW).exists());
+      assertFalse(image.getStorage().getStorageFile(sd, NameNodeFile.IMAGE_NEW).exists());
     }
     for (Iterator<StorageDirectory> it = 
-            image.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
+           image.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
       StorageDirectory sd = it.next();
-      assertFalse(image.getEditNewFile(sd).exists());
-      File edits = image.getEditFile(sd);
+      assertFalse(image.getStorage().getEditNewFile(sd).exists());
+      File edits = image.getStorage().getEditFile(sd);
       assertTrue(edits.exists()); // edits should exist and be empty
       long editsLen = edits.length();
       assertTrue(editsLen == Integer.SIZE/Byte.SIZE);
@@ -365,10 +365,10 @@ public class TestCheckpoint extends Test
       assertTrue(!fileSys.exists(file1));
       StorageDirectory sd = null;
       for (Iterator<StorageDirectory> it = 
-                image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();)
+                image.getStorage().dirIterator(NameNodeDirType.IMAGE); it.hasNext();)
          sd = it.next();
       assertTrue(sd != null);
-      long fsimageLength = FSImage.getImageFile(sd, NameNodeFile.IMAGE).length();
+      long fsimageLength = image.getStorage().getStorageFile(sd, NameNodeFile.IMAGE).length();
       //
       // Make the checkpoint
       //
@@ -386,8 +386,8 @@ public class TestCheckpoint extends Test
 
       // Verify that image file sizes did not change.
       for (Iterator<StorageDirectory> it = 
-              image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
-        assertTrue(FSImage.getImageFile(it.next(), 
+              image.getStorage().dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
+        assertTrue(image.getStorage().getStorageFile(it.next(), 
                                 NameNodeFile.IMAGE).length() == fsimageLength);
       }
 
@@ -480,7 +480,7 @@ public class TestCheckpoint extends Test
     SecondaryNameNode secondary = null;
     try {
       secondary = startSecondaryNameNode(conf);
-      assertFalse(secondary.getFSImage().isLockSupported(0));
+      assertFalse(secondary.getFSImage().getStorage().isLockSupported(0));
       secondary.shutdown();
     } catch (IOException e) { // expected to fail
       assertTrue(secondary == null);
@@ -505,7 +505,7 @@ public class TestCheckpoint extends Test
     try {
       nn = startNameNode(conf, checkpointDirs, checkpointEditsDirs,
                           StartupOption.REGULAR);
-      assertFalse(nn.getFSImage().isLockSupported(0));
+      assertFalse(nn.getFSImage().getStorage().isLockSupported(0));
       nn.stop(); nn = null;
     } catch (IOException e) { // expected to fail
       assertTrue(nn == null);
@@ -519,7 +519,7 @@ public class TestCheckpoint extends Test
     SecondaryNameNode secondary2 = null;
     try {
       secondary2 = startSecondaryNameNode(conf);
-      assertFalse(secondary2.getFSImage().isLockSupported(0));
+      assertFalse(secondary2.getFSImage().getStorage().isLockSupported(0));
       secondary2.shutdown();
     } catch (IOException e) { // expected to fail
       assertTrue(secondary2 == null);
@@ -567,8 +567,8 @@ public class TestCheckpoint extends Test
     // Verify that image file sizes did not change.
     FSImage image = nn.getFSImage();
     for (Iterator<StorageDirectory> it = 
-            image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
-      assertTrue(FSImage.getImageFile(it.next(), 
+            image.getStorage().dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
+      assertTrue(image.getStorage().getStorageFile(it.next(), 
                           NameNodeFile.IMAGE).length() == fsimageLength);
     }
     nn.stop();
@@ -905,15 +905,15 @@ public class TestCheckpoint extends Test
       // Make the checkpoint
       //
       SecondaryNameNode secondary = startSecondaryNameNode(conf);
-      long fsimageLength = FSImage.getImageFile(
-          image.dirIterator(NameNodeDirType.IMAGE).next(),
-          NameNodeFile.IMAGE).length();
+      long fsimageLength = image.getStorage()
+        .getStorageFile(image.getStorage().dirIterator(NameNodeDirType.IMAGE).next(),
+                        NameNodeFile.IMAGE).length();
       assertFalse("Image is downloaded", secondary.doCheckpoint());
 
       // Verify that image file sizes did not change.
       for (Iterator<StorageDirectory> it = 
-              image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
-        assertTrue("Image size does not change", FSImage.getImageFile(it.next(), 
+             image.getStorage().dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
+        assertTrue("Image size does not change", image.getStorage().getStorageFile(it.next(),

                                 NameNodeFile.IMAGE).length() == fsimageLength);
       }
 
@@ -922,9 +922,10 @@ public class TestCheckpoint extends Test
       assertTrue("Image is not downloaded", secondary.doCheckpoint());
 
       for (Iterator<StorageDirectory> it = 
-        image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
-        assertTrue("Image size increased", FSImage.getImageFile(it.next(), 
-                          NameNodeFile.IMAGE).length() > fsimageLength);
+             image.getStorage().dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
+        assertTrue("Image size increased", 
+                   image.getStorage().getStorageFile(it.next(), 
+                                                     NameNodeFile.IMAGE).length() > fsimageLength);
      }
 
       secondary.shutdown();
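
Condensed, the TestCheckpoint changes replace the static FSImage helpers with instance lookups on the image's NNStorage; a before/after sketch, with image an FSImage and sd a StorageDirectory taken from its IMAGE iterator:

    // Before: File f = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
    // After:
    File f = image.getStorage().getStorageFile(sd, NameNodeFile.IMAGE);
    long fsimageLength = f.length();   // used to compare image sizes before/after a checkpoint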

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Tue May 24 09:03:13 2011
@@ -35,7 +35,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -92,7 +91,8 @@ public class TestClusterId {
     Collection<URI> editsToFormat = new ArrayList<URI>(0);
     FSImage fsImage = new FSImage(dirsToFormat, editsToFormat);
     
-    Iterator<StorageDirectory> sdit = fsImage.dirIterator(NameNodeDirType.IMAGE);
+    Iterator<StorageDirectory> sdit = 
+      fsImage.getStorage().dirIterator(NNStorage.NameNodeDirType.IMAGE);
     StorageDirectory sd = sdit.next();
     Properties props = sd.readFrom(sd.getVersionFile());
     String cid = props.getProperty("clusterID");

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Tue May 24 09:03:13 2011
@@ -29,8 +29,8 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 
 /**
  * This class tests the creation and validation of a checkpoint.
@@ -140,8 +140,8 @@ public class TestEditLog extends TestCas
       //
       FSEditLogLoader loader = new FSEditLogLoader(namesystem);
       for (Iterator<StorageDirectory> it = 
-              fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
-        File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
+              fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
+        File editFile = fsimage.getStorage().getStorageFile(it.next(), NameNodeFile.EDITS);
         System.out.println("Verifying file: " + editFile);
         int numEdits = loader.loadFSEdits(
                                   new EditLogFileInputStream(editFile));

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Tue May 24 09:03:13 2011
@@ -38,8 +38,8 @@ import org.apache.hadoop.hdfs.protocol.F
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.junit.Assert.*;
@@ -219,8 +219,8 @@ public class TestEditLogRace {
     // If there were any corruptions, it is likely that the reading in
     // of these transactions will throw an exception.
     for (Iterator<StorageDirectory> it = 
-           fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
-      File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
+           fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
+      File editFile = fsimage.getStorage().getStorageFile(it.next(), NameNodeFile.EDITS);
       System.out.println("Verifying file: " + editFile);
       int numEdits = new FSEditLogLoader(namesystem).loadFSEdits(
         new EditLogFileInputStream(editFile));

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java Tue May 24 09:03:13 2011
@@ -31,6 +31,8 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.util.PureJavaCrc32;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 
 import java.util.Iterator;
 import java.util.List;
@@ -108,11 +110,11 @@ public class TestParallelImageWrite exte
   
   private void checkImages(FSNamesystem fsn) throws Exception {
     Iterator<StorageDirectory> iter = fsn.
-            getFSImage().dirIterator(FSImage.NameNodeDirType.IMAGE);
+            getFSImage().getStorage().dirIterator(NameNodeDirType.IMAGE);
     List<Long> checksums = new ArrayList<Long>();
     while (iter.hasNext()) {
       StorageDirectory sd = iter.next();
-      File fsImage = FSImage.getImageFile(sd, FSImage.NameNodeFile.IMAGE);
+      File fsImage = fsn.getFSImage().getStorage().getStorageFile(sd, NameNodeFile.IMAGE);
       PureJavaCrc32 crc = new PureJavaCrc32();
       FileInputStream in = new FileInputStream(fsImage);
       byte[] buff = new byte[4096];

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Tue May 24 09:03:13 2011
@@ -104,11 +104,17 @@ public class TestSaveNamespace {
 
     // Replace the FSImage with a spy
     FSImage originalImage = fsn.dir.fsImage;
+    NNStorage storage = originalImage.getStorage();
+    storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
+
+    NNStorage spyStorage = spy(storage);
+    originalImage.storage = spyStorage;
+
     FSImage spyImage = spy(originalImage);
-    spyImage.setStorageDirectories(
-        FSNamesystem.getNamespaceDirs(conf), 
-        FSNamesystem.getNamespaceEditsDirs(conf));
     fsn.dir.fsImage = spyImage;
+    
+    spyImage.getStorage().setStorageDirectories(FSNamesystem.getNamespaceDirs(conf), 
+                                                FSNamesystem.getNamespaceEditsDirs(conf));
 
     // inject fault
     switch(fault) {
@@ -120,12 +126,12 @@ public class TestSaveNamespace {
     case MOVE_CURRENT:
       // The spy throws a RuntimeException when calling moveCurrent()
       doThrow(new RuntimeException("Injected fault: moveCurrent")).
-        when(spyImage).moveCurrent((StorageDirectory)anyObject());
+        when(spyStorage).moveCurrent((StorageDirectory)anyObject());
       break;
     case MOVE_LAST_CHECKPOINT:
       // The spy throws a RuntimeException when calling moveLastCheckpoint()
       doThrow(new RuntimeException("Injected fault: moveLastCheckpoint")).
-        when(spyImage).moveLastCheckpoint((StorageDirectory)anyObject());
+        when(spyStorage).moveLastCheckpoint((StorageDirectory)anyObject());
       break;
     }
 
@@ -175,12 +181,18 @@ public class TestSaveNamespace {
 
     // Replace the FSImage with a spy
     FSImage originalImage = fsn.dir.fsImage;
+    NNStorage storage = originalImage.getStorage();
+    storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
+
+    NNStorage spyStorage = spy(storage);
+    originalImage.storage = spyStorage;
+
     FSImage spyImage = spy(originalImage);
-    spyImage.setStorageDirectories(
-        FSNamesystem.getNamespaceDirs(conf), 
-        FSNamesystem.getNamespaceEditsDirs(conf));
     fsn.dir.fsImage = spyImage;
 
+    spyImage.getStorage().setStorageDirectories(FSNamesystem.getNamespaceDirs(conf), 
+                                                FSNamesystem.getNamespaceEditsDirs(conf));
+
     // inject fault
     // The spy throws a IOException when writing to the second directory
     doAnswer(new FaultySaveImage(false)).
@@ -196,9 +208,9 @@ public class TestSaveNamespace {
       fsn.saveNamespace();
       LOG.warn("First savenamespace sucessful.");
       assertTrue("Savenamespace should have marked one directory as bad." +
-                 " But found " + spyImage.getRemovedStorageDirs().size() +
+                 " But found " + spyStorage.getRemovedStorageDirs().size() +
                  " bad directories.", 
-                   spyImage.getRemovedStorageDirs().size() == 1);
+                   spyStorage.getRemovedStorageDirs().size() == 1);
 
       // The next call to savenamespace should try inserting the
       // erroneous directory back to fs.name.dir. This command should
@@ -208,9 +220,9 @@ public class TestSaveNamespace {
       LOG.warn("Second savenamespace sucessful.");
       assertTrue("Savenamespace should have been successful in removing " +
                  " bad directories from Image."  +
-                 " But found " + originalImage.getRemovedStorageDirs().size() +
+                 " But found " + storage.getRemovedStorageDirs().size() +
                  " bad directories.", 
-                 originalImage.getRemovedStorageDirs().size() == 0);
+                 storage.getRemovedStorageDirs().size() == 0);
 
       // Now shut down and restart the namesystem
       LOG.info("Shutting down fsimage.");
@@ -258,8 +270,10 @@ public class TestSaveNamespace {
 
     // Replace the FSImage with a spy
     final FSImage originalImage = fsn.dir.fsImage;
+    originalImage.getStorage().close();
+
     FSImage spyImage = spy(originalImage);
-    spyImage.setStorageDirectories(
+    spyImage.getStorage().setStorageDirectories(
         FSNamesystem.getNamespaceDirs(conf), 
         FSNamesystem.getNamespaceEditsDirs(conf));
     fsn.dir.fsImage = spyImage;
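
The TestSaveNamespace edits above re-target the Mockito fault injection from the FSImage spy to an NNStorage spy. A condensed sketch of the new setup (fsn and conf come from the test fixture, as in the hunks):

    FSImage originalImage = fsn.dir.fsImage;
    NNStorage storage = originalImage.getStorage();
    storage.close();                      // release directory locks taken during FSNamesystem init
    NNStorage spyStorage = spy(storage);
    originalImage.storage = spyStorage;   // faults are injected on the storage, not the image

    FSImage spyImage = spy(originalImage);
    fsn.dir.fsImage = spyImage;
    spyImage.getStorage().setStorageDirectories(FSNamesystem.getNamespaceDirs(conf),
                                                FSNamesystem.getNamespaceEditsDirs(conf));

    doThrow(new RuntimeException("Injected fault: moveCurrent"))
        .when(spyStorage).moveCurrent((StorageDirectory) anyObject());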

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java Tue May 24 09:03:13 2011
@@ -29,8 +29,8 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -138,8 +138,8 @@ public class TestSecurityTokenEditLog ex
       namesystem.getDelegationTokenSecretManager().stopThreads();
       int numKeys = namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
       for (Iterator<StorageDirectory> it = 
-              fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
-        File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
+             fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
+        File editFile = fsimage.getStorage().getStorageFile(it.next(), NameNodeFile.EDITS);
         System.out.println("Verifying file: " + editFile);
         int numEdits = loader.loadFSEdits(
                                   new EditLogFileInputStream(editFile));

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Tue May 24 09:03:13 2011
@@ -48,8 +48,8 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.util.StringUtils;
@@ -226,15 +226,15 @@ public class TestStartup extends TestCas
    */
   private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
     StorageDirectory sd =null;
-    for (Iterator<StorageDirectory> it = img.dirIterator(); it.hasNext();) {
+    for (Iterator<StorageDirectory> it = img.getStorage().dirIterator(); it.hasNext();) {
       sd = it.next();
 
       if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
-        File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
+        File imf = img.getStorage().getStorageFile(sd, NameNodeFile.IMAGE);
         LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + ";
expected = " + expectedImgSize);
         assertEquals(expectedImgSize, imf.length());	
       } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
-        File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
+        File edf = img.getStorage().getStorageFile(sd, NameNodeFile.EDITS);
         LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length()  +
"; expected = " + expectedEditsSize);
         assertEquals(expectedEditsSize, edf.length());	
       } else {
@@ -337,10 +337,10 @@ public class TestStartup extends TestCas
 
       // now verify that image and edits are created in the different directories
       FSImage image = nn.getFSImage();
-      StorageDirectory sd = image.getStorageDir(0); //only one
+      StorageDirectory sd = image.getStorage().getStorageDir(0); //only one
       assertEquals(sd.getStorageDirType(), NameNodeDirType.IMAGE_AND_EDITS);
-      File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
-      File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
+      File imf = image.getStorage().getStorageFile(sd, NameNodeFile.IMAGE);
+      File edf = image.getStorage().getStorageFile(sd, NameNodeFile.EDITS);
       LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length());
       LOG.info("--edits file " + edf.getAbsolutePath() + "; len = " + edf.length());
 
@@ -445,7 +445,7 @@ public class TestStartup extends TestCas
     FSImage image = namenode.getFSImage();
     image.loadFSImage();
 
-    File versionFile = image.getStorageDir(0).getVersionFile();
+    File versionFile = image.getStorage().getStorageDir(0).getVersionFile();
 
     RandomAccessFile file = new RandomAccessFile(versionFile, "rws");
     FileInputStream in = null;
@@ -458,12 +458,12 @@ public class TestStartup extends TestCas
       props.load(in);
 
       // get the MD5 property and change it
-      String sMd5 = props.getProperty(FSImage.MESSAGE_DIGEST_PROPERTY);
+      String sMd5 = props.getProperty(NNStorage.MESSAGE_DIGEST_PROPERTY);
       MD5Hash md5 = new MD5Hash(sMd5);
       byte[] bytes = md5.getDigest();
       bytes[0] += 1;
       md5 = new MD5Hash(bytes);
-      props.setProperty(FSImage.MESSAGE_DIGEST_PROPERTY, md5.toString());
+      props.setProperty(NNStorage.MESSAGE_DIGEST_PROPERTY, md5.toString());
 
       // write the properties back to version file
       file.seek(0);
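
In TestStartup the MD5 key of the image file is now looked up on NNStorage rather than FSImage; roughly, with props a java.util.Properties loaded from a storage directory's VERSION file as above:

    // MESSAGE_DIGEST_PROPERTY moved from FSImage to NNStorage with this change
    String sMd5 = props.getProperty(NNStorage.MESSAGE_DIGEST_PROPERTY);
    MD5Hash md5 = new MD5Hash(sMd5);      // the test then flips a byte and writes the property back
    props.setProperty(NNStorage.MESSAGE_DIGEST_PROPERTY, md5.toString());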

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=1126941&r1=1126940&r2=1126941&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Tue May 24 09:03:13 2011
@@ -45,8 +45,8 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 
@@ -128,7 +128,7 @@ public class TestStorageRestore extends 
    */
   public void invalidateStorage(FSImage fi) throws IOException {
     ArrayList<StorageDirectory> al = new ArrayList<StorageDirectory>(2);
-    Iterator<StorageDirectory> it = fi.dirIterator();
+    Iterator<StorageDirectory> it = fi.getStorage().dirIterator();
     while(it.hasNext()) {
       StorageDirectory sd = it.next();
       if(sd.getRoot().equals(path2) || sd.getRoot().equals(path3)) {
@@ -136,7 +136,7 @@ public class TestStorageRestore extends 
       }
     }
     // simulate an error
-    fi.processIOError(al, true);
+    fi.getStorage().reportErrorsOnDirectories(al);
   }
   
   /**
@@ -144,15 +144,15 @@ public class TestStorageRestore extends 
    */
   public void printStorages(FSImage fs) {
     LOG.info("current storages and corresoponding sizes:");
-    for(Iterator<StorageDirectory> it = fs.dirIterator(); it.hasNext(); ) {
+    for(Iterator<StorageDirectory> it = fs.getStorage().dirIterator(); it.hasNext(); ) {
       StorageDirectory sd = it.next();
       
       if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
-        File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
+        File imf = fs.getStorage().getStorageFile(sd, NameNodeFile.IMAGE);
         LOG.info("  image file " + imf.getAbsolutePath() + "; len = " + imf.length());  
       }
       if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
-        File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
+        File edf = fs.getStorage().getStorageFile(sd, NameNodeFile.EDITS);
         LOG.info("  edits file " + edf.getAbsolutePath() + "; len = " + edf.length()); 
       }
     }
@@ -342,7 +342,7 @@ public class TestStorageRestore extends 
       FSImage fsi = cluster.getNameNode().getFSImage();
 
       // it is started with dfs.name.dir.restore set to true (in SetUp())
-      boolean restore = fsi.getRestoreFailedStorage();
+      boolean restore = fsi.getStorage().getRestoreFailedStorage();
       LOG.info("Restore is " + restore);
       assertEquals(restore, true);
 
@@ -355,19 +355,19 @@ public class TestStorageRestore extends 
           new CLITestData.TestCmd(cmd, CLITestData.TestCmd.CommandType.DFSADMIN),
           namenode);
       executor.executeCommand(cmd);
-      restore = fsi.getRestoreFailedStorage();
+      restore = fsi.getStorage().getRestoreFailedStorage();
       assertFalse("After set true call restore is " + restore, restore);
 
       // run one more time - to set it to true again
       cmd = "-fs NAMENODE -restoreFailedStorage true";
       executor.executeCommand(cmd);
-      restore = fsi.getRestoreFailedStorage();
+      restore = fsi.getStorage().getRestoreFailedStorage();
       assertTrue("After set false call restore is " + restore, restore);
       
    // run one more time - no change in value
       cmd = "-fs NAMENODE -restoreFailedStorage check";
       CommandExecutor.Result cmdResult = executor.executeCommand(cmd);
-      restore = fsi.getRestoreFailedStorage();
+      restore = fsi.getStorage().getRestoreFailedStorage();
       assertTrue("After check call restore is " + restore, restore);
       String commandOutput = cmdResult.getCommandOutput();
       commandOutput.trim();
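
TestStorageRestore follows the same storage delegation: directory failures are reported and the restore flag is read through the FSImage's NNStorage. Briefly, with fi the NameNode's FSImage and al a list of failed StorageDirectory entries as in the hunks above:

    // Old: fi.processIOError(al, true);  and  fi.getRestoreFailedStorage();
    fi.getStorage().reportErrorsOnDirectories(al);
    boolean restore = fi.getStorage().getRestoreFailedStorage();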

Propchange: hadoop/hdfs/branches/yahoo-merge/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue May 24 09:03:13 2011
@@ -1,6 +1,6 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663
-/hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode:1078871,1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
+/hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode:1078871,1078924,1078943,1079607,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
-/hadoop/hdfs/trunk/src/webapps/datanode:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035386,1035508,1035841-1035842,1036213,1038001,1038859,1039957,1040005,1040411-1040412,1042017,1042925,1049193,1051334,1052823,1053214,1060619,1061067,1062020,1062045,1062052,1067288,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1086479,1086654,1087080,1087115,1087437,1090114,1090357,1091515,1091619,1091874,1092432,1092507,1092524,1092584,1094748,1095245,1095461,1095789,1096846,1097648,1097969,1098781,1098867,1099285,1099640-1099641,1101137,1101282,1101293,1101324,1101675,1101753,1102005,1102459,1102511,1102833,1102947,1103957-1103958,1103970,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/webapps/datanode:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035386,1035508,1035841-1035842,1036213,1038001,1038859,1039957,1040005,1040411-1040412,1042017,1042925,1049193,1051334,1052823,1053214,1060619,1061067,1062020,1062045,1062052,1066305,1067288,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1086479,1086654,1087080,1087115,1087437,1090114,1090357,1091515,1091619,1091874,1092432,1092507,1092524,1092584,1094748,1095245,1095461,1095789,1096846,1097648,1097969,1098781,1098867,1099285,1099640-1099641,1101137,1101282,1101293,1101324,1101675,1101753,1102005,1102459,1102511,1102833,1102947,1103957-1103958,1103970,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue May 24 09:03:13 2011
@@ -1,6 +1,6 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663
-/hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs:1078871,1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
+/hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs:1078871,1078924,1078943,1079607,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
-/hadoop/hdfs/trunk/src/webapps/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035386,1035508,1035841-1035842,1036213,1038001,1038859,1039957,1040005,1040411-1040412,1042017,1042925,1049193,1051334,1052823,1053214,1060619,1061067,1062020,1062045,1062052,1067288,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1086479,1086654,1087080,1087115,1087437,1090114,1090357,1091515,1091619,1091874,1092432,1092507,1092524,1092584,1094748,1095245,1095461,1095789,1096846,1097648,1097969,1098781,1098867,1099285,1099640-1099641,1101137,1101282,1101293,1101324,1101675,1101753,1102005,1102459,1102511,1102833,1102947,1103957-1103958,1103970,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/webapps/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035386,1035508,1035841-1035842,1036213,1038001,1038859,1039957,1040005,1040411-1040412,1042017,1042925,1049193,1051334,1052823,1053214,1060619,1061067,1062020,1062045,1062052,1066305,1067288,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1086479,1086654,1087080,1087115,1087437,1090114,1090357,1091515,1091619,1091874,1092432,1092507,1092524,1092584,1094748,1095245,1095461,1095789,1096846,1097648,1097969,1098781,1098867,1099285,1099640-1099641,1101137,1101282,1101293,1101324,1101675,1101753,1102005,1102459,1102511,1102833,1102947,1103957-1103958,1103970,1104395,1104407,1124576

Propchange: hadoop/hdfs/branches/yahoo-merge/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue May 24 09:03:13 2011
@@ -1,6 +1,6 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663
-/hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary:1078871,1078924,1078943,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
+/hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary:1078871,1078924,1078943,1079607,1080331,1080391,1080402,1081603,1082326,1084245,1086788,1090419
 /hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
-/hadoop/hdfs/trunk/src/webapps/secondary:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035386,1035508,1035841-1035842,1036213,1038001,1038859,1039957,1040005,1040411-1040412,1042017,1042925,1049193,1051334,1052823,1053214,1060619,1061067,1062020,1062045,1062052,1067288,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1086479,1086654,1087080,1087115,1087437,1090114,1090357,1091515,1091619,1091874,1092432,1092507,1092524,1092584,1094748,1095245,1095461,1095789,1096846,1097648,1097969,1098781,1098867,1099285,1099640-1099641,1101137,1101282,1101293,1101324,1101675,1101753,1102005,1102459,1102511,1102833,1102947,1103957-1103958,1103970,1104395,1104407,1124576
+/hadoop/hdfs/trunk/src/webapps/secondary:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1035386,1035508,1035841-1035842,1036213,1038001,1038859,1039957,1040005,1040411-1040412,1042017,1042925,1049193,1051334,1052823,1053214,1060619,1061067,1062020,1062045,1062052,1066305,1067288,1071518,1074282,1080095,1080380,1080836,1081580,1082263,1083951,1085509,1086479,1086654,1087080,1087115,1087437,1090114,1090357,1091515,1091619,1091874,1092432,1092507,1092524,1092584,1094748,1095245,1095461,1095789,1096846,1097648,1097969,1098781,1098867,1099285,1099640-1099641,1101137,1101282,1101293,1101324,1101675,1101753,1102005,1102459,1102511,1102833,1102947,1103957-1103958,1103970,1104395,1104407,1124576


