hadoop-common-commits mailing list archives

From: rang...@apache.org
Subject: svn commit: r652216 [2/2] - in /hadoop/core/trunk: ./ src/java/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/dfs/
Date: Wed, 30 Apr 2008 01:28:36 GMT
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java?rev=652216&r1=652215&r2=652216&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java Tue Apr 29 18:28:36 2008
@@ -203,10 +203,6 @@
                                                  "storage directory does not exist or is
not accessible.");
         case NOT_FORMATTED:
           break;
-        case CONVERT:
-          if (convertLayout(sd)) // need to reformat empty image
-            curState = StorageState.NOT_FORMATTED;
-          break;
         case NORMAL:
           break;
         default:  // recovery is possible
@@ -236,6 +232,9 @@
     if (!isFormatted && startOpt != StartupOption.ROLLBACK 
                      && startOpt != StartupOption.IMPORT)
       throw new IOException("NameNode is not formatted.");
+    if (layoutVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION) {
+      checkVersionUpgradable(layoutVersion);
+    }
     if (startOpt != StartupOption.UPGRADE
           && layoutVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION
           && layoutVersion != FSConstants.LAYOUT_VERSION)
@@ -568,67 +567,9 @@
     } finally {
       oldFile.close();
     }
-    // check consistency of the old storage
-    if (!oldImageDir.isDirectory())
-      throw new InconsistentFSStateException(sd.root,
-                                             oldImageDir + " is not a directory.");
-    if (!oldImageDir.canWrite())
-      throw new InconsistentFSStateException(sd.root,
-                                             oldImageDir + " is not writable.");
     return true;
   }
   
-  private boolean convertLayout(StorageDirectory sd) throws IOException {
-    assert FSConstants.LAYOUT_VERSION < LAST_PRE_UPGRADE_LAYOUT_VERSION :
-      "Bad current layout version: FSConstants.LAYOUT_VERSION should decrease";
-    File oldImageDir = new File(sd.root, "image");
-    assert oldImageDir.exists() : "Old image directory is missing";
-    File oldImage = new File(oldImageDir, "fsimage");
-    
-    LOG.info("Old layout version directory " + oldImageDir
-             + " is found. New layout version is "
-             + FSConstants.LAYOUT_VERSION);
-    LOG.info("Trying to convert ...");
-
-    // we did not use locking for the pre upgrade layout, so we cannot prevent 
-    // old name-nodes from running in the same directory as the new ones
-
-    // check new storage
-    File newImageDir = sd.getCurrentDir();
-    File versionF = sd.getVersionFile();
-    if (versionF.exists())
-      throw new IOException("Version file already exists: " + versionF);
-    if (newImageDir.exists()) // somebody created current dir manually
-      deleteDir(newImageDir);
-
-    // move old image files into new location
-    rename(oldImageDir, newImageDir);
-    File oldEdits1 = new File(sd.root, "edits");
-    // move old edits into data
-    if (oldEdits1.exists())
-      rename(oldEdits1, getImageFile(sd, NameNodeFile.EDITS));
-    File oldEdits2 = new File(sd.root, "edits.new");
-    if (oldEdits2.exists())
-      rename(oldEdits2, getImageFile(sd, NameNodeFile.EDITS_NEW));
-
-    // Write new layout with 
-    // setting layoutVersion = LAST_PRE_UPGRADE_LAYOUT_VERSION
-    // means the actual version should be obtained from the image file
-    this.layoutVersion = LAST_PRE_UPGRADE_LAYOUT_VERSION;
-    File newImageFile = getImageFile(sd, NameNodeFile.IMAGE);
-    boolean needReformat = false;
-    if (!newImageFile.exists()) {
-      // in pre upgrade versions image file was allowed not to exist
-      // we treat it as non formatted then
-      LOG.info("Old image file " + oldImage + " does not exist. ");
-      needReformat = true;
-    } else {
-      sd.write();
-    }
-    LOG.info("Conversion of " + oldImage + " is complete.");
-    return needReformat;
-  }
-
   //
   // Atomic move sequence, to recover from interrupted checkpoint
   //
@@ -740,25 +681,24 @@
                                                                      new FileInputStream(curFile)));
     try {
       /*
+       * Note: Remove any checks for version earlier than 
+       * Storage.LAST_UPGRADABLE_LAYOUT_VERSION since we should never get 
+       * to here with older images.
+       */
+      
+      /*
        * TODO we need to change format of the image file
        * it should not contain version and namespace fields
        */
       // read image version: first appeared in version -1
       int imgVersion = in.readInt();
       // read namespaceID: first appeared in version -2
-      if (imgVersion <= -2) {
-        this.namespaceID = in.readInt();
-      }
+      this.namespaceID = in.readInt();
+
       // read number of files
       int numFiles = 0;
-      // version 0 does not store version #
-      // starts directly with the number of files
-      if (imgVersion >= 0) {
-        numFiles = imgVersion;
-        imgVersion = 0;
-      } else {
-        numFiles = in.readInt();
-      }
+      numFiles = in.readInt();
+
       this.layoutVersion = imgVersion;
       // read in the last generation stamp.
       if (imgVersion <= -12) {
@@ -775,14 +715,9 @@
         long modificationTime = 0;
         long blockSize = 0;
         name.readFields(in);
-        // version 0 does not support per file replication
-        if (!(imgVersion >= 0)) {
-          replication = in.readShort(); // other versions do
-          replication = FSEditLog.adjustReplication(replication);
-        }
-        if (imgVersion <= -5) {
-          modificationTime = in.readLong();
-        }
+        replication = in.readShort();
+        replication = FSEditLog.adjustReplication(replication);
+        modificationTime = in.readLong();
         if (imgVersion <= -8) {
           blockSize = in.readLong();
         }

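For context on the simplified hunk above: once storage with a layout version older than Hadoop-0.14 (numerically greater than -7, since layout versions are negative and decrease over time) is rejected up front, every image the loader can see stores the layout version, namespaceID, and file count unconditionally, so the removed per-version branches were dead code. A minimal reading sketch in that spirit; the ImageHeaderSketch class and its harness are hypothetical, and only the read order and the -7 cutoff come from the patch:

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

class ImageHeaderSketch {
  static void readHeader(String path) throws IOException {
    DataInputStream in = new DataInputStream(new FileInputStream(path));
    try {
      // Every surviving image has layoutVersion <= -7, so all three fields
      // are always present and the old per-version branches cannot fire.
      int imgVersion = in.readInt();   // present since layout version -1
      int namespaceID = in.readInt();  // present since layout version -2
      int numFiles = in.readInt();     // the "version 0" special case is gone
      System.out.println("layout=" + imgVersion + " ns=" + namespaceID
                         + " files=" + numFiles);
    } finally {
      in.close();
    }
  }
}
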
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=652216&r1=652215&r2=652216&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Tue Apr 29 18:28:36 2008
@@ -548,121 +548,6 @@
                               curReplicasDelta, expectedReplicasDelta);
   }
 
-  /**
-   * Used only during DFS upgrade for block level CRCs (HADOOP-1134).
-   * This returns information for a given block that includes:
-   * <li> full path name for the file that contains the block.
-   * <li> offset of first byte of the block.
-   * <li> file length and length of the block.
-   * <li> all block locations for the crc file (".file.crc").
-   * <li> replication for crc file.
-   * When replicas is true, it includes replicas of the block.
-   */
-  public synchronized BlockCrcInfo blockCrcInfo(
-                           Block block,
-                           BlockCrcUpgradeObjectNamenode namenodeUpgradeObj,
-                           boolean replicas) {
-    BlockCrcInfo crcInfo = new BlockCrcInfo();
-    crcInfo.status = BlockCrcInfo.STATUS_ERROR;
-    
-    INodeFile fileINode = blocksMap.getINode(block);
-    if ( fileINode == null || fileINode.isDirectory() ) {
-      // Most probably reason is that this block does not exist
-      if (blocksMap.getStoredBlock(block) == null) {
-        crcInfo.status = BlockCrcInfo.STATUS_UNKNOWN_BLOCK;
-      } else {
-        LOG.warn("getBlockCrcInfo(): Could not find file for " + block);
-      }
-      return crcInfo;
-    }
-
-    crcInfo.fileName = "localName:" + fileINode.getLocalName();
-    
-    // Find the offset and length for this block.
-    Block[] fileBlocks = fileINode.getBlocks();
-    crcInfo.blockLen = -1;
-    if ( fileBlocks != null ) {
-      for ( Block b:fileBlocks ) {
-        if ( block.equals(b) ) {
-          crcInfo.blockLen = b.getNumBytes();
-        }
-        if ( crcInfo.blockLen < 0 ) {
-          crcInfo.startOffset += b.getNumBytes();
-        }
-        crcInfo.fileSize += b.getNumBytes();
-      }
-    }
-
-    if ( crcInfo.blockLen < 0 ) {
-      LOG.warn("blockCrcInfo(): " + block + 
-               " could not be found in blocks for " + crcInfo.fileName);
-      return crcInfo;
-    }
-    
-    String fileName = fileINode.getLocalName();    
-    if ( fileName.startsWith(".") && fileName.endsWith(".crc") ) {
-      crcInfo.status = BlockCrcInfo.STATUS_CRC_BLOCK;
-      return crcInfo;
-    }
-
-    if (replicas) {
-      // include block replica locations, instead of crcBlocks
-      crcInfo.blockLocationsIncluded = true;
-      
-      DatanodeInfo[] dnInfo = new DatanodeInfo[blocksMap.numNodes(block)];
-      Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
-      for (int i=0; it != null && it.hasNext(); i++ ) {
-        dnInfo[i] = new DatanodeInfo(it.next());
-      }
-      crcInfo.blockLocations = new LocatedBlock(block, dnInfo, 
-                                                crcInfo.startOffset);
-    } else {
-
-      //Find CRC file
-      BlockCrcUpgradeObjectNamenode.INodeMapEntry entry =
-                                namenodeUpgradeObj.getINodeMapEntry(fileINode);
-      
-      if (entry == null || entry.parent == null) {
-        LOG.warn("Could not find parent INode for " + fileName + "  " + block);
-        return crcInfo;
-      }
-      
-      crcInfo.fileName = entry.getAbsoluteName();
-      
-      String crcName = "." + fileName + ".crc";
-      INode iNode = entry.getParentINode().getChild(crcName);
-      if (iNode == null || iNode.isDirectory()) {
-        // Should we log this?
-        crcInfo.status = BlockCrcInfo.STATUS_NO_CRC_DATA;
-        return crcInfo;
-      }
-
-      INodeFile crcINode = (INodeFile)iNode;
-      Block[] blocks = crcINode.getBlocks();
-      if ( blocks == null )  {
-        LOG.warn("getBlockCrcInfo(): could not find blocks for crc file for " +
-                 crcInfo.fileName);
-        return crcInfo;
-      }
-
-      crcInfo.crcBlocks = new LocatedBlock[ blocks.length ];
-      for (int i=0; i<blocks.length; i++) {
-        DatanodeInfo[] dnArr = new DatanodeInfo[ blocksMap.numNodes(blocks[i]) ];
-        int idx = 0;
-        for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(blocks[i]); 
-        it.hasNext();) { 
-          dnArr[ idx++ ] = it.next();
-        }
-        crcInfo.crcBlocks[i] = new LocatedBlock(blocks[i], dnArr);
-      }
-
-      crcInfo.crcReplication = crcINode.getReplication();
-    }
-    
-    crcInfo.status = BlockCrcInfo.STATUS_DATA_BLOCK;
-    return crcInfo;
-  }
-  
   /////////////////////////////////////////////////////////
   //
   // These methods are called by secondary namenodes

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?rev=652216&r1=652215&r2=652216&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Tue Apr 29 18:28:36 2008
@@ -599,11 +599,6 @@
     return namesystem.processDistributedUpgradeCommand(comm);
   }
 
-  public BlockCrcInfo blockCrcUpgradeGetBlockLocations(Block block) 
-                                                       throws IOException {
-    return namesystem.blockCrcInfo(block, null, true);
-  }
-
   /** 
    * Verify request.
    * 

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java?rev=652216&r1=652215&r2=652216&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java Tue Apr 29 18:28:36 2008
@@ -507,9 +507,6 @@
                   "checkpoint directory does not exist or is not accessible.");
           case NOT_FORMATTED:
             break;  // it's ok since initially there is no current and VERSION
-          case CONVERT:
-            throw new InconsistentFSStateException(sd.root,
-                  "not a checkpoint directory.");
           case NORMAL:
             break;
           default:  // recovery is possible

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/Storage.java?rev=652216&r1=652215&r2=652216&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/Storage.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/Storage.java Tue Apr 29 18:28:36 2008
@@ -98,6 +98,10 @@
  // last layout version that did not support upgrades
   protected static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3;
   
+  // this corresponds to Hadoop-0.14.
+  protected static final int LAST_UPGRADABLE_LAYOUT_VERSION = -7;
+  protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.14";
+  
   private   static final String STORAGE_FILE_LOCK     = "in_use.lock";
   protected static final String STORAGE_FILE_VERSION  = "VERSION";
   private   static final String STORAGE_DIR_CURRENT   = "current";
@@ -111,7 +115,6 @@
   protected enum StorageState {
     NON_EXISTENT,
     NOT_FORMATTED,
-    CONVERT,
     COMPLETE_UPGRADE,
     RECOVER_UPGRADE,
     COMPLETE_FINALIZE,
@@ -294,9 +297,11 @@
 
       if (startOpt == StartupOption.FORMAT)
         return StorageState.NOT_FORMATTED;
-      // check whether a conversion is required
-      if (startOpt != StartupOption.IMPORT && isConversionNeeded(this))
-        return StorageState.CONVERT;
+      if (startOpt != StartupOption.IMPORT) {
+        //make sure no conversion is required
+        checkConversionNeeded(this);
+      }
+
       // check whether current directory is valid
       File versionFile = getVersionFile();
       boolean hasCurrent = versionFile.exists();
@@ -509,6 +514,41 @@
   }
   
   abstract boolean isConversionNeeded(StorageDirectory sd) throws IOException;
+
+  /*
+   * Conversion is no longer supported, so this should throw an exception
+   * if conversion is needed.
+   */
+  private void checkConversionNeeded(StorageDirectory sd) throws IOException {
+    if (isConversionNeeded(sd)) {
+      //throw an exception
+      checkVersionUpgradable(0);
+    }
+  }
+
+  /**
+   * Checks if the upgrade from the given old version is supported. If
+   * no upgrade is supported, it throws IncorrectVersionException.
+   * 
+   * @param oldVersion
+   */
+  static void checkVersionUpgradable(int oldVersion) 
+                                     throws IOException {
+    if (oldVersion > LAST_UPGRADABLE_LAYOUT_VERSION) {
+      String msg = "*********** Upgrade is not supported from this older" +
+                   " version of storage to the current version." + 
+                   " Please upgrade to " + LAST_UPGRADABLE_HADOOP_VERSION +
+                   " or a later version and then upgrade to current" +
+                   " version. Old layout version is " + 
+                   (oldVersion == 0 ? "'too old'" : (""+oldVersion)) +
+                   " and latest layout version this software version can" +
+                   " upgrade from is " + LAST_UPGRADABLE_LAYOUT_VERSION +
+                   ". ************";
+      LOG.error(msg);
+      throw new IOException(msg); 
+    }
+    
+  }
   
   /**
    * Get common storage fields.

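To see the new gate in isolation, here is a self-contained sketch: the two constants are copied from the hunk above, while the VersionGateSketch class, the shortened message, and the main harness are hypothetical. Layout versions are negative and decrease as the format evolves, so oldVersion > -7 denotes an on-disk layout older than Hadoop-0.14:

import java.io.IOException;

class VersionGateSketch {
  // Constants copied from the Storage.java hunk above.
  static final int LAST_UPGRADABLE_LAYOUT_VERSION = -7;
  static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.14";

  static void checkVersionUpgradable(int oldVersion) throws IOException {
    if (oldVersion > LAST_UPGRADABLE_LAYOUT_VERSION) {
      throw new IOException("Upgrade is not supported from layout version "
          + (oldVersion == 0 ? "'too old'" : "" + oldVersion)
          + "; upgrade to " + LAST_UPGRADABLE_HADOOP_VERSION
          + " or later first.");
    }
  }

  public static void main(String[] args) throws IOException {
    checkVersionUpgradable(-8); // passes: 0.15-era layout or newer
    checkVersionUpgradable(-3); // throws: pre-0.14 layout
  }
}
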
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeObjectCollection.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeObjectCollection.java?rev=652216&r1=652215&r2=652216&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeObjectCollection.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeObjectCollection.java Tue Apr 29 18:28:36 2008
@@ -33,8 +33,6 @@
     initialize();
     // Registered distributed upgrade objects here
     // registerUpgrade(new UpgradeObject());
-    registerUpgrade(new BlockCrcUpgradeObjectNamenode());
-    registerUpgrade(new BlockCrcUpgradeObjectDatanode());
   }
 
   static class UOSignature implements Comparable<UOSignature> {

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java?rev=652216&r1=652215&r2=652216&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java Tue Apr 29 18:28:36 2008
@@ -65,7 +65,7 @@
    *      {pastFsscTime,currentFsscTime,futureFsscTime}
    */
   private StorageInfo[] initializeVersions() throws Exception {
-    int layoutVersionOld = -3;
+    int layoutVersionOld = Storage.LAST_UPGRADABLE_LAYOUT_VERSION;
     int layoutVersionCur = UpgradeUtilities.getCurrentLayoutVersion();
     int layoutVersionNew = Integer.MIN_VALUE;
     int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID(null);

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java?rev=652216&r1=652215&r2=652216&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java Tue Apr 29 18:28:36 2008
@@ -213,6 +213,15 @@
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
+      log("NameNode upgrade with old layout version in current", numDirs);
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
+                                         new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
+                                                         UpgradeUtilities.getCurrentNamespaceID(null),
+                                                         UpgradeUtilities.getCurrentFsscTime(null)));
+      startNameNodeShouldFail(StartupOption.UPGRADE);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
       log("NameNode upgrade with future layout version in current", numDirs);
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,

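A quick sanity check on the boundary value the new test case writes into VERSION: Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1 evaluates to -6, the newest layout version that is still too old to upgrade from, so startNameNodeShouldFail is the expected outcome. The UpgradeBoundarySketch class below is hypothetical:

class UpgradeBoundarySketch {
  public static void main(String[] args) {
    int lastUpgradable = -7;          // Storage.LAST_UPGRADABLE_LAYOUT_VERSION
    int tooOld = lastUpgradable + 1;  // -6: one layout step older than 0.14
    // checkVersionUpgradable rejects anything numerically greater than -7:
    System.out.println(tooOld > lastUpgradable
        ? "NameNode refuses to upgrade, as the test expects"
        : "NameNode would start");
  }
}
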
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgradeFromImage.java?rev=652216&r1=652215&r2=652216&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgradeFromImage.java Tue Apr 29 18:28:36 2008
@@ -39,8 +39,9 @@
  * various forms of wrong data and verifies that Datanode handles it well.
  * 
  * This test uses the following two file from src/test/.../dfs directory :
- *   1) hadoop-12-dfs-dir.tgz : contains the tar of 
- *   2) hadoop-12-dfs-dir.txt : checksums that are compared in this test.
+ *   1) hadoop-version-dfs-dir.tgz : contains DFS directories.
+ *   2) hadoop-dfs-dir.txt : checksums that are compared in this test.
+ * Please read hadoop-dfs-dir.txt for more information.  
  */
 public class TestDFSUpgradeFromImage extends TestCase {
   
@@ -65,7 +66,7 @@
 
   void unpackStorage() throws IOException {
     String tarFile = System.getProperty("test.cache.data") + 
-                     "/hadoop-12-dfs-dir.tgz";
+                     "/hadoop-14-dfs-dir.tgz";
     String dataDir = System.getProperty("test.build.data");
     File dfsDir = new File(dataDir, "dfs");
     if ( dfsDir.exists() && !FileUtil.fullyDelete(dfsDir) ) {
@@ -81,7 +82,7 @@
     
     BufferedReader reader = new BufferedReader( 
                         new FileReader(System.getProperty("test.cache.data") +
-                                       "/hadoop-12-dfs-dir.txt"));
+                                       "/hadoop-dfs-dir.txt"));
     String line;
     while ( (line = reader.readLine()) != null ) {
       

Added: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/hadoop-14-dfs-dir.tgz
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/hadoop-14-dfs-dir.tgz?rev=652216&view=auto
==============================================================================
Binary file - no diff available.

Propchange: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/hadoop-14-dfs-dir.tgz
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Copied: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/hadoop-dfs-dir.txt (from r652208, hadoop/core/trunk/src/test/org/apache/hadoop/dfs/hadoop-12-dfs-dir.txt)
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/hadoop-dfs-dir.txt?p2=hadoop/core/trunk/src/test/org/apache/hadoop/dfs/hadoop-dfs-dir.txt&p1=hadoop/core/trunk/src/test/org/apache/hadoop/dfs/hadoop-12-dfs-dir.txt&r1=652208&r2=652216&rev=652216&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/hadoop-12-dfs-dir.txt (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/hadoop-dfs-dir.txt Tue Apr 29 18:28:36 2008
@@ -1,16 +1,14 @@
 #
-# This is a readme for hadoop-12-dir.tgz and hadoop-12-dir.txt.
-#
-# 08/08/2007:
+# This is a readme for hadoop-version-dfs-dir.tgz and hadoop-dfs-dir.txt.
 #
 # See HADOOP-1629 for more info if needed.
 # These two files are used by unit test TestDFSUpgradeFromImage.java 
 # 
-# hadoop-12-dfs-dir.tgz : 
+# hadoop-14-dfs-dir.tgz : 
 # ---------------------
 # This file contains the HDFS directory structure for one namenode and 4 datanodes.
 # The structure is setup similar to the structure used in MiniDFSCluster.
-# The directory was created with Hadoo-0.12.x (svn revision 526216).
+# The directory was created with Hadoop-0.14.x.
 #
 # In the test, this directory is unpacked and MiniDFSCluster is run with 
 # "-upgrade" option. The test waits for the upgrade to complete 
@@ -18,7 +16,7 @@
 # directory structure and file checksums exactly match the information
 # in this file.
 #
-# hadoop-12-dfs-dir.txt :
+# hadoop-dfs-dir.txt :
 # ---------------------
 # Along with this description this file contains the expected files and 
 # checksums or the files in the upgraded DFS.
@@ -27,7 +25,7 @@
 # some recoverable errors (i.e. corrupt or missing .crc files).
 #
 # A similar set of files exist in two different DFS directories. 
-# For e.g. "top-dir-1Mb-12" contains files created with dfs.block.size of 1Mb 
+# For e.g. "top-dir-1Mb-512" contains files created with dfs.block.size of 1Mb 
 # and io.bytes.per.checksum of 512.
 #
 # In the future, when Hadoop project no longer supports upgrade from


