hadoop-hdfs-commits mailing list archives

From a..@apache.org
Subject svn commit: r1377092 [3/4] - in /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs...
Date Fri, 24 Aug 2012 20:38:22 GMT
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java Fri Aug 24 20:38:08 2012
@@ -35,7 +35,8 @@ import org.apache.hadoop.hdfs.server.pro
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public interface JournalManager extends Closeable, FormatConfirmable {
+public interface JournalManager extends Closeable, FormatConfirmable,
+    LogsPurgeable {
 
   /**
    * Format the underlying storage, removing any previously
@@ -74,17 +75,6 @@ public interface JournalManager extends 
   void setOutputBufferCapacity(int size);
 
   /**
-   * The JournalManager may archive/purge any logs for transactions less than
-   * or equal to minImageTxId.
-   *
-   * @param minTxIdToKeep the earliest txid that must be retained after purging
-   *                      old logs
-   * @throws IOException if purging fails
-   */
-  void purgeLogsOlderThan(long minTxIdToKeep)
-    throws IOException;
-
-  /**
    * Recover segments which have not been finalized.
    */
   void recoverUnfinalizedSegments() throws IOException;

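[Editor's note] The deleted purgeLogsOlderThan declaration moves into the new LogsPurgeable interface that JournalManager now extends. LogsPurgeable itself is not shown in this message; a minimal sketch, assuming it carries the removed signature unchanged:

    package org.apache.hadoop.hdfs.server.namenode;

    import java.io.IOException;

    /**
     * Sketch of the extracted interface. Anything that owns edit logs can
     * implement it, so log retention no longer requires a full JournalManager.
     */
    interface LogsPurgeable {
      /**
       * Purge any logs for transactions less than or equal to minTxIdToKeep.
       * @throws IOException if purging fails
       */
      void purgeLogsOlderThan(long minTxIdToKeep) throws IOException;
    }
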
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java Fri Aug 24 20:38:08 2012
@@ -32,8 +32,6 @@ import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.CopyOnWriteArrayList;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -45,7 +43,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
-import org.apache.hadoop.hdfs.server.common.UpgradeManager;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.util.PersistentLongFile;
@@ -65,8 +62,6 @@ import com.google.common.collect.Lists;
 @InterfaceAudience.Private
 public class NNStorage extends Storage implements Closeable,
     StorageErrorReporter {
-  private static final Log LOG = LogFactory.getLog(NNStorage.class.getName());
-
   static final String DEPRECATED_MESSAGE_DIGEST_PROPERTY = "imageMD5Digest";
   static final String LOCAL_URI_SCHEME = "file";
 
@@ -112,7 +107,6 @@ public class NNStorage extends Storage i
     }
   }
 
-  private UpgradeManager upgradeManager = null;
   protected String blockpoolID = ""; // id of the block pool
   
   /**
@@ -551,11 +545,8 @@ public class NNStorage extends Storage i
   
   public static NamespaceInfo newNamespaceInfo()
       throws UnknownHostException {
-    return new NamespaceInfo(
-        newNamespaceID(),
-        newClusterID(),
-        newBlockPoolID(),
-        0L, 0);
+    return new NamespaceInfo(newNamespaceID(), newClusterID(),
+        newBlockPoolID(), 0L);
   }
   
   public void format() throws IOException {
@@ -600,13 +591,6 @@ public class NNStorage extends Storage i
       String sbpid = props.getProperty("blockpoolID");
       setBlockPoolID(sd.getRoot(), sbpid);
     }
-    
-    String sDUS, sDUV;
-    sDUS = props.getProperty("distributedUpgradeState");
-    sDUV = props.getProperty("distributedUpgradeVersion");
-    setDistributedUpgradeState(
-        sDUS == null? false : Boolean.parseBoolean(sDUS),
-        sDUV == null? getLayoutVersion() : Integer.parseInt(sDUV));
     setDeprecatedPropertiesForUpgrade(props);
   }
 
@@ -653,13 +637,6 @@ public class NNStorage extends Storage i
     if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       props.setProperty("blockpoolID", blockpoolID);
     }
-    boolean uState = getDistributedUpgradeState();
-    int uVersion = getDistributedUpgradeVersion();
-    if(uState && uVersion != getLayoutVersion()) {
-      props.setProperty("distributedUpgradeState", Boolean.toString(uState));
-      props.setProperty("distributedUpgradeVersion",
-                        Integer.toString(uVersion));
-    }
   }
   
   static File getStorageFile(StorageDirectory sd, NameNodeFile type, long imageTxId) {
@@ -732,7 +709,7 @@ public class NNStorage extends Storage i
    * Return the first readable image file for the given txid, or null
    * if no such image can be found
    */
-  File findImageFile(long txid) throws IOException {
+  File findImageFile(long txid) {
     return findFile(NameNodeDirType.IMAGE,
         getImageFileName(txid));
   }
@@ -754,76 +731,6 @@ public class NNStorage extends Storage i
   }
 
   /**
-   * Set the upgrade manager for use in a distributed upgrade.
-   * @param um The upgrade manager
-   */
-  void setUpgradeManager(UpgradeManager um) {
-    upgradeManager = um;
-  }
-
-  /**
-   * @return The current distribued upgrade state.
-   */
-  boolean getDistributedUpgradeState() {
-    return upgradeManager == null ? false : upgradeManager.getUpgradeState();
-  }
-
-  /**
-   * @return The current upgrade version.
-   */
-  int getDistributedUpgradeVersion() {
-    return upgradeManager == null ? 0 : upgradeManager.getUpgradeVersion();
-  }
-
-  /**
-   * Set the upgrade state and version.
-   * @param uState the new state.
-   * @param uVersion the new version.
-   */
-  private void setDistributedUpgradeState(boolean uState, int uVersion) {
-    if (upgradeManager != null) {
-      upgradeManager.setUpgradeState(uState, uVersion);
-    }
-  }
-
-  /**
-   * Verify that the distributed upgrade state is valid.
-   * @param startOpt the option the namenode was started with.
-   */
-  void verifyDistributedUpgradeProgress(StartupOption startOpt
-                                        ) throws IOException {
-    if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
-      return;
-
-    assert upgradeManager != null : "FSNameSystem.upgradeManager is null.";
-    if(startOpt != StartupOption.UPGRADE) {
-      if(upgradeManager.getUpgradeState())
-        throw new IOException(
-                    "\n   Previous distributed upgrade was not completed. "
-                  + "\n   Please restart NameNode with -upgrade option.");
-      if(upgradeManager.getDistributedUpgrades() != null)
-        throw new IOException("\n   Distributed upgrade for NameNode version "
-                              + upgradeManager.getUpgradeVersion()
-                              + " to current LV " + HdfsConstants.LAYOUT_VERSION
-                              + " is required.\n   Please restart NameNode"
-                              + " with -upgrade option.");
-    }
-  }
-
-  /**
-   * Initialize a distributed upgrade.
-   */
-  void initializeDistributedUpgrade() throws IOException {
-    if(! upgradeManager.initializeUpgrade())
-      return;
-    // write new upgrade state into disk
-    writeAll();
-    LOG.info("\n   Distributed upgrade for NameNode version "
-             + upgradeManager.getUpgradeVersion() + " to current LV "
-             + HdfsConstants.LAYOUT_VERSION + " is initialized.");
-  }
-
-  /**
    * Disable the check for pre-upgradable layouts. Needed for BackupImage.
    * @param val Whether to disable the preupgradeable layout check.
    */
@@ -1099,7 +1006,6 @@ public class NNStorage extends Storage i
         getNamespaceID(),
         getClusterID(),
         getBlockPoolID(),
-        getCTime(),
-        getDistributedUpgradeVersion());
+        getCTime());
   }
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java Fri Aug 24 20:38:08 2012
@@ -52,12 +52,12 @@ public class NNStorageRetentionManager {
       NNStorageRetentionManager.class);
   private final NNStorage storage;
   private final StoragePurger purger;
-  private final FSEditLog editLog;
+  private final LogsPurgeable purgeableLogs;
   
   public NNStorageRetentionManager(
       Configuration conf,
       NNStorage storage,
-      FSEditLog editLog,
+      LogsPurgeable purgeableLogs,
       StoragePurger purger) {
     this.numCheckpointsToRetain = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY,
@@ -72,13 +72,13 @@ public class NNStorageRetentionManager {
         " must not be negative");
     
     this.storage = storage;
-    this.editLog = editLog;
+    this.purgeableLogs = purgeableLogs;
     this.purger = purger;
   }
   
   public NNStorageRetentionManager(Configuration conf, NNStorage storage,
-      FSEditLog editLog) {
-    this(conf, storage, editLog, new DeletionStoragePurger());
+      LogsPurgeable purgeableLogs) {
+    this(conf, storage, purgeableLogs, new DeletionStoragePurger());
   }
 
   public void purgeOldStorage() throws IOException {
@@ -95,7 +95,7 @@ public class NNStorageRetentionManager {
     // handy for HA, where a remote node may not have as many
     // new images.
     long purgeLogsFrom = Math.max(0, minImageTxId + 1 - numExtraEditsToRetain);
-    editLog.purgeLogsOlderThan(purgeLogsFrom);
+    purgeableLogs.purgeLogsOlderThan(purgeLogsFrom);
   }
   
   private void purgeCheckpointsOlderThan(
@@ -103,7 +103,6 @@ public class NNStorageRetentionManager {
       long minTxId) {
     for (FSImageFile image : inspector.getFoundImages()) {
       if (image.getCheckpointTxId() < minTxId) {
-        LOG.info("Purging old image " + image);
         purger.purgeImage(image);
       }
     }
@@ -146,11 +145,13 @@ public class NNStorageRetentionManager {
   static class DeletionStoragePurger implements StoragePurger {
     @Override
     public void purgeLog(EditLogFile log) {
+      LOG.info("Purging old edit log " + log);
       deleteOrWarn(log.getFile());
     }
 
     @Override
     public void purgeImage(FSImageFile image) {
+      LOG.info("Purging old image " + image);
       deleteOrWarn(image.getFile());
       deleteOrWarn(MD5FileUtils.getDigestFileForFile(image.getFile()));
     }

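[Editor's note] NNStorageRetentionManager now depends only on the LogsPurgeable abstraction, so a caller can supply any edit-log store instead of a live FSEditLog. A hedged wiring sketch (the helper method is illustrative, not part of this commit; constructor signature taken from the hunk above):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;

    // Illustrative only: any LogsPurgeable implementation can back the manager.
    static void purgeOldState(Configuration conf, NNStorage storage,
        LogsPurgeable logStore) throws IOException {
      NNStorageRetentionManager mgr =
          new NNStorageRetentionManager(conf, storage, logStore);
      // Removes surplus checkpoint images, then asks logStore to purge
      // edit logs older than the computed minimum transaction id.
      mgr.purgeOldStorage();
    }
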
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Aug 24 20:38:08 2012
@@ -511,9 +511,7 @@ public class NameNode {
   }
   
   private void startTrashEmptier(Configuration conf) throws IOException {
-    long trashInterval = conf.getLong(
-        CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY,
-        CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
+    long trashInterval = namesystem.getServerDefaults().getTrashInterval();  
     if (trashInterval == 0) {
       return;
     } else if (trashInterval < 0) {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Fri Aug 24 20:38:08 2012
@@ -59,7 +59,6 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -88,7 +87,6 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -107,7 +105,6 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -270,7 +267,10 @@ class NameNodeRpcServer implements Namen
     this.minimumDataNodeVersion = conf.get(
         DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY,
         DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT);
-  }
+
+    // Set terse exception whose stack trace won't be logged
+    this.clientRpcServer.addTerseExceptions(SafeModeException.class);
+ }
   
   /**
    * Actually start serving requests.
@@ -740,13 +740,6 @@ class NameNodeRpcServer implements Namen
   }
 
   @Override // ClientProtocol
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
-      throws IOException {
-    namesystem.checkOperation(OperationCategory.READ);
-    return namesystem.distributedUpgradeProgress(action);
-  }
-
-  @Override // ClientProtocol
   public void metaSave(String filename) throws IOException {
     namesystem.checkOperation(OperationCategory.UNCHECKED);
     namesystem.metaSave(filename);
@@ -916,11 +909,6 @@ class NameNodeRpcServer implements Namen
     return namesystem.getNamespaceInfo();
   }
 
-  @Override // DatanodeProtocol
-  public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException {
-    return namesystem.processDistributedUpgradeCommand(comm);
-  }
-
   /** 
    * Verifies the given registration.
    * 

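[Editor's note] The constructor change above registers SafeModeException as a "terse" exception: for terse classes the RPC server logs a single line (class and message) rather than a full stack trace, which matters because clients routinely hit safe mode and retry. A hedged sketch of registering further expected exceptions the same way (the extra class is illustrative, and varargs support is assumed):

    // Illustrative: terse exceptions are logged without stack traces.
    this.clientRpcServer.addTerseExceptions(
        SafeModeException.class,
        java.io.FileNotFoundException.class);
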
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Fri Aug 24 20:38:08 2012
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -49,7 +48,6 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
@@ -120,19 +118,6 @@ class NamenodeJspHelper {
     return str;
   }
 
-  static String getUpgradeStatusText(FSNamesystem fsn) {
-    String statusText = "";
-    try {
-      UpgradeStatusReport status = fsn
-          .distributedUpgradeProgress(UpgradeAction.GET_STATUS);
-      statusText = (status == null ? "There are no upgrades in progress."
-          : status.getStatusText(false));
-    } catch (IOException e) {
-      statusText = "Upgrade status unknown.";
-    }
-    return statusText;
-  }
-
   /** Return a table containing version information. */
   static String getVersionTable(FSNamesystem fsn) {
     return "<div class='dfstable'><table>"
@@ -141,8 +126,6 @@ class NamenodeJspHelper {
         + VersionInfo.getVersion() + ", " + VersionInfo.getRevision()
         + "</td></tr>\n" + "\n  <tr><td class='col1'>Compiled:</td><td>" + VersionInfo.getDate()
         + " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch()
-        + "</td></tr>\n  <tr><td class='col1'>Upgrades:</td><td>"
-        + getUpgradeStatusText(fsn)
         + "</td></tr>\n  <tr><td class='col1'>Cluster ID:</td><td>" + fsn.getClusterId()
         + "</td></tr>\n  <tr><td class='col1'>Block Pool ID:</td><td>" + fsn.getBlockPoolId()
         + "</td></tr>\n</table></div>";

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri Aug 24 20:38:08 2012
@@ -58,6 +58,8 @@ import org.apache.hadoop.hdfs.server.com
 
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -376,6 +378,7 @@ public class SecondaryNameNode implement
               downloadImage = false;
               LOG.info("Image has not changed. Will not download image.");
             } else {
+              LOG.info("Image has changed. Downloading updated image from NN.");
               MD5Hash downloadedHash = TransferFsImage.downloadImageToStorage(
                   nnHostPort, sig.mostRecentCheckpointTxId, dstImage.getStorage(), true);
               dstImage.saveDigestAndRenameCheckpointImage(
@@ -472,10 +475,6 @@ public class SecondaryNameNode implement
     LOG.warn("Checkpoint done. New Image Size: " 
              + dstStorage.getFsImageName(txid).length());
     
-    // Since we've successfully checkpointed, we can remove some old
-    // image files
-    checkpointImage.purgeOldStorage();
-    
     return loadImage;
   }
   
@@ -702,6 +701,34 @@ public class SecondaryNameNode implement
   }
   
   static class CheckpointStorage extends FSImage {
+    
+    private static class CheckpointLogPurger implements LogsPurgeable {
+      
+      private NNStorage storage;
+      private StoragePurger purger
+          = new NNStorageRetentionManager.DeletionStoragePurger();
+      
+      public CheckpointLogPurger(NNStorage storage) {
+        this.storage = storage;
+      }
+
+      @Override
+      public void purgeLogsOlderThan(long minTxIdToKeep) throws IOException {
+        Iterator<StorageDirectory> iter = storage.dirIterator();
+        while (iter.hasNext()) {
+          StorageDirectory dir = iter.next();
+          List<EditLogFile> editFiles = FileJournalManager.matchEditLogs(
+              dir.getCurrentDir());
+          for (EditLogFile f : editFiles) {
+            if (f.getLastTxId() < minTxIdToKeep) {
+              purger.purgeLog(f);
+            }
+          }
+        }
+      }
+      
+    }
+    
     /**
      * Construct a checkpoint image.
      * @param conf Node configuration.
@@ -718,6 +745,11 @@ public class SecondaryNameNode implement
       // we shouldn't have any editLog instance. Setting to null
       // makes sure we don't accidentally depend on it.
       editLog = null;
+      
+      // Replace the archival manager with one that can actually work on the
+      // 2NN's edits storage.
+      this.archivalManager = new NNStorageRetentionManager(conf, storage,
+          new CheckpointLogPurger(storage));
     }
 
     /**
@@ -814,6 +846,7 @@ public class SecondaryNameNode implement
     }
     
     Checkpointer.rollForwardByApplyingLogs(manifest, dstImage, dstNamesystem);
+    // The following has the side effect of purging old fsimages/edit logs.
     dstImage.saveFSImageInAllDirs(dstNamesystem, dstImage.getLastAppliedTxId());
     dstStorage.writeAll();
   }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java Fri Aug 24 20:38:08 2012
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
 
 /** Manage name-to-serial-number maps for users and groups. */
 class SerialNumberManager {
@@ -40,33 +43,41 @@ class SerialNumberManager {
   }
 
   private static class SerialNumberMap<T> {
-    private int max = 0;
-    private int nextSerialNumber() {return max++;}
-
-    private Map<T, Integer> t2i = new HashMap<T, Integer>();
-    private Map<Integer, T> i2t = new HashMap<Integer, T>();
-
-    synchronized int get(T t) {
+    private AtomicInteger max = new AtomicInteger(1);
+    private ConcurrentMap<T, Integer> t2i = new ConcurrentHashMap<T, Integer>();
+    private ConcurrentMap<Integer, T> i2t = new ConcurrentHashMap<Integer, T>();
+
+    int get(T t) {
+      if (t == null) {
+        return 0;
+      }
       Integer sn = t2i.get(t);
       if (sn == null) {
-        sn = nextSerialNumber();
-        t2i.put(t, sn);
+        sn = max.getAndIncrement();
+        Integer old = t2i.putIfAbsent(t, sn);
+        if (old != null) {
+          return old;
+        }
         i2t.put(sn, t);
       }
       return sn;
     }
 
-    synchronized T get(int i) {
-      if (!i2t.containsKey(i)) {
+    T get(int i) {
+      if (i == 0) {
+        return null;
+      }
+      T t = i2t.get(i);
+      if (t == null) {
         throw new IllegalStateException("!i2t.containsKey(" + i
             + "), this=" + this);
       }
-      return i2t.get(i);
+      return t;
     }
 
-    @Override
+    /** {@inheritDoc} */
     public String toString() {
       return "max=" + max + ",\n  t2i=" + t2i + ",\n  i2t=" + i2t;
     }
   }
-}
+}
\ No newline at end of file

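[Editor's note] The SerialNumberMap rewrite drops synchronized methods in favor of a ConcurrentHashMap plus an AtomicInteger, reserving serial number 0 for null. A self-contained sketch of the core lock-free idiom (names are illustrative, and the reverse map is omitted for brevity):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.atomic.AtomicInteger;

    class InternTable<T> {
      private final AtomicInteger next = new AtomicInteger(1); // 0 = null
      private final ConcurrentMap<T, Integer> t2i =
          new ConcurrentHashMap<T, Integer>();

      int intern(T t) {
        if (t == null) {
          return 0;
        }
        Integer sn = t2i.get(t);
        if (sn != null) {
          return sn; // fast path: no lock taken
        }
        int candidate = next.getAndIncrement();
        Integer raced = t2i.putIfAbsent(t, candidate);
        // If another thread interned t first, use its number; the wasted
        // candidate is harmless since numbers only need to be unique.
        return raced != null ? raced : candidate;
      }
    }
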
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Fri Aug 24 20:38:08 2012
@@ -669,17 +669,6 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(token);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
-    case GETDELEGATIONTOKENS:
-    {
-      if (delegation.getValue() != null) {
-        throw new IllegalArgumentException(delegation.getName()
-            + " parameter is not null.");
-      }
-      final Token<? extends TokenIdentifier>[] tokens = new Token<?>[1];
-      tokens[0] = generateDelegationToken(namenode, ugi, renewer.getValue());
-      final String js = JsonUtil.toJsonString(tokens);
-      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    }
     case GETHOMEDIRECTORY:
     {
       final String js = JsonUtil.toJsonString(

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Fri Aug 24 20:38:08 2012
@@ -72,8 +72,6 @@ public interface DatanodeProtocol {
   final static int DNA_RECOVERBLOCK = 6;  // request a block recovery
   final static int DNA_ACCESSKEYUPDATE = 7;  // update access key
   final static int DNA_BALANCERBANDWIDTHUPDATE = 8; // update balancer bandwidth
-  final static int DNA_UC_ACTION_REPORT_STATUS = 100; // Report upgrade status
-  final static int DNA_UC_ACTION_START_UPGRADE = 101; // start upgrade
 
   /** 
    * Register Datanode.
@@ -151,18 +149,6 @@ public interface DatanodeProtocol {
   public NamespaceInfo versionRequest() throws IOException;
 
   /**
-   * This is a very general way to send a command to the name-node during
-   * distributed upgrade process.
-   * 
-   * The generosity is because the variety of upgrade commands is unpredictable.
-   * The reply from the name-node is also received in the form of an upgrade 
-   * command. 
-   * 
-   * @return a reply in the form of an upgrade command
-   */
-  UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException;
-  
-  /**
    * same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}
    * }
    */

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Fri Aug 24 20:38:08 2012
@@ -37,7 +37,6 @@ import org.apache.hadoop.util.VersionInf
 @InterfaceStability.Evolving
 public class NamespaceInfo extends StorageInfo {
   String  buildVersion;
-  int distributedUpgradeVersion;
   String blockPoolID = "";    // id of the block pool
   String softwareVersion;
 
@@ -47,17 +46,16 @@ public class NamespaceInfo extends Stora
   }
 
   public NamespaceInfo(int nsID, String clusterID, String bpID,
-      long cT, int duVersion, String buildVersion, String softwareVersion) {
+      long cT, String buildVersion, String softwareVersion) {
     super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT);
     blockPoolID = bpID;
     this.buildVersion = buildVersion;
-    this.distributedUpgradeVersion = duVersion;
     this.softwareVersion = softwareVersion;
   }
 
   public NamespaceInfo(int nsID, String clusterID, String bpID, 
-      long cT, int duVersion) {
-    this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion(),
+      long cT) {
+    this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
         VersionInfo.getVersion());
   }
   
@@ -65,10 +63,6 @@ public class NamespaceInfo extends Stora
     return buildVersion;
   }
 
-  public int getDistributedUpgradeVersion() {
-    return distributedUpgradeVersion;
-  }
-  
   public String getBlockPoolID() {
     return blockPoolID;
   }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri Aug 24 20:38:08 2012
@@ -47,8 +47,6 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.ipc.RPC;
@@ -303,15 +301,9 @@ public class DFSAdmin extends FsShell {
       long remaining = ds.getRemaining();
       long presentCapacity = used + remaining;
       boolean mode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
-      UpgradeStatusReport status = 
-                      dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
-
       if (mode) {
         System.out.println("Safe mode is ON");
       }
-      if (status != null) {
-        System.out.println(status.getStatusText(false));
-      }
       System.out.println("Configured Capacity: " + capacity
                          + " (" + StringUtils.byteDesc(capacity) + ")");
       System.out.println("Present Capacity: " + presentCapacity
@@ -578,10 +570,6 @@ public class DFSAdmin extends FsShell {
       "\t\tfollowed by Namenode doing the same.\n" + 
       "\t\tThis completes the upgrade process.\n";
 
-    String upgradeProgress = "-upgradeProgress <status|details|force>: \n" +
-      "\t\trequest current distributed upgrade status, \n" +
-      "\t\ta detailed status or force the upgrade to proceed.\n";
-
     String metaSave = "-metasave <filename>: \tSave Namenode's primary data structures\n" +
       "\t\tto <filename> in the directory specified by hadoop.log.dir property.\n" +
       "\t\t<filename> will contain one line for each of the following\n" +
@@ -643,8 +631,6 @@ public class DFSAdmin extends FsShell {
       System.out.println(refreshNodes);
     } else if ("finalizeUpgrade".equals(cmd)) {
       System.out.println(finalizeUpgrade);
-    } else if ("upgradeProgress".equals(cmd)) {
-      System.out.println(upgradeProgress);
     } else if ("metasave".equals(cmd)) {
       System.out.println(metaSave);
     } else if (SetQuotaCommand.matches("-"+cmd)) {
@@ -681,7 +667,6 @@ public class DFSAdmin extends FsShell {
       System.out.println(restoreFailedStorage);
       System.out.println(refreshNodes);
       System.out.println(finalizeUpgrade);
-      System.out.println(upgradeProgress);
       System.out.println(metaSave);
       System.out.println(SetQuotaCommand.DESCRIPTION);
       System.out.println(ClearQuotaCommand.DESCRIPTION);
@@ -715,54 +700,19 @@ public class DFSAdmin extends FsShell {
   }
 
   /**
-   * Command to request current distributed upgrade status, 
-   * a detailed status, or to force the upgrade to proceed.
-   * 
-   * Usage: java DFSAdmin -upgradeProgress [status | details | force]
-   * @exception IOException 
-   */
-  public int upgradeProgress(String[] argv, int idx) throws IOException {
-    
-    if (idx != argv.length - 1) {
-      printUsage("-upgradeProgress");
-      return -1;
-    }
-
-    UpgradeAction action;
-    if ("status".equalsIgnoreCase(argv[idx])) {
-      action = UpgradeAction.GET_STATUS;
-    } else if ("details".equalsIgnoreCase(argv[idx])) {
-      action = UpgradeAction.DETAILED_STATUS;
-    } else if ("force".equalsIgnoreCase(argv[idx])) {
-      action = UpgradeAction.FORCE_PROCEED;
-    } else {
-      printUsage("-upgradeProgress");
-      return -1;
-    }
-
-    DistributedFileSystem dfs = getDFS();
-    UpgradeStatusReport status = dfs.distributedUpgradeProgress(action);
-    String statusText = (status == null ? 
-        "There are no upgrades in progress." :
-          status.getStatusText(action == UpgradeAction.DETAILED_STATUS));
-    System.out.println(statusText);
-    return 0;
-  }
-
-  /**
    * Dumps DFS data structures into specified file.
    * Usage: java DFSAdmin -metasave filename
    * @param argv List of of command line parameters.
    * @param idx The index of the command that is being processed.
-   * @exception IOException if an error accoured wile accessing
+   * @exception IOException if an error occurred while accessing
    *            the file or path.
    */
   public int metaSave(String[] argv, int idx) throws IOException {
     String pathname = argv[idx];
     DistributedFileSystem dfs = getDFS();
     dfs.metaSave(pathname);
-    System.out.println("Created file " + pathname + " on server " +
-                       dfs.getUri());
+    System.out.println("Created metasave file " + pathname + " in the log " +
+        "directory of namenode " + dfs.getUri());
     return 0;
   }
 
@@ -918,9 +868,6 @@ public class DFSAdmin extends FsShell {
     } else if ("-finalizeUpgrade".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-finalizeUpgrade]");
-    } else if ("-upgradeProgress".equals(cmd)) {
-      System.err.println("Usage: java DFSAdmin"
-                         + " [-upgradeProgress status | details | force]");
     } else if ("-metasave".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
           + " [-metasave filename]");
@@ -969,7 +916,6 @@ public class DFSAdmin extends FsShell {
       System.err.println("           [-restoreFailedStorage true|false|check]");
       System.err.println("           [-refreshNodes]");
       System.err.println("           [-finalizeUpgrade]");
-      System.err.println("           [-upgradeProgress status | details | force]");
       System.err.println("           [-metasave filename]");
       System.err.println("           [-refreshServiceAcl]");
       System.err.println("           [-refreshUserToGroupsMappings]");
@@ -1039,11 +985,6 @@ public class DFSAdmin extends FsShell {
         printUsage(cmd);
         return exitCode;
       }
-    } else if ("-upgradeProgress".equals(cmd)) {
-        if (argv.length != 2) {
-          printUsage(cmd);
-          return exitCode;
-        }
     } else if ("-metasave".equals(cmd)) {
       if (argv.length != 2) {
         printUsage(cmd);
@@ -1113,8 +1054,6 @@ public class DFSAdmin extends FsShell {
         exitCode = refreshNodes();
       } else if ("-finalizeUpgrade".equals(cmd)) {
         exitCode = finalizeUpgrade();
-      } else if ("-upgradeProgress".equals(cmd)) {
-        exitCode = upgradeProgress(argv, i);
       } else if ("-metasave".equals(cmd)) {
         exitCode = metaSave(argv, i);
       } else if (ClearQuotaCommand.matches(cmd)) {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Fri Aug 24 20:38:08 2012
@@ -189,13 +189,14 @@ public class DelegationTokenFetcher {
                 }
               } else {
                 FileSystem fs = FileSystem.get(conf);
-                Token<?> token = fs.getDelegationToken(renewer);
                 Credentials cred = new Credentials();
-                cred.addToken(token.getService(), token);
+                Token<?> tokens[] = fs.addDelegationTokens(renewer, cred);
                 cred.writeTokenStorageFile(tokenFile, conf);
                 if(LOG.isDebugEnabled()) {
-                  LOG.debug("Fetched token for " + token.getService()
-                      + " into " + tokenFile);
+                  for (Token<?> token : tokens) {
+                    LOG.debug("Fetched token for " + token.getService()
+                        + " into " + tokenFile);
+                  }
                 }
               }
             }

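[Editor's note] The fetcher now uses FileSystem#addDelegationTokens, which returns every token the filesystem needs (including tokens for any filesystems it wraps) and skips tokens already present in the supplied Credentials, instead of fetching a single token by hand. A hedged end-to-end sketch (the renewer name and output path are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.token.Token;

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Credentials cred = new Credentials();
    // Collects one or more tokens and records them in cred as a side effect.
    Token<?>[] tokens = fs.addDelegationTokens("renewer-principal", cred);
    cred.writeTokenStorageFile(new Path("file:///tmp/tokens"), conf);
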
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Fri Aug 24 20:38:08 2012
@@ -29,6 +29,8 @@ import java.util.TreeMap;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
+import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 import org.mortbay.util.ajax.JSON;
 
@@ -512,7 +515,21 @@ public class JsonUtil {
     final byte[] bytes = StringUtils.hexStringToByte((String)m.get("bytes"));
 
     final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
-    final MD5MD5CRC32FileChecksum checksum = new MD5MD5CRC32FileChecksum();
+    final DataChecksum.Type crcType = 
+        MD5MD5CRC32FileChecksum.getCrcTypeFromAlgorithmName(algorithm);
+    final MD5MD5CRC32FileChecksum checksum;
+
+    // Recreate what DFSClient would have returned.
+    switch(crcType) {
+      case CRC32:
+        checksum = new MD5MD5CRC32GzipFileChecksum();
+        break;
+      case CRC32C:
+        checksum = new MD5MD5CRC32CastagnoliFileChecksum();
+        break;
+      default:
+        throw new IOException("Unknown algorithm: " + algorithm);
+    }
     checksum.readFields(in);
 
     //check algorithm name

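[Editor's note] The JSON decoder now reconstructs the concrete checksum subclass from the algorithm name before calling readFields, instead of always instantiating the base MD5MD5CRC32FileChecksum. A hedged sketch of how a name-to-CRC-type lookup like getCrcTypeFromAlgorithmName can work, assuming algorithm names of the form "MD5-of-xMD5-of-yCRC32" or "...CRC32C":

    import java.io.IOException;
    import org.apache.hadoop.util.DataChecksum;

    // Illustrative reimplementation, not the committed helper.
    static DataChecksum.Type crcTypeFromName(String algorithm)
        throws IOException {
      if (algorithm.endsWith("CRC32C")) {
        return DataChecksum.Type.CRC32C;
      } else if (algorithm.endsWith("CRC32")) {
        return DataChecksum.Type.CRC32;
      }
      throw new IOException("Unknown algorithm: " + algorithm);
    }
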
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Fri Aug 24 20:38:08 2012
@@ -30,7 +30,6 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.util.Collection;
-import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
 
@@ -376,8 +375,7 @@ public class WebHdfsFileSystem extends F
         + Param.toSortedString("&", parameters);
     final URL url;
     if (op == PutOpParam.Op.RENEWDELEGATIONTOKEN
-        || op == GetOpParam.Op.GETDELEGATIONTOKEN
-        || op == GetOpParam.Op.GETDELEGATIONTOKENS) {
+        || op == GetOpParam.Op.GETDELEGATIONTOKEN) {
       // Skip adding delegation token for getting or renewing delegation token,
       // because these operations require kerberos authentication.
       url = getNamenodeURL(path, query);
@@ -840,10 +838,9 @@ public class WebHdfsFileSystem extends F
     return statuses;
   }
 
-  @SuppressWarnings("deprecation")
   @Override
-  public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
-      ) throws IOException {
+  public Token<DelegationTokenIdentifier> getDelegationToken(
+      final String renewer) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
     final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
     final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m); 
@@ -852,18 +849,6 @@ public class WebHdfsFileSystem extends F
   }
 
   @Override
-  public List<Token<?>> getDelegationTokens(final String renewer
-      ) throws IOException {
-    final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKENS;
-    final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
-    final List<Token<?>> tokens = JsonUtil.toTokenList(m);
-    for(Token<?> t : tokens) {
-      SecurityUtil.setTokenService(t, nnAddr);
-    }
-    return tokens;
-  }
-
-  @Override
   public Token<?> getRenewToken() {
     return delegationToken;
   }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Fri Aug 24 20:38:08 2012
@@ -32,7 +32,6 @@ public class GetOpParam extends HttpOpPa
 
     GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
     GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK),
-    GETDELEGATIONTOKENS(false, HttpURLConnection.HTTP_OK),
 
     /** GET_BLOCK_LOCATIONS is a private unstable op. */
     GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1373573-1377085

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c Fri Aug 24 20:38:08 2012
@@ -24,6 +24,7 @@
 
 #include <string.h>
 #include <stdlib.h>
+#include <unistd.h>
 
 int is_protected(const char *path) {
 
@@ -65,15 +66,6 @@ static struct fuse_operations dfs_oper =
   .truncate = dfs_truncate,
 };
 
-static void print_env_vars(void)
-{
-  const char *cp = getenv("CLASSPATH");
-  const char *ld = getenv("LD_LIBRARY_PATH");
-
-  fprintf(stderr, "LD_LIBRARY_PATH=%s",ld == NULL ? "NULL" : ld);
-  fprintf(stderr, "CLASSPATH=%s",cp == NULL ? "NULL" : cp);
-}
-
 int main(int argc, char *argv[])
 {
   int ret;
@@ -103,7 +95,7 @@ int main(int argc, char *argv[])
   }
 
   {
-    char buf[1024];
+    char buf[80];
 
     snprintf(buf, sizeof buf, "-oattr_timeout=%d",options.attribute_timeout);
     fuse_opt_add_arg(&args, buf);
@@ -114,24 +106,18 @@ int main(int argc, char *argv[])
 
   if (options.nn_uri == NULL) {
     print_usage(argv[0]);
-    exit(0);
-  }
-
-  ret = fuseConnectInit(options.nn_uri, options.nn_port);
-  if (ret) {
-    ERROR("FATAL: dfs_init: fuseConnInit failed with error %d!", ret);
-    print_env_vars();
-    exit(EXIT_FAILURE);
-  }
-  if (options.initchecks == 1) {
-    ret = fuseConnectTest();
-    if (ret) {
-      ERROR("FATAL: dfs_init: fuseConnTest failed with error %d!", ret);
-      print_env_vars();
-      exit(EXIT_FAILURE);
-    }
+    exit(EXIT_SUCCESS);
   }
 
+  /* Note: do not call any libhdfs functions until fuse_main has been invoked.
+   *
+   * fuse_main will daemonize this process, by calling fork().  This will cause
+   * any extant threads to be destroyed, which could cause problems if 
+   * libhdfs has started some Java threads.
+   *
+   * Most initialization code should go in dfs_init, which is invoked after the
+   * fork.  See HDFS-3808 for details.
+   */
   ret = fuse_main(args.argc, args.argv, &dfs_oper, NULL);
   fuse_opt_free_args(&args);
   return ret;

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c Fri Aug 24 20:38:08 2012
@@ -26,11 +26,20 @@
 #include <stdlib.h>
 #include <string.h>
 
+static void print_env_vars(void)
+{
+  const char *cp = getenv("CLASSPATH");
+  const char *ld = getenv("LD_LIBRARY_PATH");
+
+  ERROR("LD_LIBRARY_PATH=%s",ld == NULL ? "NULL" : ld);
+  ERROR("CLASSPATH=%s",cp == NULL ? "NULL" : cp);
+}
+
 // Hacked up function to basically do:
 //  protectedpaths = split(options.protected,':');
 
-void init_protectedpaths(dfs_context *dfs) {
-
+static void init_protectedpaths(dfs_context *dfs)
+{
   char *tmp = options.protected;
 
   // handle degenerate case up front.
@@ -39,7 +48,6 @@ void init_protectedpaths(dfs_context *df
     dfs->protectedpaths[0] = NULL;
     return;
   }
-  assert(tmp);
 
   if (options.debug) {
     print_options();
@@ -80,10 +88,10 @@ void init_protectedpaths(dfs_context *df
 
 static void dfsPrintOptions(FILE *fp, const struct options *o)
 {
-  fprintf(fp, "[ protected=%s, nn_uri=%s, nn_port=%d, "
+  INFO("Mounting with options: [ protected=%s, nn_uri=%s, nn_port=%d, "
           "debug=%d, read_only=%d, initchecks=%d, "
           "no_permissions=%d, usetrash=%d, entry_timeout=%d, "
-          "attribute_timeout=%d, rdbuffer_size=%Zd, direct_io=%d ]",
+          "attribute_timeout=%d, rdbuffer_size=%zd, direct_io=%d ]",
           (o->protected ? o->protected : "(NULL)"), o->nn_uri, o->nn_port, 
           o->debug, o->read_only, o->initchecks,
           o->no_permissions, o->usetrash, o->entry_timeout,
@@ -92,12 +100,14 @@ static void dfsPrintOptions(FILE *fp, co
 
 void *dfs_init(void)
 {
+  int ret;
+
   //
   // Create a private struct of data we will pass to fuse here and which
   // will then be accessible on every call.
   //
-  dfs_context *dfs = (dfs_context*)malloc(sizeof(dfs_context));
-  if (NULL == dfs) {
+  dfs_context *dfs = calloc(1, sizeof(*dfs));
+  if (!dfs) {
     ERROR("FATAL: could not malloc dfs_context");
     exit(1);
   }
@@ -110,17 +120,30 @@ void *dfs_init(void)
   dfs->rdbuffer_size         = options.rdbuffer_size;
   dfs->direct_io             = options.direct_io;
 
-  fprintf(stderr, "Mounting with options ");
   dfsPrintOptions(stderr, &options);
-  fprintf(stderr, "\n");
 
   init_protectedpaths(dfs);
   assert(dfs->protectedpaths != NULL);
 
   if (dfs->rdbuffer_size <= 0) {
-    DEBUG("dfs->rdbuffersize <= 0 = %ld", dfs->rdbuffer_size);
+    DEBUG("dfs->rdbuffersize <= 0 = %zd", dfs->rdbuffer_size);
     dfs->rdbuffer_size = 32768;
   }
+
+  ret = fuseConnectInit(options.nn_uri, options.nn_port);
+  if (ret) {
+    ERROR("FATAL: dfs_init: fuseConnectInit failed with error %d!", ret);
+    print_env_vars();
+    exit(EXIT_FAILURE);
+  }
+  if (options.initchecks == 1) {
+    ret = fuseConnectTest();
+    if (ret) {
+      ERROR("FATAL: dfs_init: fuseConnectTest failed with error %d!", ret);
+      print_env_vars();
+      exit(EXIT_FAILURE);
+    }
+  }
   return (void*)dfs;
 }
 

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c Fri Aug 24 20:38:08 2012
@@ -279,12 +279,19 @@ done:
     return ret;
 }
 
+struct hdfsBuilderConfOpt {
+    struct hdfsBuilderConfOpt *next;
+    const char *key;
+    const char *val;
+};
+
 struct hdfsBuilder {
     int forceNewInstance;
     const char *nn;
     tPort port;
     const char *kerbTicketCachePath;
     const char *userName;
+    struct hdfsBuilderConfOpt *opts;
 };
 
 struct hdfsBuilder *hdfsNewBuilder(void)
@@ -297,8 +304,32 @@ struct hdfsBuilder *hdfsNewBuilder(void)
     return bld;
 }
 
+int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
+                          const char *val)
+{
+    struct hdfsBuilderConfOpt *opt, *next;
+    
+    opt = calloc(1, sizeof(struct hdfsBuilderConfOpt));
+    if (!opt)
+        return -ENOMEM;
+    next = bld->opts;
+    bld->opts = opt;
+    opt->next = next;
+    opt->key = key;
+    opt->val = val;
+    return 0;
+}
+
 void hdfsFreeBuilder(struct hdfsBuilder *bld)
 {
+    struct hdfsBuilderConfOpt *cur, *next;
+
+    cur = bld->opts;
+    for (cur = bld->opts; cur; ) {
+        next = cur->next;
+        free(cur);
+        cur = next;
+    }
     free(bld);
 }
 
@@ -451,6 +482,7 @@ hdfsFS hdfsBuilderConnect(struct hdfsBui
     char *cURI = 0, buf[512];
     int ret;
     jobject jRet = NULL;
+    struct hdfsBuilderConfOpt *opt;
 
     //Get the JNIEnv* corresponding to current thread
     env = getJNIEnv();
@@ -466,6 +498,16 @@ hdfsFS hdfsBuilderConnect(struct hdfsBui
             "hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
         goto done;
     }
+    // set configuration values
+    for (opt = bld->opts; opt; opt = opt->next) {
+        jthr = hadoopConfSetStr(env, jConfiguration, opt->key, opt->val);
+        if (jthr) {
+            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsBuilderConnect(%s): error setting conf '%s' to '%s'",
+                hdfsBuilderToStr(bld, buf, sizeof(buf)), opt->key, opt->val);
+            goto done;
+        }
+    }
  
     //Check what type of FileSystem the caller wants...
     if (bld->nn == NULL) {
@@ -596,7 +638,7 @@ done:
     destroyLocalReference(env, jURIString);
     destroyLocalReference(env, jUserString);
     free(cURI);
-    free(bld);
+    hdfsFreeBuilder(bld);
 
     if (ret) {
         errno = ret;
@@ -644,7 +686,29 @@ int hdfsDisconnect(hdfsFS fs)
     return 0;
 }
 
+/**
+ * Get the default block size of a FileSystem object.
+ *
+ * @param env       The Java env
+ * @param jFS       The FileSystem object
+ * @param jPath     The path at which to find the default blocksize
+ * @param out       (out param) the default block size
+ *
+ * @return          NULL on success, or the exception on failure
+ */
+static jthrowable getDefaultBlockSize(JNIEnv *env, jobject jFS,
+                                      jobject jPath, jlong *out)
+{
+    jthrowable jthr;
+    jvalue jVal;
 
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+                 "getDefaultBlockSize", JMETHOD1(JPARAM(HADOOP_PATH), "J"), jPath);
+    if (jthr)
+        return jthr;
+    *out = jVal.j;
+    return NULL;
+}
 
 hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags, 
                       int bufferSize, short replication, tSize blockSize)
@@ -665,7 +729,6 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
     }
 
     jstring jStrBufferSize = NULL, jStrReplication = NULL;
-    jstring jStrBlockSize = NULL;
     jobject jConfiguration = NULL, jPath = NULL, jFile = NULL;
     jobject jFS = (jobject)fs;
     jthrowable jthr;
@@ -724,7 +787,6 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
 
     jint jBufferSize = bufferSize;
     jshort jReplication = replication;
-    jlong jBlockSize = blockSize;
     jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size"); 
     if (!jStrBufferSize) {
         ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, "OOM");
@@ -735,11 +797,6 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
         ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, "OOM");
         goto done;
     }
-    jStrBlockSize = (*env)->NewStringUTF(env, "dfs.block.size");
-    if (!jStrBlockSize) {
-        ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, "OOM");
-        goto done;
-    }
 
     if (!bufferSize) {
         jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration, 
@@ -768,20 +825,6 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
             }
             jReplication = jVal.i;
         }
-        
-        //blockSize
-        if (!blockSize) {
-            jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration, 
-                             HADOOP_CONF, "getLong", "(Ljava/lang/String;J)J",
-                             jStrBlockSize, (jlong)67108864);
-            if (jthr) {
-                ret  = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                    "hdfsOpenFile(%s): Configuration#getLong(dfs.block.size)",
-                    path);
-                goto done;
-            }
-            jBlockSize = jVal.j;
-        }
     }
  
     /* Create and return either the FSDataInputStream or
@@ -798,6 +841,15 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
     } else {
         // WRITE/CREATE
         jboolean jOverWrite = 1;
+        jlong jBlockSize = blockSize;
+
+        if (jBlockSize == 0) {
+            jthr = getDefaultBlockSize(env, jFS, jPath, &jBlockSize);
+            if (jthr) {
+                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "hdfsOpenFile(%s)", path);
+                goto done;
+            }
+        }
         jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
                          method, signature, jPath, jOverWrite,
                          jBufferSize, jReplication, jBlockSize);
@@ -842,7 +894,6 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
 done:
     destroyLocalReference(env, jStrBufferSize);
     destroyLocalReference(env, jStrReplication);
-    destroyLocalReference(env, jStrBlockSize);
     destroyLocalReference(env, jConfiguration); 
     destroyLocalReference(env, jPath); 
     destroyLocalReference(env, jFile); 
@@ -2142,6 +2193,39 @@ tOffset hdfsGetDefaultBlockSize(hdfsFS f
 }
 
 
+tOffset hdfsGetDefaultBlockSizeAtPath(hdfsFS fs, const char *path)
+{
+    // JAVA EQUIVALENT:
+    //  fs.getDefaultBlockSize(path);
+
+    jthrowable jthr;
+    jobject jFS = (jobject)fs;
+    jobject jPath;
+    tOffset blockSize;
+    JNIEnv* env = getJNIEnv();
+
+    if (env == NULL) {
+        errno = EINTERNAL;
+        return -1;
+    }
+    jthr = constructNewObjectOfPath(env, path, &jPath);
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "hdfsGetDefaultBlockSize(path=%s): constructNewObjectOfPath",
+            path);
+        return -1;
+    }
+    jthr = getDefaultBlockSize(env, jFS, jPath, &blockSize);
+    (*env)->DeleteLocalRef(env, jPath);
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "hdfsGetDefaultBlockSize(path=%s): "
+            "FileSystem#getDefaultBlockSize", path);
+        return -1;
+    }
+    return blockSize;
+}
+
 
 tOffset hdfsGetCapacity(hdfsFS fs)
 {
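
A note for readers of the libhdfs changes above: hdfsBuilderConfSetStr stores
each key/value pair in a linked list on the builder without copying the
strings, and hdfsBuilderConnect now applies every stored option to the
Configuration and frees the builder via hdfsFreeBuilder on both the success
and failure paths. A minimal usage sketch follows; the namenode host, port,
and option value are illustrative only, and string literals are used
deliberately since they remain valid for the builder's whole lifetime.

    #include <errno.h>
    #include <stdio.h>
    #include "hdfs.h"

    int main(void)
    {
        struct hdfsBuilder *bld = hdfsNewBuilder();
        hdfsFS fs;

        if (!bld)
            return 1;
        hdfsBuilderSetNameNode(bld, "localhost");   /* illustrative host */
        hdfsBuilderSetNameNodePort(bld, 8020);      /* illustrative port */
        /* Shallow-copied: these literals stay valid until the builder is
         * freed (hdfsBuilderConnect frees it on both paths). */
        hdfsBuilderConfSetStr(bld, "dfs.blocksize", "134217728");
        fs = hdfsBuilderConnect(bld);
        if (!fs) {
            fprintf(stderr, "hdfsBuilderConnect failed: errno=%d\n", errno);
            return 1;
        }
        hdfsDisconnect(fs);
        return 0;
    }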

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h Fri Aug 24 20:38:08 2012
@@ -217,6 +217,20 @@ extern  "C" {
     void hdfsFreeBuilder(struct hdfsBuilder *bld);
 
     /**
+     * Set a configuration string for an HdfsBuilder.
+     *
+     * @param key      The key to set.  This will be shallow-copied.
+     * @param val      The value, or NULL to set no value.
+     *                 This will be shallow-copied.  You are responsible for
+     *                 ensuring that both key and value remain valid until
+     *                 the builder is freed.
+     *
+     * @return         0 on success; nonzero error code otherwise.
+     */
+    int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
+                              const char *val);
+
+    /**
      * Get a configuration string.
      *
      * @param key      The key to find
@@ -234,7 +248,7 @@ extern  "C" {
      *
      * @param key      The key to find
      * @param val      (out param) The value.  This will NOT be changed if the
-	 *                 key isn't found.
+     *                 key isn't found.
      *
      * @return         0 on success; nonzero error code otherwise.
      *                 Failure to find the key is not an error.
@@ -550,14 +564,30 @@ extern  "C" {
 
 
     /** 
-     * hdfsGetDefaultBlockSize - Get the optimum blocksize.
-     * @param fs The configured filesystem handle.
-     * @return Returns the blocksize; -1 on error. 
+     * hdfsGetDefaultBlockSize - Get the default blocksize.
+     *
+     * @param fs            The configured filesystem handle.
+     * @deprecated          Use hdfsGetDefaultBlockSizeAtPath instead.
+     *
+     * @return              Returns the default blocksize, or -1 on error.
      */
     tOffset hdfsGetDefaultBlockSize(hdfsFS fs);
 
 
     /** 
+     * hdfsGetDefaultBlockSizeAtPath - Get the default blocksize of the
+     * filesystem indicated by a given path.
+     *
+     * @param fs            The configured filesystem handle.
+     * @param path          The given path will be used to locate the actual
+     *                      filesystem.  The full path does not have to exist.
+     *
+     * @return              Returns the default blocksize, or -1 on error.
+     */
+    tOffset hdfsGetDefaultBlockSizeAtPath(hdfsFS fs, const char *path);
+
+
+    /** 
      * hdfsGetCapacity - Return the raw capacity of the filesystem.  
      * @param fs The configured filesystem handle.
      * @return Returns the raw-capacity; -1 on error. 
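
The header additions above pair with the hdfsOpenFile change in hdfs.c:
passing blockSize == 0 when creating a file now asks the FileSystem object
for its per-path default (FileSystem#getDefaultBlockSize(Path)) instead of
reading dfs.block.size out of the client configuration. A short sketch,
assuming an already-connected hdfsFS handle; the paths are illustrative:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include "hdfs.h"

    static int showDefaultBlockSize(hdfsFS fs)
    {
        tOffset bs = hdfsGetDefaultBlockSizeAtPath(fs, "/tmp");
        if (bs < 0) {
            fprintf(stderr, "hdfsGetDefaultBlockSizeAtPath: errno=%d\n",
                    errno);
            return -1;
        }
        printf("default block size at /tmp: %lld bytes\n", (long long)bs);

        /* blockSize == 0 now means "use the filesystem's default for this
         * path", resolved through the FileSystem object rather than from
         * the client-side dfs.block.size setting. */
        hdfsFile f = hdfsOpenFile(fs, "/tmp/example", O_WRONLY, 0, 0, 0);
        if (!f)
            return -1;
        return hdfsCloseFile(fs, f);
    }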

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c Fri Aug 24 20:38:08 2012
@@ -21,14 +21,20 @@
 #include "native_mini_dfs.h"
 
 #include <errno.h>
+#include <inttypes.h>
 #include <semaphore.h>
 #include <pthread.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 
+#define TO_STR_HELPER(X) #X
+#define TO_STR(X) TO_STR_HELPER(X)
+
 #define TLH_MAX_THREADS 100
 
+#define TLH_DEFAULT_BLOCK_SIZE 134217728
+
 static sem_t tlhSem;
 
 static struct NativeMiniDfsCluster* tlhCluster;
@@ -46,6 +52,7 @@ static int hdfsSingleNameNodeConnect(str
 {
     int ret, port;
     hdfsFS hdfs;
+    struct hdfsBuilder *bld;
     
     port = nmdGetNameNodePort(cl);
     if (port < 0) {
@@ -53,7 +60,17 @@ static int hdfsSingleNameNodeConnect(str
                 "returned error %d\n", port);
         return port;
     }
-    hdfs = hdfsConnectNewInstance("localhost", port);
+    bld = hdfsNewBuilder();
+    if (!bld)
+        return -ENOMEM;
+    hdfsBuilderSetForceNewInstance(bld);
+    hdfsBuilderSetNameNode(bld, "localhost");
+    hdfsBuilderSetNameNodePort(bld, port);
+    hdfsBuilderConfSetStr(bld, "dfs.block.size",
+                          TO_STR(TLH_DEFAULT_BLOCK_SIZE));
+    hdfsBuilderConfSetStr(bld, "dfs.blocksize",
+                          TO_STR(TLH_DEFAULT_BLOCK_SIZE));
+    hdfs = hdfsBuilderConnect(bld);
     if (!hdfs) {
         ret = -errno;
         return ret;
@@ -62,6 +79,37 @@ static int hdfsSingleNameNodeConnect(str
     return 0;
 }
 
+static int doTestGetDefaultBlockSize(hdfsFS fs, const char *path)
+{
+    int64_t blockSize;
+    int ret;
+
+    blockSize = hdfsGetDefaultBlockSize(fs);
+    if (blockSize < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsGetDefaultBlockSize failed with error %d\n", ret);
+        return ret;
+    } else if (blockSize != TLH_DEFAULT_BLOCK_SIZE) {
+        fprintf(stderr, "hdfsGetDefaultBlockSize got %"PRId64", but we "
+                "expected %d\n", blockSize, TLH_DEFAULT_BLOCK_SIZE);
+        return EIO;
+    }
+
+    blockSize = hdfsGetDefaultBlockSizeAtPath(fs, path);
+    if (blockSize < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsGetDefaultBlockSizeAtPath(%s) failed with "
+                "error %d\n", path, ret);
+        return ret;
+    } else if (blockSize != TLH_DEFAULT_BLOCK_SIZE) {
+        fprintf(stderr, "hdfsGetDefaultBlockSizeAtPath(%s) got "
+                "%"PRId64", but we expected %d\n", 
+                path, blockSize, TLH_DEFAULT_BLOCK_SIZE);
+        return EIO;
+    }
+    return 0;
+}
+
 static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
 {
     char prefix[256], tmp[256];
@@ -77,6 +125,8 @@ static int doTestHdfsOperations(struct t
     EXPECT_ZERO(hdfsCreateDirectory(fs, prefix));
     snprintf(tmp, sizeof(tmp), "%s/file", prefix);
 
+    EXPECT_ZERO(doTestGetDefaultBlockSize(fs, prefix));
+
     /* There should not be any file to open for reading. */
     EXPECT_NULL(hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0));
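
A note on the TO_STR macros introduced in this test: the two-level
definition is the standard preprocessor stringification idiom. The '#'
operator stringizes its argument exactly as written, so the extra level of
indirection is what lets TLH_DEFAULT_BLOCK_SIZE expand to its numeric value
before being quoted. A standalone sketch:

    #include <stdio.h>

    #define TO_STR_HELPER(X) #X
    #define TO_STR(X) TO_STR_HELPER(X)

    #define BLOCK_SIZE 134217728

    int main(void)
    {
        printf("%s\n", TO_STR(BLOCK_SIZE));        /* prints 134217728 */
        printf("%s\n", TO_STR_HELPER(BLOCK_SIZE)); /* prints BLOCK_SIZE */
        return 0;
    }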
 

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto Fri Aug 24 20:38:08 2012
@@ -90,6 +90,26 @@ message GetBlockLocalPathInfoResponsePro
 }
 
 /**
+ * blocks - list of ExtendedBlocks for which we are querying additional info
+ * tokens - list of access tokens corresponding to the list of ExtendedBlocks
+ */
+message GetHdfsBlockLocationsRequestProto {
+  repeated ExtendedBlockProto blocks = 1;
+  repeated BlockTokenIdentifierProto tokens = 2;
+}
+
+/**
+ * volumeIds - id of each volume, potentially multiple bytes
+ * volumeIndexes - for each block, an index into volumeIds specifying the volume
+ *               on which it is located. If a block is not present on any
+ *               volume, its index is set to MAX_INT.
+ */
+message GetHdfsBlockLocationsResponseProto {
+  repeated bytes volumeIds = 1;
+  repeated uint32 volumeIndexes = 2;
+}
+
+/**
  * Protocol used from client to the Datanode.
  * See the request and response for details of rpc call.
  */
@@ -119,4 +139,11 @@ service ClientDatanodeProtocolService {
    */
   rpc getBlockLocalPathInfo(GetBlockLocalPathInfoRequestProto)
       returns(GetBlockLocalPathInfoResponseProto);
+
+  /**
+   * Retrieve additional HDFS-specific metadata about a set of blocks stored
+   * on the local file system.
+   */
+  rpc getHdfsBlockLocations(GetHdfsBlockLocationsRequestProto)
+      returns(GetHdfsBlockLocationsResponseProto);
 }
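
To make the volumeIds/volumeIndexes encoding above concrete: the response
carries one opaque id per volume plus, for every queried block, an index
into that id list, with MAX_INT marking a block found on no volume. A
hypothetical decoder sketch (the arrays stand in for fields a protobuf
library would deserialize; all names here are illustrative):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VOLUME_NOT_FOUND UINT32_MAX  /* MAX_INT sentinel from the proto */

    static void printBlockVolumes(const char *const *volumeIds,
                                  uint32_t numVolumes,
                                  const uint32_t *volumeIndexes,
                                  uint32_t numBlocks)
    {
        uint32_t i;

        for (i = 0; i < numBlocks; i++) {
            uint32_t idx = volumeIndexes[i];
            if (idx == VOLUME_NOT_FOUND || idx >= numVolumes)
                printf("block %" PRIu32 ": not on any volume\n", i);
            else
                printf("block %" PRIu32 ": volume %s\n", i, volumeIds[idx]);
        }
    }

    int main(void)
    {
        const char *ids[] = { "disk0", "disk1" };  /* sample volume ids */
        uint32_t indexes[] = { 1, 0, VOLUME_NOT_FOUND };

        printBlockVolumes(ids, 2, indexes, 3);
        return 0;
    }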

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Fri Aug 24 20:38:08 2012
@@ -296,19 +296,6 @@ message FinalizeUpgradeRequestProto { //
 message FinalizeUpgradeResponseProto { // void response
 }
 
-enum UpgradeActionProto {
-  GET_STATUS = 1;
-  DETAILED_STATUS = 2;
-  FORCE_PROCEED = 3;
-}
-
-message DistributedUpgradeProgressRequestProto {
-  required UpgradeActionProto action = 1;
-}
-message DistributedUpgradeProgressResponseProto {
-  optional UpgradeStatusReportProto report = 1;
-}
-
 message ListCorruptFileBlocksRequestProto {
   required string path = 1;
   optional string cookie = 2;
@@ -490,8 +477,6 @@ service ClientNamenodeProtocol {
   rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
   rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
       returns(FinalizeUpgradeResponseProto);
-  rpc distributedUpgradeProgress(DistributedUpgradeProgressRequestProto)
-      returns(DistributedUpgradeProgressResponseProto);
   rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)
       returns(ListCorruptFileBlocksResponseProto);
   rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Fri Aug 24 20:38:08 2012
@@ -60,7 +60,7 @@ message DatanodeCommandProto {
     FinalizeCommand = 3;
     KeyUpdateCommand = 4;
     RegisterCommand = 5;
-    UpgradeCommand = 6;
+    UnusedUpgradeCommand = 6;
     NullDatanodeCommand = 7;
   }
 
@@ -74,7 +74,6 @@ message DatanodeCommandProto {
   optional FinalizeCommandProto finalizeCmd = 5;
   optional KeyUpdateCommandProto keyUpdateCmd = 6;
   optional RegisterCommandProto registerCmd = 7;
-  optional UpgradeCommandProto upgradeCmd = 8;
 }
 
 /**
@@ -132,20 +131,6 @@ message RegisterCommandProto {
 }
 
 /**
- * Generic distributed upgrade Command
- */
-message UpgradeCommandProto {
-  enum Action {
-    UNKNOWN = 0;          // Unknown action
-    REPORT_STATUS = 100;  // Report upgrade status
-    START_UPGRADE = 101;  // Start upgrade
-  }
-  required Action action = 1;  // Upgrade action
-  required uint32 version = 2; // Version of the upgrade
-  required uint32 upgradeStatus = 3; // % completed in range 0 & 100
-}
-
-/**
  * registration - Information of the datanode registering with the namenode
  */
 message RegisterDatanodeRequestProto {
@@ -303,20 +288,6 @@ message ErrorReportResponseProto {
 }
 
 /**
- * cmd - Upgrade command sent from datanode to namenode
- */
-message ProcessUpgradeRequestProto {
-  optional UpgradeCommandProto cmd = 1;
-}
-
-/**
- * cmd - Upgrade command sent from namenode to datanode
- */
-message ProcessUpgradeResponseProto {
-  optional UpgradeCommandProto cmd = 1;
-}
-
-/**
  * blocks - list of blocks that are reported as corrupt
  */
 message ReportBadBlocksRequestProto {
@@ -389,12 +360,6 @@ service DatanodeProtocolService {
   rpc versionRequest(VersionRequestProto) returns(VersionResponseProto);
 
   /**
-   * Generic way to send commands from datanode to namenode during
-   * distributed upgrade process.
-   */
-  rpc processUpgrade(ProcessUpgradeRequestProto) returns(ProcessUpgradeResponseProto);
-
-  /**
    * Report corrupt blocks at the specified location
    */
   rpc reportBadBlocks(ReportBadBlocksRequestProto) returns(ReportBadBlocksResponseProto);

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Fri Aug 24 20:38:08 2012
@@ -185,4 +185,5 @@ message OpBlockChecksumResponseProto {
   required uint32 bytesPerCrc = 1;
   required uint64 crcPerBlock = 2;
   required bytes md5 = 3;
+  optional ChecksumTypeProto crcType = 4 [default = CRC32];
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Fri Aug 24 20:38:08 2012
@@ -179,6 +179,15 @@ message HdfsFileStatusProto {
 } 
 
 /**
+ * Checksum algorithms/types used in HDFS
+ */
+enum ChecksumTypeProto {
+  NULL = 0;
+  CRC32 = 1;
+  CRC32C = 2;
+}
+
+/**
  * HDFS Server Defaults
  */
 message FsServerDefaultsProto {
@@ -188,6 +197,8 @@ message FsServerDefaultsProto {
   required uint32 replication = 4; // Actually a short - only 16 bits used
   required uint32 fileBufferSize = 5;
   optional bool encryptDataTransfer = 6 [default = false];
+  optional uint64 trashInterval = 7 [default = 0];
+  optional ChecksumTypeProto checksumType = 8 [default = CRC32];
 }
 
 
@@ -200,15 +211,6 @@ message DirectoryListingProto {
 }
 
 /**
- * Status of current cluster upgrade from one version to another
- */
-message UpgradeStatusReportProto {
-  required uint32 version = 1;;
-  required uint32 upgradeStatus = 2; // % completed in range 0 & 100
-	required bool finalized = 3;
-}
-
-/**
  * Common node information shared by all the nodes in the cluster
  */
 message StorageInfoProto {
@@ -315,7 +317,7 @@ message RemoteEditLogManifestProto {
  */
 message NamespaceInfoProto {
   required string buildVersion = 1;         // Software revision version (e.g. an svn or git revision)
-  required uint32 distUpgradeVersion = 2;   // Distributed upgrade version
+  required uint32 unused = 2;               // Retained for backward compatibility
   required string blockPoolID = 3;          // block pool used by the namespace
   required StorageInfoProto storageInfo = 4;// Node information
   required string softwareVersion = 5;      // Software version number (e.g. 2.0.0)

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Fri Aug 24 20:38:08 2012
@@ -78,7 +78,7 @@
 
 <property>
   <name>dfs.datanode.handler.count</name>
-  <value>3</value>
+  <value>10</value>
   <description>The number of server threads for the datanode.</description>
 </property>
 
@@ -1056,4 +1056,28 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
+  <value>false</value>
+  <description>
+    Boolean which enables backend datanode-side support for the experimental DistributedFileSystem#getFileBlockStorageLocations API.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.file-block-storage-locations.num-threads</name>
+  <value>10</value>
+  <description>
+    Number of threads used for making parallel RPCs in DistributedFileSystem#getFileBlockStorageLocations().
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.file-block-storage-locations.timeout</name>
+  <value>60</value>
+  <description>
+    Timeout (in seconds) for the parallel RPCs made in DistributedFileSystem#getFileBlockStorageLocations().
+  </description>
+</property>
+
 </configuration>
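
The three properties added above are ordinary Configuration keys, so a
libhdfs client can read the client-side ones back with the conf accessors
documented in hdfs.h. A small sketch, assuming the hdfsConfGetInt accessor
declared alongside hdfsConfGetStr in that header; the fallback values
mirror the defaults above:

    #include <stdint.h>
    #include <stdio.h>
    #include "hdfs.h"

    int main(void)
    {
        /* The out params are left untouched if a key is absent, so
         * initialize them to the hdfs-default.xml defaults. */
        int32_t numThreads = 10;
        int32_t timeoutSecs = 60;

        hdfsConfGetInt("dfs.client.file-block-storage-locations.num-threads",
                       &numThreads);
        hdfsConfGetInt("dfs.client.file-block-storage-locations.timeout",
                       &timeoutSecs);
        printf("storage-location RPCs: %d threads, %d s timeout\n",
               (int)numThreads, (int)timeoutSecs);
        return 0;
    }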

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1373573-1377085

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1373573-1377085

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1373573-1377085

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1373573-1377085

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java Fri Aug 24 20:38:08 2012
@@ -75,9 +75,12 @@ public class TestHDFSCLI extends CLITest
   @After
   @Override
   public void tearDown() throws Exception {
-    if (null != fs)
+    if (fs != null) {
       fs.close();
-    dfsCluster.shutdown();
+    }
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+    }
     Thread.sleep(2000);
     super.tearDown();
   }
@@ -94,9 +97,7 @@ public class TestHDFSCLI extends CLITest
   protected Result execute(CLICommand cmd) throws Exception {
     return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
   }
-
-  //TODO: The test is failing due to the change in HADOOP-7360.
-  //      HDFS-2038 is going to fix it.  Disable the test for the moment.
+  
   @Test
   @Override
   public void testAll () {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java Fri Aug 24 20:38:08 2012
@@ -59,7 +59,9 @@ public class TestViewFileSystemAtHdfsRoo
       
   @AfterClass
   public static void clusterShutdownAtEnd() throws Exception {
-    cluster.shutdown();   
+    if (cluster != null) {
+      cluster.shutdown();
+    }
   }
 
   @Override
@@ -84,7 +86,7 @@ public class TestViewFileSystemAtHdfsRoo
 
   @Override
   int getExpectedDelegationTokenCount() {
-    return 8;
+    return 1; // all mount points refer to the same fs, so 1 unique token
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java Fri Aug 24 20:38:08 2012
@@ -117,7 +117,7 @@ public class TestViewFileSystemHdfs exte
 
   @Override
   int getExpectedDelegationTokenCount() {
-    return 9;
+    return 2; // mount points span 2 unique HDFS instances, so 2 tokens
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Fri Aug 24 20:38:08 2012
@@ -75,7 +75,7 @@ public class TestDataTransferProtocol {
                     "org.apache.hadoop.hdfs.TestDataTransferProtocol");
 
   private static final DataChecksum DEFAULT_CHECKSUM =
-    DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32C, 512);
+    DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
   
   DatanodeID datanode;
   InetSocketAddress dnAddr;


