hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1329947 [2/2] - in /hadoop/common/branches/HDFS-3092/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs/src/main/jav...
Date: Tue, 24 Apr 2012 19:05:18 GMT
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java Tue Apr 24 19:05:09 2012
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.security.token.delegation;
 
-import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.Collection;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -25,7 +26,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 
@@ -37,32 +37,35 @@ public class DelegationTokenSelector
     extends AbstractDelegationTokenSelector<DelegationTokenIdentifier>{
   public static final String SERVICE_NAME_KEY = "hdfs.service.host_";
 
-  private static final DelegationTokenSelector INSTANCE = new DelegationTokenSelector();
-
-  /** Select the delegation token for hdfs from the ugi. */
-  public static Token<DelegationTokenIdentifier> selectHdfsDelegationToken(
-      final InetSocketAddress nnAddr, final UserGroupInformation ugi,
+  /**
+   * Select the delegation token for hdfs.  The port will be rewritten to
+   * the port of hdfs.service.host_$nnAddr, or to the default namenode rpc
+   * port.  This method should only be called by non-hdfs filesystems that
+   * do not use the rpc port to acquire tokens, e.g. webhdfs and hftp.
+   * @param nnUri the URI of the remote namenode
+   * @param tokens the candidate tokens, as a collection
+   * @param conf the hadoop configuration
+   * @return the selected token, or null if none matches
+   */
+  public Token<DelegationTokenIdentifier> selectToken(
+      final URI nnUri, Collection<Token<?>> tokens,
       final Configuration conf) {
     // this guesses the remote cluster's rpc service port.
     // the current token design assumes it's the same as the local cluster's
    // rpc port unless a config key is set.  there should be a way to
    // automatically and correctly determine the value
-    final String key = SERVICE_NAME_KEY + SecurityUtil.buildTokenService(nnAddr);
-    final String nnServiceName = conf.get(key);
+    Text serviceName = SecurityUtil.buildTokenService(nnUri);
+    final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);
     
     int nnRpcPort = NameNode.DEFAULT_PORT;
     if (nnServiceName != null) {
       nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort(); 
     }
+    // use the original hostname from the uri to avoid unintentional hostname resolution
+    serviceName = SecurityUtil.buildTokenService(
+        NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort));
     
-    final Text serviceName = SecurityUtil.buildTokenService(
-        new InetSocketAddress(nnAddr.getHostName(), nnRpcPort));
-    return INSTANCE.selectToken(serviceName, ugi.getTokens());
-  }
-  
-  public static Token<DelegationTokenIdentifier> selectHdfsDelegationToken(
-      Text serviceName, UserGroupInformation ugi) {
-    return INSTANCE.selectToken(serviceName, ugi.getTokens());
+    return selectToken(serviceName, tokens);
   }
 
   public DelegationTokenSelector() {

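The hunk above re-keys token selection on a URI instead of a resolved socket address: the service string keeps the URI's original hostname, and only the port is rewritten, taken from hdfs.service.host_<service> when that key is set and otherwise assumed to be the default namenode RPC port. A minimal sketch of that rewriting in plain Java, with a Map standing in for Hadoop's Configuration, 8020 assumed as the default RPC port, and the ip-vs-host switch that SecurityUtil.buildTokenService applies left out:

import java.net.URI;
import java.util.Map;

public class TokenServiceRewriteSketch {
  // assumed default; the real code uses NameNode.DEFAULT_PORT
  static final int DEFAULT_NN_RPC_PORT = 8020;

  static String rewriteService(URI nnUri, Map<String, String> conf) {
    // initial service derived from the uri, e.g. "nn.example.com:50070"
    String initialService = nnUri.getHost() + ":" + nnUri.getPort();
    String mapped = conf.get("hdfs.service.host_" + initialService);
    int rpcPort = DEFAULT_NN_RPC_PORT;
    if (mapped != null) {
      // take the port of the configured rpc address
      rpcPort = Integer.parseInt(mapped.substring(mapped.lastIndexOf(':') + 1));
    }
    // keep the original hostname so nothing is resolved here
    return nnUri.getHost() + ":" + rpcPort;
  }

  public static void main(String[] args) {
    URI webhdfs = URI.create("webhdfs://nn.example.com:50070");
    Map<String, String> conf =
        Map.of("hdfs.service.host_nn.example.com:50070", "nn.example.com:9000");
    System.out.println(rewriteService(webhdfs, conf));     // nn.example.com:9000
    System.out.println(rewriteService(webhdfs, Map.of())); // nn.example.com:8020
  }
}
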
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Apr 24 19:05:09 2012
@@ -1513,11 +1513,6 @@ public class DataNode extends Configured
       printUsage();
       return null;
     }
-    if (conf.get("dfs.network.script") != null) {
-      LOG.error("This configuration for rack identification is not supported" +
-          " anymore. RackID resolution is handled by the NameNode.");
-      System.exit(-1);
-    }
     Collection<URI> dataDirs = getStorageDirs(conf);
     UserGroupInformation.setConfiguration(conf);
     SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Tue Apr 24 19:05:09 2012
@@ -178,8 +178,25 @@ public class DatanodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
+        return put(in, ugi, delegation, nnRpcAddr, path.getAbsolutePath(), op,
+            permission, overwrite, bufferSize, replication, blockSize);
+      }
+    });
+  }
 
-    final String fullpath = path.getAbsolutePath();
+  private Response put(
+      final InputStream in,
+      final UserGroupInformation ugi,
+      final DelegationParam delegation,
+      final InetSocketAddress nnRpcAddr,
+      final String fullpath,
+      final PutOpParam op,
+      final PermissionParam permission,
+      final OverwriteParam overwrite,
+      final BufferSizeParam bufferSize,
+      final ReplicationParam replication,
+      final BlockSizeParam blockSize
+      ) throws IOException, URISyntaxException {
     final DataNode datanode = (DataNode)context.getAttribute("datanode");
 
     switch(op.getValue()) {
@@ -214,8 +231,6 @@ public class DatanodeWebHdfsMethods {
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
-      }
-    });
   }
 
   /** Handle HTTP POST request for the root. */
@@ -265,8 +280,21 @@ public class DatanodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException {
+        return post(in, ugi, delegation, nnRpcAddr, path.getAbsolutePath(), op,
+            bufferSize);
+      }
+    });
+  }
 
-    final String fullpath = path.getAbsolutePath();
+  private Response post(
+      final InputStream in,
+      final UserGroupInformation ugi,
+      final DelegationParam delegation,
+      final InetSocketAddress nnRpcAddr,
+      final String fullpath,
+      final PostOpParam op,
+      final BufferSizeParam bufferSize
+      ) throws IOException {
     final DataNode datanode = (DataNode)context.getAttribute("datanode");
 
     switch(op.getValue()) {
@@ -292,8 +320,6 @@ public class DatanodeWebHdfsMethods {
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
-      }
-    });
   }
 
   /** Handle HTTP GET request for the root. */
@@ -348,8 +374,22 @@ public class DatanodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException {
+        return get(ugi, delegation, nnRpcAddr, path.getAbsolutePath(), op,
+            offset, length, bufferSize);
+      }
+    });
+  }
 
-    final String fullpath = path.getAbsolutePath();
+  private Response get(
+      final UserGroupInformation ugi,
+      final DelegationParam delegation,
+      final InetSocketAddress nnRpcAddr,
+      final String fullpath,
+      final GetOpParam op,
+      final OffsetParam offset,
+      final LengthParam length,
+      final BufferSizeParam bufferSize
+      ) throws IOException {
     final DataNode datanode = (DataNode)context.getAttribute("datanode");
     final Configuration conf = new Configuration(datanode.getConf());
 
@@ -412,7 +452,5 @@ public class DatanodeWebHdfsMethods {
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
-      }
-    });
   }
 }

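The rewrite above is a mechanical refactoring: each PrivilegedExceptionAction body shrinks to a one-line call, and the operation switch moves into a private put/post/get method that takes its inputs as explicit parameters. A minimal sketch of the same pattern, with a hypothetical Ugi interface standing in for UserGroupInformation:

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

public class ExtractDoAsBodySketch {
  // stand-in for UserGroupInformation.doAs
  interface Ugi {
    <T> T doAs(PrivilegedExceptionAction<T> action) throws Exception;
  }

  // before the refactoring, the whole switch lived inside run()
  String handlePut(Ugi ugi, final String path, final String op) throws Exception {
    return ugi.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws IOException {
        return put(path, op); // after: run() only delegates
      }
    });
  }

  // the extracted method is flat and directly testable, and still runs
  // inside whatever security context doAs established
  private String put(String fullpath, String op) throws IOException {
    switch (op) {
    case "CREATE":
      return "created " + fullpath;
    default:
      throw new UnsupportedOperationException(op + " is not supported");
    }
  }

  public static void main(String[] args) throws Exception {
    Ugi passthrough = new Ugi() { // no real security context in this sketch
      @Override
      public <T> T doAs(PrivilegedExceptionAction<T> action) throws Exception {
        return action.run();
      }
    };
    System.out.println(new ExtractDoAsBodySketch().handlePut(passthrough, "/tmp/f", "CREATE"));
  }
}
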
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Tue Apr 24 19:05:09 2012
@@ -1160,7 +1160,9 @@ public abstract class FSEditLogOp {
     @Override
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "SRC", src);
-      XMLUtils.addSaxString(contentHandler, "USERNAME", username);
+      if (username != null) {
+        XMLUtils.addSaxString(contentHandler, "USERNAME", username);
+      }
       if (groupname != null) {
         XMLUtils.addSaxString(contentHandler, "GROUPNAME", groupname);
       }
@@ -1168,12 +1170,10 @@ public abstract class FSEditLogOp {
     
     @Override void fromXml(Stanza st) throws InvalidXmlException {
       this.src = st.getValue("SRC");
-      this.username = st.getValue("USERNAME");
-      if (st.hasChildren("GROUPNAME")) {
-        this.groupname = st.getValue("GROUPNAME");
-      } else {
-        this.groupname = null;
-      }
+      this.username = (st.hasChildren("USERNAME")) ? 
+          st.getValue("USERNAME") : null;
+      this.groupname = (st.hasChildren("GROUPNAME")) ? 
+          st.getValue("GROUPNAME") : null;
     }
   }
 
@@ -2346,7 +2346,7 @@ public abstract class FSEditLogOp {
         Long.valueOf(block.getBlockId()).toString());
     XMLUtils.addSaxString(contentHandler, "NUM_BYTES",
         Long.valueOf(block.getNumBytes()).toString());
-    XMLUtils.addSaxString(contentHandler, "GENERATION_STAMP",
+    XMLUtils.addSaxString(contentHandler, "GENSTAMP",
         Long.valueOf(block.getGenerationStamp()).toString());
     contentHandler.endElement("", "", "BLOCK");
   }
@@ -2355,7 +2355,7 @@ public abstract class FSEditLogOp {
       throws InvalidXmlException {
     long blockId = Long.valueOf(st.getValue("BLOCK_ID"));
     long numBytes = Long.valueOf(st.getValue("NUM_BYTES"));
-    long generationStamp = Long.valueOf(st.getValue("GENERATION_STAMP"));
+    long generationStamp = Long.valueOf(st.getValue("GENSTAMP"));
     return new Block(blockId, numBytes, generationStamp);
   }
 

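Two things change in this hunk: USERNAME becomes optional in both directions of the XML round trip, and the element name GENERATION_STAMP is shortened to GENSTAMP (the editsStored.xml test resource further down is updated to match). A minimal sketch of the optional-field convention, with a StringBuilder and a Map standing in for the SAX ContentHandler and Hadoop's Stanza:

import java.util.HashMap;
import java.util.Map;

public class OptionalXmlFieldSketch {
  // write side: emit an element only when its value is present
  static void toXml(StringBuilder out, String username, String groupname) {
    if (username != null) {
      out.append("<USERNAME>").append(username).append("</USERNAME>");
    }
    if (groupname != null) {
      out.append("<GROUPNAME>").append(groupname).append("</GROUPNAME>");
    }
  }

  // read side: probe for the child before reading it, mirroring st.hasChildren(...)
  static String readOptional(Map<String, String> stanza, String name) {
    return stanza.containsKey(name) ? stanza.get(name) : null;
  }

  public static void main(String[] args) {
    StringBuilder out = new StringBuilder();
    toXml(out, null, "supergroup"); // e.g. a chown that only changes the group
    System.out.println(out);        // <GROUPNAME>supergroup</GROUPNAME>

    Map<String, String> stanza = new HashMap<>();
    stanza.put("GROUPNAME", "supergroup");
    System.out.println(readOptional(stanza, "USERNAME")); // null
  }
}
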
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Tue Apr 24 19:05:09 2012
@@ -35,6 +35,8 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
@@ -44,6 +46,7 @@ import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 /**
@@ -218,26 +221,44 @@ public class GetImageServlet extends Htt
     return throttler;
   }
   
-  protected boolean isValidRequestor(String remoteUser, Configuration conf)
+  @VisibleForTesting
+  static boolean isValidRequestor(String remoteUser, Configuration conf)
       throws IOException {
     if(remoteUser == null) { // This really shouldn't happen...
       LOG.warn("Received null remoteUser while authorizing access to getImage servlet");
       return false;
     }
+    
+    Set<String> validRequestors = new HashSet<String>();
 
-    String[] validRequestors = {
+    validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), NameNode
-            .getAddress(conf).getHostName()),
+            .getAddress(conf).getHostName()));
+    validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), NameNode
-            .getAddress(conf).getHostName()),
+            .getAddress(conf).getHostName()));
+    validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
-            SecondaryNameNode.getHttpAddress(conf).getHostName()),
+            SecondaryNameNode.getHttpAddress(conf).getHostName()));
+    validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY),
-            SecondaryNameNode.getHttpAddress(conf).getHostName()) };
+            SecondaryNameNode.getHttpAddress(conf).getHostName()));
+
+    if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
+      Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
+      validRequestors.add(
+          SecurityUtil.getServerPrincipal(otherNnConf
+              .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
+              NameNode.getAddress(otherNnConf).getHostName()));
+      validRequestors.add(
+          SecurityUtil.getServerPrincipal(otherNnConf
+              .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
+              NameNode.getAddress(otherNnConf).getHostName()));
+    }
 
     for(String v : validRequestors) {
       if(v != null && v.equals(remoteUser)) {

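The change above replaces a fixed principal array with a Set so that, when HA is enabled, the Kerberos principals of the other NameNode can be added and its image requests pass the same membership test. A minimal sketch of that test, with hypothetical principal strings standing in for the values SecurityUtil.getServerPrincipal would resolve from the configuration:

import java.util.HashSet;
import java.util.Set;

public class ValidRequestorSketch {
  static boolean isValidRequestor(String remoteUser, Set<String> validRequestors) {
    if (remoteUser == null) {
      return false; // unauthenticated callers are rejected outright
    }
    for (String v : validRequestors) {
      if (v != null && v.equals(remoteUser)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    Set<String> valid = new HashSet<>();
    valid.add("nn/nn1.example.com@EXAMPLE.COM"); // this namenode
    valid.add("nn/nn2.example.com@EXAMPLE.COM"); // added only when HA is enabled
    System.out.println(isValidRequestor("nn/nn2.example.com@EXAMPLE.COM", valid)); // true
    System.out.println(isValidRequestor("dn/dn1.example.com@EXAMPLE.COM", valid)); // false
  }
}
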
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Apr 24 19:05:09 2012
@@ -171,7 +171,8 @@ public class NameNode {
     DFS_NAMENODE_BACKUP_ADDRESS_KEY,
     DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
     DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY,
-    DFS_HA_FENCE_METHODS_KEY
+    DFS_HA_FENCE_METHODS_KEY,
+    DFS_NAMENODE_USER_NAME_KEY
   };
   
   public long getProtocolVersion(String protocol, 

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java Tue Apr 24 19:05:09 2012
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
@@ -93,14 +94,15 @@ public class ConfiguredFailoverProxyProv
             "for URI " + uri);
       }
       
-      for (InetSocketAddress address : addressesInNN.values()) {
+      Collection<InetSocketAddress> addressesOfNns = addressesInNN.values();
+      for (InetSocketAddress address : addressesOfNns) {
         proxies.add(new AddressRpcProxyPair<T>(address));
-        
-        // The client may have a delegation token set for the logical
-        // URI of the cluster. Clone this token to apply to each of the
-        // underlying IPC addresses so that the IPC code can find it.
-        HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, address);
       }
+
+      // The client may have a delegation token set for the logical
+      // URI of the cluster. Clone this token to apply to each of the
+      // underlying IPC addresses so that the IPC code can find it.
+      HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, addressesOfNns);
     } catch (IOException e) {
       throw new RuntimeException(e);
     }

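Here the per-iteration clone of the logical-URI token becomes a single call over the whole address collection, matching the new HAUtil.cloneDelegationTokenForLogicalUri signature. A minimal sketch of the batch clone, with a stand-in Token type that carries only a service string:

import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

public class CloneTokenForLogicalUriSketch {
  static class Token {
    final String service;
    Token(String service) { this.service = service; }
  }

  // find tokens filed under the logical uri and re-file a copy per physical address
  static List<Token> cloneForAddresses(List<Token> ugiTokens, URI logicalUri,
      Collection<InetSocketAddress> nnAddrs) {
    String logicalService = logicalUri.getHost();
    List<Token> clones = new ArrayList<>();
    for (Token t : ugiTokens) {
      if (t.service.equals(logicalService)) {
        for (InetSocketAddress addr : nnAddrs) {
          // in the real code the token bytes are copied; only the service changes
          clones.add(new Token(addr.getHostString() + ":" + addr.getPort()));
        }
      }
    }
    return clones;
  }

  public static void main(String[] args) {
    List<Token> tokens = new ArrayList<>();
    tokens.add(new Token("my-ha-uri")); // filed under the logical uri
    Collection<InetSocketAddress> addrs = List.of(
        InetSocketAddress.createUnresolved("nn1", 8020),
        InetSocketAddress.createUnresolved("nn2", 8020));
    for (Token t : cloneForAddresses(tokens, URI.create("hdfs://my-ha-uri/"), addrs)) {
      System.out.println(t.service); // nn1:8020, then nn2:8020
    }
  }
}
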
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Tue Apr 24 19:05:09 2012
@@ -314,8 +314,40 @@ public class NamenodeWebHdfsMethods {
       public Response run() throws IOException, URISyntaxException {
         REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
+          return put(ugi, delegation, username, doAsUser,
+              path.getAbsolutePath(), op, destination, owner, group,
+              permission, overwrite, bufferSize, replication, blockSize,
+              modificationTime, accessTime, renameOptions, createParent,
+              delegationTokenArgument);
+        } finally {
+          REMOTE_ADDRESS.set(null);
+        }
+      }
+    });
+  }
+
+  private Response put(
+      final UserGroupInformation ugi,
+      final DelegationParam delegation,
+      final UserParam username,
+      final DoAsParam doAsUser,
+      final String fullpath,
+      final PutOpParam op,
+      final DestinationParam destination,
+      final OwnerParam owner,
+      final GroupParam group,
+      final PermissionParam permission,
+      final OverwriteParam overwrite,
+      final BufferSizeParam bufferSize,
+      final ReplicationParam replication,
+      final BlockSizeParam blockSize,
+      final ModificationTimeParam modificationTime,
+      final AccessTimeParam accessTime,
+      final RenameOptionSetParam renameOptions,
+      final CreateParentParam createParent,
+      final TokenArgumentParam delegationTokenArgument
+      ) throws IOException, URISyntaxException {
 
-    final String fullpath = path.getAbsolutePath();
     final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final NamenodeProtocols np = namenode.getRpcServer();
@@ -396,12 +428,6 @@ public class NamenodeWebHdfsMethods {
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
-
-        } finally {
-          REMOTE_ADDRESS.set(null);
-        }
-      }
-    });
   }
 
   /** Handle HTTP POST request for the root. */
@@ -452,8 +478,24 @@ public class NamenodeWebHdfsMethods {
       public Response run() throws IOException, URISyntaxException {
         REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
+          return post(ugi, delegation, username, doAsUser,
+              path.getAbsolutePath(), op, bufferSize);
+        } finally {
+          REMOTE_ADDRESS.set(null);
+        }
+      }
+    });
+  }
 
-    final String fullpath = path.getAbsolutePath();
+  private Response post(
+      final UserGroupInformation ugi,
+      final DelegationParam delegation,
+      final UserParam username,
+      final DoAsParam doAsUser,
+      final String fullpath,
+      final PostOpParam op,
+      final BufferSizeParam bufferSize
+      ) throws IOException, URISyntaxException {
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
 
     switch(op.getValue()) {
@@ -466,12 +508,6 @@ public class NamenodeWebHdfsMethods {
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
-
-        } finally {
-          REMOTE_ADDRESS.set(null);
-        }
-      }
-    });
   }
 
   /** Handle HTTP GET request for the root. */
@@ -534,9 +570,28 @@ public class NamenodeWebHdfsMethods {
       public Response run() throws IOException, URISyntaxException {
         REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
+          return get(ugi, delegation, username, doAsUser,
+              path.getAbsolutePath(), op, offset, length, renewer, bufferSize);
+        } finally {
+          REMOTE_ADDRESS.set(null);
+        }
+      }
+    });
+  }
 
+  private Response get(
+      final UserGroupInformation ugi,
+      final DelegationParam delegation,
+      final UserParam username,
+      final DoAsParam doAsUser,
+      final String fullpath,
+      final GetOpParam op,
+      final OffsetParam offset,
+      final LengthParam length,
+      final RenewerParam renewer,
+      final BufferSizeParam bufferSize
+      ) throws IOException, URISyntaxException {
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
-    final String fullpath = path.getAbsolutePath();
     final NamenodeProtocols np = namenode.getRpcServer();
 
     switch(op.getValue()) {
@@ -613,13 +668,7 @@ public class NamenodeWebHdfsMethods {
     }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
-    }    
-
-        } finally {
-          REMOTE_ADDRESS.set(null);
-        }
-      }
-    });
+    }
   }
 
   private static DirectoryListing getDirectoryListing(final NamenodeProtocols np,
@@ -712,25 +761,35 @@ public class NamenodeWebHdfsMethods {
       public Response run() throws IOException {
         REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
-
-        final NameNode namenode = (NameNode)context.getAttribute("name.node");
-        final String fullpath = path.getAbsolutePath();
-
-        switch(op.getValue()) {
-        case DELETE:
-        {
-          final boolean b = namenode.getRpcServer().delete(fullpath, recursive.getValue());
-          final String js = JsonUtil.toJsonString("boolean", b);
-          return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-        }
-        default:
-          throw new UnsupportedOperationException(op + " is not supported");
-        }
-
+          return delete(ugi, delegation, username, doAsUser,
+              path.getAbsolutePath(), op, recursive);
         } finally {
           REMOTE_ADDRESS.set(null);
         }
       }
     });
   }
+
+  private Response delete(
+      final UserGroupInformation ugi,
+      final DelegationParam delegation,
+      final UserParam username,
+      final DoAsParam doAsUser,
+      final String fullpath,
+      final DeleteOpParam op,
+      final RecursiveParam recursive
+      ) throws IOException {
+    final NameNode namenode = (NameNode)context.getAttribute("name.node");
+
+    switch(op.getValue()) {
+    case DELETE:
+    {
+      final boolean b = namenode.getRpcServer().delete(fullpath, recursive.getValue());
+      final String js = JsonUtil.toJsonString("boolean", b);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
+    default:
+      throw new UnsupportedOperationException(op + " is not supported");
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Tue Apr 24 19:05:09 2012
@@ -29,6 +29,7 @@ import java.net.MalformedURLException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
@@ -117,8 +118,8 @@ public class WebHdfsFileSystem extends F
   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
   /** Token selector */
-  public static final AbstractDelegationTokenSelector<DelegationTokenIdentifier> DT_SELECTOR
-      = new AbstractDelegationTokenSelector<DelegationTokenIdentifier>(TOKEN_KIND) {};
+  public static final WebHdfsDelegationTokenSelector DT_SELECTOR
+      = new WebHdfsDelegationTokenSelector();
 
   private static DelegationTokenRenewer<WebHdfsFileSystem> DT_RENEWER = null;
 
@@ -164,7 +165,7 @@ public class WebHdfsFileSystem extends F
     } catch (URISyntaxException e) {
       throw new IllegalArgumentException(e);
     }
-    this.nnAddr = NetUtils.createSocketAddr(uri.toString());
+    this.nnAddr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
     this.workingDir = getHomeDirectory();
 
     if (UserGroupInformation.isSecurityEnabled()) {
@@ -174,12 +175,7 @@ public class WebHdfsFileSystem extends F
 
   protected void initDelegationToken() throws IOException {
     // look for webhdfs token, then try hdfs
-    final Text serviceName = SecurityUtil.buildTokenService(nnAddr);
-    Token<?> token = DT_SELECTOR.selectToken(serviceName, ugi.getTokens());      
-    if (token == null) {
-      token = DelegationTokenSelector.selectHdfsDelegationToken(
-          nnAddr, ugi, getConf());
-    }
+    Token<?> token = selectDelegationToken(ugi);
 
     //since we don't already have a token, go get one
     boolean createdToken = false;
@@ -200,6 +196,11 @@ public class WebHdfsFileSystem extends F
     }
   }
 
+  protected Token<DelegationTokenIdentifier> selectDelegationToken(
+      UserGroupInformation ugi) {
+    return DT_SELECTOR.selectToken(getCanonicalUri(), ugi.getTokens(), getConf());
+  }
+
   @Override
   protected int getDefaultPort() {
     return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
@@ -845,4 +846,24 @@ public class WebHdfsFileSystem extends F
       }
     }
   }
+  
+  private static class WebHdfsDelegationTokenSelector
+  extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
+    private static final DelegationTokenSelector hdfsTokenSelector =
+        new DelegationTokenSelector();
+    
+    public WebHdfsDelegationTokenSelector() {
+      super(TOKEN_KIND);
+    }
+    
+    Token<DelegationTokenIdentifier> selectToken(URI nnUri,
+        Collection<Token<?>> tokens, Configuration conf) {
+      Token<DelegationTokenIdentifier> token =
+          selectToken(SecurityUtil.buildTokenService(nnUri), tokens);
+      if (token == null) {
+        token = hdfsTokenSelector.selectToken(nnUri, tokens, conf); 
+      }
+      return token;
+    }
+  }
 }

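The new WebHdfsDelegationTokenSelector makes the lookup order explicit: first a token of the WEBHDFS kind under the filesystem's own service address, then a handoff to the hdfs DelegationTokenSelector, which rewrites the port to the RPC service as shown earlier. A minimal sketch of the two-level lookup with a stand-in Token type:

import java.util.Collection;
import java.util.List;

public class FallbackTokenSelectorSketch {
  static class Token {
    final String kind, service;
    Token(String kind, String service) { this.kind = kind; this.service = service; }
  }

  static Token selectByKind(String kind, String service, Collection<Token> tokens) {
    for (Token t : tokens) {
      if (t.kind.equals(kind) && t.service.equals(service)) {
        return t;
      }
    }
    return null; // like AbstractDelegationTokenSelector, no match yields null
  }

  // prefer the filesystem's own token kind, then fall back to an hdfs token
  static Token select(String httpService, String rpcService, Collection<Token> tokens) {
    Token token = selectByKind("WEBHDFS delegation", httpService, tokens);
    if (token == null) {
      token = selectByKind("HDFS_DELEGATION_TOKEN", rpcService, tokens);
    }
    return token;
  }

  public static void main(String[] args) {
    Collection<Token> tokens =
        List.of(new Token("HDFS_DELEGATION_TOKEN", "127.0.0.1:8020"));
    Token t = select("127.0.0.1:50070", "127.0.0.1:8020", tokens);
    System.out.println(t.kind); // falls back to the hdfs token
  }
}
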
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1327258-1329943

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1327258-1329943

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1327258-1329943

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1327258-1329943

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1327258-1329943

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Apr 24 19:05:09 2012
@@ -94,6 +94,7 @@ import org.apache.hadoop.metrics2.lib.De
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.util.StringUtils;
@@ -1058,16 +1059,14 @@ public class MiniDFSCluster {
       if(dn == null)
         throw new IOException("Cannot start DataNode in "
             + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
-      //NOTE: the following is true if and only if:
-      //      hadoop.security.token.service.use_ip=true
-      //since the HDFS does things based on IP:port, we need to add the mapping
-      //for IP:port to rackId
-      String ipAddr = dn.getXferAddress().getAddress().getHostAddress();
+      // since HDFS does things based on host|ip:port, we need to add the
+      // mapping from the service to the rackId
+      String service =
+          SecurityUtil.buildTokenService(dn.getXferAddress()).toString();
       if (racks != null) {
-        int port = dn.getXferAddress().getPort();
-        LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
+        LOG.info("Adding node with service : " + service +
                             " to rack " + racks[i-curDatanodesNum]);
-        StaticMapping.addNodeToRack(ipAddr + ":" + port,
+        StaticMapping.addNodeToRack(service,
                                   racks[i-curDatanodesNum]);
       }
       dn.runDatanodeDaemon();

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Tue Apr 24 19:05:09 2012
@@ -63,7 +63,6 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.mockito.internal.stubbing.answers.ThrowsException;
@@ -100,7 +99,7 @@ public class TestDFSClientRetries extend
     }
 
     @Override
-    public Writable call(RpcKind rpcKind, String protocol, Writable param, long receiveTime)
+    public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param, long receiveTime)
         throws IOException {
       if (sleep) {
         // sleep a bit

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java Tue Apr 24 19:05:09 2012
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import static 
   org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
 
+import java.io.IOException;
 import java.lang.reflect.Field;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
@@ -31,6 +32,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -66,4 +68,153 @@ public class TestHftpDelegationToken {
     renewToken.setAccessible(true);
     assertSame("wrong token", token, renewToken.get(fs));
   }
+
+  @Test
+  public void testSelectHftpDelegationToken() throws Exception {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
+    Configuration conf = new Configuration();
+    conf.setClass("fs.hftp.impl", MyHftpFileSystem.class, FileSystem.class);
+    
+    int httpPort = 80;
+    int httpsPort = 443;
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, httpPort);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, httpsPort);
+    
+    // test with implicit default port 
+    URI fsUri = URI.create("hftp://localhost");
+    MyHftpFileSystem fs = (MyHftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpPort, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort, conf); // should still use secure port
+
+    // test with explicit default port
+    fsUri = URI.create("hftp://localhost:"+httpPort);
+    fs = (MyHftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpPort, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort, conf); // should still use secure port
+    
+    // test with non-default port
+    fsUri = URI.create("hftp://localhost:"+(httpPort+1));
+    fs = (MyHftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpPort+1, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort, conf); // should still use secure port
+    
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 5);
+  }
+
+  @Test
+  public void testSelectHsftpDelegationToken() throws Exception {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
+    Configuration conf = new Configuration();
+    conf.setClass("fs.hsftp.impl", MyHsftpFileSystem.class, FileSystem.class);
+
+    int httpPort = 80;
+    int httpsPort = 443;
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, httpPort);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, httpsPort);
+
+    // test with implicit default port 
+    URI fsUri = URI.create("hsftp://localhost");
+    MyHsftpFileSystem fs = (MyHsftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpsPort, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort, conf); 
+
+    // test with explicit default port
+    fsUri = URI.create("hsftp://localhost:"+httpsPort);
+    fs = (MyHsftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpsPort, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort, conf);
+    
+    // test with non-default port
+    fsUri = URI.create("hsftp://localhost:"+(httpsPort+1));
+    fs = (MyHsftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpsPort+1, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort+1, conf);
+    
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 5);
+  }
+  
+  private void checkTokenSelection(HftpFileSystem fs,
+                                   int port,
+                                   Configuration conf) throws IOException {
+    UserGroupInformation ugi =
+        UserGroupInformation.createUserForTesting(fs.getUri().getAuthority(), new String[]{});
+
+    // use ip-based tokens
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
+    // add an hdfs token to fall back on
+    Token<?> hdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+        new Text("127.0.0.1:8020"));
+    ugi.addToken(hdfsToken);
+
+    // test fallback to hdfs token
+    Token<?> token = fs.selectDelegationToken(ugi);
+    assertNotNull(token);
+    assertEquals(hdfsToken, token);
+
+    // test hftp is favored over hdfs
+    Token<?> hftpToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        HftpFileSystem.TOKEN_KIND, new Text("127.0.0.1:"+port));
+    ugi.addToken(hftpToken);
+    token = fs.selectDelegationToken(ugi);
+    assertNotNull(token);
+    assertEquals(hftpToken, token);
+    
+    // switch to using host-based tokens, no token should match
+    SecurityUtilTestHelper.setTokenServiceUseIp(false);
+    token = fs.selectDelegationToken(ugi);
+    assertNull(token);
+    
+    // test fallback to hdfs token
+    hdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+        new Text("localhost:8020"));
+    ugi.addToken(hdfsToken);
+    token = fs.selectDelegationToken(ugi);
+    assertNotNull(token);
+    assertEquals(hdfsToken, token);
+
+    // test hftp is favored over hdfs
+    hftpToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        HftpFileSystem.TOKEN_KIND, new Text("localhost:"+port));
+    ugi.addToken(hftpToken);
+    token = fs.selectDelegationToken(ugi);
+    assertNotNull(token);
+    assertEquals(hftpToken, token);
+  }
+  
+  static class MyHftpFileSystem extends HftpFileSystem {
+    @Override
+    public URI getCanonicalUri() {
+      return super.getCanonicalUri();
+    }
+    @Override
+    public int getDefaultPort() {
+      return super.getDefaultPort();
+    }
+    // don't automatically get a token
+    @Override
+    protected void initDelegationToken() throws IOException {}
+  }
+  
+  static class MyHsftpFileSystem extends HsftpFileSystem {
+    @Override
+    public URI getCanonicalUri() {
+      return super.getCanonicalUri();
+    }
+    @Override
+    public int getDefaultPort() {
+      return super.getDefaultPort();
+    }
+    // don't automatically get a token
+    @Override
+    protected void initDelegationToken() throws IOException {}
+  }
 }
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java Tue Apr 24 19:05:09 2012
@@ -41,6 +41,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SaslInputStream;
 import org.apache.hadoop.security.SaslRpcClient;
 import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.log4j.Level;
@@ -91,10 +92,8 @@ public class TestClientProtocolWithDeleg
     DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, owner, null);
     Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
         dtId, sm);
-    Text host = new Text(addr.getAddress().getHostAddress() + ":"
-        + addr.getPort());
-    token.setService(host);
-    LOG.info("Service IP address for token is " + host);
+    SecurityUtil.setTokenService(token, addr);
+    LOG.info("Service for token is " + token.getService());
     current.addToken(token);
     current.doAs(new PrivilegedExceptionAction<Object>() {
       @Override

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java Tue Apr 24 19:05:09 2012
@@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
 import org.junit.Assert;
@@ -86,7 +85,7 @@ public class TestInterDatanodeProtocol {
     }
 
     @Override
-    public Writable call(RpcKind rpcKind, String protocol, Writable param, long receiveTime)
+    public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param, long receiveTime)
         throws IOException {
       if (sleep) {
         // sleep a bit

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Tue Apr 24 19:05:09 2012
@@ -228,6 +228,7 @@ public class TestFsck extends TestCase {
   }
 
   public void testFsckMoveAndDelete() throws Exception {
+    final int MAX_MOVE_TRIES = 5;
     DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3, 8*1024);
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
@@ -269,17 +270,19 @@ public class TestFsck extends TestCase {
       } 
       
       // After a fsck -move, the corrupted file should still exist.
-      outStr = runFsck(conf, 1, true, "/", "-move" );
-      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-      String[] newFileNames = util.getFileNames(topDir);
-      boolean found = false;
-      for (String f : newFileNames) {
-        if (f.equals(corruptFileName)) {
-          found = true;
-          break;
+      for (int i = 0; i < MAX_MOVE_TRIES; i++) {
+        outStr = runFsck(conf, 1, true, "/", "-move" );
+        assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+        String[] newFileNames = util.getFileNames(topDir);
+        boolean found = false;
+        for (String f : newFileNames) {
+          if (f.equals(corruptFileName)) {
+            found = true;
+            break;
+          }
         }
+        assertTrue(found);
       }
-      assertTrue(found);
 
       // Fix the filesystem by moving corrupted files to lost+found
       outStr = runFsck(conf, 1, true, "/", "-move", "-delete");

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Tue Apr 24 19:05:09 2012
@@ -26,6 +26,7 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
+import java.util.HashSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -44,11 +45,13 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -100,6 +103,11 @@ public class TestDelegationTokensWithHA 
   }
 
 
+  @Before
+  public void prepTest() {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+  }
+  
   @Test
   public void testDelegationTokenDFSApi() throws Exception {
     Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("JobTracker");
@@ -187,23 +195,48 @@ public class TestDelegationTokensWithHA 
     URI haUri = new URI("hdfs://my-ha-uri/");
     token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri));
     ugi.addToken(token);
-    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nn0.getNameNodeAddress());
-    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nn1.getNameNodeAddress());
+
+    Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
+    nnAddrs.add(nn0.getNameNodeAddress());
+    nnAddrs.add(nn1.getNameNodeAddress());
+    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
     
     Collection<Token<? extends TokenIdentifier>> tokens = ugi.getTokens();
     assertEquals(3, tokens.size());
     
     LOG.info("Tokens:\n" + Joiner.on("\n").join(tokens));
+    DelegationTokenSelector dts = new DelegationTokenSelector();
     
     // check that the token selected for one of the physical IPC addresses
     // matches the one we received
-    InetSocketAddress addr = nn0.getNameNodeAddress();
-    Text ipcDtService = SecurityUtil.buildTokenService(addr);
-    Token<DelegationTokenIdentifier> token2 =
-        DelegationTokenSelector.selectHdfsDelegationToken(ipcDtService, ugi);
-    assertNotNull(token2);
-    assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
-    assertArrayEquals(token.getPassword(), token2.getPassword());
+    for (InetSocketAddress addr : nnAddrs) {
+      Text ipcDtService = SecurityUtil.buildTokenService(addr);
+      Token<DelegationTokenIdentifier> token2 =
+          dts.selectToken(ipcDtService, ugi.getTokens());
+      assertNotNull(token2);
+      assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
+      assertArrayEquals(token.getPassword(), token2.getPassword());
+    }
+    
+    // switch to host-based tokens, shouldn't match existing tokens 
+    SecurityUtilTestHelper.setTokenServiceUseIp(false);
+    for (InetSocketAddress addr : nnAddrs) {
+      Text ipcDtService = SecurityUtil.buildTokenService(addr);
+      Token<DelegationTokenIdentifier> token2 =
+          dts.selectToken(ipcDtService, ugi.getTokens());
+      assertNull(token2);
+    }
+    
+    // reclone the tokens, and see if they match now
+    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
+    for (InetSocketAddress addr : nnAddrs) {
+      Text ipcDtService = SecurityUtil.buildTokenService(addr);
+      Token<DelegationTokenIdentifier> token2 =
+          dts.selectToken(ipcDtService, ugi.getTokens());
+      assertNotNull(token2);
+      assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
+      assertArrayEquals(token.getPassword(), token2.getPassword());
+    }    
   }
 
   /**

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java Tue Apr 24 19:05:09 2012
@@ -34,10 +34,16 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.junit.Assert;
 import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.mockito.Mockito.mock;
 
 public class TestWebHdfsUrl {
@@ -90,4 +96,98 @@ public class TestWebHdfsUrl {
   private String generateUrlQueryPrefix(HttpOpParam.Op op, String username) {
     return "op=" + op.toString() + "&user.name=" + username;
   }
-}
+  
+  @Test
+  public void testSelectHdfsDelegationToken() throws Exception {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
+    Configuration conf = new Configuration();
+    conf.setClass("fs.webhdfs.impl", MyWebHdfsFileSystem.class, FileSystem.class);
+    
+    // test with implicit default port 
+    URI fsUri = URI.create("webhdfs://localhost");
+    MyWebHdfsFileSystem fs = (MyWebHdfsFileSystem) FileSystem.get(fsUri, conf);
+    checkTokenSelection(fs, conf);
+
+    // test with explicit default port
+    fsUri = URI.create("webhdfs://localhost:"+fs.getDefaultPort());
+    fs = (MyWebHdfsFileSystem) FileSystem.get(fsUri, conf);
+    checkTokenSelection(fs, conf);
+    
+    // test with non-default port
+    fsUri = URI.create("webhdfs://localhost:"+(fs.getDefaultPort()-1));
+    fs = (MyWebHdfsFileSystem) FileSystem.get(fsUri, conf);
+    checkTokenSelection(fs, conf);
+
+  }
+  
+  private void checkTokenSelection(MyWebHdfsFileSystem fs,
+                                   Configuration conf) throws IOException {
+    int port = fs.getCanonicalUri().getPort();
+    // can't clear tokens from ugi, so create a new user every time
+    UserGroupInformation ugi =
+        UserGroupInformation.createUserForTesting(fs.getUri().getAuthority(), new String[]{});
+
+    // use ip-based tokens
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
+    // add an hdfs token to fall back on
+    Token<?> hdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+        new Text("127.0.0.1:8020"));
+    ugi.addToken(hdfsToken);
+
+    // test fallback to hdfs token
+    Token<?> token = fs.selectDelegationToken(ugi);
+    assertNotNull(token);
+    assertEquals(hdfsToken, token);
+
+    // test webhdfs is favored over hdfs
+    Token<?> webHdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        WebHdfsFileSystem.TOKEN_KIND, new Text("127.0.0.1:"+port));
+    ugi.addToken(webHdfsToken);
+    token = fs.selectDelegationToken(ugi);
+    assertNotNull(token);
+    assertEquals(webHdfsToken, token);
+    
+    // switch to using host-based tokens, no token should match
+    SecurityUtilTestHelper.setTokenServiceUseIp(false);
+    token = fs.selectDelegationToken(ugi);
+    assertNull(token);
+    
+    // test fallback to hdfs token
+    hdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+        new Text("localhost:8020"));
+    ugi.addToken(hdfsToken);
+    token = fs.selectDelegationToken(ugi);
+    assertNotNull(token);
+    assertEquals(hdfsToken, token);
+
+    // test webhdfs is favored over hdfs
+    webHdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        WebHdfsFileSystem.TOKEN_KIND, new Text("localhost:"+port));
+    ugi.addToken(webHdfsToken);
+    token = fs.selectDelegationToken(ugi);
+    assertNotNull(token);
+    assertEquals(webHdfsToken, token);
+  }
+  
+  static class MyWebHdfsFileSystem extends WebHdfsFileSystem {
+    @Override
+    public URI getCanonicalUri() {
+      return super.getCanonicalUri();
+    }
+    @Override
+    public int getDefaultPort() {
+      return super.getDefaultPort();
+    }
+    // don't automatically get a token
+    @Override
+    protected void initDelegationToken() throws IOException {}
+  }
+}
\ No newline at end of file
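
A side note for readers following the new test: the assertions above hinge on the token "service" string matching the filesystem's canonical URI, which is rendered as ip:port or host:port depending on the token-service mode. Below is a minimal, illustrative sketch (not part of the commit) of staging tokens the way checkTokenSelection() does; the demo user name and the port values are assumptions mirroring the test fixtures, not values mandated by the API.

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenStagingSketch {
  public static void main(String[] args) throws Exception {
    // a fresh test user, since tokens cannot be removed from a ugi
    UserGroupInformation ugi =
        UserGroupInformation.createUserForTesting("demo", new String[]{});

    // ip-based service: matches lookups only while the selector is in
    // ip mode; in host mode the same token yields the assertNull() case
    Token<TokenIdentifier> webhdfs = new Token<TokenIdentifier>(
        new byte[0], new byte[0],
        WebHdfsFileSystem.TOKEN_KIND,
        new Text("127.0.0.1:50070"));   // assumed webhdfs port
    ugi.addToken(webhdfs);

    // hdfs token on the rpc port, selected only as a fallback when no
    // webhdfs token matches
    Token<TokenIdentifier> hdfs = new Token<TokenIdentifier>(
        new byte[0], new byte[0],
        DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
        new Text("127.0.0.1:8020"));
    ugi.addToken(hdfs);
  }
}

The same staging in host mode would use "localhost:..." service strings, which is exactly the second half of checkTokenSelection().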

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1329947&r1=1329946&r2=1329947&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Tue Apr 24 19:05:09 2012
@@ -247,7 +247,7 @@
       <BLOCK>
         <BLOCK_ID>-7144805496741076283</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1004</GENERATION_STAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -266,12 +266,12 @@
       <BLOCK>
         <BLOCK_ID>-7144805496741076283</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1004</GENERATION_STAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4125931756867080767</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1005</GENERATION_STAMP>
+        <GENSTAMP>1005</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -290,17 +290,17 @@
       <BLOCK>
         <BLOCK_ID>-7144805496741076283</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1004</GENERATION_STAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4125931756867080767</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1005</GENERATION_STAMP>
+        <GENSTAMP>1005</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>1562413691487277050</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1006</GENERATION_STAMP>
+        <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -319,17 +319,17 @@
       <BLOCK>
         <BLOCK_ID>-7144805496741076283</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1004</GENERATION_STAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4125931756867080767</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1005</GENERATION_STAMP>
+        <GENSTAMP>1005</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>1562413691487277050</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1006</GENERATION_STAMP>
+        <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
         <USERNAME>todd</USERNAME>
@@ -379,7 +379,7 @@
       <BLOCK>
         <BLOCK_ID>6084289468290363112</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1008</GENERATION_STAMP>
+        <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -398,12 +398,12 @@
       <BLOCK>
         <BLOCK_ID>6084289468290363112</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1008</GENERATION_STAMP>
+        <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4219431127125026105</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1009</GENERATION_STAMP>
+        <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -422,17 +422,17 @@
       <BLOCK>
         <BLOCK_ID>6084289468290363112</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1008</GENERATION_STAMP>
+        <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4219431127125026105</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1009</GENERATION_STAMP>
+        <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-1765119074945211374</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1010</GENERATION_STAMP>
+        <GENSTAMP>1010</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -451,17 +451,17 @@
       <BLOCK>
         <BLOCK_ID>6084289468290363112</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1008</GENERATION_STAMP>
+        <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4219431127125026105</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1009</GENERATION_STAMP>
+        <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-1765119074945211374</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1010</GENERATION_STAMP>
+        <GENSTAMP>1010</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
         <USERNAME>todd</USERNAME>
@@ -511,7 +511,7 @@
       <BLOCK>
         <BLOCK_ID>-7448471719302683860</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1012</GENERATION_STAMP>
+        <GENSTAMP>1012</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -530,12 +530,12 @@
       <BLOCK>
         <BLOCK_ID>-7448471719302683860</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1012</GENERATION_STAMP>
+        <GENSTAMP>1012</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-8051065559769974521</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1013</GENERATION_STAMP>
+        <GENSTAMP>1013</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -554,17 +554,17 @@
       <BLOCK>
         <BLOCK_ID>-7448471719302683860</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1012</GENERATION_STAMP>
+        <GENSTAMP>1012</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-8051065559769974521</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1013</GENERATION_STAMP>
+        <GENSTAMP>1013</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>3808670437711973616</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1014</GENERATION_STAMP>
+        <GENSTAMP>1014</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -583,17 +583,17 @@
       <BLOCK>
         <BLOCK_ID>-7448471719302683860</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1012</GENERATION_STAMP>
+        <GENSTAMP>1012</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-8051065559769974521</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1013</GENERATION_STAMP>
+        <GENSTAMP>1013</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>3808670437711973616</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1014</GENERATION_STAMP>
+        <GENSTAMP>1014</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
         <USERNAME>todd</USERNAME>
@@ -722,7 +722,7 @@
       <BLOCK>
         <BLOCK_ID>-357061736603024522</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1016</GENERATION_STAMP>
+        <GENSTAMP>1016</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -734,7 +734,7 @@
       <BLOCK>
         <BLOCK_ID>-357061736603024522</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1016</GENERATION_STAMP>
+        <GENSTAMP>1016</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -769,7 +769,7 @@
       <BLOCK>
         <BLOCK_ID>-357061736603024522</BLOCK_ID>
         <NUM_BYTES>11</NUM_BYTES>
-        <GENERATION_STAMP>1017</GENERATION_STAMP>
+        <GENSTAMP>1017</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
         <USERNAME>todd</USERNAME>
@@ -779,9 +779,17 @@
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
+    <OPCODE>OP_SET_OWNER</OPCODE>
     <DATA>
       <TXID>59</TXID>
+      <SRC>/file_create</SRC>
+      <GROUPNAME>newGroup</GROUPNAME>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
+    <DATA>
+      <TXID>60</TXID>
     </DATA>
   </RECORD>
 </EDITS>
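
The resource diff above reflects the edit-log XML tag rename from <GENERATION_STAMP> to <GENSTAMP>, plus the new OP_SET_OWNER record inserted at TXID 59. Any tooling that consumes this XML has to follow the rename; the following is a hypothetical scan (file name and helper class are assumptions, not part of the commit) showing how the new tag would be read with standard StAX:

import java.io.FileInputStream;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamConstants;
import javax.xml.stream.XMLStreamReader;

public class GenstampScan {
  public static void main(String[] args) throws Exception {
    // stream the offline-edits XML rather than loading it whole
    XMLStreamReader r = XMLInputFactory.newInstance()
        .createXMLStreamReader(new FileInputStream("editsStored.xml"));
    while (r.hasNext()) {
      if (r.next() == XMLStreamConstants.START_ELEMENT
          && "GENSTAMP".equals(r.getLocalName())) {
        // getElementText() consumes up to the matching end tag
        System.out.println("genstamp = " + r.getElementText());
      }
    }
    r.close();
  }
}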


