hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1196458 [7/9] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/fs/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ ...
Date: Wed, 02 Nov 2011 05:35:26 GMT
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Wed Nov  2 05:34:31 2011
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.na
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.PrintStream;
 import java.net.URI;
@@ -29,6 +28,7 @@ import java.util.EnumSet;
 
 import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
 import javax.ws.rs.DefaultValue;
@@ -42,16 +42,20 @@ import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.Response.Status;
 import javax.ws.rs.core.StreamingOutput;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
@@ -64,8 +68,9 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
 import org.apache.hadoop.hdfs.web.resources.DelegationParam;
+import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
-import org.apache.hadoop.hdfs.web.resources.DstPathParam;
+import org.apache.hadoop.hdfs.web.resources.DestinationParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.GroupParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
@@ -84,7 +89,6 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
@@ -100,6 +104,8 @@ import com.sun.jersey.spi.container.Reso
 public class NamenodeWebHdfsMethods {
   public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
 
+  private static final UriFsPathParam ROOT = new UriFsPathParam("");
+  
   private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<String>(); 
 
   /** @return the remote client address. */
@@ -109,6 +115,7 @@ public class NamenodeWebHdfsMethods {
 
   private @Context ServletContext context;
   private @Context HttpServletRequest request;
+  private @Context HttpServletResponse response;
 
   private static DatanodeInfo chooseDatanode(final NameNode namenode,
       final String path, final HttpOpParam.Op op, final long openOffset
@@ -118,6 +125,9 @@ public class NamenodeWebHdfsMethods {
         || op == PostOpParam.Op.APPEND) {
       final NamenodeProtocols np = namenode.getRpcServer();
       final HdfsFileStatus status = np.getFileInfo(path);
+      if (status == null) {
+        throw new FileNotFoundException("File " + path + " not found.");
+      }
       final long len = status.getLen();
       if (op == GetOpParam.Op.OPEN && (openOffset < 0L || openOffset >= len)) {
         throw new IOException("Offset=" + openOffset + " out of the range [0, "
@@ -143,11 +153,11 @@ public class NamenodeWebHdfsMethods {
       final NameNode namenode, final UserGroupInformation ugi,
       final String renewer) throws IOException {
     final Credentials c = DelegationTokenSecretManager.createCredentials(
-        namenode, ugi, request.getUserPrincipal().getName());
+        namenode, ugi,
+        renewer != null? renewer: request.getUserPrincipal().getName());
     final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
-    t.setService(new Text(SecurityUtil.buildDTServiceName(
-        NameNode.getUri(namenode.getNameNodeAddress()),
-        NameNode.DEFAULT_PORT)));
+    t.setKind(WebHdfsFileSystem.TOKEN_KIND);
+    SecurityUtil.setTokenService(t, namenode.getNameNodeAddress());
     return t;
   }
 
@@ -173,7 +183,7 @@ public class NamenodeWebHdfsMethods {
     final String query = op.toQueryString()
         + '&' + new UserParam(ugi) + delegationQuery
         + Param.toSortedString("&", parameters);
-    final String uripath = "/" + WebHdfsFileSystem.PATH_PREFIX + path;
+    final String uripath = WebHdfsFileSystem.PATH_PREFIX + path;
 
     final URI uri = new URI("http", null, dn.getHostName(), dn.getInfoPort(),
         uripath, query, null);
@@ -183,21 +193,61 @@ public class NamenodeWebHdfsMethods {
     return uri;
   }
 
+  /** Handle HTTP PUT request for the root. */
+  @PUT
+  @Path("/")
+  @Consumes({"*/*"})
+  @Produces({MediaType.APPLICATION_JSON})
+  public Response putRoot(
+      @Context final UserGroupInformation ugi,
+      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
+          final DelegationParam delegation,
+      @QueryParam(PutOpParam.NAME) @DefaultValue(PutOpParam.DEFAULT)
+          final PutOpParam op,
+      @QueryParam(DestinationParam.NAME) @DefaultValue(DestinationParam.DEFAULT)
+          final DestinationParam destination,
+      @QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT)
+          final OwnerParam owner,
+      @QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT)
+          final GroupParam group,
+      @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
+          final PermissionParam permission,
+      @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT)
+          final OverwriteParam overwrite,
+      @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
+          final BufferSizeParam bufferSize,
+      @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
+          final ReplicationParam replication,
+      @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT)
+          final BlockSizeParam blockSize,
+      @QueryParam(ModificationTimeParam.NAME) @DefaultValue(ModificationTimeParam.DEFAULT)
+          final ModificationTimeParam modificationTime,
+      @QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
+          final AccessTimeParam accessTime,
+      @QueryParam(RenameOptionSetParam.NAME) @DefaultValue(RenameOptionSetParam.DEFAULT)
+          final RenameOptionSetParam renameOptions,
+      @QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT) 
+          final TokenArgumentParam delegationTokenArgument
+      ) throws IOException, InterruptedException {
+    return put(ugi, delegation, ROOT, op, destination, owner, group,
+        permission, overwrite, bufferSize, replication, blockSize,
+        modificationTime, accessTime, renameOptions, delegationTokenArgument);
+  }
+
   /** Handle HTTP PUT request. */
   @PUT
   @Path("{" + UriFsPathParam.NAME + ":.*}")
   @Consumes({"*/*"})
   @Produces({MediaType.APPLICATION_JSON})
   public Response put(
-      final InputStream in,
       @Context final UserGroupInformation ugi,
       @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
           final DelegationParam delegation,
       @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
       @QueryParam(PutOpParam.NAME) @DefaultValue(PutOpParam.DEFAULT)
           final PutOpParam op,
-      @QueryParam(DstPathParam.NAME) @DefaultValue(DstPathParam.DEFAULT)
-          final DstPathParam dstPath,
+      @QueryParam(DestinationParam.NAME) @DefaultValue(DestinationParam.DEFAULT)
+          final DestinationParam destination,
       @QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT)
           final OwnerParam owner,
       @QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT)
@@ -217,16 +267,21 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
           final AccessTimeParam accessTime,
       @QueryParam(RenameOptionSetParam.NAME) @DefaultValue(RenameOptionSetParam.DEFAULT)
-          final RenameOptionSetParam renameOptions
-      ) throws IOException, URISyntaxException, InterruptedException {
+          final RenameOptionSetParam renameOptions,
+      @QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT) 
+          final TokenArgumentParam delegationTokenArgument
+      ) throws IOException, InterruptedException {
 
     if (LOG.isTraceEnabled()) {
       LOG.trace(op + ": " + path + ", ugi=" + ugi
-          + Param.toSortedString(", ", dstPath, owner, group, permission,
+          + Param.toSortedString(", ", destination, owner, group, permission,
               overwrite, bufferSize, replication, blockSize,
               modificationTime, accessTime, renameOptions));
     }
 
+    //clear content type
+    response.setContentType(null);
+
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
@@ -234,6 +289,7 @@ public class NamenodeWebHdfsMethods {
         try {
 
     final String fullpath = path.getAbsolutePath();
+    final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final NamenodeProtocols np = namenode.getRpcServer();
 
@@ -255,24 +311,28 @@ public class NamenodeWebHdfsMethods {
     {
       final EnumSet<Options.Rename> s = renameOptions.getValue();
       if (s.isEmpty()) {
-        @SuppressWarnings("deprecation")
-        final boolean b = np.rename(fullpath, dstPath.getValue());
+        final boolean b = np.rename(fullpath, destination.getValue());
         final String js = JsonUtil.toJsonString("boolean", b);
         return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
       } else {
-        np.rename2(fullpath, dstPath.getValue(),
+        np.rename2(fullpath, destination.getValue(),
             s.toArray(new Options.Rename[s.size()]));
         return Response.ok().type(MediaType.APPLICATION_JSON).build();
       }
     }
     case SETREPLICATION:
     {
-      final boolean b = np.setReplication(fullpath, replication.getValue());
+      final boolean b = np.setReplication(fullpath, replication.getValue(conf));
       final String js = JsonUtil.toJsonString("boolean", b);
-      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+      final ResponseBuilder r = b? Response.ok(): Response.status(Status.FORBIDDEN);
+      return r.entity(js).type(MediaType.APPLICATION_JSON).build();
     }
     case SETOWNER:
     {
+      if (owner.getValue() == null && group.getValue() == null) {
+        throw new IllegalArgumentException("Both owner and group are empty.");
+      }
+
       np.setOwner(fullpath, owner.getValue(), group.getValue());
       return Response.ok().type(MediaType.APPLICATION_JSON).build();
     }
@@ -286,6 +346,21 @@ public class NamenodeWebHdfsMethods {
       np.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue());
       return Response.ok().type(MediaType.APPLICATION_JSON).build();
     }
+    case RENEWDELEGATIONTOKEN:
+    {
+      final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
+      token.decodeFromUrlString(delegationTokenArgument.getValue());
+      final long expiryTime = np.renewDelegationToken(token);
+      final String js = JsonUtil.toJsonString("long", expiryTime);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
+    case CANCELDELEGATIONTOKEN:
+    {
+      final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
+      token.decodeFromUrlString(delegationTokenArgument.getValue());
+      np.cancelDelegationToken(token);
+      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
@@ -297,13 +372,29 @@ public class NamenodeWebHdfsMethods {
     });
   }
 
+  /** Handle HTTP POST request for the root. */
+  @POST
+  @Path("/")
+  @Consumes({"*/*"})
+  @Produces({MediaType.APPLICATION_JSON})
+  public Response postRoot(
+      @Context final UserGroupInformation ugi,
+      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
+          final DelegationParam delegation,
+      @QueryParam(PostOpParam.NAME) @DefaultValue(PostOpParam.DEFAULT)
+          final PostOpParam op,
+      @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
+          final BufferSizeParam bufferSize
+      ) throws IOException, InterruptedException {
+    return post(ugi, delegation, ROOT, op, bufferSize);
+  }
+
   /** Handle HTTP POST request. */
   @POST
   @Path("{" + UriFsPathParam.NAME + ":.*}")
   @Consumes({"*/*"})
   @Produces({MediaType.APPLICATION_JSON})
   public Response post(
-      final InputStream in,
       @Context final UserGroupInformation ugi,
       @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
           final DelegationParam delegation,
@@ -312,13 +403,16 @@ public class NamenodeWebHdfsMethods {
           final PostOpParam op,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize
-      ) throws IOException, URISyntaxException, InterruptedException {
+      ) throws IOException, InterruptedException {
 
     if (LOG.isTraceEnabled()) {
       LOG.trace(op + ": " + path + ", ugi=" + ugi
           + Param.toSortedString(", ", bufferSize));
     }
 
+    //clear content type
+    response.setContentType(null);
+
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
@@ -346,13 +440,11 @@ public class NamenodeWebHdfsMethods {
     });
   }
 
-  private static final UriFsPathParam ROOT = new UriFsPathParam("");
-
   /** Handle HTTP GET request for the root. */
   @GET
   @Path("/")
   @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
-  public Response root(
+  public Response getRoot(
       @Context final UserGroupInformation ugi,
       @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
           final DelegationParam delegation,
@@ -389,13 +481,15 @@ public class NamenodeWebHdfsMethods {
           final RenewerParam renewer,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize
-      ) throws IOException, URISyntaxException, InterruptedException {
+      ) throws IOException, InterruptedException {
 
     if (LOG.isTraceEnabled()) {
       LOG.trace(op + ": " + path + ", ugi=" + ugi
           + Param.toSortedString(", ", offset, length, renewer, bufferSize));
     }
 
+    //clear content type
+    response.setContentType(null);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
@@ -419,14 +513,18 @@ public class NamenodeWebHdfsMethods {
       final long offsetValue = offset.getValue();
       final Long lengthValue = length.getValue();
       final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
-          offsetValue, lengthValue != null? lengthValue: offsetValue + 1);
+          offsetValue, lengthValue != null? lengthValue: Long.MAX_VALUE);
       final String js = JsonUtil.toJsonString(locatedblocks);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case GETFILESTATUS:
     {
       final HdfsFileStatus status = np.getFileInfo(fullpath);
-      final String js = JsonUtil.toJsonString(status);
+      if (status == null) {
+        throw new FileNotFoundException("File does not exist: " + fullpath);
+      }
+
+      final String js = JsonUtil.toJsonString(status, true);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case LISTSTATUS:
@@ -482,33 +580,49 @@ public class NamenodeWebHdfsMethods {
       @Override
       public void write(final OutputStream outstream) throws IOException {
         final PrintStream out = new PrintStream(outstream);
-        out.println("{\"" + HdfsFileStatus[].class.getSimpleName() + "\":[");
+        out.println("{\"" + HdfsFileStatus.class.getSimpleName() + "es\":{\""
+            + HdfsFileStatus.class.getSimpleName() + "\":[");
 
         final HdfsFileStatus[] partial = first.getPartialListing();
         if (partial.length > 0) {
-          out.print(JsonUtil.toJsonString(partial[0]));
+          out.print(JsonUtil.toJsonString(partial[0], false));
         }
         for(int i = 1; i < partial.length; i++) {
           out.println(',');
-          out.print(JsonUtil.toJsonString(partial[i]));
+          out.print(JsonUtil.toJsonString(partial[i], false));
         }
 
         for(DirectoryListing curr = first; curr.hasMore(); ) { 
           curr = getDirectoryListing(np, p, curr.getLastName());
           for(HdfsFileStatus s : curr.getPartialListing()) {
             out.println(',');
-            out.print(JsonUtil.toJsonString(s));
+            out.print(JsonUtil.toJsonString(s, false));
           }
         }
         
-        out.println("]}");
+        out.println();
+        out.println("]}}");
       }
     };
   }
 
+  /** Handle HTTP DELETE request for the root. */
+  @DELETE
+  @Path("/")
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response deleteRoot(
+      @Context final UserGroupInformation ugi,
+      @QueryParam(DeleteOpParam.NAME) @DefaultValue(DeleteOpParam.DEFAULT)
+          final DeleteOpParam op,
+      @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT)
+          final RecursiveParam recursive
+      ) throws IOException, InterruptedException {
+    return delete(ugi, ROOT, op, recursive);
+  }
+
   /** Handle HTTP DELETE request. */
   @DELETE
-  @Path("{path:.*}")
+  @Path("{" + UriFsPathParam.NAME + ":.*}")
   @Produces(MediaType.APPLICATION_JSON)
   public Response delete(
       @Context final UserGroupInformation ugi,
@@ -524,6 +638,9 @@ public class NamenodeWebHdfsMethods {
           + Param.toSortedString(", ", recursive));
     }
 
+    //clear content type
+    response.setContentType(null);
+
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException {
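
The hunks above add root-path ("/") handlers for PUT, POST and DELETE, rename DstPathParam to DestinationParam, clear the response Content-Type, and introduce the RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN PUT operations. For illustration only, a minimal client-side sketch of the renew operation over plain HTTP using JDK classes; the namenode host/port, the /webhdfs/v1 prefix (taken to be WebHdfsFileSystem.PATH_PREFIX) and the token string are placeholders, and the "op"/"token" query parameter names follow PutOpParam and TokenArgumentParam above.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    // Sketch: renew a delegation token against the WebHDFS root path.
    public class RenewTokenSketch {
      public static void main(String[] args) throws Exception {
        String namenode = "http://namenode.example.com:50070";     // placeholder address
        String encodedToken = args.length > 0 ? args[0] : "<url-encoded-token>";
        URL url = new URL(namenode + "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token="
            + encodedToken);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");              // served by putRoot(...) above
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()))) {
          // Expected body, per the RENEWDELEGATIONTOKEN case above: {"long": <new expiry>}
          System.out.println(in.readLine());
        } finally {
          conn.disconnect();
        }
      }
    }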

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java Wed Nov  2 05:34:31 2011
@@ -67,7 +67,6 @@ public class BlockCommand extends Datano
   public BlockCommand(int action, String poolId,
       List<BlockTargetPair> blocktargetlist) {
     super(action);
-
     this.poolId = poolId;
     blocks = new Block[blocktargetlist.size()]; 
     targets = new DatanodeInfo[blocks.length][];
@@ -85,12 +84,21 @@ public class BlockCommand extends Datano
    * @param blocks blocks related to the action
    */
   public BlockCommand(int action, String poolId, Block blocks[]) {
+    this(action, poolId, blocks, EMPTY_TARGET);
+  }
+
+  /**
+   * Create BlockCommand for the given action
+   * @param blocks blocks related to the action
+   */
+  public BlockCommand(int action, String poolId, Block[] blocks,
+      DatanodeInfo[][] targets) {
     super(action);
     this.poolId = poolId;
     this.blocks = blocks;
-    this.targets = EMPTY_TARGET;
+    this.targets = targets;
   }
-
+  
   public String getBlockPoolId() {
     return poolId;
   }
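
The new four-argument constructor lets callers supply per-block transfer targets directly instead of defaulting to EMPTY_TARGET. A minimal sketch of its use, assuming the existing DatanodeProtocol.DNA_TRANSFER action code; any valid action would do.

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

    // Sketch: build a BlockCommand with explicit per-block targets using the
    // (action, poolId, blocks, targets) constructor added above.
    public class BlockCommandSketch {
      static BlockCommand transferCommand(String poolId, Block[] blocks,
          DatanodeInfo[][] targets) {
        return new BlockCommand(DatanodeProtocol.DNA_TRANSFER, poolId, blocks, targets);
      }
    }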

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java Wed Nov  2 05:34:31 2011
@@ -117,8 +117,12 @@ public class BlockRecoveryCommand extend
    * the specified capacity for recovering blocks.
    */
   public BlockRecoveryCommand(int capacity) {
+    this(new ArrayList<RecoveringBlock>(capacity));
+  }
+  
+  public BlockRecoveryCommand(Collection<RecoveringBlock> blocks) {
     super(DatanodeProtocol.DNA_RECOVERBLOCK);
-    recoveringBlocks = new ArrayList<RecoveringBlock>(capacity);
+    recoveringBlocks = blocks;
   }
 
   /**
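
The capacity constructor now delegates to a new constructor that accepts a ready-made Collection of RecoveringBlock. A minimal sketch under that assumption:

    import java.util.ArrayList;
    import java.util.Collection;

    import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
    import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

    // Sketch: construct the command from an existing collection instead of
    // adding recovering blocks one at a time.
    public class BlockRecoverySketch {
      static BlockRecoveryCommand fromBlocks(Collection<RecoveringBlock> blocks) {
        return new BlockRecoveryCommand(new ArrayList<RecoveringBlock>(blocks));
      }
    }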

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java Wed Nov  2 05:34:31 2011
@@ -17,17 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
+import org.apache.avro.reflect.Union;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.avro.reflect.Union;
 
 /**
  * Base class for data-node command.
@@ -36,55 +28,13 @@ import org.apache.avro.reflect.Union;
 
 // Declare subclasses for Avro's denormalized representation
 @Union({Void.class,
-      DatanodeCommand.Register.class, DatanodeCommand.Finalize.class,
+      RegisterCommand.class, FinalizeCommand.class,
       BlockCommand.class, UpgradeCommand.class,
       BlockRecoveryCommand.class, KeyUpdateCommand.class})
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public abstract class DatanodeCommand extends ServerCommand {
-  static class Register extends DatanodeCommand {
-    private Register() {super(DatanodeProtocol.DNA_REGISTER);}
-    public void readFields(DataInput in) {}
-    public void write(DataOutput out) {}
-  }
-
-  public static class Finalize extends DatanodeCommand {
-    String blockPoolId;
-    private Finalize() {
-      super(DatanodeProtocol.DNA_FINALIZE);
-    }
-    
-    public Finalize(String bpid) {
-      super(DatanodeProtocol.DNA_FINALIZE);
-      blockPoolId = bpid;
-    }
-    
-    public String getBlockPoolId() {
-      return blockPoolId;
-    }
-    
-    public void readFields(DataInput in) throws IOException {
-      blockPoolId = WritableUtils.readString(in);
-    }
-    public void write(DataOutput out) throws IOException {
-      WritableUtils.writeString(out, blockPoolId);
-    }
-  }
-
-  static {                                      // register a ctor
-    WritableFactories.setFactory(Register.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Register();}
-        });
-    WritableFactories.setFactory(Finalize.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Finalize();}
-        });
-  }
-
-  public static final DatanodeCommand REGISTER = new Register();
-  
   public DatanodeCommand() {
     super();
   }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Wed Nov  2 05:34:31 2011
@@ -22,10 +22,11 @@ import java.io.*;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
+import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeWireProtocol;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
 
@@ -45,7 +46,14 @@ import org.apache.avro.reflect.Nullable;
 @InterfaceAudience.Private
 public interface DatanodeProtocol extends VersionedProtocol {
   /**
-   * 28: Add Balancer Bandwidth Command protocol.
+   * This class is used by both the Namenode (client) and BackupNode (server) 
+   * to insulate from the protocol serialization.
+   * 
+   * If you are adding/changing DN's interface then you need to 
+   * change both this class and ALSO
+   * {@link DatanodeWireProtocol}.
+   * These changes need to be done in a compatible fashion as described in 
+   * {@link ClientNamenodeWireProtocol}
    */
   public static final long versionID = 28L;
   

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Wed Nov  2 05:34:31 2011
@@ -63,9 +63,14 @@ implements Writable, NodeRegistration {
    * Create DatanodeRegistration
    */
   public DatanodeRegistration(String nodeName) {
+    this(nodeName, new StorageInfo(), new ExportedBlockKeys());
+  }
+  
+  public DatanodeRegistration(String nodeName, StorageInfo info,
+      ExportedBlockKeys keys) {
     super(nodeName);
-    this.storageInfo = new StorageInfo();
-    this.exportedKeys = new ExportedBlockKeys();
+    this.storageInfo = info;
+    this.exportedKeys = keys;
   }
   
   public void setStorageInfo(StorageInfo storage) {
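
The new three-argument constructor takes the storage info and exported block keys up front rather than mutating the defaults afterwards. A minimal sketch; the import paths for StorageInfo and ExportedBlockKeys are assumed.

    import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
    import org.apache.hadoop.hdfs.server.common.StorageInfo;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

    // Sketch: build a fully initialized registration in one call.
    public class DatanodeRegistrationSketch {
      static DatanodeRegistration create(String nodeName, StorageInfo info,
          ExportedBlockKeys keys) {
        return new DatanodeRegistration(nodeName, info, keys);
      }
    }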

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java Wed Nov  2 05:34:31 2011
@@ -25,7 +25,9 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeWireProtocol;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
 
@@ -39,6 +41,23 @@ public interface InterDatanodeProtocol e
   public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);
 
   /**
+   * Until version 9, this class InterDatanodeProtocol served as both
+   * the interface to the DN AND the RPC protocol used to communicate with the 
+   * DN.
+   * 
+   * Post version 6L (release 23 of Hadoop), the protocol is implemented in
+   * {@literal ../protocolR23Compatible/InterDatanodeWireProtocol}
+   * 
+   * This class is used by both the DN to insulate from the protocol 
+   * serialization.
+   * 
+   * If you are adding/changing DN's interface then you need to 
+   * change both this class and ALSO
+   * {@link InterDatanodeWireProtocol}
+   * These changes need to be done in a compatible fashion as described in 
+   * {@link ClientNamenodeWireProtocol}
+   * 
+   * The log of historical changes can be retrieved from the svn).
    * 6: Add block pool ID to Block
    */
   public static final long versionID = 6L;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java Wed Nov  2 05:34:31 2011
@@ -21,6 +21,8 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
+import org.apache.hadoop.hdfs.protocolR23Compatible.JournalWireProtocol;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
 
@@ -33,6 +35,17 @@ import org.apache.hadoop.security.Kerber
     clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
 @InterfaceAudience.Private
 public interface JournalProtocol extends VersionedProtocol {
+  /**
+   * 
+   * This class is used by both the Namenode (client) and BackupNode (server) 
+   * to insulate from the protocol serialization.
+   * 
+   * If you are adding/changing DN's interface then you need to 
+   * change both this class and ALSO
+   * {@link JournalWireProtocol}.
+   * These changes need to be done in a compatible fashion as described in 
+   * {@link ClientNamenodeWireProtocol}
+   */
   public static final long versionID = 1L;
 
   /**

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Wed Nov  2 05:34:31 2011
@@ -38,9 +38,21 @@ import org.apache.hadoop.security.Kerber
 @InterfaceAudience.Private
 public interface NamenodeProtocol extends VersionedProtocol {
   /**
-   * Compared to the previous version the following changes have been introduced:
-   * (Only the latest change is reflected.
-   * The log of historical changes can be retrieved from the svn).
+   * Until version 6L, this class served as both
+   * the client interface to the NN AND the RPC protocol used to 
+   * communicate with the NN.
+   * 
+   * Post version 70 (release 23 of Hadoop), the protocol is implemented in
+   * {@literal ../protocolR23Compatible/ClientNamenodeWireProtocol}
+   * 
+   * This class is used by both the DFSClient and the 
+   * NN server side to insulate from the protocol serialization.
+   * 
+   * If you are adding/changing NN's interface then you need to 
+   * change both this class and ALSO
+   * {@link org.apache.hadoop.hdfs.protocolR23Compatible.NamenodeWireProtocol}.
+   * These changes need to be done in a compatible fashion as described in 
+   * {@link org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol}
    * 
    * 6: Switch to txid-based file naming for image and edits
    */
@@ -62,7 +74,7 @@ public interface NamenodeProtocol extend
    * @param datanode  a data node
    * @param size      requested size
    * @return          a list of blocks & their locations
-   * @throws RemoteException if size is less than or equal to 0 or
+   * @throws IOException if size is less than or equal to 0 or
                                    datanode does not exist
    */
   public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java Wed Nov  2 05:34:31 2011
@@ -52,10 +52,9 @@ implements NodeRegistration {
                               String httpAddress,
                               StorageInfo storageInfo,
                               NamenodeRole role) {
-    super();
+    super(storageInfo);
     this.rpcAddress = address;
     this.httpAddress = httpAddress;
-    this.setStorageInfo(storageInfo);
     this.role = role;
   }
 
@@ -64,6 +63,10 @@ implements NodeRegistration {
     return rpcAddress;
   }
   
+  public String getHttpAddress() {
+    return httpAddress;
+  }
+  
   @Override // NodeRegistration
   public String getRegistrationID() {
     return Storage.getRegistrationID(this);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Wed Nov  2 05:34:31 2011
@@ -1146,10 +1146,9 @@ public class DFSAdmin extends FsShell {
         conf.get(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, ""));
 
     // Create the client
-    ClientDatanodeProtocol dnProtocol = RPC.getProxy(
-        ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
-        datanodeAddr, getUGI(), conf, NetUtils.getSocketFactory(conf,
-            ClientDatanodeProtocol.class));
+    ClientDatanodeProtocol dnProtocol =     
+        DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
+            NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
     return dnProtocol;
   }
   

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Wed Nov  2 05:34:31 2011
@@ -39,14 +39,17 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -149,34 +152,31 @@ public class DelegationTokenFetcher {
                 DataInputStream in = new DataInputStream(
                     new ByteArrayInputStream(token.getIdentifier()));
                 id.readFields(in);
-                if(LOG.isDebugEnabled()) {
-                  LOG.debug("Token (" + id + ") for " + token.getService());
-                }
+                System.out.println("Token (" + id + ") for " + 
+                                   token.getService());
               }
-              return null;
-            }
-            
-            if (webUrl != null) {
-              if (renew) {
-                long result;
-                for (Token<?> token : readTokens(tokenFile, conf)) {
-                  result = renewDelegationToken(webUrl,
-                      (Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                	  LOG.debug("Renewed token via " + webUrl + " for "
-                          + token.getService() + " until: " + new Date(result));
+            } else if (cancel) {
+              for(Token<?> token: readTokens(tokenFile, conf)) {
+                if (token.isManaged()) {
+                  token.cancel(conf);
+                  if (LOG.isDebugEnabled()) {
+                    LOG.debug("Cancelled token for " + token.getService());
                   }
                 }
-              } else if (cancel) {
-                for (Token<?> token : readTokens(tokenFile, conf)) {
-                  cancelDelegationToken(webUrl,
-                      (Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                    LOG.debug("Cancelled token via " + webUrl + " for "
-                	    + token.getService());
+              }
+            } else if (renew) {
+              for (Token<?> token : readTokens(tokenFile, conf)) {
+                if (token.isManaged()) {
+                  long result = token.renew(conf);
+                  if (LOG.isDebugEnabled()) {
+                    LOG.debug("Renewed token for " + token.getService()
+                        + " until: " + new Date(result));
                   }
                 }
-              } else {
+              }
+            } else {
+              // otherwise we are fetching
+              if (webUrl != null) {
                 Credentials creds = getDTfromRemote(webUrl, renewer);
                 creds.writeTokenStorageFile(tokenFile, conf);
                 for (Token<?> token : creds.getAllTokens()) {
@@ -185,29 +185,8 @@ public class DelegationTokenFetcher {
                         + token.getService() + " into " + tokenFile);
                   }
                 }
-              }
-            } else {
-              FileSystem fs = FileSystem.get(conf);
-              if (cancel) {
-                for (Token<?> token : readTokens(tokenFile, conf)) {
-                  ((DistributedFileSystem) fs)
-                      .cancelDelegationToken((Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                    LOG.debug("Cancelled token for "
-                        + token.getService());
-                  }
-                }
-              } else if (renew) {
-                long result;
-                for (Token<?> token : readTokens(tokenFile, conf)) {
-                  result = ((DistributedFileSystem) fs)
-                      .renewDelegationToken((Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                    LOG.debug("Renewed token for " + token.getService()
-                        + " until: " + new Date(result));
-                  }
-                }
               } else {
+                FileSystem fs = FileSystem.get(conf);
                 Token<?> token = fs.getDelegationToken(renewer);
                 Credentials cred = new Credentials();
                 cred.addToken(token.getService(), token);
@@ -230,8 +209,9 @@ public class DelegationTokenFetcher {
     try {
       StringBuffer url = new StringBuffer();
       if (renewer != null) {
-        url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC).append("?").
-        append(GetDelegationTokenServlet.RENEWER).append("=").append(renewer);
+        url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC)
+           .append("?").append(GetDelegationTokenServlet.RENEWER).append("=")
+           .append(renewer);
       } else {
         url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
       }
@@ -248,6 +228,12 @@ public class DelegationTokenFetcher {
       Credentials ts = new Credentials();
       dis = new DataInputStream(in);
       ts.readFields(dis);
+      for(Token<?> token: ts.getAllTokens()) {
+        token.setKind(HftpFileSystem.TOKEN_KIND);
+        token.setService(new Text(SecurityUtil.buildDTServiceName
+                                   (remoteURL.toURI(), 
+                                    DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT)));
+      }
       return ts;
     } catch (Exception e) {
       throw new IOException("Unable to obtain remote token", e);
@@ -295,7 +281,8 @@ public class DelegationTokenFetcher {
 
       IOUtils.cleanup(LOG, in);
       if(e!=null) {
-        LOG.info("rethrowing exception from HTTP request: " + e.getLocalizedMessage());
+        LOG.info("rethrowing exception from HTTP request: " + 
+                 e.getLocalizedMessage());
         throw e;
       }
       throw ie;
@@ -383,7 +370,8 @@ public class DelegationTokenFetcher {
 
       IOUtils.cleanup(LOG, in);
       if(e!=null) {
-        LOG.info("rethrowing exception from HTTP request: " + e.getLocalizedMessage());
+        LOG.info("rethrowing exception from HTTP request: " + 
+                 e.getLocalizedMessage());
         throw e;
       }
       throw ie;
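
The rewritten cancel/renew branches above rely on the generic Token API (isManaged(), renew(conf), cancel(conf)) instead of casting to DistributedFileSystem or going through the HTTP servlets. A minimal sketch of that pattern:

    import java.util.Date;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.token.Token;

    // Sketch: renew every managed token in a credentials set, mirroring the
    // renew branch above.
    public class TokenMaintenanceSketch {
      static void renewAll(Credentials creds, Configuration conf) throws Exception {
        for (Token<?> token : creds.getAllTokens()) {
          if (token.isManaged()) {           // a TokenRenewer is registered for this kind
            long expiry = token.renew(conf);
            System.out.println("Renewed " + token.getService()
                + " until " + new Date(expiry));
          }
        }
      }
    }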

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java Wed Nov  2 05:34:31 2011
@@ -17,12 +17,17 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import java.util.Map;
+import java.io.IOException;
 import java.util.Properties;
 
+import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
@@ -41,30 +46,36 @@ public class AuthFilter extends Authenti
    * The prefix is removed from the returned property names.
    *
    * @param prefix parameter not used.
-   * @param config parameter not used.
+   * @param config parameter contains the initialization values.
    * @return Hadoop-Auth configuration properties.
+   * @throws ServletException 
    */
   @Override
-  protected Properties getConfiguration(String prefix, FilterConfig config) {
-    final Configuration conf = new Configuration();
-    final Properties p = new Properties();
-
-    //set authentication type
+  protected Properties getConfiguration(String prefix, FilterConfig config)
+      throws ServletException {
+    final Properties p = super.getConfiguration(CONF_PREFIX, config);
+    // set authentication type
     p.setProperty(AUTH_TYPE, UserGroupInformation.isSecurityEnabled()?
         KerberosAuthenticationHandler.TYPE: PseudoAuthenticationHandler.TYPE);
     //For Pseudo Authentication, allow anonymous.
     p.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
     //set cookie path
     p.setProperty(COOKIE_PATH, "/");
+    return p;
+  }
 
-    //set other configurations with CONF_PREFIX
-    for (Map.Entry<String, String> entry : conf) {
-      final String key = entry.getKey();
-      if (key.startsWith(CONF_PREFIX)) {
-        //remove prefix from the key and set property
-        p.setProperty(key.substring(CONF_PREFIX.length()), conf.get(key));
-      }
+  @Override
+  public void doFilter(ServletRequest request, ServletResponse response,
+      FilterChain filterChain) throws IOException, ServletException {
+    HttpServletRequest httpRequest = (HttpServletRequest) request;
+    String tokenString = httpRequest
+        .getParameter(DelegationParam.NAME);
+    if (tokenString != null) {
+      //Token is present in the url, therefore token will be used for
+      //authentication, bypass kerberos authentication.
+      filterChain.doFilter(httpRequest, response);
+      return;
     }
-    return p;
+    super.doFilter(request, response, filterChain);
   }
 }
\ No newline at end of file
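
The new doFilter override skips the Hadoop-Auth (Kerberos/pseudo) handshake whenever the request already carries a delegation token in its query string. Two illustrative request URLs, with placeholder host and token value; the query parameter name is assumed to be the one defined by DelegationParam.

    // Sketch: which requests take which path through AuthFilter.
    public class AuthFilterSketch {
      // Carries a delegation token -> passed straight through to the WebHDFS servlet.
      static final String WITH_TOKEN =
          "http://namenode.example.com:50070/webhdfs/v1/tmp?op=OPEN&delegation=<token>";
      // No token -> goes through the AuthenticationFilter handshake first.
      static final String WITHOUT_TOKEN =
          "http://namenode.example.com:50070/webhdfs/v1/tmp?op=OPEN";
    }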

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Wed Nov  2 05:34:31 2011
@@ -27,6 +27,7 @@ import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -98,17 +99,18 @@ public class JsonUtil {
   /** Convert an exception object to a Json string. */
   public static String toJsonString(final Exception e) {
     final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("className", e.getClass().getName());
+    m.put("exception", e.getClass().getSimpleName());
     m.put("message", e.getMessage());
+    m.put("javaClassName", e.getClass().getName());
     return toJsonString(RemoteException.class, m);
   }
 
   /** Convert a Json map to a RemoteException. */
   public static RemoteException toRemoteException(final Map<?, ?> json) {
     final Map<?, ?> m = (Map<?, ?>)json.get(RemoteException.class.getSimpleName());
-    final String className = (String)m.get("className");
     final String message = (String)m.get("message");
-    return new RemoteException(className, message);
+    final String javaClassName = (String)m.get("javaClassName");
+    return new RemoteException(javaClassName, message);
   }
 
   private static String toJsonString(final Class<?> clazz, final Object value) {
@@ -133,37 +135,39 @@ public class JsonUtil {
   }
 
   /** Convert a HdfsFileStatus object to a Json string. */
-  public static String toJsonString(final HdfsFileStatus status) {
+  public static String toJsonString(final HdfsFileStatus status,
+      boolean includeType) {
     if (status == null) {
       return null;
-    } else {
-      final Map<String, Object> m = new TreeMap<String, Object>();
-      m.put("localName", status.getLocalName());
-      m.put("isDir", status.isDir());
-      m.put("isSymlink", status.isSymlink());
-      if (status.isSymlink()) {
-        m.put("symlink", status.getSymlink());
-      }
-
-      m.put("len", status.getLen());
-      m.put("owner", status.getOwner());
-      m.put("group", status.getGroup());
-      m.put("permission", toString(status.getPermission()));
-      m.put("accessTime", status.getAccessTime());
-      m.put("modificationTime", status.getModificationTime());
-      m.put("blockSize", status.getBlockSize());
-      m.put("replication", status.getReplication());
-      return toJsonString(HdfsFileStatus.class, m);
     }
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put("localName", status.getLocalName());
+    m.put("isDir", status.isDir());
+    m.put("isSymlink", status.isSymlink());
+    if (status.isSymlink()) {
+      m.put("symlink", status.getSymlink());
+    }
+
+    m.put("len", status.getLen());
+    m.put("owner", status.getOwner());
+    m.put("group", status.getGroup());
+    m.put("permission", toString(status.getPermission()));
+    m.put("accessTime", status.getAccessTime());
+    m.put("modificationTime", status.getModificationTime());
+    m.put("blockSize", status.getBlockSize());
+    m.put("replication", status.getReplication());
+    return includeType ? toJsonString(HdfsFileStatus.class, m) : 
+      JSON.toString(m);
   }
 
   /** Convert a Json map to a HdfsFileStatus object. */
-  public static HdfsFileStatus toFileStatus(final Map<?, ?> json) {
+  public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
     if (json == null) {
       return null;
     }
 
-    final Map<?, ?> m = (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName());
+    final Map<?, ?> m = includesType ? 
+        (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName()) : json;
     final String localName = (String) m.get("localName");
     final boolean isDir = (Boolean) m.get("isDir");
     final boolean isSymlink = (Boolean) m.get("isSymlink");
@@ -287,7 +291,7 @@ public class JsonUtil {
       return array;
     }
   }
-
+  
   /** Convert a LocatedBlock to a Json map. */
   private static Map<String, Object> toJsonMap(final LocatedBlock locatedblock
       ) throws IOException {
@@ -331,7 +335,7 @@ public class JsonUtil {
     } else {
       final Object[] a = new Object[array.size()];
       for(int i = 0; i < array.size(); i++) {
-        a[i] = toJsonMap(array.get(0));
+        a[i] = toJsonMap(array.get(i));
       }
       return a;
     }
@@ -433,7 +437,7 @@ public class JsonUtil {
     m.put("algorithm", checksum.getAlgorithmName());
     m.put("length", checksum.getLength());
     m.put("bytes", StringUtils.byteToHexString(checksum.getBytes()));
-    return toJsonString(MD5MD5CRC32FileChecksum.class, m);
+    return toJsonString(FileChecksum.class, m);
   }
 
   /** Convert a Json map to a MD5MD5CRC32FileChecksum. */
@@ -443,8 +447,7 @@ public class JsonUtil {
       return null;
     }
 
-    final Map<?, ?> m = (Map<?, ?>)json.get(
-        MD5MD5CRC32FileChecksum.class.getSimpleName());
+    final Map<?, ?> m = (Map<?, ?>)json.get(FileChecksum.class.getSimpleName());
     final String algorithm = (String)m.get("algorithm");
     final int length = (int)(long)(Long)m.get("length");
     final byte[] bytes = StringUtils.hexStringToByte((String)m.get("bytes"));
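
The new boolean on toJsonString/toFileStatus controls whether the output is wrapped in the HdfsFileStatus type name; the bare form is what the streaming LISTSTATUS writer above concatenates inside {"HdfsFileStatuses":{"HdfsFileStatus":[...]}}. A minimal sketch of both forms:

    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
    import org.apache.hadoop.hdfs.web.JsonUtil;

    // Sketch: typed vs. bare JSON for a file status.
    public class JsonUtilSketch {
      static String typed(HdfsFileStatus s) { return JsonUtil.toJsonString(s, true); }  // {"HdfsFileStatus":{...}}
      static String bare(HdfsFileStatus s)  { return JsonUtil.toJsonString(s, false); } // {...} only
    }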

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Wed Nov  2 05:34:31 2011
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
@@ -31,6 +32,8 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
@@ -38,25 +41,30 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.ByteRangeInputStream;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
+import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
-import org.apache.hadoop.hdfs.web.resources.DstPathParam;
+import org.apache.hadoop.hdfs.web.resources.DestinationParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.GroupParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
@@ -76,26 +84,47 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
 /** A FileSystem for HDFS over the web. */
-public class WebHdfsFileSystem extends HftpFileSystem {
+public class WebHdfsFileSystem extends FileSystem
+    implements DelegationTokenRenewer.Renewable {
+  public static final Log LOG = LogFactory.getLog(WebHdfsFileSystem.class);
   /** File System URI: {SCHEME}://namenode:port/path/to/file */
   public static final String SCHEME = "webhdfs";
+  /** WebHdfs version. */
+  public static final int VERSION = 1;
   /** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */
-  public static final String PATH_PREFIX = SCHEME;
+  public static final String PATH_PREFIX = "/" + SCHEME + "/v" + VERSION;
 
+  /** SPNEGO authenticator */
   private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
+  /** Delegation token kind */
+  public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
+
+  private static final DelegationTokenRenewer<WebHdfsFileSystem> dtRenewer
+      = new DelegationTokenRenewer<WebHdfsFileSystem>(WebHdfsFileSystem.class);
+  static {
+    dtRenewer.start();
+  }
 
   private final UserGroupInformation ugi;
+  private InetSocketAddress nnAddr;
+  private Token<?> delegationToken;
+  private Token<?> renewToken;
   private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
-  protected Path workingDir;
+  private Path workingDir;
 
   {
     try {
@@ -111,7 +140,47 @@ public class WebHdfsFileSystem extends H
     super.initialize(uri, conf);
     setConf(conf);
 
+    this.nnAddr = NetUtils.createSocketAddr(uri.toString());
     this.workingDir = getHomeDirectory();
+
+    if (UserGroupInformation.isSecurityEnabled()) {
+      initDelegationToken();
+    }
+  }
+
+  protected void initDelegationToken() throws IOException {
+    // look for webhdfs token, then try hdfs
+    final Text serviceName = SecurityUtil.buildTokenService(nnAddr);
+    Token<?> token = webhdfspTokenSelector.selectToken(
+        serviceName, ugi.getTokens());      
+    if (token == null) {
+      token = DelegationTokenSelector.selectHdfsDelegationToken(
+          nnAddr, ugi, getConf());
+    }
+
+    //since we don't already have a token, go get one
+    boolean createdToken = false;
+    if (token == null) {
+      token = getDelegationToken(null);
+      createdToken = (token != null);
+    }
+
+    // security might be disabled
+    if (token != null) {
+      setDelegationToken(token);
+      if (createdToken) {
+        dtRenewer.addRenewAction(this);
+        LOG.debug("Created new DT for " + token.getService());
+      } else {
+        LOG.debug("Found existing DT for " + token.getService());        
+      }
+    }
+  }
+
+  @Override
+  protected int getDefaultPort() {
+    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
+        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
   }
 
   @Override
@@ -148,19 +217,18 @@ public class WebHdfsFileSystem extends H
     return f.isAbsolute()? f: new Path(workingDir, f);
   }
 
-  @SuppressWarnings("unchecked")
-  private static <T> T jsonParse(final InputStream in) throws IOException {
+  private static Map<?, ?> jsonParse(final InputStream in) throws IOException {
     if (in == null) {
       throw new IOException("The input stream is null.");
     }
-    return (T)JSON.parse(new InputStreamReader(in));
+    return (Map<?, ?>)JSON.parse(new InputStreamReader(in));
   }
 
-  private static void validateResponse(final HttpOpParam.Op op,
+  private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
       final HttpURLConnection conn) throws IOException {
     final int code = conn.getResponseCode();
     if (code != op.getExpectedHttpResponseCode()) {
-      final Map<String, Object> m;
+      final Map<?, ?> m;
       try {
         m = jsonParse(conn.getErrorStream());
       } catch(IOException e) {
@@ -169,6 +237,10 @@ public class WebHdfsFileSystem extends H
             + ", message=" + conn.getResponseMessage(), e);
       }
 
+      if (m.get(RemoteException.class.getSimpleName()) == null) {
+        return m;
+      }
+
       final RemoteException re = JsonUtil.toRemoteException(m);
       throw re.unwrapRemoteException(AccessControlException.class,
           DSQuotaExceededException.class,
@@ -179,34 +251,82 @@ public class WebHdfsFileSystem extends H
           NSQuotaExceededException.class,
           UnresolvedPathException.class);
     }
+    return null;
+  }
+
+  /**
+   * Return a URL pointing to given path on the namenode.
+   *
+   * @param path to obtain the URL for
+   * @param query string to append to the path
+   * @return namenode URL referring to the given path
+   * @throws IOException on error constructing the URL
+   */
+  private URL getNamenodeURL(String path, String query) throws IOException {
+    final URL url = new URL("http", nnAddr.getHostName(),
+          nnAddr.getPort(), path + '?' + query);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("url=" + url);
+    }
+    return url;
+  }
+  
+  private String addDt2Query(String query) throws IOException {
+    if (UserGroupInformation.isSecurityEnabled()) {
+      synchronized (this) {
+        if (delegationToken != null) {
+          final String encoded = delegationToken.encodeToUrlString();
+          return query + JspHelper.getDelegationTokenUrlParam(encoded);
+        } // else we are talking to an insecure cluster
+      }
+    }
+    return query;
   }
 
   URL toUrl(final HttpOpParam.Op op, final Path fspath,
       final Param<?,?>... parameters) throws IOException {
     //initialize URI path and query
-    final String path = "/" + PATH_PREFIX
+    final String path = PATH_PREFIX
         + (fspath == null? "/": makeQualified(fspath).toUri().getPath());
     final String query = op.toQueryString()
         + '&' + new UserParam(ugi)
         + Param.toSortedString("&", parameters);
-    final URL url = getNamenodeURL(path, addDelegationTokenParam(query));
+    final URL url;
+    if (op.equals(PutOpParam.Op.RENEWDELEGATIONTOKEN)
+        || op.equals(GetOpParam.Op.GETDELEGATIONTOKEN)) {
+      // Skip adding delegation token for getting or renewing delegation token,
+      // because these operations require kerberos authentication.
+      url = getNamenodeURL(path, query);
+    } else {
+      url = getNamenodeURL(path, addDt2Query(query));
+    }
     if (LOG.isTraceEnabled()) {
       LOG.trace("url=" + url);
     }
     return url;
   }
 
+  private HttpURLConnection getHttpUrlConnection(URL url)
+      throws IOException {
+    final HttpURLConnection conn;
+    try {
+      if (ugi.hasKerberosCredentials()) { 
+        conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
+      } else {
+        conn = (HttpURLConnection)url.openConnection();
+      }
+    } catch (AuthenticationException e) {
+      throw new IOException("Authentication failed, url=" + url, e);
+    }
+    return conn;
+  }
+  
   private HttpURLConnection httpConnect(final HttpOpParam.Op op, final Path fspath,
       final Param<?,?>... parameters) throws IOException {
     final URL url = toUrl(op, fspath, parameters);
 
     //connect and get response
-    final HttpURLConnection conn;
-    try {
-      conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
-    } catch(AuthenticationException e) {
-      throw new IOException("Authentication failed, url=" + url, e);
-    }
+    final HttpURLConnection conn = getHttpUrlConnection(url);
     try {
       conn.setRequestMethod(op.getType().toString());
       conn.setDoOutput(op.getDoOutput());
@@ -216,7 +336,7 @@ public class WebHdfsFileSystem extends H
       }
       conn.connect();
       return conn;
-    } catch(IOException e) {
+    } catch (IOException e) {
       conn.disconnect();
       throw e;
     }
@@ -229,15 +349,15 @@ public class WebHdfsFileSystem extends H
    * @param op http operation
    * @param fspath file system path
    * @param parameters parameters for the operation
-   * @return a JSON object, e.g. Object[], Map<String, Object>, etc.
+   * @return a JSON object, e.g. Object[], Map<?, ?>, etc.
    * @throws IOException
    */
-  private <T> T run(final HttpOpParam.Op op, final Path fspath,
+  private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
       final Param<?,?>... parameters) throws IOException {
     final HttpURLConnection conn = httpConnect(op, fspath, parameters);
-    validateResponse(op, conn);
     try {
-      return WebHdfsFileSystem.<T>jsonParse(conn.getInputStream());
+      final Map<?, ?> m = validateResponse(op, conn);
+      return m != null? m: jsonParse(conn.getInputStream());
     } finally {
       conn.disconnect();
     }
@@ -252,8 +372,8 @@ public class WebHdfsFileSystem extends H
 
   private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
-    final Map<String, Object> json = run(op, f);
-    final HdfsFileStatus status = JsonUtil.toFileStatus(json);
+    final Map<?, ?> json = run(op, f);
+    final HdfsFileStatus status = JsonUtil.toFileStatus(json, true);
     if (status == null) {
       throw new FileNotFoundException("File does not exist: " + f);
     }
@@ -278,7 +398,7 @@ public class WebHdfsFileSystem extends H
   public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.MKDIRS;
-    final Map<String, Object> json = run(op, f,
+    final Map<?, ?> json = run(op, f,
         new PermissionParam(applyUMask(permission)));
     return (Boolean)json.get("boolean");
   }
@@ -287,8 +407,8 @@ public class WebHdfsFileSystem extends H
   public boolean rename(final Path src, final Path dst) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.RENAME;
-    final Map<String, Object> json = run(op, src,
-        new DstPathParam(makeQualified(dst).toUri().getPath()));
+    final Map<?, ?> json = run(op, src,
+        new DestinationParam(makeQualified(dst).toUri().getPath()));
     return (Boolean)json.get("boolean");
   }
 
@@ -298,7 +418,7 @@ public class WebHdfsFileSystem extends H
       final Options.Rename... options) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.RENAME;
-    run(op, src, new DstPathParam(makeQualified(dst).toUri().getPath()),
+    run(op, src, new DestinationParam(makeQualified(dst).toUri().getPath()),
         new RenameOptionSetParam(options));
   }
 
@@ -327,8 +447,7 @@ public class WebHdfsFileSystem extends H
      ) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
-    final Map<String, Object> json = run(op, p,
-        new ReplicationParam(replication));
+    final Map<?, ?> json = run(op, p, new ReplicationParam(replication));
     return (Boolean)json.get("boolean");
   }
 
@@ -340,6 +459,18 @@ public class WebHdfsFileSystem extends H
     run(op, p, new ModificationTimeParam(mtime), new AccessTimeParam(atime));
   }
 
+  @Override
+  public long getDefaultBlockSize() {
+    return getConf().getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
+  }
+
+  @Override
+  public short getDefaultReplication() {
+    return (short)getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
+        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+  }
+
   private FSDataOutputStream write(final HttpOpParam.Op op,
       final HttpURLConnection conn, final int bufferSize) throws IOException {
     return new FSDataOutputStream(new BufferedOutputStream(
@@ -382,10 +513,16 @@ public class WebHdfsFileSystem extends H
     return write(op, conn, bufferSize);
   }
 
+  @SuppressWarnings("deprecation")
+  @Override
+  public boolean delete(final Path f) throws IOException {
+    return delete(f, true);
+  }
+
   @Override
   public boolean delete(Path f, boolean recursive) throws IOException {
     final HttpOpParam.Op op = DeleteOpParam.Op.DELETE;
-    final Map<String, Object> json = run(op, f, new RecursiveParam(recursive));
+    final Map<?, ?> json = run(op, f, new RecursiveParam(recursive));
     return (Boolean)json.get("boolean");
   }
 
@@ -395,7 +532,24 @@ public class WebHdfsFileSystem extends H
     statistics.incrementReadOps(1);
     final HttpOpParam.Op op = GetOpParam.Op.OPEN;
     final URL url = toUrl(op, f, new BufferSizeParam(buffersize));
-    return new FSDataInputStream(new ByteRangeInputStream(url));
+    ByteRangeInputStream str = getByteRangeInputStream(url);
+    return new FSDataInputStream(str);
+  }
+
+  private class URLOpener extends ByteRangeInputStream.URLOpener {
+
+    public URLOpener(URL u) {
+      super(u);
+    }
+
+    @Override
+    public HttpURLConnection openConnection() throws IOException {
+      return getHttpUrlConnection(offsetUrl);
+    }
+  }
+  
+  private ByteRangeInputStream getByteRangeInputStream(URL url) {
+    return new ByteRangeInputStream(new URLOpener(url), new URLOpener(null));
   }
 
   @Override
@@ -404,24 +558,24 @@ public class WebHdfsFileSystem extends H
 
     final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
     final Map<?, ?> json  = run(op, f);
-    final Object[] array = (Object[])json.get(
-        HdfsFileStatus[].class.getSimpleName());
+    final Map<?, ?> rootmap = (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName() + "es");
+    final Object[] array = (Object[])rootmap.get(HdfsFileStatus.class.getSimpleName());
 
     //convert FileStatus
     final FileStatus[] statuses = new FileStatus[array.length];
     for(int i = 0; i < array.length; i++) {
-      @SuppressWarnings("unchecked")
-      final Map<String, Object> m = (Map<String, Object>)array[i];
-      statuses[i] = makeQualified(JsonUtil.toFileStatus(m), f);
+      final Map<?, ?> m = (Map<?, ?>)array[i];
+      statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
     }
     return statuses;
   }
 
+  @SuppressWarnings("deprecation")
   @Override
   public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
       ) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
-    final Map<String, Object> m = run(op, null, new RenewerParam(renewer));
+    final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
     final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m); 
     token.setService(new Text(getCanonicalServiceName()));
     return token;
@@ -435,6 +589,45 @@ public class WebHdfsFileSystem extends H
   }
 
   @Override
+  public Token<?> getRenewToken() {
+    return renewToken;
+  }
+
+  @Override
+  public <T extends TokenIdentifier> void setDelegationToken(
+      final Token<T> token) {
+    synchronized(this) {
+      renewToken = token;
+      // emulate the 203 usage of the tokens
+      // by setting the kind and service as if they were hdfs tokens
+      delegationToken = new Token<T>(token);
+      // NOTE: the remote nn must be configured to use hdfs
+      delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
+      // no need to change service because we aren't exactly sure what it
+      // should be.  we can guess, but it might be wrong if the local conf
+      // value is incorrect.  the service is a client side field, so the remote
+      // end does not care about the value
+    }
+  }
+
+  private synchronized long renewDelegationToken(final Token<?> token
+      ) throws IOException {
+    final HttpOpParam.Op op = PutOpParam.Op.RENEWDELEGATIONTOKEN;
+    TokenArgumentParam dtargParam = new TokenArgumentParam(
+        token.encodeToUrlString());
+    final Map<?, ?> m = run(op, null, dtargParam);
+    return (Long) m.get("long");
+  }
+
+  private synchronized void cancelDelegationToken(final Token<?> token
+      ) throws IOException {
+    final HttpOpParam.Op op = PutOpParam.Op.CANCELDELEGATIONTOKEN;
+    TokenArgumentParam dtargParam = new TokenArgumentParam(
+        token.encodeToUrlString());
+    run(op, null, dtargParam);
+  }
+  
+  @Override
   public BlockLocation[] getFileBlockLocations(final FileStatus status,
       final long offset, final long length) throws IOException {
     if (status == null) {
@@ -449,7 +642,7 @@ public class WebHdfsFileSystem extends H
     statistics.incrementReadOps(1);
 
     final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
-    final Map<String, Object> m = run(op, p, new OffsetParam(offset),
+    final Map<?, ?> m = run(op, p, new OffsetParam(offset),
         new LengthParam(length));
     return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
   }
@@ -459,7 +652,7 @@ public class WebHdfsFileSystem extends H
     statistics.incrementReadOps(1);
 
     final HttpOpParam.Op op = GetOpParam.Op.GETCONTENTSUMMARY;
-    final Map<String, Object> m = run(op, p);
+    final Map<?, ?> m = run(op, p);
     return JsonUtil.toContentSummary(m);
   }
 
@@ -469,7 +662,69 @@ public class WebHdfsFileSystem extends H
     statistics.incrementReadOps(1);
   
     final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
-    final Map<String, Object> m = run(op, p);
+    final Map<?, ?> m = run(op, p);
     return JsonUtil.toMD5MD5CRC32FileChecksum(m);
   }
-}
\ No newline at end of file
+
+  private static final DtSelector webhdfspTokenSelector = new DtSelector();
+
+  private static class DtSelector
+      extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
+    private DtSelector() {
+      super(TOKEN_KIND);
+    }
+  }
+
+  /** Delegation token renewer. */
+  public static class DtRenewer extends TokenRenewer {
+    @Override
+    public boolean handleKind(Text kind) {
+      return kind.equals(TOKEN_KIND);
+    }
+  
+    @Override
+    public boolean isManaged(Token<?> token) throws IOException {
+      return true;
+    }
+
+    private static WebHdfsFileSystem getWebHdfs(
+        final Token<?> token, final Configuration conf
+        ) throws IOException, InterruptedException, URISyntaxException {
+      
+      final InetSocketAddress nnAddr =  NetUtils.createSocketAddr(
+          token.getService().toString());
+      final URI uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, nnAddr);
+      return (WebHdfsFileSystem)FileSystem.get(uri, conf);
+    }
+
+    @Override
+    public long renew(final Token<?> token, final Configuration conf
+        ) throws IOException, InterruptedException {
+      final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+      // update the kerberos credentials, if they are coming from a keytab
+      ugi.checkTGTAndReloginFromKeytab();
+
+      try {
+        WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
+        return webhdfs.renewDelegationToken(token);
+      } catch (URISyntaxException e) {
+        throw new IOException(e);
+      }
+    }
+  
+    @Override
+    public void cancel(final Token<?> token, final Configuration conf
+        ) throws IOException, InterruptedException {
+      final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+      // update the kerberos credentials, if they are coming from a keytab
+      ugi.checkTGTAndReloginFromKeytab();
+
+      try {
+        final WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
+        webhdfs.cancelDelegationToken(token);
+      } catch (URISyntaxException e) {
+        throw new IOException(e);
+      }
+    }
+  }
+}

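The WebHdfsFileSystem rewrite above now extends FileSystem directly, builds request URLs from a versioned prefix ("/" + SCHEME + "/v" + VERSION, i.e. /webhdfs/v1), and appends the delegation token to the query for every operation except GETDELEGATIONTOKEN and RENEWDELEGATIONTOKEN, which must authenticate over Kerberos. A rough sketch of the resulting URL shape, assuming made-up host, port, path and user values:

import java.net.URL;

public class WebHdfsUrlSketch {
  public static void main(String[] args) throws Exception {
    final String pathPrefix = "/webhdfs/v1";        // "/" + SCHEME + "/v" + VERSION
    final String fsPath = "/user/alice/data.txt";   // hypothetical file
    final String query = "op=GETFILESTATUS&user.name=alice";
    final URL url = new URL("http", "namenode.example.com", 50070,
        pathPrefix + fsPath + '?' + query);
    // On a secure cluster, addDt2Query would further append
    // "&delegation=<encoded token>" for operations other than the
    // get/renew delegation token calls.
    System.out.println(url);
    // http://namenode.example.com:50070/webhdfs/v1/user/alice/data.txt?op=GETFILESTATUS&user.name=alice
  }
}
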
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java Wed Nov  2 05:34:31 2011
@@ -31,7 +31,7 @@ public class AccessTimeParam extends Lon
    * @param value the parameter value.
    */
   public AccessTimeParam(final Long value) {
-    super(DOMAIN, value);
+    super(DOMAIN, value, -1L, null);
   }
 
   /**

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java Wed Nov  2 05:34:31 2011
@@ -36,7 +36,7 @@ public class BlockSizeParam extends Long
    * @param value the parameter value.
    */
   public BlockSizeParam(final Long value) {
-    super(DOMAIN, value);
+    super(DOMAIN, value, 1L, null);
   }
 
   /**

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java Wed Nov  2 05:34:31 2011
@@ -34,7 +34,7 @@ public class BufferSizeParam extends Int
    * @param value the parameter value.
    */
   public BufferSizeParam(final Integer value) {
-    super(DOMAIN, value);
+    super(DOMAIN, value, 1, null);
   }
 
   /**

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java Wed Nov  2 05:34:31 2011
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.web.resou
 
 import org.apache.hadoop.security.UserGroupInformation;
 
-/** Delegation token parameter. */
+/** Represents delegation token used for authentication. */
 public class DelegationParam extends StringParam {
   /** Parameter name. */
   public static final String NAME = "delegation";

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java Wed Nov  2 05:34:31 2011
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.web.resou
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.ext.ExceptionMapper;
@@ -29,17 +31,33 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 
+import com.sun.jersey.api.ParamException;
+
 /** Handle exceptions. */
 @Provider
 public class ExceptionHandler implements ExceptionMapper<Exception> {
   public static final Log LOG = LogFactory.getLog(ExceptionHandler.class);
 
+  private @Context HttpServletResponse response;
+
   @Override
-  public Response toResponse(final Exception e) {
+  public Response toResponse(Exception e) {
     if (LOG.isTraceEnabled()) {
       LOG.trace("GOT EXCEPITION", e);
     }
 
+    //clear content type
+    response.setContentType(null);
+
+    //Convert exception
+    if (e instanceof ParamException) {
+      final ParamException paramexception = (ParamException)e;
+      e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
+          + paramexception.getParameterName() + "\": "
+          + e.getCause().getMessage(), e);
+    } 
+
+    //Map response status
     final Response.Status s;
     if (e instanceof SecurityException) {
       s = Response.Status.UNAUTHORIZED;
@@ -49,7 +67,10 @@ public class ExceptionHandler implements
       s = Response.Status.FORBIDDEN;
     } else if (e instanceof UnsupportedOperationException) {
       s = Response.Status.BAD_REQUEST;
+    } else if (e instanceof IllegalArgumentException) {
+      s = Response.Status.BAD_REQUEST;
     } else {
+      LOG.warn("INTERNAL_SERVER_ERROR", e);
       s = Response.Status.INTERNAL_SERVER_ERROR;
     }
  

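The ExceptionHandler changes translate Jersey's ParamException into an IllegalArgumentException naming the offending webhdfs parameter and map it, like UnsupportedOperationException, to 400 Bad Request; anything that still falls through is logged before a 500 is returned. A standalone sketch of that status mapping (only the branches visible in the hunks above are shown; branches elided between hunks are left out):

public class StatusMappingSketch {
  // Illustrative helper, not the JAX-RS mapper itself.
  static int toHttpStatus(final Exception e) {
    if (e instanceof SecurityException) {
      return 401;                      // UNAUTHORIZED
    } else if (e instanceof UnsupportedOperationException
        || e instanceof IllegalArgumentException) {
      return 400;                      // BAD_REQUEST (new branch for bad parameters)
    } else {
      return 500;                      // INTERNAL_SERVER_ERROR, now logged as a warning
    }
  }

  public static void main(String[] args) {
    // e.g. a malformed "buffersize" value surfaces as IllegalArgumentException.
    System.out.println(toHttpStatus(new IllegalArgumentException("bad parameter")));
  }
}
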
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java Wed Nov  2 05:34:31 2011
@@ -19,8 +19,24 @@ package org.apache.hadoop.hdfs.web.resou
 
 /** Integer parameter. */
 abstract class IntegerParam extends Param<Integer, IntegerParam.Domain> {
-  IntegerParam(final Domain domain, final Integer value) {
+  IntegerParam(final Domain domain, final Integer value,
+      final Integer min, final Integer max) {
     super(domain, value);
+    checkRange(min, max);
+  }
+
+  private void checkRange(final Integer min, final Integer max) {
+    if (value == null) {
+      return;
+    }
+    if (min != null && value < min) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " < " + domain.toString(min));
+    }
+    if (max != null && value > max) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " > " + domain.toString(max));
+    }
   }
   
   @Override
@@ -49,7 +65,12 @@ abstract class IntegerParam extends Para
 
     @Override
     Integer parse(final String str) {
-      return NULL.equals(str)? null: Integer.parseInt(str, radix);
+      try{
+        return NULL.equals(str)? null: Integer.parseInt(str, radix);
+      } catch(NumberFormatException e) {
+        throw new IllegalArgumentException("Failed to parse \"" + str
+            + "\" as a radix-" + radix + " integer.", e);
+      }
     }
 
     /** Convert an Integer to a String. */ 

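IntegerParam now takes an optional minimum and maximum and rejects out-of-range or unparsable values with IllegalArgumentException, which the ExceptionHandler above turns into 400 Bad Request. The new super(DOMAIN, value, 1, null) call in BufferSizeParam relies on it directly; the long-valued parameters (AccessTimeParam, BlockSizeParam, LengthParam) presumably rely on an analogous change to LongParam that falls outside this part of the diff. A standalone sketch of the range check, decoupled from the Param/Domain hierarchy; the class and method names here are illustrative:

public class RangeCheckedParamSketch {
  static void checkRange(final String name, final Integer value,
      final Integer min, final Integer max) {
    if (value == null) {
      return;                                    // unset parameters are allowed
    }
    if (min != null && value < min) {
      throw new IllegalArgumentException("Invalid parameter range: " + name
          + " = " + value + " < " + min);
    }
    if (max != null && value > max) {
      throw new IllegalArgumentException("Invalid parameter range: " + name
          + " = " + value + " > " + max);
    }
  }

  public static void main(String[] args) {
    checkRange("buffersize", 4096, 1, null);     // ok: mirrors new BufferSizeParam(4096)
    try {
      checkRange("buffersize", 0, 1, null);      // rejected: below the new minimum of 1
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}
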
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java Wed Nov  2 05:34:31 2011
@@ -31,7 +31,7 @@ public class LengthParam extends LongPar
    * @param value the parameter value.
    */
   public LengthParam(final Long value) {
-    super(DOMAIN, value);
+    super(DOMAIN, value, 0L, null);
   }
 
   /**


