hadoop-hdfs-commits mailing list archives

From szets...@apache.org
Subject svn commit: r1354832 [2/5] - in /hadoop/common/branches/HDFS-3092/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-h...
Date Thu, 28 Jun 2012 07:00:25 GMT
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java Thu Jun 28 06:59:38 2012
@@ -21,26 +21,22 @@ package org.apache.hadoop.fs.http.server
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.server.HttpFSParams.AccessTimeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.BlockSizeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DataParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteRecursiveParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DoAsParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.FilterParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.FsPathParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.GetOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.GroupParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.LenParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ModifiedTimeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OffsetParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OverwriteParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OwnerParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PermissionParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PostOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PutOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ReplicationParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ToPathParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DoAsParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PermissionParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.lib.service.FileSystemAccessException;
 import org.apache.hadoop.lib.service.Groups;
@@ -49,6 +45,7 @@ import org.apache.hadoop.lib.service.Pro
 import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
 import org.apache.hadoop.lib.servlet.HostnameFilter;
 import org.apache.hadoop.lib.wsrs.InputStreamEntity;
+import org.apache.hadoop.lib.wsrs.Parameters;
 import org.apache.hadoop.security.authentication.server.AuthenticationToken;
 import org.json.simple.JSONObject;
 import org.slf4j.Logger;
@@ -57,7 +54,6 @@ import org.slf4j.MDC;
 
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
-import javax.ws.rs.DefaultValue;
 import javax.ws.rs.GET;
 import javax.ws.rs.POST;
 import javax.ws.rs.PUT;
@@ -90,39 +86,6 @@ public class HttpFSServer {
   private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
 
   /**
-   * Special binding for '/' as it is not handled by the wildcard binding.
-   *
-   * @param user principal making the request.
-   * @param op GET operation, default value is {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}.
-   * @param filter Glob filter, default value is none. Used only if the
-   * operation is {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#LISTSTATUS}.
-   * @param doAs user being impersonated, default value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
-   *
-   * @return the request response
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
-   */
-  @GET
-  @Path("/")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response root(@Context Principal user,
-                       @QueryParam(GetOpParam.NAME) GetOpParam op,
-                       @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
-                       @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
-    throws IOException, FileSystemAccessException {
-    return get(user, new FsPathParam(""), op, new OffsetParam(OffsetParam.DEFAULT),
-               new LenParam(LenParam.DEFAULT), filter, doAs,
-               new OverwriteParam(OverwriteParam.DEFAULT),
-               new BlockSizeParam(BlockSizeParam.DEFAULT),
-               new PermissionParam(PermissionParam.DEFAULT),
-               new ReplicationParam(ReplicationParam.DEFAULT));
-  }
-
-  /**
    * Resolves the effective user that will be used to request a FileSystemAccess filesystem.
    * <p/>
    * If the doAs-user is NULL or the same as the user, it returns the user.
@@ -207,402 +170,405 @@ public class HttpFSServer {
     return fs;
   }
 
+  private void enforceRootPath(HttpFSFileSystem.Operation op, String path) {
+    if (!path.equals("/")) {
+      throw new UnsupportedOperationException(
+        MessageFormat.format("Operation [{0}], invalid path [{1}], must be '/'",
+                             op, path));
+    }
+  }
+
   /**
-   * Binding to handle all GET requests, supported operations are
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues}.
-   * <p/>
-   * The {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#INSTRUMENTATION} operation is available only
-   * to users that are in HttpFSServer's admin group (see {@link HttpFSServer}). It returns
-   * HttpFSServer instrumentation data. The specified path must be '/'.
+   * Special binding for '/' as it is not handled by the wildcard binding.
    *
-   * @param user principal making the request.
-   * @param path path for the GET request.
-   * @param op GET operation, default value is {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}.
-   * @param offset offset of the file being fetched, used only with
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN} operations.
-   * @param len number of bytes to fetch, used only with {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}
-   * operations.
-   * @param filter Glob filter, default value is none. Used only if the
-   * operation is {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#LISTSTATUS}.
-   * @param doAs user being impersonated, default value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
-   * @param override default is true. Used only for
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param blockSize block size to set, used only by
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param permission permission to set, used only by
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETPERMISSION}.
-   * @param replication replication factor to set, used only by
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETREPLICATION}.
+   * @param user the principal of the user making the request.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
    *
    * @return the request response.
    *
    * @throws IOException thrown if an IO error occurred. Thrown exceptions are
    * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
    */
   @GET
-  @Path("{path:.*}")
-  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
-  public Response get(@Context Principal user,
-                      @PathParam("path") @DefaultValue("") FsPathParam path,
-                      @QueryParam(GetOpParam.NAME) GetOpParam op,
-                      @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT) OffsetParam offset,
-                      @QueryParam(LenParam.NAME) @DefaultValue(LenParam.DEFAULT) LenParam len,
-                      @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
-                      @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs,
-
-                      //these params are only for createHandle operation acceptance purposes
-                      @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
-                      @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
-                      @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
-                      PermissionParam permission,
-                      @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
-                      ReplicationParam replication
-  )
+  @Path("/")
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response getRoot(@Context Principal user,
+                          @QueryParam(OperationParam.NAME) OperationParam op,
+                          @Context Parameters params)
     throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", GetOpParam.NAME));
-    } else {
-      path.makeAbsolute();
-      MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-      switch (op.value()) {
-        case OPEN: {
-          //Invoking the command directly using an unmanaged FileSystem that is released by the
-          //FileSystemReleaseFilter
-          FSOperations.FSOpen command = new FSOperations.FSOpen(path.value());
-          FileSystem fs = createFileSystem(user, doAs.value());
-          InputStream is = command.execute(fs);
-          AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[]{path, offset, len});
-          InputStreamEntity entity = new InputStreamEntity(is, offset.value(), len.value());
-          response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
-          break;
-        }
-        case GETFILESTATUS: {
-          FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case LISTSTATUS: {
-          FSOperations.FSListStatus command = new FSOperations.FSListStatus(path.value(), filter.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          if (filter.value() == null) {
-            AUDIT_LOG.info("[{}]", path);
-          } else {
-            AUDIT_LOG.info("[{}] filter [{}]", path, filter.value());
-          }
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case GETHOMEDIRECTORY: {
-          FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
-          JSONObject json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("");
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case INSTRUMENTATION: {
-          if (!path.value().equals("/")) {
-            throw new UnsupportedOperationException(
-              MessageFormat.format("Invalid path for {0}={1}, must be '/'",
-                                   GetOpParam.NAME, HttpFSFileSystem.GetOpValues.INSTRUMENTATION));
-          }
-          Groups groups = HttpFSServerWebApp.get().get(Groups.class);
-          List<String> userGroups = groups.getGroups(user.getName());
-          if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
-            throw new AccessControlException("User not in HttpFSServer admin group");
-          }
-          Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class);
-          Map snapshot = instrumentation.getSnapshot();
-          response = Response.ok(snapshot).build();
-          break;
-        }
-        case GETCONTENTSUMMARY: {
-          FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case GETFILECHECKSUM: {
-          FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case GETDELEGATIONTOKEN: {
-          response = Response.status(Response.Status.BAD_REQUEST).build();
-          break;
-        }
-        case GETFILEBLOCKLOCATIONS: {
-          response = Response.status(Response.Status.BAD_REQUEST).build();
-          break;
-        }
-      }
-      return response;
-    }
+    return get(user, "", op, params);
+  }
+
+  private String makeAbsolute(String path) {
+    return "/" + ((path != null) ? path : "");
   }
 
   /**
-   * Creates the URL for an upload operation (create or append).
+   * Binding to handle GET requests, supported operations are listed in
+   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.Operation}.
    *
-   * @param uriInfo uri info of the request.
-   * @param uploadOperation operation for the upload URL.
+   * @param user the principal of the user making the request.
+   * @param path the path for the operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
    *
-   * @return the URI for uploading data.
+   * @return the request response.
+   *
+   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
+   * handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
    */
-  protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum<?> uploadOperation) {
-    UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
-    uriBuilder = uriBuilder.replaceQueryParam(PutOpParam.NAME, uploadOperation).
-      queryParam(DataParam.NAME, Boolean.TRUE);
-    return uriBuilder.build(null);
+  @GET
+  @Path("{path:.*}")
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
+  public Response get(@Context Principal user,
+                      @PathParam("path") String path,
+                      @QueryParam(OperationParam.NAME) OperationParam op,
+                      @Context Parameters params)
+    throws IOException, FileSystemAccessException {
+    Response response;
+    path = makeAbsolute(path);
+    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
+    switch (op.value()) {
+      case OPEN: {
+        //Invoking the command directly using an unmanaged FileSystem that is
+        // released by the FileSystemReleaseFilter
+        FSOperations.FSOpen command = new FSOperations.FSOpen(path);
+        FileSystem fs = createFileSystem(user, doAs);
+        InputStream is = command.execute(fs);
+        Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
+        Long len = params.get(LenParam.NAME, LenParam.class);
+        AUDIT_LOG.info("[{}] offset [{}] len [{}]",
+                       new Object[]{path, offset, len});
+        InputStreamEntity entity = new InputStreamEntity(is, offset, len);
+        response =
+          Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
+        break;
+      }
+      case GETFILESTATUS: {
+        FSOperations.FSFileStatus command =
+          new FSOperations.FSFileStatus(path);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case LISTSTATUS: {
+        String filter = params.get(FilterParam.NAME, FilterParam.class);
+        FSOperations.FSListStatus command = new FSOperations.FSListStatus(
+          path, filter);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] filter [{}]", path,
+                       (filter != null) ? filter : "-");
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case GETHOMEDIRECTORY: {
+        enforceRootPath(op.value(), path);
+        FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
+        JSONObject json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("");
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case INSTRUMENTATION: {
+        enforceRootPath(op.value(), path);
+        Groups groups = HttpFSServerWebApp.get().get(Groups.class);
+        List<String> userGroups = groups.getGroups(user.getName());
+        if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
+          throw new AccessControlException(
+            "User not in HttpFSServer admin group");
+        }
+        Instrumentation instrumentation =
+          HttpFSServerWebApp.get().get(Instrumentation.class);
+        Map snapshot = instrumentation.getSnapshot();
+        response = Response.ok(snapshot).build();
+        break;
+      }
+      case GETCONTENTSUMMARY: {
+        FSOperations.FSContentSummary command =
+          new FSOperations.FSContentSummary(path);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case GETFILECHECKSUM: {
+        FSOperations.FSFileChecksum command =
+          new FSOperations.FSFileChecksum(path);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case GETFILEBLOCKLOCATIONS: {
+        response = Response.status(Response.Status.BAD_REQUEST).build();
+        break;
+      }
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP GET operation [{0}]",
+                               op.value()));
+      }
+    }
+    return response;
   }
 
+
   /**
-   * Binding to handle all DELETE requests.
+   * Binding to handle DELETE requests.
    *
-   * @param user principal making the request.
-   * @param path path for the DELETE request.
-   * @param op DELETE operation, default value is {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.DeleteOpValues#DELETE}.
-   * @param recursive indicates if the delete is recursive, default is <code>false</code>.
-   * @param doAs user being impersonated, default value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
+   * @param user the principal of the user making the request.
+   * @param path the path for the operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
    *
    * @return the request response.
    *
    * @throws IOException thrown if an IO error occurred. Thrown exceptions are
    * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
    */
   @DELETE
   @Path("{path:.*}")
   @Produces(MediaType.APPLICATION_JSON)
   public Response delete(@Context Principal user,
-                         @PathParam("path") FsPathParam path,
-                         @QueryParam(DeleteOpParam.NAME) DeleteOpParam op,
-                         @QueryParam(DeleteRecursiveParam.NAME) @DefaultValue(DeleteRecursiveParam.DEFAULT)
-                         DeleteRecursiveParam recursive,
-                         @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
+                         @PathParam("path") String path,
+                         @QueryParam(OperationParam.NAME) OperationParam op,
+                         @Context Parameters params)
     throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", DeleteOpParam.NAME));
-    }
+    Response response;
+    path = makeAbsolute(path);
+    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
     switch (op.value()) {
       case DELETE: {
-        path.makeAbsolute();
-        MDC.put(HttpFSFileSystem.OP_PARAM, "DELETE");
+        Boolean recursive =
+          params.get(RecursiveParam.NAME, RecursiveParam.class);
         AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
-        FSOperations.FSDelete command = new FSOperations.FSDelete(path.value(), recursive.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
+        FSOperations.FSDelete command =
+          new FSOperations.FSDelete(path, recursive);
+        JSONObject json = fsExecute(user, doAs, command);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
       }
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP DELETE operation [{0}]",
+                               op.value()));
+      }
     }
     return response;
   }
 
+  /**
+   * Binding to handle POST requests.
+   *
+   * @param is the inputstream for the request payload.
+   * @param user the principal of the user making the request.
+   * @param uriInfo the UriInfo of the request.
+   * @param path the path for the operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
+   *
+   * @return the request response.
+   *
+   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
+   * handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
+   */
+  @POST
+  @Path("{path:.*}")
+  @Consumes({"*/*"})
+  @Produces({MediaType.APPLICATION_JSON})
+  public Response post(InputStream is,
+                       @Context Principal user,
+                       @Context UriInfo uriInfo,
+                       @PathParam("path") String path,
+                       @QueryParam(OperationParam.NAME) OperationParam op,
+                       @Context Parameters params)
+    throws IOException, FileSystemAccessException {
+    Response response;
+    path = makeAbsolute(path);
+    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
+    switch (op.value()) {
+      case APPEND: {
+        boolean hasData = params.get(DataParam.NAME, DataParam.class);
+        if (!hasData) {
+          response = Response.temporaryRedirect(
+            createUploadRedirectionURL(uriInfo,
+              HttpFSFileSystem.Operation.APPEND)).build();
+        } else {
+          FSOperations.FSAppend command =
+            new FSOperations.FSAppend(is, path);
+          fsExecute(user, doAs, command);
+          AUDIT_LOG.info("[{}]", path);
+          response = Response.ok().type(MediaType.APPLICATION_JSON).build();
+        }
+        break;
+      }
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP POST operation [{0}]",
+                               op.value()));
+      }
+    }
+    return response;
+  }
 
   /**
-   * Binding to handle all PUT requests, supported operations are
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues}.
+   * Creates the URL for an upload operation (create or append).
    *
-   * @param is request input stream, used only for
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND} operations.
-   * @param user principal making the request.
-   * @param uriInfo the request uriInfo.
-   * @param path path for the PUT request.
-   * @param op PUT operation, no default value.
-   * @param toPath new path, used only for
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#RENAME} operations.
-   * @param owner owner to set, used only for
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETOWNER} operations.
-   * @param group group to set, used only for
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETOWNER} operations.
-   * @param override default is true. Used only for
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param blockSize block size to set, used only by
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param permission permission to set, used only by
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETPERMISSION}.
-   * @param replication replication factor to set, used only by
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETREPLICATION}.
-   * @param modifiedTime modified time, in seconds since EPOCH, used only by
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETTIMES}.
-   * @param accessTime accessed time, in seconds since EPOCH, used only by
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETTIMES}.
-   * @param hasData indicates if the append request is uploading data or not
-   * (just getting the handle).
-   * @param doAs user being impersonated, default value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
+   * @param uriInfo uri info of the request.
+   * @param uploadOperation operation for the upload URL.
+   *
+   * @return the URI for uploading data.
+   */
+  protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum<?> uploadOperation) {
+    UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
+    uriBuilder = uriBuilder.replaceQueryParam(OperationParam.NAME, uploadOperation).
+      queryParam(DataParam.NAME, Boolean.TRUE);
+    return uriBuilder.build(null);
+  }
+
+
+  /**
+   * Binding to handle PUT requests.
+   *
+   * @param is the inputstream for the request payload.
+   * @param user the principal of the user making the request.
+   * @param uriInfo the UriInfo of the request.
+   * @param path the path for the operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
    *
    * @return the request response.
    *
    * @throws IOException thrown if an IO error occurred. Thrown exceptions are
    * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
    */
   @PUT
   @Path("{path:.*}")
   @Consumes({"*/*"})
   @Produces({MediaType.APPLICATION_JSON})
   public Response put(InputStream is,
-                      @Context Principal user,
-                      @Context UriInfo uriInfo,
-                      @PathParam("path") FsPathParam path,
-                      @QueryParam(PutOpParam.NAME) PutOpParam op,
-                      @QueryParam(ToPathParam.NAME) @DefaultValue(ToPathParam.DEFAULT) ToPathParam toPath,
-                      @QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT) OwnerParam owner,
-                      @QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT) GroupParam group,
-                      @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
-                      @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
-                      @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
-                      PermissionParam permission,
-                      @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
-                      ReplicationParam replication,
-                      @QueryParam(ModifiedTimeParam.NAME) @DefaultValue(ModifiedTimeParam.DEFAULT)
-                      ModifiedTimeParam modifiedTime,
-                      @QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
-                      AccessTimeParam accessTime,
-                      @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
-                      @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
+                       @Context Principal user,
+                       @Context UriInfo uriInfo,
+                       @PathParam("path") String path,
+                       @QueryParam(OperationParam.NAME) OperationParam op,
+                       @Context Parameters params)
     throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PutOpParam.NAME));
-    }
-    path.makeAbsolute();
+    Response response;
+    path = makeAbsolute(path);
     MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
     switch (op.value()) {
       case CREATE: {
-        if (!hasData.value()) {
+        boolean hasData = params.get(DataParam.NAME, DataParam.class);
+        if (!hasData) {
           response = Response.temporaryRedirect(
-            createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PutOpValues.CREATE)).build();
+            createUploadRedirectionURL(uriInfo,
+              HttpFSFileSystem.Operation.CREATE)).build();
         } else {
-          FSOperations.FSCreate
-            command = new FSOperations.FSCreate(is, path.value(), permission.value(), override.value(),
-                                                replication.value(), blockSize.value());
-          fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
-                         new Object[]{path, permission, override, replication, blockSize});
+          String permission = params.get(PermissionParam.NAME,
+                                         PermissionParam.class);
+          boolean override = params.get(OverwriteParam.NAME,
+                                        OverwriteParam.class);
+          short replication = params.get(ReplicationParam.NAME,
+                                         ReplicationParam.class);
+          long blockSize = params.get(BlockSizeParam.NAME,
+                                      BlockSizeParam.class);
+          FSOperations.FSCreate command =
+            new FSOperations.FSCreate(is, path, permission, override,
+                                      replication, blockSize);
+          fsExecute(user, doAs, command);
+          AUDIT_LOG.info(
+            "[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
+            new Object[]{path, permission, override, replication, blockSize});
           response = Response.status(Response.Status.CREATED).build();
         }
         break;
       }
       case MKDIRS: {
-        FSOperations.FSMkdirs command = new FSOperations.FSMkdirs(path.value(), permission.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] permission [{}]", path, permission.value());
+        String permission = params.get(PermissionParam.NAME,
+                                       PermissionParam.class);
+        FSOperations.FSMkdirs command =
+          new FSOperations.FSMkdirs(path, permission);
+        JSONObject json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] permission [{}]", path, permission);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
       }
       case RENAME: {
-        FSOperations.FSRename command = new FSOperations.FSRename(path.value(), toPath.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
+        String toPath = params.get(DestinationParam.NAME, DestinationParam.class);
+        FSOperations.FSRename command =
+          new FSOperations.FSRename(path, toPath);
+        JSONObject json = fsExecute(user, doAs, command);
         AUDIT_LOG.info("[{}] to [{}]", path, toPath);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
       }
       case SETOWNER: {
-        FSOperations.FSSetOwner command = new FSOperations.FSSetOwner(path.value(), owner.value(), group.value());
-        fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner.value() + ":" + group.value());
+        String owner = params.get(OwnerParam.NAME, OwnerParam.class);
+        String group = params.get(GroupParam.NAME, GroupParam.class);
+        FSOperations.FSSetOwner command =
+          new FSOperations.FSSetOwner(path, owner, group);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner + ":" + group);
         response = Response.ok().build();
         break;
       }
       case SETPERMISSION: {
-        FSOperations.FSSetPermission command = new FSOperations.FSSetPermission(path.value(), permission.value());
-        fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to [{}]", path, permission.value());
+        String permission = params.get(PermissionParam.NAME,
+                                       PermissionParam.class);
+        FSOperations.FSSetPermission command =
+          new FSOperations.FSSetPermission(path, permission);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to [{}]", path, permission);
         response = Response.ok().build();
         break;
       }
       case SETREPLICATION: {
-        FSOperations.FSSetReplication command = new FSOperations.FSSetReplication(path.value(), replication.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to [{}]", path, replication.value());
+        short replication = params.get(ReplicationParam.NAME,
+                                       ReplicationParam.class);
+        FSOperations.FSSetReplication command =
+          new FSOperations.FSSetReplication(path, replication);
+        JSONObject json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to [{}]", path, replication);
         response = Response.ok(json).build();
         break;
       }
       case SETTIMES: {
-        FSOperations.FSSetTimes
-          command = new FSOperations.FSSetTimes(path.value(), modifiedTime.value(), accessTime.value());
-        fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to (M/A)[{}]", path, modifiedTime.value() + ":" + accessTime.value());
+        long modifiedTime = params.get(ModifiedTimeParam.NAME,
+                                       ModifiedTimeParam.class);
+        long accessTime = params.get(AccessTimeParam.NAME,
+                                     AccessTimeParam.class);
+        FSOperations.FSSetTimes command =
+          new FSOperations.FSSetTimes(path, modifiedTime, accessTime);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to (M/A)[{}]", path,
+                       modifiedTime + ":" + accessTime);
         response = Response.ok().build();
         break;
       }
-      case RENEWDELEGATIONTOKEN: {
-        response = Response.status(Response.Status.BAD_REQUEST).build();
-        break;
-      }
-      case CANCELDELEGATIONTOKEN: {
-        response = Response.status(Response.Status.BAD_REQUEST).build();
-        break;
-      }
-    }
-    return response;
-  }
-
-  /**
-   * Binding to handle all POST requests, supported operations are
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues}.
-   *
-   * @param is request input stream, used only for
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND} operations.
-   * @param user principal making the request.
-   * @param uriInfo the request uriInfo.
-   * @param path path for the POST request.
-   * @param op POST operation, default is {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND}.
-   * @param hasData indicates if the append request is uploading data or not (just getting the handle).
-   * @param doAs user being impersonated, default value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
-   *
-   * @return the request response.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
-   */
-  @POST
-  @Path("{path:.*}")
-  @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
-  public Response post(InputStream is,
-                       @Context Principal user,
-                       @Context UriInfo uriInfo,
-                       @PathParam("path") FsPathParam path,
-                       @QueryParam(PostOpParam.NAME) PostOpParam op,
-                       @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
-                       @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
-    throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PostOpParam.NAME));
-    }
-    path.makeAbsolute();
-    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-    switch (op.value()) {
-      case APPEND: {
-        if (!hasData.value()) {
-          response = Response.temporaryRedirect(
-            createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PostOpValues.APPEND)).build();
-        } else {
-          FSOperations.FSAppend command = new FSOperations.FSAppend(is, path.value());
-          fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok().type(MediaType.APPLICATION_JSON).build();
-        }
-        break;
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP PUT operation [{0}]",
+                               op.value()));
       }
     }
     return response;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java Thu Jun 28 06:59:38 2012
@@ -22,15 +22,14 @@ import java.text.MessageFormat;
 
 public abstract class BooleanParam extends Param<Boolean> {
 
-  public BooleanParam(String name, String str) {
-    value = parseParam(name, str);
+  public BooleanParam(String name, Boolean defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Boolean parse(String str) throws Exception {
     if (str.equalsIgnoreCase("true")) {
       return true;
-    }
-    if (str.equalsIgnoreCase("false")) {
+    } else if (str.equalsIgnoreCase("false")) {
       return false;
     }
     throw new IllegalArgumentException(MessageFormat.format("Invalid value [{0}], must be a boolean", str));

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java Thu Jun 28 06:59:38 2012
@@ -20,8 +20,8 @@ package org.apache.hadoop.lib.wsrs;
 
 public abstract class ByteParam extends Param<Byte> {
 
-  public ByteParam(String name, String str) {
-    value = parseParam(name, str);
+  public ByteParam(String name, Byte defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Byte parse(String str) throws Exception {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java Thu Jun 28 06:59:38 2012
@@ -25,9 +25,9 @@ import java.util.Arrays;
 public abstract class EnumParam<E extends Enum<E>> extends Param<E> {
   Class<E> klass;
 
-  public EnumParam(String label, String str, Class<E> e) {
+  public EnumParam(String name, Class<E> e, E defaultValue) {
+    super(name, defaultValue);
     klass = e;
-    value = parseParam(label, str);
   }
 
   protected E parse(String str) throws Exception {
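
With the new EnumParam constructor taking a typed default, an operation query parameter reduces to a small subclass. A hedged sketch of how HttpFSParametersProvider.OperationParam could sit on top of it as a nested class of the provider (the actual HttpFS implementation may differ):

// Illustrative only: a required 'op' parameter built on the new EnumParam.
// A null default leaves the value unset, so a missing operation surfaces as
// an error when the caller dereferences it.
public static class OperationParam extends EnumParam<HttpFSFileSystem.Operation> {
  public static final String NAME = "op";

  public OperationParam(String str) {
    super(NAME, HttpFSFileSystem.Operation.class, null);
    parseParam(str); // case-insensitive, per EnumParam.parse()
  }
}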

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java Thu Jun 28 06:59:38 2012
@@ -20,8 +20,8 @@ package org.apache.hadoop.lib.wsrs;
 
 public abstract class IntegerParam extends Param<Integer> {
 
-  public IntegerParam(String name, String str) {
-    value = parseParam(name, str);
+  public IntegerParam(String name, Integer defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Integer parse(String str) throws Exception {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java Thu Jun 28 06:59:38 2012
@@ -20,8 +20,8 @@ package org.apache.hadoop.lib.wsrs;
 
 public abstract class LongParam extends Param<Long> {
 
-  public LongParam(String name, String str) {
-    value = parseParam(name, str);
+  public LongParam(String name, Long defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Long parse(String str) throws Exception {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java Thu Jun 28 06:59:38 2012
@@ -23,32 +23,39 @@ import org.apache.hadoop.lib.util.Check;
 import java.text.MessageFormat;
 
 public abstract class Param<T> {
+  private String name;
   protected T value;
 
-  public T parseParam(String name, String str) {
-    Check.notNull(name, "name");
+  public Param(String name, T defaultValue) {
+    this.name = name;
+    this.value = defaultValue;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public T parseParam(String str) {
     try {
-      return (str != null && str.trim().length() > 0) ? parse(str) : null;
+      value = (str != null && str.trim().length() > 0) ? parse(str) : value;
     } catch (Exception ex) {
       throw new IllegalArgumentException(
         MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
                              name, str, getDomain()));
     }
+    return value;
   }
 
   public T value() {
     return value;
   }
 
-  protected void setValue(T value) {
-    this.value = value;
-  }
-
   protected abstract String getDomain();
 
   protected abstract T parse(String str) throws Exception;
 
   public String toString() {
-    return value.toString();
+    return (value != null) ? value.toString() : "NULL";
   }
+
 }
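
The same constructor-based pattern repeats across the BooleanParam, ByteParam, IntegerParam, and LongParam diffs above (and ShortParam below): a subclass passes a name and a typed default to super(), and parseParam(String) overrides the default only when a non-empty value parses. A hedged usage sketch against the new API (LimitParam and its default are hypothetical, and it assumes IntegerParam supplies parse() and getDomain() as the truncated diff suggests):

// Hypothetical concrete parameter built on the refactored IntegerParam.
public class LimitParam extends IntegerParam {
  public static final String NAME = "limit";

  public LimitParam() {
    super(NAME, 100); // default used when the query string omits ?limit=
  }
}

// Typical call site: a null or empty raw value keeps the default.
// Integer limit = new LimitParam().parseParam(request.getParameter(LimitParam.NAME));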

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java Thu Jun 28 06:59:38 2012
@@ -20,8 +20,8 @@ package org.apache.hadoop.lib.wsrs;
 
 public abstract class ShortParam extends Param<Short> {
 
-  public ShortParam(String name, String str) {
-    value = parseParam(name, str);
+  public ShortParam(String name, Short defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Short parse(String str) throws Exception {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java Thu Jun 28 06:59:38 2012
@@ -15,42 +15,38 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.lib.wsrs;
 
-import org.apache.hadoop.lib.util.Check;
-
 import java.text.MessageFormat;
 import java.util.regex.Pattern;
 
 public abstract class StringParam extends Param<String> {
   private Pattern pattern;
 
-  public StringParam(String name, String str) {
-    this(name, str, null);
+  public StringParam(String name, String defaultValue) {
+    this(name, defaultValue, null);
   }
 
-  public StringParam(String name, String str, Pattern pattern) {
+  public StringParam(String name, String defaultValue, Pattern pattern) {
+    super(name, defaultValue);
     this.pattern = pattern;
-    value = parseParam(name, str);
+    parseParam(defaultValue);
   }
 
-  public String parseParam(String name, String str) {
-    String ret = null;
-    Check.notNull(name, "name");
+  public String parseParam(String str) {
     try {
       if (str != null) {
         str = str.trim();
         if (str.length() > 0) {
-          return parse(str);
+          value = parse(str);
         }
       }
     } catch (Exception ex) {
       throw new IllegalArgumentException(
         MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
-                             name, str, getDomain()));
+                             getName(), str, getDomain()));
     }
-    return ret;
+    return value;
   }
 
   protected String parse(String str) throws Exception {
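
One subtlety in the StringParam change: the constructor now feeds the default through parseParam(), so a pattern-constrained parameter rejects an invalid default at construction time rather than at request time. A hedged sketch, assuming StringParam.parse() enforces the pattern as the pattern field suggests (FilenameParam and its regex are invented for illustration):

import java.util.regex.Pattern;

// Hypothetical pattern-checked parameter on the refactored StringParam.
public class FilenameParam extends StringParam {
  public static final String NAME = "filename";
  private static final Pattern FILENAME = Pattern.compile("[A-Za-z0-9._-]+");

  public FilenameParam(String str) {
    super(NAME, "data.txt", FILENAME); // default is validated here, fail-fast
    parseParam(str);                   // request value, if present, replaces it
  }
}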

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java Thu Jun 28 06:59:38 2012
@@ -475,6 +475,7 @@ public class TestHttpFSFileSystem extend
       ops[i] = new Object[]{Operation.values()[i]};
     }
     return Arrays.asList(ops);
+//    return Arrays.asList(new Object[][]{ new Object[]{Operation.CREATE}});
   }
 
   private Operation operation;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java Thu Jun 28 06:59:38 2012
@@ -31,34 +31,34 @@ public class TestCheckUploadContentTypeF
 
   @Test
   public void putUpload() throws Exception {
-    test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "application/octet-stream", true, false);
+    test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "application/octet-stream", true, false);
   }
 
   @Test
   public void postUpload() throws Exception {
-    test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "APPLICATION/OCTET-STREAM", true, false);
+    test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "APPLICATION/OCTET-STREAM", true, false);
   }
 
   @Test
   public void putUploadWrong() throws Exception {
-    test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "plain/text", false, false);
-    test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "plain/text", true, true);
+    test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", false, false);
+    test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", true, true);
   }
 
   @Test
   public void postUploadWrong() throws Exception {
-    test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "plain/text", false, false);
-    test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "plain/text", true, true);
+    test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", false, false);
+    test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", true, true);
   }
 
   @Test
   public void getOther() throws Exception {
-    test("GET", HttpFSFileSystem.GetOpValues.GETHOMEDIRECTORY.toString(), "plain/text", false, false);
+    test("GET", HttpFSFileSystem.Operation.GETHOMEDIRECTORY.toString(), "plain/text", false, false);
   }
 
   @Test
   public void putOther() throws Exception {
-    test("PUT", HttpFSFileSystem.PutOpValues.MKDIRS.toString(), "plain/text", false, false);
+    test("PUT", HttpFSFileSystem.Operation.MKDIRS.toString(), "plain/text", false, false);
   }
 
   private void test(String method, String operation, String contentType,
@@ -68,7 +68,7 @@ public class TestCheckUploadContentTypeF
     Mockito.reset(request);
     Mockito.when(request.getMethod()).thenReturn(method);
     Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).thenReturn(operation);
-    Mockito.when(request.getParameter(HttpFSParams.DataParam.NAME)).
+    Mockito.when(request.getParameter(HttpFSParametersProvider.DataParam.NAME)).
       thenReturn(Boolean.toString(upload));
     Mockito.when(request.getContentType()).thenReturn(contentType);
 

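Read together, the expectations in these tests suggest the rule the filter enforces: only data-carrying CREATE and APPEND requests are checked, and their Content-Type must equal application/octet-stream compared case-insensitively (note "APPLICATION/OCTET-STREAM" passes). A hedged sketch of that rule follows; the names and wiring are illustrative, not the actual CheckUploadContentTypeFilter source:

// Rough reconstruction of the check implied by the test table above.
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class UploadContentTypeCheckSketch {
  private static final Set<String> UPLOAD_OPS =
      new HashSet<String>(Arrays.asList("CREATE", "APPEND"));

  /** Returns true if the request should be rejected. */
  public static boolean rejects(String operation, String contentType,
                                boolean dataUpload) {
    if (!UPLOAD_OPS.contains(operation.toUpperCase()) || !dataUpload) {
      return false;  // only data-carrying CREATE/APPEND requests are checked
    }
    // Case-insensitive comparison, per the "APPLICATION/OCTET-STREAM" test.
    return contentType == null
        || !contentType.equalsIgnoreCase("application/octet-stream");
  }

  public static void main(String[] args) {
    System.out.println(rejects("CREATE", "application/octet-stream", true)); // false
    System.out.println(rejects("APPEND", "plain/text", true));               // true
    System.out.println(rejects("MKDIRS", "plain/text", false));              // false
  }
}
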
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-raid/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Thu Jun 28 06:59:38 2012
@@ -0,0 +1 @@
+target

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jun 28 06:59:38 2012
@@ -13,11 +13,6 @@ Trunk (unreleased changes)
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
 
-    HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)
-
-    HDFS-3042. Automatic failover support for NameNode HA (todd)
-    (see dedicated section below for subtask breakdown)
-
   IMPROVEMENTS
 
     HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
@@ -90,13 +85,19 @@ Trunk (unreleased changes)
     HDFS-3476. Correct the default used in TestDFSClientRetries.busyTest()
     after HDFS-3462 (harsh)
 
-  OPTIMIZATIONS
+    HDFS-3040. TestMulitipleNNDataBlockScanner is misspelled. (Madhukara Phatak
+    via atm)
 
-    HDFS-2834. Add a ByteBuffer-based read API to DFSInputStream.
-    (Henry Robinson via todd)
+    HDFS-3049. During the normal NN startup process, fall back on a different
+    edit log if we see one that is corrupt (Colin Patrick McCabe via todd)
 
-    HDFS-3110. Use directRead API to reduce the number of buffer copies in
-    libhdfs (Henry Robinson via todd)
+    HDFS-3478. Test quotas with Long.Max_Value. (Sujay Rau via eli)
+
+    HDFS-3498. Support replica removal in BlockPlacementPolicy and make
+    BlockPlacementPolicyDefault extensible for reusing code in subclasses.
+    (Junping Du via szetszwo)
+
+  OPTIMIZATIONS
 
   BUG FIXES
 
@@ -145,11 +146,6 @@ Trunk (unreleased changes)
     factor is reduced after sync followed by closing that file. (Ashish Singhi 
     via umamahesh)
 
-    HDFS-3235. MiniDFSClusterManager doesn't correctly support -format option.
-    (Henry Robinson via atm)
-
-    HDFS-3243. TestParallelRead timing out on jenkins. (Henry Robinson via todd)
-
     HDFS-3265. PowerPc Build error. (Kumar Ravi via mattf)
 
     HDFS-2312. FSNamesystem javadoc incorrectly says its for DNs. (harsh)
@@ -162,28 +158,24 @@ Trunk (unreleased changes)
     HDFS-3462. TestDFSClientRetries.busyTest() should restore default
     xceiver count in the config. (Madhukara Phatak via harsh)
 
-  BREAKDOWN OF HDFS-3042 SUBTASKS
+    HDFS-3550. Fix raid javadoc warnings. (Jason Lowe via daryn)
 
-    HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
-    
-    HDFS-3200. Scope all ZKFC configurations by nameservice (todd)
-    
-    HDFS-3223. add zkfc to hadoop-daemon.sh script (todd)
-    
-    HDFS-3261. TestHASafeMode fails on HDFS-3042 branch (todd)
-    
-    HDFS-3159. Document NN auto-failover setup and configuration (todd)
-    
-    HDFS-3412. Fix findbugs warnings in auto-HA branch (todd)
-    
-    HDFS-3432. TestDFSZKFailoverController tries to fail over too early (todd)
+    HDFS-3549. Fix dist tar build fails in hadoop-hdfs-raid project. (Jason Lowe via daryn)
 
-Release 2.0.1-alpha - UNRELEASED
-  
+Branch-2 ( Unreleased changes )
+ 
   INCOMPATIBLE CHANGES
 
   NEW FEATURES
 
+    HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)
+
+    HDFS-3042. Automatic failover support for NameNode HA (todd)
+    (see dedicated section below for breakdown of subtasks)
+
+    HDFS-3518. Add a utility method HdfsUtils.isHealthy(uri) for checking if
+    the given HDFS is healthy. (szetszwo)
+
   IMPROVEMENTS
 
     HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
@@ -238,11 +230,37 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-1013. Miscellaneous improvements to HTML markup for web UIs
     (Eugene Koontz via todd)
 
+    HDFS-3052. Change INodeFile and INodeFileUnderConstruction to package
+    private.  (szetszwo)
+
+    HDFS-3520. Add transfer rate logging to TransferFsImage. (eli)
+
+    HDFS-3504. Support configurable retry policy in DFSClient for RPC
+    connections and RPC calls, and add MultipleLinearRandomRetry, a new retry
+    policy.  (szetszwo)
+
+    HDFS-3372. offlineEditsViewer should be able to read a binary
+    edits file with recovery mode. (Colin Patrick McCabe via eli)
+
+    HDFS-3516. Check content-type in WebHdfsFileSystem.  (szetszwo)
+
+    HDFS-3535. Audit logging should log denied accesses. (Andy Isaacson via eli)
+
+    HDFS-3481. Refactor HttpFS handling of JAX-RS query string parameters (tucu)
+
+    HDFS-3572. Cleanup code which inits SPNEGO in HttpServer (todd)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
     segments. (Colin Patrick McCabe via todd)
 
+    HDFS-2834. Add a ByteBuffer-based read API to DFSInputStream.
+    (Henry Robinson via todd)
+
+    HDFS-3110. Use directRead API to reduce the number of buffer copies in
+    libhdfs (Henry Robinson via todd)
+
   BUG FIXES
 
     HDFS-3385. The last block of INodeFileUnderConstruction is not
@@ -307,6 +325,69 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-3266. DFSTestUtil#waitCorruptReplicas doesn't sleep between checks.
     (Madhukara Phatak via atm)
 
+    HDFS-3505. DirectoryScanner does not join all threads in shutdown.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3485. DataTransferThrottler will over-throttle when currentTimeMillis
+    jumps (Andy Isaacson via todd)
+
+    HDFS-2914. HA: Standby should not enter safemode when resources are low.
+    (Vinay via atm)
+
+    HDFS-3235. MiniDFSClusterManager doesn't correctly support -format option.
+    (Henry Robinson via atm)
+
+    HDFS-3514. Add missing TestParallelLocalRead. (Henry Robinson via atm)
+
+    HDFS-3243. TestParallelRead timing out on jenkins. (Henry Robinson via todd)
+
+    HDFS-3490. DatanodeWebHdfsMethods throws NullPointerException if
+    NamenodeRpcAddressParam is not set.  (szetszwo)
+
+    HDFS-2797. Fix misuses of InputStream#skip in the edit log code.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3517. TestStartup should bind ephemeral ports. (eli)
+
+    HDFS-3522. If a namenode is in safemode, it should throw SafeModeException
+    when getBlockLocations has zero locations.  (Brandon Li via szetszwo)
+    
+    HDFS-3408. BKJM : Namenode format fails, if there is no BK root. (Rakesh R via umamahesh)
+
+    HDFS-3389. Document the BKJM usage in Namenode HA. (umamahesh and Ivan Kelly via umamahesh)
+
+    HDFS-3531. EditLogFileOutputStream#preallocate should check for
+    incomplete writes. (Colin Patrick McCabe via eli)
+
+    HDFS-766. Error message not clear for set space quota out of boundary
+    values. (Jon Zuanich via atm)
+
+    HDFS-3480. Multiple SLF4J binding warning. (Vinay via eli)
+
+    HDFS-3524. Update TestFileLengthOnClusterRestart for HDFS-3522.  (Brandon
+    Li via szetszwo)
+
+  BREAKDOWN OF HDFS-3042 SUBTASKS
+
+    HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
+    
+    HDFS-3200. Scope all ZKFC configurations by nameservice (todd)
+    
+    HDFS-3223. add zkfc to hadoop-daemon.sh script (todd)
+    
+    HDFS-3261. TestHASafeMode fails on HDFS-3042 branch (todd)
+    
+    HDFS-3159. Document NN auto-failover setup and configuration (todd)
+    
+    HDFS-3412. Fix findbugs warnings in auto-HA branch (todd)
+    
+    HDFS-3432. TestDFSZKFailoverController tries to fail over too early (todd)
+
+    HDFS-3428. Move DelegationTokenRenewer to common (tucu)
+
+    HDFS-3551. WebHDFS CREATE should use client location for HTTP redirection.
+    (szetszwo)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/pom.xml Thu Jun 28 06:59:38 2012
@@ -107,21 +107,12 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
       <version>3.4.2</version>
-      <exclusions>
-        <exclusion>
-          <!-- otherwise seems to drag in junit 3.8.1 via jline -->
-          <groupId>junit</groupId>
-          <artifactId>junit</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jdmk</groupId>
-          <artifactId>jmxtools</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jmx</groupId>
-          <artifactId>jmxri</artifactId>
-        </exclusion>
-      </exclusions>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
@@ -438,76 +429,22 @@ http://maven.apache.org/xsd/maven-4.0.0.
             <artifactId>maven-antrun-plugin</artifactId>
             <executions>
               <execution>
-                <id>compile</id>
+                <id>make</id>
                 <phase>compile</phase>
-                <goals>
-                  <goal>run</goal>
-                </goals>
+                <goals><goal>run</goal></goals>
                 <configuration>
                   <target>
-                    <copy toDir="${project.build.directory}/native">
-                      <fileset dir="${basedir}/src/main/native"/>
-                    </copy>
-                    <mkdir dir="${project.build.directory}/native/m4"/>
+                    <mkdir dir="${project.build.directory}/native"/>
+                    <exec executable="cmake" dir="${project.build.directory}/native" 
+                        failonerror="true">
+                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}"/>
+                    </exec>
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="VERBOSE=1"/>
+                    </exec>
                   </target>
                 </configuration>
               </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>make-maven-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>compile</id>
-                <phase>compile</phase>
-                <goals>
-                  <goal>autoreconf</goal>
-                  <goal>configure</goal>
-                  <goal>make-install</goal>
-                </goals>
-                <configuration>
-                  <!-- autoreconf settings -->
-                  <workDir>${project.build.directory}/native</workDir>
-                  <arguments>
-                    <argument>-i</argument>
-                    <argument>-f</argument>
-                  </arguments>
-
-                  <!-- configure settings -->
-                  <configureEnvironment>
-                    <property>
-                      <name>ac_cv_func_malloc_0_nonnull</name>
-                      <value>yes</value>
-                    </property>
-                    <property>
-                      <name>JVM_ARCH</name>
-                      <value>${sun.arch.data.model}</value>
-                    </property>
-                  </configureEnvironment>
-                  <configureOptions>
-                  </configureOptions>
-                  <configureWorkDir>${project.build.directory}/native</configureWorkDir>
-                  <prefix>/usr/local</prefix>
-
-                  <!-- make settings -->
-                  <installEnvironment>
-                    <property>
-                      <name>ac_cv_func_malloc_0_nonnull</name>
-                      <value>yes</value>
-                    </property>
-                    <property>
-                      <name>JVM_ARCH</name>
-                      <value>${sun.arch.data.model}</value>
-                    </property>
-                  </installEnvironment>
-
-                  <!-- configure & make settings -->
-                  <destDir>${project.build.directory}/native/target</destDir>
-
-                </configuration>
-              </execution>
-              
               <!-- TODO wire here native testcases
               <execution>
                 <id>test</id>
@@ -564,7 +501,7 @@ http://maven.apache.org/xsd/maven-4.0.0.
             <artifactId>maven-antrun-plugin</artifactId>
             <executions>
               <execution>
-                <id>compile</id>
+                <id>kdc</id>
                 <phase>compile</phase>
                 <goals>
                   <goal>run</goal>

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java Thu Jun 28 06:59:38 2012
@@ -28,6 +28,7 @@ import org.apache.bookkeeper.conf.Client
 import org.apache.bookkeeper.client.BKException;
 import org.apache.bookkeeper.client.BookKeeper;
 import org.apache.bookkeeper.client.LedgerHandle;
+import org.apache.bookkeeper.util.ZkUtils;
 
 import org.apache.zookeeper.data.Stat;
 import org.apache.zookeeper.ZooKeeper;
@@ -36,6 +37,7 @@ import org.apache.zookeeper.WatchedEvent
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.AsyncCallback.StringCallback;
 
 import java.util.Collection;
 import java.util.Collections;
@@ -124,6 +126,12 @@ public class BookKeeperJournalManager im
 
   private static final String BKJM_EDIT_INPROGRESS = "inprogress_";
 
+  public static final String BKJM_ZK_LEDGERS_AVAILABLE_PATH
+    = "dfs.namenode.bookkeeperjournal.zk.availablebookies";
+
+  public static final String BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT
+    = "/ledgers/available";
+
   private ZooKeeper zkc;
   private final Configuration conf;
   private final BookKeeper bkc;
@@ -196,7 +204,7 @@ public class BookKeeperJournalManager im
         zkc.create(ledgerPath, new byte[] {'0'},
             Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
       }
-
+      prepareBookKeeperEnv();
       bkc = new BookKeeper(new ClientConfiguration(),
                            zkc);
     } catch (KeeperException e) {
@@ -211,6 +219,50 @@ public class BookKeeperJournalManager im
   }
 
   /**
+   * Pre-creating bookkeeper metadata path in zookeeper.
+   */
+  private void prepareBookKeeperEnv() throws IOException {
+    // create bookie available path in zookeeper if it doesn't exist
+    final String zkAvailablePath = conf.get(BKJM_ZK_LEDGERS_AVAILABLE_PATH,
+        BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT);
+    final CountDownLatch zkPathLatch = new CountDownLatch(1);
+
+    StringCallback callback = new StringCallback() {
+      @Override
+      public void processResult(int rc, String path, Object ctx, String name) {
+        if (KeeperException.Code.OK.intValue() == rc
+            || KeeperException.Code.NODEEXISTS.intValue() == rc) {
+          LOG.info("Successfully created bookie available path : "
+              + zkAvailablePath);
+          zkPathLatch.countDown();
+        } else {
+          KeeperException.Code code = KeeperException.Code.get(rc);
+          LOG
+              .error("Error : "
+                  + KeeperException.create(code, path).getMessage()
+                  + ", failed to create bookie available path : "
+                  + zkAvailablePath);
+        }
+      }
+    };
+    ZkUtils.createFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
+        Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null);
+
+    try {
+      if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS)) {
+        throw new IOException("Couldn't create bookie available path :"
+            + zkAvailablePath + ", timed out " + zkc.getSessionTimeout()
+            + " millis");
+      }
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new IOException(
+          "Interrupted when creating the bookie available path : "
+              + zkAvailablePath, e);
+    }
+  }
+
+  /**
    * Start a new log segment in a BookKeeper ledger.
    * First ensure that we have the write lock for this journal.
    * Then create a ledger and stream based on that ledger.

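The new BKJM_ZK_LEDGERS_AVAILABLE_PATH key makes the bookie registration path configurable, and prepareBookKeeperEnv() creates it idempotently (NODEEXISTS counts as success) before the BookKeeper client starts. A hedged usage sketch; only the key name and its default come from the patch, the setup around them is illustrative:

// Pointing BKJM at a non-default bookie registration path before the
// journal manager is constructed.
import org.apache.hadoop.conf.Configuration;

public class BkjmConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Defaults to "/ledgers/available" when unset (see the patch above).
    conf.set("dfs.namenode.bookkeeperjournal.zk.availablebookies",
             "/bookkeeper/ledgers/available");
    System.out.println(conf.get(
        "dfs.namenode.bookkeeperjournal.zk.availablebookies"));
  }
}
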
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_dfs.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_dfs.h?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_dfs.h (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_dfs.h Thu Jun 28 06:59:38 2012
@@ -31,13 +31,9 @@
 #include <fuse.h>
 #include <fuse/fuse_opt.h>
 
-#ifdef HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#ifdef HAVE_SETXATTR
 #include <sys/xattr.h>
-#endif
+
+#include "config.h"
 
 //
 // Check if a path is in the mount option supplied protected paths.

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml Thu Jun 28 06:59:38 2012
@@ -152,7 +152,7 @@
 <tr><td><code>dfs.web.authentication.kerberos.principal</code></td>
 <td>The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
     The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-    HTTP SPENGO specification.
+    HTTP SPNEGO specification.
 </td></tr>
 <tr><td><code>dfs.web.authentication.kerberos.keytab</code></td>
 <td>The Kerberos keytab file with the credentials for the

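For illustration, the two keys documented in this table set programmatically; the principal and keytab values below are placeholders, with _HOST following the usual Hadoop substitution convention:

// Illustrative only: values are placeholders, not defaults from the docs.
import org.apache.hadoop.conf.Configuration;

public class WebHdfsSpnegoConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Must start with "HTTP/" per the Kerberos HTTP SPNEGO specification.
    conf.set("dfs.web.authentication.kerberos.principal",
             "HTTP/_HOST@EXAMPLE.COM");
    conf.set("dfs.web.authentication.kerberos.keytab",
             "/etc/security/keytabs/spnego.service.keytab");
  }
}
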
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1346682-1354801

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu Jun 28 06:59:38 2012
@@ -38,6 +38,10 @@ public class DFSConfigKeys extends Commo
   public static final int     DFS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
   public static final String  DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
   public static final int     DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
+  public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY = "dfs.client.retry.policy.enabled";
+  public static final boolean DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = false; 
+  public static final String  DFS_CLIENT_RETRY_POLICY_SPEC_KEY = "dfs.client.retry.policy.spec";
+  public static final String  DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,... 
   public static final String  DFS_CHECKSUM_TYPE_KEY = "dfs.checksum.type";
   public static final String  DFS_CHECKSUM_TYPE_DEFAULT = "CRC32C";
   public static final String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
@@ -329,10 +333,10 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_DATANODE_USER_NAME_KEY = "dfs.datanode.kerberos.principal";
   public static final String  DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
   public static final String  DFS_NAMENODE_USER_NAME_KEY = "dfs.namenode.kerberos.principal";
-  public static final String  DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.namenode.kerberos.internal.spnego.principal";
+  public static final String  DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY = "dfs.namenode.kerberos.internal.spnego.principal";
   public static final String  DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY = "dfs.secondary.namenode.keytab.file";
   public static final String  DFS_SECONDARY_NAMENODE_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.principal";
-  public static final String  DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.internal.spnego.principal";
+  public static final String  DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.internal.spnego.principal";
   public static final String  DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
   public static final int     DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
   

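The //t1,n1,t2,n2,... comment reads as pairs of (sleep time in milliseconds, number of retries), so the default spec means roughly six retries about 10 seconds apart followed by ten retries about 60 seconds apart. A small worked parse of that format; hedged, since the authoritative parsing lives in the MultipleLinearRandomRetry policy added by HDFS-3504:

// Worked reading of the "t1,n1,t2,n2,..." retry spec format.
public class RetrySpecSketch {
  public static void main(String[] args) {
    String spec = "10000,6,60000,10";  // the default from the patch
    String[] parts = spec.split(",");
    for (int i = 0; i < parts.length; i += 2) {
      long sleepMillis = Long.parseLong(parts[i].trim());
      int retries = Integer.parseInt(parts[i + 1].trim());
      System.out.println(retries + " retries with ~" + sleepMillis
          + " ms between attempts");
    }
    // Prints: 6 retries with ~10000 ms ..., then 10 retries with ~60000 ms ...
  }
}
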
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Thu Jun 28 06:59:38 2012
@@ -96,7 +96,7 @@ public class DistributedFileSystem exten
    */
   @Override
   public String getScheme() {
-    return "hdfs";
+    return HdfsConstants.HDFS_URI_SCHEME;
   }
 
   @Deprecated

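Replacing the string literal with HdfsConstants.HDFS_URI_SCHEME keeps the scheme that drives FileSystem resolution defined in one place. A minimal sketch of that scheme-based lookup; it assumes a reachable HDFS at the given URI, and namenode:8020 is a placeholder:

// Sketch: getScheme() is what ties a FileSystem implementation to the
// scheme used in lookups like this one.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SchemeLookupSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"),
                                   new Configuration());
    System.out.println(fs.getScheme());  // "hdfs"
  }
}
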
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java Thu Jun 28 06:59:38 2012
@@ -37,6 +37,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
@@ -46,7 +47,6 @@ import org.apache.hadoop.fs.MD5MD5CRC32F
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;


