hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From t...@apache.org
Subject svn commit: r1212060 [2/8] - in /hadoop/common/trunk/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/ hadoop-hdfs-httpfs/src/main/ hadoop-hdfs-httpfs/src/main/conf/ hadoop-hdfs-httpfs/src/main/java/ hadoop-hdfs-httpfs/src/main/java/o...
Date Thu, 08 Dec 2011 19:25:33 GMT
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,717 @@
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.GlobFilter;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.lib.service.FileSystemAccess;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * FileSystem operation executors used by {@link HttpFSServer}.
+ */
+public class FSOperations {
+
+  /**
+   * Converts a Unix permission octal & symbolic representation
+   * (i.e. 655 or -rwxr--r--) into a FileSystemAccess permission.
+   *
+   * @param str Unix permission symbolic representation.
+   *
+   * @return the FileSystemAccess permission. If the given string was
+   *         'default', it returns <code>FsPermission.getDefault()</code>.
+   */
+  private static FsPermission getPermission(String str) {
+    FsPermission permission;
+    if (str.equals(HttpFSFileSystem.DEFAULT_PERMISSION)) {
+      permission = FsPermission.getDefault();
+    } else if (str.length() == 3) {
+      permission = new FsPermission(Short.parseShort(str, 8));
+    } else {
+      permission = FsPermission.valueOf(str);
+    }
+    return permission;
+  }
+
  /**
   * Converts a FileSystemAccess <code>FileStatus</code> object into a JSON-friendly
   * Map with one entry per file attribute (path suffix, type, length, owner,
   * group, permission, access/modification times, block size, replication).
   *
   * @param status FileSystemAccess file status to convert.
   * @param emptyPathSuffix if <code>true</code> the path-suffix entry is the empty
   *        string (used when the status describes the requested path itself);
   *        otherwise it is the last component of the status' path.
   *
   * @return a Map (LinkedHashMap, so entry order is stable) with the file
   *         status attributes.
   */
  @SuppressWarnings({"unchecked", "deprecation"})
  private static Map fileStatusToJSONRaw(FileStatus status, boolean emptyPathSuffix) {
    Map json = new LinkedHashMap();
    json.put(HttpFSFileSystem.PATH_SUFFIX_JSON, (emptyPathSuffix) ? "" : status.getPath().getName());
    json.put(HttpFSFileSystem.TYPE_JSON, HttpFSFileSystem.FILE_TYPE.getType(status).toString());
    json.put(HttpFSFileSystem.LENGTH_JSON, status.getLen());
    json.put(HttpFSFileSystem.OWNER_JSON, status.getOwner());
    json.put(HttpFSFileSystem.GROUP_JSON, status.getGroup());
    json.put(HttpFSFileSystem.PERMISSION_JSON, HttpFSFileSystem.permissionToString(status.getPermission()));
    json.put(HttpFSFileSystem.ACCESS_TIME_JSON, status.getAccessTime());
    json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON, status.getModificationTime());
    json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, status.getBlockSize());
    json.put(HttpFSFileSystem.REPLICATION_JSON, status.getReplication());
    return json;
  }
+
+  /**
+   * Converts a FileSystemAccess <code>FileStatus</code> object into a JSON
+   * object.
+   *
+   * @param status FileSystemAccess file status.
+   *
+   * @return The JSON representation of the file status.
+   */
+  @SuppressWarnings({"unchecked", "deprecation"})
+  private static Map fileStatusToJSON(FileStatus status) {
+    Map json = new LinkedHashMap();
+    json.put(HttpFSFileSystem.FILE_STATUS_JSON, fileStatusToJSONRaw(status, true));
+    return json;
+  }
+
  /**
   * Converts a <code>FileChecksum</code> object into a JSON object
   * (a Map wrapping the checksum algorithm, hex-encoded bytes and length
   * under the <code>FileChecksum</code> key).
   *
   * @param checksum file checksum.
   *
   * @return The JSON representation of the file checksum.
   */
  @SuppressWarnings({"unchecked"})
  private static Map fileChecksumToJSON(FileChecksum checksum) {
    Map json = new LinkedHashMap();
    json.put(HttpFSFileSystem.CHECKSUM_ALGORITHM_JSON, checksum.getAlgorithmName());
    // Checksum bytes are serialized as a hex string so they survive JSON transport.
    json.put(HttpFSFileSystem.CHECKSUM_BYTES_JSON,
             org.apache.hadoop.util.StringUtils.byteToHexString(checksum.getBytes()));
    json.put(HttpFSFileSystem.CHECKSUM_LENGTH_JSON, checksum.getLength());
    Map response = new LinkedHashMap();
    response.put(HttpFSFileSystem.FILE_CHECKSUM_JSON, json);
    return response;
  }
+
  /**
   * Converts a <code>ContentSummary</code> object into a JSON object
   * (a Map wrapping the directory/file counts, length, quota and space
   * figures under the <code>ContentSummary</code> key).
   *
   * @param contentSummary the content summary
   *
   * @return The JSON representation of the content summary.
   */
  @SuppressWarnings({"unchecked"})
  private static Map contentSummaryToJSON(ContentSummary contentSummary) {
    Map json = new LinkedHashMap();
    json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON, contentSummary.getDirectoryCount());
    json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON, contentSummary.getFileCount());
    json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON, contentSummary.getLength());
    json.put(HttpFSFileSystem.CONTENT_SUMMARY_QUOTA_JSON, contentSummary.getQuota());
    json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_CONSUMED_JSON, contentSummary.getSpaceConsumed());
    json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_QUOTA_JSON, contentSummary.getSpaceQuota());
    Map response = new LinkedHashMap();
    response.put(HttpFSFileSystem.CONTENT_SUMMARY_JSON, json);
    return response;
  }
+
  /**
   * Converts a FileSystemAccess <code>FileStatus</code> array into a JSON array
   * object, wrapped under the <code>FileStatuses/FileStatus</code> keys.
   *
   * @param status FileSystemAccess file status array; <code>null</code> is
   *        treated as an empty listing.
   *
   * @return The JSON representation of the file status array.
   */
  @SuppressWarnings("unchecked")
  private static Map fileStatusToJSON(FileStatus[] status) {
    JSONArray json = new JSONArray();
    if (status != null) {
      for (FileStatus s : status) {
        json.add(fileStatusToJSONRaw(s, false));
      }
    }
    Map response = new LinkedHashMap();
    Map temp = new LinkedHashMap();
    temp.put(HttpFSFileSystem.FILE_STATUS_JSON, json);
    response.put(HttpFSFileSystem.FILE_STATUSES_JSON, temp);
    return response;
  }
+
+  /**
+   * Converts an object into a Json Map with with one key-value entry.
+   * <p/>
+   * It assumes the given value is either a JSON primitive type or a
+   * <code>JsonAware</code> instance.
+   *
+   * @param name name for the key of the entry.
+   * @param value for the value of the entry.
+   *
+   * @return the JSON representation of the key-value pair.
+   */
+  @SuppressWarnings("unchecked")
+  private static JSONObject toJSON(String name, Object value) {
+    JSONObject json = new JSONObject();
+    json.put(name, value);
+    return json;
+  }
+
+  /**
+   * Executor that performs an append FileSystemAccess files system operation.
+   */
+  public static class FSAppend implements FileSystemAccess.FileSystemExecutor<Void> {
+    private InputStream is;
+    private Path path;
+
+    /**
+     * Creates an Append executor.
+     *
+     * @param is input stream to append.
+     * @param path path of the file to append.
+     */
+    public FSAppend(InputStream is, String path) {
+      this.is = is;
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
+      OutputStream os = fs.append(path, bufferSize);
+      IOUtils.copyBytes(is, os, bufferSize, true);
+      os.close();
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that performs a content-summary FileSystemAccess files system operation.
+   */
+  public static class FSContentSummary implements FileSystemAccess.FileSystemExecutor<Map> {
+    private Path path;
+
+    /**
+     * Creates a content-summary executor.
+     *
+     * @param path the path to retrieve the content-summary.
+     */
+    public FSContentSummary(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return a Map object (JSON friendly) with the content-summary.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      ContentSummary contentSummary = fs.getContentSummary(path);
+      return contentSummaryToJSON(contentSummary);
+    }
+
+  }
+
+  /**
+   * Executor that performs a create FileSystemAccess files system operation.
+   */
+  public static class FSCreate implements FileSystemAccess.FileSystemExecutor<Void> {
+    private InputStream is;
+    private Path path;
+    private String permission;
+    private boolean override;
+    private short replication;
+    private long blockSize;
+
+    /**
+     * Creates a Create executor.
+     *
+     * @param is input stream to for the file to create.
+     * @param path path of the file to create.
+     * @param perm permission for the file.
+     * @param override if the file should be overriden if it already exist.
+     * @param repl the replication factor for the file.
+     * @param blockSize the block size for the file.
+     */
+    public FSCreate(InputStream is, String path, String perm, boolean override, short repl, long blockSize) {
+      this.is = is;
+      this.path = new Path(path);
+      this.permission = perm;
+      this.override = override;
+      this.replication = repl;
+      this.blockSize = blockSize;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return The URI of the created file.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      if (replication == -1) {
+        replication = (short) fs.getConf().getInt("dfs.replication", 3);
+      }
+      if (blockSize == -1) {
+        blockSize = fs.getConf().getInt("dfs.block.size", 67108864);
+      }
+      FsPermission fsPermission = getPermission(permission);
+      int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
+      OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null);
+      IOUtils.copyBytes(is, os, bufferSize, true);
+      os.close();
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that performs a delete FileSystemAccess files system operation.
+   */
+  public static class FSDelete implements FileSystemAccess.FileSystemExecutor<JSONObject> {
+    private Path path;
+    private boolean recursive;
+
+    /**
+     * Creates a Delete executor.
+     *
+     * @param path path to delete.
+     * @param recursive if the delete should be recursive or not.
+     */
+    public FSDelete(String path, boolean recursive) {
+      this.path = new Path(path);
+      this.recursive = recursive;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return <code>true</code> if the delete operation was successful,
+     *         <code>false</code> otherwise.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public JSONObject execute(FileSystem fs) throws IOException {
+      boolean deleted = fs.delete(path, recursive);
+      return toJSON(HttpFSFileSystem.DELETE_JSON.toLowerCase(), deleted);
+    }
+
+  }
+
+  /**
+   * Executor that performs a file-checksum FileSystemAccess files system operation.
+   */
+  public static class FSFileChecksum implements FileSystemAccess.FileSystemExecutor<Map> {
+    private Path path;
+
+    /**
+     * Creates a file-checksum executor.
+     *
+     * @param path the path to retrieve the checksum.
+     */
+    public FSFileChecksum(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return a Map object (JSON friendly) with the file checksum.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      FileChecksum checksum = fs.getFileChecksum(path);
+      return fileChecksumToJSON(checksum);
+    }
+
+  }
+
+  /**
+   * Executor that performs a file-status FileSystemAccess files system operation.
+   */
+  public static class FSFileStatus implements FileSystemAccess.FileSystemExecutor<Map> {
+    private Path path;
+
+    /**
+     * Creates a file-status executor.
+     *
+     * @param path the path to retrieve the status.
+     */
+    public FSFileStatus(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return a Map object (JSON friendly) with the file status.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      FileStatus status = fs.getFileStatus(path);
+      return fileStatusToJSON(status);
+    }
+
+  }
+
+  /**
+   * Executor that performs a home-dir FileSystemAccess files system operation.
+   */
+  public static class FSHomeDir implements FileSystemAccess.FileSystemExecutor<JSONObject> {
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return a JSON object with the user home directory.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    @SuppressWarnings("unchecked")
+    public JSONObject execute(FileSystem fs) throws IOException {
+      Path homeDir = fs.getHomeDirectory();
+      JSONObject json = new JSONObject();
+      json.put(HttpFSFileSystem.HOME_DIR_JSON, homeDir.toUri().getPath());
+      return json;
+    }
+
+  }
+
+  /**
+   * Executor that performs a list-status FileSystemAccess files system operation.
+   */
+  public static class FSListStatus implements FileSystemAccess.FileSystemExecutor<Map>, PathFilter {
+    private Path path;
+    private PathFilter filter;
+
+    /**
+     * Creates a list-status executor.
+     *
+     * @param path the directory to retrieve the status of its contents.
+     * @param filter glob filter to use.
+     *
+     * @throws IOException thrown if the filter expression is incorrect.
+     */
+    public FSListStatus(String path, String filter) throws IOException {
+      this.path = new Path(path);
+      this.filter = (filter == null) ? this : new GlobFilter(filter);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return a Map with the file status of the directory
+     *         contents.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      FileStatus[] status = fs.listStatus(path, filter);
+      return fileStatusToJSON(status);
+    }
+
+    @Override
+    public boolean accept(Path path) {
+      return true;
+    }
+
+  }
+
+  /**
+   * Executor that performs a mkdirs FileSystemAccess files system operation.
+   */
+  public static class FSMkdirs implements FileSystemAccess.FileSystemExecutor<JSONObject> {
+
+    private Path path;
+    private String permission;
+
+    /**
+     * Creates a mkdirs executor.
+     *
+     * @param path directory path to create.
+     * @param permission permission to use.
+     */
+    public FSMkdirs(String path, String permission) {
+      this.path = new Path(path);
+      this.permission = permission;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return <code>true</code> if the mkdirs operation was successful,
+     *         <code>false</code> otherwise.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public JSONObject execute(FileSystem fs) throws IOException {
+      FsPermission fsPermission = getPermission(permission);
+      boolean mkdirs = fs.mkdirs(path, fsPermission);
+      return toJSON(HttpFSFileSystem.MKDIRS_JSON, mkdirs);
+    }
+
+  }
+
+  /**
+   * Executor that performs a open FileSystemAccess files system operation.
+   */
+  public static class FSOpen implements FileSystemAccess.FileSystemExecutor<InputStream> {
+    private Path path;
+
+    /**
+     * Creates a open executor.
+     *
+     * @param path file to open.
+     */
+    public FSOpen(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return The inputstream of the file.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public InputStream execute(FileSystem fs) throws IOException {
+      int bufferSize = HttpFSServerWebApp.get().getConfig().getInt("httpfs.buffer.size", 4096);
+      return fs.open(path, bufferSize);
+    }
+
+  }
+
+  /**
+   * Executor that performs a rename FileSystemAccess files system operation.
+   */
+  public static class FSRename implements FileSystemAccess.FileSystemExecutor<JSONObject> {
+    private Path path;
+    private Path toPath;
+
+    /**
+     * Creates a rename executor.
+     *
+     * @param path path to rename.
+     * @param toPath new name.
+     */
+    public FSRename(String path, String toPath) {
+      this.path = new Path(path);
+      this.toPath = new Path(toPath);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return <code>true</code> if the rename operation was successful,
+     *         <code>false</code> otherwise.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public JSONObject execute(FileSystem fs) throws IOException {
+      boolean renamed = fs.rename(path, toPath);
+      return toJSON(HttpFSFileSystem.RENAME_JSON, renamed);
+    }
+
+  }
+
+  /**
+   * Executor that performs a set-owner FileSystemAccess files system operation.
+   */
+  public static class FSSetOwner implements FileSystemAccess.FileSystemExecutor<Void> {
+    private Path path;
+    private String owner;
+    private String group;
+
+    /**
+     * Creates a set-owner executor.
+     *
+     * @param path the path to set the owner.
+     * @param owner owner to set.
+     * @param group group to set.
+     */
+    public FSSetOwner(String path, String owner, String group) {
+      this.path = new Path(path);
+      this.owner = owner;
+      this.group = group;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.setOwner(path, owner, group);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that performs a set-permission FileSystemAccess files system operation.
+   */
+  public static class FSSetPermission implements FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+    private String permission;
+
+    /**
+     * Creates a set-permission executor.
+     *
+     * @param path path to set the permission.
+     * @param permission permission to set.
+     */
+    public FSSetPermission(String path, String permission) {
+      this.path = new Path(path);
+      this.permission = permission;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      FsPermission fsPermission = getPermission(permission);
+      fs.setPermission(path, fsPermission);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that performs a set-replication FileSystemAccess files system operation.
+   */
+  public static class FSSetReplication implements FileSystemAccess.FileSystemExecutor<JSONObject> {
+    private Path path;
+    private short replication;
+
+    /**
+     * Creates a set-replication executor.
+     *
+     * @param path path to set the replication factor.
+     * @param replication replication factor to set.
+     */
+    public FSSetReplication(String path, short replication) {
+      this.path = new Path(path);
+      this.replication = replication;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return <code>true</code> if the replication value was set,
+     *         <code>false</code> otherwise.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    @SuppressWarnings("unchecked")
+    public JSONObject execute(FileSystem fs) throws IOException {
+      boolean ret = fs.setReplication(path, replication);
+      JSONObject json = new JSONObject();
+      json.put(HttpFSFileSystem.SET_REPLICATION_JSON, ret);
+      return json;
+    }
+
+  }
+
+  /**
+   * Executor that performs a set-times FileSystemAccess files system operation.
+   */
+  public static class FSSetTimes implements FileSystemAccess.FileSystemExecutor<Void> {
+    private Path path;
+    private long mTime;
+    private long aTime;
+
+    /**
+     * Creates a set-times executor.
+     *
+     * @param path path to set the times.
+     * @param mTime modified time to set.
+     * @param aTime access time to set.
+     */
+    public FSSetTimes(String path, long mTime, long aTime) {
+      this.path = new Path(path);
+      this.mTime = mTime;
+      this.aTime = aTime;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occured.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.setTimes(path, mTime, aTime);
+      return null;
+    }
+
+  }
+
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.lib.service.FileSystemAccessException;
+import org.apache.hadoop.lib.wsrs.ExceptionProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+
+import javax.ws.rs.core.Response;
+import javax.ws.rs.ext.Provider;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+/**
+ * JAX-RS <code>ExceptionMapper</code> implementation that maps HttpFSServer's
+ * exceptions to HTTP status codes.
+ */
+@Provider
+public class HttpFSExceptionProvider extends ExceptionProvider {
+  private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
+  private static Logger LOG = LoggerFactory.getLogger(HttpFSExceptionProvider.class);
+
+  /**
+   * Maps different exceptions thrown by HttpFSServer to HTTP status codes.
+   * <p/>
+   * <ul>
+   * <li>SecurityException : HTTP UNAUTHORIZED</li>
+   * <li>FileNotFoundException : HTTP NOT_FOUND</li>
+   * <li>IOException : INTERNAL_HTTP SERVER_ERROR</li>
+   * <li>UnsupporteOperationException : HTTP BAD_REQUEST</li>
+   * <li>all other exceptions : HTTP INTERNAL_SERVER_ERROR </li>
+   * </ul>
+   *
+   * @param throwable exception thrown.
+   *
+   * @return mapped HTTP status code
+   */
+  @Override
+  public Response toResponse(Throwable throwable) {
+    Response.Status status;
+    if (throwable instanceof FileSystemAccessException) {
+      throwable = throwable.getCause();
+    }
+    if (throwable instanceof SecurityException) {
+      status = Response.Status.UNAUTHORIZED;
+    } else if (throwable instanceof FileNotFoundException) {
+      status = Response.Status.NOT_FOUND;
+    } else if (throwable instanceof IOException) {
+      status = Response.Status.INTERNAL_SERVER_ERROR;
+    } else if (throwable instanceof UnsupportedOperationException) {
+      status = Response.Status.BAD_REQUEST;
+    } else {
+      status = Response.Status.INTERNAL_SERVER_ERROR;
+    }
+    return createResponse(status, throwable);
+  }
+
+  /**
+   * Logs the HTTP status code and exception in HttpFSServer's log.
+   *
+   * @param status HTTP status code.
+   * @param throwable exception thrown.
+   */
+  @Override
+  protected void log(Response.Status status, Throwable throwable) {
+    String method = MDC.get("method");
+    String path = MDC.get("path");
+    String message = getOneLineMessage(throwable);
+    AUDIT_LOG.warn("FAILED [{}:{}] response [{}] {}", new Object[]{method, path, status, message});
+    LOG.warn("[{}:{}] response [{}] {}", new Object[]{method, path, status, message, throwable});
+  }
+
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,536 @@
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.lib.wsrs.BooleanParam;
+import org.apache.hadoop.lib.wsrs.EnumParam;
+import org.apache.hadoop.lib.wsrs.LongParam;
+import org.apache.hadoop.lib.wsrs.ShortParam;
+import org.apache.hadoop.lib.wsrs.StringParam;
+import org.apache.hadoop.lib.wsrs.UserProvider;
+import org.slf4j.MDC;
+
+import java.util.regex.Pattern;
+
+/**
+ * HttpFS HTTP Parameters used by {@link HttpFSServer}.
+ */
+public class HttpFSParams {
+
+  /**
+   * To avoid instantiation.
+   */
+  private HttpFSParams() {
+  }
+
+  /**
+   * Class for access-time parameter.
+   */
+  public static class AccessTimeParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.ACCESS_TIME_PARAM;
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "-1";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public AccessTimeParam(String str) {
+      super(NAME, str);
+    }
+  }
+
+  /**
+   * Class for block-size parameter.
+   */
+  public static class BlockSizeParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.BLOCKSIZE_PARAM;
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "-1";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public BlockSizeParam(String str) {
+      super(NAME, str);
+    }
+  }
+
+  /**
+   * Class for data parameter.
+   */
+  public static class DataParam extends BooleanParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "data";
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "false";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public DataParam(String str) {
+      super(NAME, str);
+    }
+  }
+
+  /**
+   * Class for DELETE operation parameter.
+   */
+  public static class DeleteOpParam extends EnumParam<HttpFSFileSystem.DeleteOpValues> {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OP_PARAM;
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public DeleteOpParam(String str) {
+      super(NAME, str, HttpFSFileSystem.DeleteOpValues.class);
+    }
+  }
+
+  /**
+   * Class for delete's recursive parameter.
+   */
+  public static class DeleteRecursiveParam extends BooleanParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.RECURSIVE_PARAM;
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "false";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public DeleteRecursiveParam(String str) {
+      super(NAME, str);
+    }
+  }
+
+  /**
+   * Class for do-as parameter.
+   */
+  public static class DoAsParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.DO_AS_PARAM;
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public DoAsParam(String str) {
+      super(NAME, str, UserProvider.USER_PATTERN);
+    }
+
+    /**
+     * Delegates to parent and then adds do-as user to
+     * MDC context for logging purposes.
+     *
+     * @param name parameter name.
+     * @param str parameter value.
+     *
+     * @return parsed parameter
+     */
+    @Override
+    public String parseParam(String name, String str) {
+      String doAs = super.parseParam(name, str);
+      MDC.put(NAME, (doAs != null) ? doAs : "-");
+      return doAs;
+    }
+  }
+
+  /**
+   * Class for filter parameter.
+   */
+  public static class FilterParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "filter";
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "";
+
+    /**
+     * Constructor.
+     *
+     * @param expr parameter value.
+     */
+    public FilterParam(String expr) {
+      super(NAME, expr);
+    }
+
+  }
+
+  /**
+   * Class for path parameter.
+   */
+  public static class FsPathParam extends StringParam {
+
+    /**
+     * Constructor.
+     *
+     * @param path parameter value.
+     */
+    public FsPathParam(String path) {
+      super("path", path);
+    }
+
+    /**
+     * Makes the path absolute adding '/' to it.
+     * <p/>
+     * This is required because JAX-RS resolution of paths does not add
+     * the root '/'.
+     *
+     * @returns absolute path.
+     */
+    public void makeAbsolute() {
+      String path = value();
+      path = "/" + ((path != null) ? path : "");
+      setValue(path);
+    }
+
+  }
+
+  /**
+   * Class for GET operation parameter.
+   */
+  public static class GetOpParam extends EnumParam<HttpFSFileSystem.GetOpValues> {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OP_PARAM;
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public GetOpParam(String str) {
+      super(NAME, str, HttpFSFileSystem.GetOpValues.class);
+    }
+  }
+
+  /**
+   * Class for group parameter.
+   */
+  public static class GroupParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.GROUP_PARAM;
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public GroupParam(String str) {
+      super(NAME, str, UserProvider.USER_PATTERN);
+    }
+
+  }
+
+  /**
+   * Class for len parameter.
+   */
+  public static class LenParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "len";
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "-1";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public LenParam(String str) {
+      super(NAME, str);
+    }
+  }
+
+  /**
+   * Class for modified-time parameter.
+   */
+  public static class ModifiedTimeParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.MODIFICATION_TIME_PARAM;
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "-1";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public ModifiedTimeParam(String str) {
+      super(NAME, str);
+    }
+  }
+
+  /**
+   * Class for offset parameter.
+   */
+  public static class OffsetParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "offset";
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "0";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public OffsetParam(String str) {
+      super(NAME, str);
+    }
+  }
+
+  /**
+   * Class for overwrite parameter.
+   */
+  public static class OverwriteParam extends BooleanParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OVERWRITE_PARAM;
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "true";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public OverwriteParam(String str) {
+      super(NAME, str);
+    }
+  }
+
+  /**
+   * Class for owner parameter.
+   */
+  public static class OwnerParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OWNER_PARAM;
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public OwnerParam(String str) {
+      super(NAME, str, UserProvider.USER_PATTERN);
+    }
+
+  }
+
+  /**
+   * Class for permission parameter.
+   */
+  public static class PermissionParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.PERMISSION_PARAM;
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = HttpFSFileSystem.DEFAULT_PERMISSION;
+
+
+    /**
+     * Symbolic Unix permissions regular expression pattern.
+     */
+    private static final Pattern PERMISSION_PATTERN =
+      Pattern.compile(DEFAULT + "|(-[-r][-w][-x][-r][-w][-x][-r][-w][-x])" + "|[0-7][0-7][0-7]");
+
+    /**
+     * Constructor.
+     *
+     * @param permission parameter value.
+     */
+    public PermissionParam(String permission) {
+      super(NAME, permission.toLowerCase(), PERMISSION_PATTERN);
+    }
+
+  }
+
+  /**
+   * Class for POST operation parameter.
+   */
+  public static class PostOpParam extends EnumParam<HttpFSFileSystem.PostOpValues> {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OP_PARAM;
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public PostOpParam(String str) {
+      super(NAME, str, HttpFSFileSystem.PostOpValues.class);
+    }
+  }
+
+  /**
+   * Class for PUT operation parameter.
+   */
+  public static class PutOpParam extends EnumParam<HttpFSFileSystem.PutOpValues> {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OP_PARAM;
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public PutOpParam(String str) {
+      super(NAME, str, HttpFSFileSystem.PutOpValues.class);
+    }
+  }
+
+  /**
+   * Class for replication parameter.
+   */
+  public static class ReplicationParam extends ShortParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.REPLICATION_PARAM;
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "-1";
+
+    /**
+     * Constructor.
+     *
+     * @param str parameter value.
+     */
+    public ReplicationParam(String str) {
+      super(NAME, str);
+    }
+  }
+
+  /**
+   * Class for to-path parameter.
+   */
+  public static class ToPathParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.DESTINATION_PARAM;
+
+    /**
+     * Default parameter value.
+     */
+    public static final String DEFAULT = "";
+
+    /**
+     * Constructor.
+     *
+     * @param path parameter value.
+     */
+    public ToPathParam(String path) {
+      super(NAME, path);
+    }
+  }
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSReleaseFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSReleaseFilter.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSReleaseFilter.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSReleaseFilter.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.lib.service.FileSystemAccess;
+import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
+
+/**
+ * Filter that releases FileSystemAccess filesystem instances upon HTTP request
+ * completion.
+ */
+public class HttpFSReleaseFilter extends FileSystemReleaseFilter {
+
+  /**
+   * Returns the {@link FileSystemAccess} service to return the FileSystemAccess filesystem
+   * instance to.
+   *
+   * @return the FileSystemAccess service.
+   */
+  @Override
+  protected FileSystemAccess getFileSystemAccess() {
+    return HttpFSServerWebApp.get().get(FileSystemAccess.class);
+  }
+
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,604 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.server.HttpFSParams.AccessTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.BlockSizeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.DataParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteOpParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteRecursiveParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.DoAsParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.FilterParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.FsPathParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.GetOpParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.GroupParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.LenParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.ModifiedTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.OffsetParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.OverwriteParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.OwnerParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.PermissionParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.PostOpParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.PutOpParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.ReplicationParam;
+import org.apache.hadoop.fs.http.server.HttpFSParams.ToPathParam;
+import org.apache.hadoop.lib.service.FileSystemAccess;
+import org.apache.hadoop.lib.service.FileSystemAccessException;
+import org.apache.hadoop.lib.service.Groups;
+import org.apache.hadoop.lib.service.Instrumentation;
+import org.apache.hadoop.lib.service.ProxyUser;
+import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
+import org.apache.hadoop.lib.servlet.HostnameFilter;
+import org.apache.hadoop.lib.wsrs.InputStreamEntity;
+import org.json.simple.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriBuilder;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.security.AccessControlException;
+import java.security.Principal;
+import java.text.MessageFormat;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Main class of HttpFSServer server.
+ * <p/>
+ * The <code>HttpFSServer</code> class uses Jersey JAX-RS to binds HTTP requests to the
+ * different operations.
+ */
+@Path(HttpFSFileSystem.SERVICE_VERSION)
+public class HttpFSServer {
+  private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
+
  /**
   * Special binding for '/' as it is not handled by the wildcard binding.
   * <p/>
   * Delegates to {@link #get} with an empty path (made absolute there) and
   * default values for all parameters that do not apply to root requests.
   *
   * @param user principal making the request.
   * @param op GET operation, default value is {@link HttpFSFileSystem.GetOpValues#OPEN}.
   * @param filter Glob filter, default value is none. Used only if the
   * operation is {@link HttpFSFileSystem.GetOpValues#LISTSTATUS}
   * @param doAs user being impersonated, default value is none. It can be used
   * only if the current user is a HttpFSServer proxyuser.
   *
   * @return the request response
   *
   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
   * handled by {@link HttpFSExceptionProvider}.
   * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
   * exceptions are handled by {@link HttpFSExceptionProvider}.
   */
  @GET
  @Path("/")
  @Produces(MediaType.APPLICATION_JSON)
  public Response root(@Context Principal user,
                       @QueryParam(GetOpParam.NAME) GetOpParam op,
                       @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
                       @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
    throws IOException, FileSystemAccessException {
    return get(user, new FsPathParam(""), op, new OffsetParam(OffsetParam.DEFAULT),
               new LenParam(LenParam.DEFAULT), filter, doAs,
               new OverwriteParam(OverwriteParam.DEFAULT),
               new BlockSizeParam(BlockSizeParam.DEFAULT),
               new PermissionParam(PermissionParam.DEFAULT),
               new ReplicationParam(ReplicationParam.DEFAULT));
  }
+
+  /**
+   * Resolves the effective user that will be used to request a FileSystemAccess filesystem.
+   * <p/>
+   * If the doAs-user is NULL or the same as the user, it returns the user.
+   * <p/>
+   * Otherwise it uses proxyuser rules (see {@link ProxyUser} to determine if the
+   * current user can impersonate the doAs-user.
+   * <p/>
+   * If the current user cannot impersonate the doAs-user an
+   * <code>AccessControlException</code> will be thrown.
+   *
+   * @param user principal for whom the filesystem instance is.
+   * @param doAs do-as user, if any.
+   *
+   * @return the effective user.
+   *
+   * @throws IOException thrown if an IO error occurrs.
+   * @throws AccessControlException thrown if the current user cannot impersonate
+   * the doAs-user.
+   */
+  private String getEffectiveUser(Principal user, String doAs) throws IOException {
+    String effectiveUser = user.getName();
+    if (doAs != null && !doAs.equals(user.getName())) {
+      ProxyUser proxyUser = HttpFSServerWebApp.get().get(ProxyUser.class);
+      proxyUser.validate(user.getName(), HostnameFilter.get(), doAs);
+      effectiveUser = doAs;
+      AUDIT_LOG.info("Proxy user [{}] DoAs user [{}]", user.getName(), doAs);
+    }
+    return effectiveUser;
+  }
+
+  /**
+   * Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem for the effective
+   * user.
+   *
+   * @param user principal making the request.
+   * @param doAs do-as user, if any.
+   * @param executor FileSystemExecutor to execute.
+   *
+   * @return FileSystemExecutor response
+   *
+   * @throws IOException thrown if an IO error occurrs.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
+   * exceptions are handled by {@link HttpFSExceptionProvider}.
+   */
+  private <T> T fsExecute(Principal user, String doAs, FileSystemAccess.FileSystemExecutor<T> executor)
+    throws IOException, FileSystemAccessException {
+    String hadoopUser = getEffectiveUser(user, doAs);
+    FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
+    Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getDefaultConfiguration();
+    return fsAccess.execute(hadoopUser, conf, executor);
+  }
+
+  /**
+   * Returns a filesystem instance. The fileystem instance is wired for release at the completion of
+   * the current Servlet request via the {@link FileSystemReleaseFilter}.
+   * <p/>
+   * If a do-as user is specified, the current user must be a valid proxyuser, otherwise an
+   * <code>AccessControlException</code> will be thrown.
+   *
+   * @param user principal for whom the filesystem instance is.
+   * @param doAs do-as user, if any.
+   *
+   * @return a filesystem for the specified user or do-as user.
+   *
+   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
+   * handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
+   * exceptions are handled by {@link HttpFSExceptionProvider}.
+   */
+  private FileSystem createFileSystem(Principal user, String doAs) throws IOException, FileSystemAccessException {
+    String hadoopUser = getEffectiveUser(user, doAs);
+    FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
+    Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getDefaultConfiguration();
+    FileSystem fs = fsAccess.createFileSystem(hadoopUser, conf);
+    FileSystemReleaseFilter.setFileSystem(fs);
+    return fs;
+  }
+
  /**
   * Binding to handle all GET requests, supported operations are
   * {@link HttpFSFileSystem.GetOpValues}.
   * <p/>
   * The {@link HttpFSFileSystem.GetOpValues#INSTRUMENTATION} operation is available only
   * to users that are in HttpFSServer's admin group (see {@link HttpFSServer}. It returns
   * HttpFSServer instrumentation data. The specified path must be '/'.
   *
   * @param user principal making the request.
   * @param path path for the GET request.
   * @param op GET operation, default value is {@link HttpFSFileSystem.GetOpValues#OPEN}.
   * @param offset of the  file being fetch, used only with
   * {@link HttpFSFileSystem.GetOpValues#OPEN} operations.
   * @param len amounts of bytes, used only with {@link HttpFSFileSystem.GetOpValues#OPEN}
   * operations.
   * @param filter Glob filter, default value is none. Used only if the
   * operation is {@link HttpFSFileSystem.GetOpValues#LISTSTATUS}
   * @param doAs user being impersonated, default value is none. It can be used
   * only if the current user is a HttpFSServer proxyuser.
   * @param override default is true. Used only for
   * {@link HttpFSFileSystem.PutOpValues#CREATE} operations.
   * @param blockSize block size to set, used only by
   * {@link HttpFSFileSystem.PutOpValues#CREATE} operations.
   * @param permission permission to set, used only by
   * {@link HttpFSFileSystem.PutOpValues#SETPERMISSION}.
   * @param replication replication factor to set, used only by
   * {@link HttpFSFileSystem.PutOpValues#SETREPLICATION}.
   *
   * @return the request response.
   *
   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
   * handled by {@link HttpFSExceptionProvider}.
   * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
   * exceptions are handled by {@link HttpFSExceptionProvider}.
   */
  @GET
  @Path("{path:.*}")
  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
  public Response get(@Context Principal user,
                      @PathParam("path") @DefaultValue("") FsPathParam path,
                      @QueryParam(GetOpParam.NAME) GetOpParam op,
                      @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT) OffsetParam offset,
                      @QueryParam(LenParam.NAME) @DefaultValue(LenParam.DEFAULT) LenParam len,
                      @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
                      @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs,

                      //these params are only for createHandle operation acceptance purposes
                      @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
                      @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
                      @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
                      PermissionParam permission,
                      @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
                      ReplicationParam replication
  )
    throws IOException, FileSystemAccessException {
    Response response = null;
    if (op == null) {
      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", GetOpParam.NAME));
    } else {
      path.makeAbsolute();
      MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
      switch (op.value()) {
        case OPEN: {
          //Invoking the command directly using an unmanaged FileSystem that is released by the
          //FileSystemReleaseFilter
          FSOperations.FSOpen command = new FSOperations.FSOpen(path.value());
          FileSystem fs = createFileSystem(user, doAs.value());
          InputStream is = command.execute(fs);
          AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[]{path, offset, len});
          // The entity applies offset/len while streaming the file content.
          InputStreamEntity entity = new InputStreamEntity(is, offset.value(), len.value());
          response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
          break;
        }
        case GETFILESTATUS: {
          FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path.value());
          Map json = fsExecute(user, doAs.value(), command);
          AUDIT_LOG.info("[{}]", path);
          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
          break;
        }
        case LISTSTATUS: {
          FSOperations.FSListStatus command = new FSOperations.FSListStatus(path.value(), filter.value());
          Map json = fsExecute(user, doAs.value(), command);
          // Audit with or without the glob filter, depending on whether one was given.
          if (filter.value() == null) {
            AUDIT_LOG.info("[{}]", path);
          } else {
            AUDIT_LOG.info("[{}] filter [{}]", path, filter.value());
          }
          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
          break;
        }
        case GETHOMEDIR: {
          FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
          JSONObject json = fsExecute(user, doAs.value(), command);
          AUDIT_LOG.info("");
          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
          break;
        }
        case INSTRUMENTATION: {
          // Instrumentation is only served at '/' and only to admin-group users.
          if (!path.value().equals("/")) {
            throw new UnsupportedOperationException(
              MessageFormat.format("Invalid path for {0}={1}, must be '/'",
                                   GetOpParam.NAME, HttpFSFileSystem.GetOpValues.INSTRUMENTATION));
          }
          Groups groups = HttpFSServerWebApp.get().get(Groups.class);
          List<String> userGroups = groups.getGroups(user.getName());
          if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
            throw new AccessControlException("User not in HttpFSServer admin group");
          }
          Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class);
          Map snapshot = instrumentation.getSnapshot();
          response = Response.ok(snapshot).build();
          break;
        }
        case GETCONTENTSUMMARY: {
          FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path.value());
          Map json = fsExecute(user, doAs.value(), command);
          AUDIT_LOG.info("[{}]", path);
          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
          break;
        }
        case GETFILECHECKSUM: {
          FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path.value());
          Map json = fsExecute(user, doAs.value(), command);
          AUDIT_LOG.info("[{}]", path);
          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
          break;
        }
        // The following operations are not supported by this server; they
        // answer 400 Bad Request.
        case GETDELEGATIONTOKEN: {
          response = Response.status(Response.Status.BAD_REQUEST).build();
          break;
        }
        case GETFILEBLOCKLOCATIONS: {
          response = Response.status(Response.Status.BAD_REQUEST).build();
          break;
        }
      }
      return response;
    }
  }
+
+  /**
+   * Creates the URL for an upload operation (create or append).
+   *
+   * @param uriInfo uri info of the request.
+   * @param uploadOperation operation for the upload URL.
+   *
+   * @return the URI for uploading data.
+   */
+  protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum<?> uploadOperation) {
+    UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
+    uriBuilder = uriBuilder.replaceQueryParam(PutOpParam.NAME, uploadOperation).
+      queryParam(DataParam.NAME, Boolean.TRUE);
+    return uriBuilder.build(null);
+  }
+
  /**
   * Binding to handle all DELETE requests.
   *
   * @param user principal making the request.
   * @param path path for the DELETE request.
   * @param op DELETE operation, default value is {@link HttpFSFileSystem.DeleteOpValues#DELETE}.
   * @param recursive indicates if the delete is recursive, default is <code>false</code>
   * @param doAs user being impersonated, default value is none. It can be used
   * only if the current user is a HttpFSServer proxyuser.
   *
   * @return the request response.
   *
   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
   * handled by {@link HttpFSExceptionProvider}.
   * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
   * exceptions are handled by {@link HttpFSExceptionProvider}.
   */
  @DELETE
  @Path("{path:.*}")
  @Produces(MediaType.APPLICATION_JSON)
  public Response delete(@Context Principal user,
                         @PathParam("path") FsPathParam path,
                         @QueryParam(DeleteOpParam.NAME) DeleteOpParam op,
                         @QueryParam(DeleteRecursiveParam.NAME) @DefaultValue(DeleteRecursiveParam.DEFAULT)
                         DeleteRecursiveParam recursive,
                         @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
    throws IOException, FileSystemAccessException {
    Response response = null;
    if (op == null) {
      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", DeleteOpParam.NAME));
    }
    // DELETE currently supports a single operation; NOTE(review): any other
    // enum value would fall through and return a null response here.
    switch (op.value()) {
      case DELETE: {
        path.makeAbsolute();
        MDC.put(HttpFSFileSystem.OP_PARAM, "DELETE");
        AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
        FSOperations.FSDelete command = new FSOperations.FSDelete(path.value(), recursive.value());
        JSONObject json = fsExecute(user, doAs.value(), command);
        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
        break;
      }
    }
    return response;
  }
+
+
+  /**
+   * Binding to handle all PUT requests, supported operations are
+   * {@link HttpFSFileSystem.PutOpValues}.
+   *
+   * @param is request input stream, used only for
+   * {@link HttpFSFileSystem.PostOpValues#APPEND} operations.
+   * @param user principal making the request.
+   * @param uriInfo the request uriInfo.
+   * @param path path for the PUT request.
+   * @param op PUT operation, no default value.
+   * @param toPath new path, used only for
+   * {@link HttpFSFileSystem.PutOpValues#RENAME} operations.
+   * {@link HttpFSFileSystem.PutOpValues#SETTIMES}.
+   * @param owner owner to set, used only for
+   * {@link HttpFSFileSystem.PutOpValues#SETOWNER} operations.
+   * @param group group to set, used only for
+   * {@link HttpFSFileSystem.PutOpValues#SETOWNER} operations.
+   * @param override, default is true. Used only for
+   * {@link HttpFSFileSystem.PutOpValues#CREATE} operations.
+   * @param blockSize block size to set, used only by
+   * {@link HttpFSFileSystem.PutOpValues#CREATE} operations.
+   * @param permission permission to set, used only by
+   * {@link HttpFSFileSystem.PutOpValues#SETPERMISSION}.
+   * @param replication replication factor to set, used only by
+   * {@link HttpFSFileSystem.PutOpValues#SETREPLICATION}.
+   * @param modifiedTime modified time, in seconds since EPOC, used only by
+   * {@link HttpFSFileSystem.PutOpValues#SETTIMES}.
+   * @param accessTime accessed time, in seconds since EPOC, used only by
+   * {@link HttpFSFileSystem.PutOpValues#SETTIMES}.
+   * @param hasData indicates if the append request is uploading data or not
+   * (just getting the handle).
+   * @param doAs user being impersonated, defualt value is none. It can be used
+   * only if the current user is a HttpFSServer proxyuser.
+   *
+   * @return the request response.
+   *
+   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
+   * handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
+   * exceptions are handled by {@link HttpFSExceptionProvider}.
+   */
+  @PUT
+  @Path("{path:.*}")
+  @Consumes({"*/*"})
+  @Produces({MediaType.APPLICATION_JSON})
+  public Response put(InputStream is,
+                      @Context Principal user,
+                      @Context UriInfo uriInfo,
+                      @PathParam("path") FsPathParam path,
+                      @QueryParam(PutOpParam.NAME) PutOpParam op,
+                      @QueryParam(ToPathParam.NAME) @DefaultValue(ToPathParam.DEFAULT) ToPathParam toPath,
+                      @QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT) OwnerParam owner,
+                      @QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT) GroupParam group,
+                      @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
+                      @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
+                      @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
+                      PermissionParam permission,
+                      @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
+                      ReplicationParam replication,
+                      @QueryParam(ModifiedTimeParam.NAME) @DefaultValue(ModifiedTimeParam.DEFAULT)
+                      ModifiedTimeParam modifiedTime,
+                      @QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
+                      AccessTimeParam accessTime,
+                      @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
+                      @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
+    throws IOException, FileSystemAccessException {
+    Response response = null;
+    if (op == null) {
+      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PutOpParam.NAME));
+    }
+    path.makeAbsolute();
+    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    switch (op.value()) {
+      case CREATE: {
+        if (!hasData.value()) {
+          response = Response.temporaryRedirect(
+            createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PutOpValues.CREATE)).build();
+        } else {
+          FSOperations.FSCreate
+            command = new FSOperations.FSCreate(is, path.value(), permission.value(), override.value(),
+                                                replication.value(), blockSize.value());
+          fsExecute(user, doAs.value(), command);
+          AUDIT_LOG.info("[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
+                         new Object[]{path, permission, override, replication, blockSize});
+          response = Response.status(Response.Status.CREATED).build();
+        }
+        break;
+      }
+      case MKDIRS: {
+        FSOperations.FSMkdirs command = new FSOperations.FSMkdirs(path.value(), permission.value());
+        JSONObject json = fsExecute(user, doAs.value(), command);
+        AUDIT_LOG.info("[{}] permission [{}]", path, permission.value());
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case RENAME: {
+        FSOperations.FSRename command = new FSOperations.FSRename(path.value(), toPath.value());
+        JSONObject json = fsExecute(user, doAs.value(), command);
+        AUDIT_LOG.info("[{}] to [{}]", path, toPath);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case SETOWNER: {
+        FSOperations.FSSetOwner command = new FSOperations.FSSetOwner(path.value(), owner.value(), group.value());
+        fsExecute(user, doAs.value(), command);
+        AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner.value() + ":" + group.value());
+        response = Response.ok().build();
+        break;
+      }
+      case SETPERMISSION: {
+        FSOperations.FSSetPermission command = new FSOperations.FSSetPermission(path.value(), permission.value());
+        fsExecute(user, doAs.value(), command);
+        AUDIT_LOG.info("[{}] to [{}]", path, permission.value());
+        response = Response.ok().build();
+        break;
+      }
+      case SETREPLICATION: {
+        FSOperations.FSSetReplication command = new FSOperations.FSSetReplication(path.value(), replication.value());
+        JSONObject json = fsExecute(user, doAs.value(), command);
+        AUDIT_LOG.info("[{}] to [{}]", path, replication.value());
+        response = Response.ok(json).build();
+        break;
+      }
+      case SETTIMES: {
+        FSOperations.FSSetTimes
+          command = new FSOperations.FSSetTimes(path.value(), modifiedTime.value(), accessTime.value());
+        fsExecute(user, doAs.value(), command);
+        AUDIT_LOG.info("[{}] to (M/A)[{}]", path, modifiedTime.value() + ":" + accessTime.value());
+        response = Response.ok().build();
+        break;
+      }
+      case RENEWDELEGATIONTOKEN: {
+        response = Response.status(Response.Status.BAD_REQUEST).build();
+        break;
+      }
+      case CANCELDELEGATIONTOKEN: {
+        response = Response.status(Response.Status.BAD_REQUEST).build();
+        break;
+      }
+    }
+    return response;
+  }
+
  /**
   * Binding to handle all POST requests, supported operations are
   * {@link HttpFSFileSystem.PostOpValues}.
   *
   * @param is request input stream, used only for
   * {@link HttpFSFileSystem.PostOpValues#APPEND} operations.
   * @param user principal making the request.
   * @param uriInfo the request uriInfo.
   * @param path path for the POST request.
   * @param op POST operation, i.e. {@link HttpFSFileSystem.PostOpValues#APPEND}.
   * The parameter is required; a missing value results in an
   * <code>UnsupportedOperationException</code>.
   * @param hasData indicates if the append request is uploading data or not (just getting the handle).
   * @param doAs user being impersonated, default value is none. It can be used
   * only if the current user is a HttpFSServer proxyuser.
   *
   * @return the request response.
   *
   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
   * handled by {@link HttpFSExceptionProvider}.
   * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
   * exceptions are handled by {@link HttpFSExceptionProvider}.
   */
  @POST
  @Path("{path:.*}")
  @Consumes({"*/*"})
  @Produces({MediaType.APPLICATION_JSON})
  public Response post(InputStream is,
                       @Context Principal user,
                       @Context UriInfo uriInfo,
                       @PathParam("path") FsPathParam path,
                       @QueryParam(PostOpParam.NAME) PostOpParam op,
                       @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
                       @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
    throws IOException, FileSystemAccessException {
    Response response = null;
    if (op == null) {
      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PostOpParam.NAME));
    }
    path.makeAbsolute();
    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
    switch (op.value()) {
      case APPEND: {
        // Without data this is a handle request: redirect the client to the
        // upload URL instead of executing the append.
        if (!hasData.value()) {
          response = Response.temporaryRedirect(
            createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PostOpValues.APPEND)).build();
        } else {
          FSOperations.FSAppend command = new FSOperations.FSAppend(is, path.value());
          fsExecute(user, doAs.value(), command);
          AUDIT_LOG.info("[{}]", path);
          response = Response.ok().type(MediaType.APPLICATION_JSON).build();
        }
        break;
      }
    }
    return response;
  }
+
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.lib.server.ServerException;
+import org.apache.hadoop.lib.service.FileSystemAccess;
+import org.apache.hadoop.lib.servlet.ServerWebApp;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * Bootstrap class that manages the initialization and destruction of the
+ * HttpFSServer server, it is a <code>javax.servlet.ServletContextListener</code>
+ * implementation that is wired in HttpFSServer's WAR <code>WEB-INF/web.xml</code>.
+ * <p/>
+ * It provides acces to the server context via the singleton {@link #get}.
+ * <p/>
+ * All the configuration is loaded from configuration properties prefixed
+ * with <code>httpfs.</code>.
+ */
public class HttpFSServerWebApp extends ServerWebApp {
  private static final Logger LOG = LoggerFactory.getLogger(HttpFSServerWebApp.class);

  /**
   * Server name and prefix for all configuration properties.
   */
  public static final String NAME = "httpfs";

  /**
   * Configuration property that defines HttpFSServer admin group.
   */
  public static final String CONF_ADMIN_GROUP = "admin.group";

  // Singleton instance, set by init() and cleared by destroy().
  private static HttpFSServerWebApp SERVER;

  private String adminGroup;

  /**
   * Default constructor.
   *
   * @throws IOException thrown if the home/conf/log/temp directory paths
   * could not be resolved.
   */
  public HttpFSServerWebApp() throws IOException {
    super(NAME);
  }

  /**
   * Constructor used for testing purposes.
   */
  protected HttpFSServerWebApp(String homeDir, String configDir, String logDir, String tempDir,
                               Configuration config) {
    super(NAME, homeDir, configDir, logDir, tempDir, config);
  }

  /**
   * Constructor used for testing purposes.
   */
  public HttpFSServerWebApp(String homeDir, Configuration config) {
    super(NAME, homeDir, config);
  }

  /**
   * Initializes the HttpFSServer server, loads configuration and required services.
   * <p/>
   * Only one instance may be initialized at a time; attempting to initialize a
   * second one throws a <code>RuntimeException</code>.
   *
   * @throws ServerException thrown if HttpFSServer server could not be initialized.
   */
  @Override
  public void init() throws ServerException {
    super.init();
    if (SERVER != null) {
      throw new RuntimeException("HttpFSServer server already initialized");
    }
    SERVER = this;
    adminGroup = getConfig().get(getPrefixedName(CONF_ADMIN_GROUP), "admin");
    LOG.info("Connects to Namenode [{}]",
             get().get(FileSystemAccess.class).getDefaultConfiguration().get("fs.default.name"));
  }

  /**
   * Shuts down all running services and clears the singleton reference.
   */
  @Override
  public void destroy() {
    SERVER = null;
    super.destroy();
  }

  /**
   * Returns the HttpFSServer server singleton, configuration and services are
   * accessible through it. Returns <code>null</code> before {@link #init} or
   * after {@link #destroy}.
   *
   * @return the HttpFSServer server singleton.
   */
  public static HttpFSServerWebApp get() {
    return SERVER;
  }

  /**
   * Returns HttpFSServer admin group.
   *
   * @return httpfs admin group.
   */
  public String getAdminGroup() {
    return adminGroup;
  }

}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/RunnableCallable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/RunnableCallable.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/RunnableCallable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/RunnableCallable.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.lang;
+
+import org.apache.hadoop.lib.util.Check;
+
+import java.util.concurrent.Callable;
+
+/**
+ * Adapter class that allows <code>Runnable</code>s and <code>Callable</code>s to
+ * be treated as the other.
+ */
+public class RunnableCallable implements Callable<Void>, Runnable {
+  private Runnable runnable;
+  private Callable<?> callable;
+
+  /**
+   * Constructor that takes a runnable.
+   *
+   * @param runnable runnable.
+   */
+  public RunnableCallable(Runnable runnable) {
+    this.runnable = Check.notNull(runnable, "runnable");
+  }
+
+  /**
+   * Constructor that takes a callable.
+   *
+   * @param callable callable.
+   */
+  public RunnableCallable(Callable<?> callable) {
+    this.callable = Check.notNull(callable, "callable");
+  }
+
+  /**
+   * Invokes the wrapped callable/runnable as a callable.
+   *
+   * @return void
+   *
+   * @throws Exception thrown by the wrapped callable/runnable invocation.
+   */
+  @Override
+  public Void call() throws Exception {
+    if (runnable != null) {
+      runnable.run();
+    } else {
+      callable.call();
+    }
+    return null;
+  }
+
+  /**
+   * Invokes the wrapped callable/runnable as a runnable.
+   *
+   * @return void
+   *
+   * @throws Exception thrown by the wrapped callable/runnable invocation.
+   */
+  @Override
+  public void run() {
+    if (runnable != null) {
+      runnable.run();
+    } else {
+      try {
+        callable.call();
+      } catch (Exception ex) {
+        throw new RuntimeException(ex);
+      }
+    }
+  }
+
+  /**
+   * Returns the class name of the wrapper callable/runnable.
+   *
+   * @return the class name of the wrapper callable/runnable.
+   */
+  public String toString() {
+    return (runnable != null) ? runnable.getClass().getSimpleName() : callable.getClass().getSimpleName();
+  }
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.lang;
+
+import org.apache.hadoop.lib.util.Check;
+
+import java.text.MessageFormat;
+
+/**
+ * Generic exception that requires error codes and uses the a message
+ * template from the error code.
+ */
+public class XException extends Exception {
+
+  /**
+   * Interface to define error codes.
+   */
+  public static interface ERROR {
+
+    /**
+     * Returns the template for the error.
+     *
+     * @return the template for the error, the template must be in JDK
+     *         <code>MessageFormat</code> syntax (using {#} positional parameters).
+     */
+    public String getTemplate();
+
+  }
+
+  private ERROR error;
+
+  /**
+   * Private constructor used by the public constructors.
+   *
+   * @param error error code.
+   * @param message error message.
+   * @param cause exception cause if any.
+   */
+  private XException(ERROR error, String message, Throwable cause) {
+    super(message, cause);
+    this.error = error;
+  }
+
+  /**
+   * Creates an XException using another XException as cause.
+   * <p/>
+   * The error code and error message are extracted from the cause.
+   *
+   * @param cause exception cause.
+   */
+  public XException(XException cause) {
+    this(cause.getError(), cause.getMessage(), cause);
+  }
+
+  /**
+   * Creates an XException using the specified error code. The exception
+   * message is resolved using the error code template and the passed
+   * parameters.
+   *
+   * @param error error code for the XException.
+   * @param params parameters to use when creating the error message
+   * with the error code template.
+   */
+  @SuppressWarnings({"ThrowableResultOfMethodCallIgnored"})
+  public XException(ERROR error, Object... params) {
+    this(Check.notNull(error, "error"), format(error, params), getCause(params));
+  }
+
+  /**
+   * Returns the error code of the exception.
+   *
+   * @return the error code of the exception.
+   */
+  public ERROR getError() {
+    return error;
+  }
+
+  /**
+   * Creates a message using a error message template and arguments.
+   * <p/>
+   * The template must be in JDK <code>MessageFormat</code> syntax
+   * (using {#} positional parameters).
+   *
+   * @param error error code, to get the template from.
+   * @param args arguments to use for creating the message.
+   *
+   * @return the resolved error message.
+   */
+  private static String format(ERROR error, Object... args) {
+    String template = error.getTemplate();
+    if (template == null) {
+      StringBuilder sb = new StringBuilder();
+      for (int i = 0; i < args.length; i++) {
+        sb.append(" {").append(i).append("}");
+      }
+      template = sb.deleteCharAt(0).toString();
+    }
+    return error + ": " + MessageFormat.format(error.getTemplate(), args);
+  }
+
+  /**
+   * Returns the last parameter if it is an instance of <code>Throwable</code>
+   * returns it else it returns NULL.
+   *
+   * @param params parameters to look for a cause.
+   *
+   * @return the last parameter if it is an instance of <code>Throwable</code>
+   *         returns it else it returns NULL.
+   */
+  private static Throwable getCause(Object... params) {
+    Throwable throwable = null;
+    if (params != null && params.length > 0 && params[params.length - 1] instanceof Throwable) {
+      throwable = (Throwable) params[params.length - 1];
+    }
+    return throwable;
+  }
+
+}



Mime
View raw message