hadoop-hdfs-commits mailing list archives

From t...@apache.org
Subject svn commit: r1212060 [5/8] - in /hadoop/common/trunk/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/ hadoop-hdfs-httpfs/src/main/ hadoop-hdfs-httpfs/src/main/conf/ hadoop-hdfs-httpfs/src/main/java/ hadoop-hdfs-httpfs/src/main/java/o...
Date Thu, 08 Dec 2011 19:25:33 GMT
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/index.html
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/index.html?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/index.html (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/index.html Thu Dec  8 19:25:28 2011
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+
+-->
+<html>
+<body>
+<b>HttpFS service</b>, service base URL at /webhdfs/v1.
+</body>
+</html>

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/logging.properties
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/logging.properties?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/logging.properties (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/logging.properties Thu Dec  8 19:25:28 2011
@@ -0,0 +1,67 @@
+#
+#  All Rights Reserved.
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+handlers = 1catalina.org.apache.juli.FileHandler, 2localhost.org.apache.juli.FileHandler, 3manager.org.apache.juli.FileHandler, 4host-manager.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler
+
+.handlers = 1catalina.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler
+
+############################################################
+# Handler specific properties.
+# Describes specific configuration info for Handlers.
+############################################################
+
+1catalina.org.apache.juli.FileHandler.level = FINE
+1catalina.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
+1catalina.org.apache.juli.FileHandler.prefix = httpfs-catalina.
+
+2localhost.org.apache.juli.FileHandler.level = FINE
+2localhost.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
+2localhost.org.apache.juli.FileHandler.prefix = httpfs-localhost.
+
+3manager.org.apache.juli.FileHandler.level = FINE
+3manager.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
+3manager.org.apache.juli.FileHandler.prefix = httpfs-manager.
+
+4host-manager.org.apache.juli.FileHandler.level = FINE
+4host-manager.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
+4host-manager.org.apache.juli.FileHandler.prefix = httpfs-host-manager.
+
+java.util.logging.ConsoleHandler.level = FINE
+java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter
+
+
+############################################################
+# Facility specific properties.
+# Provides extra control for each logger.
+############################################################
+
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].level = INFO
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].handlers = 2localhost.org.apache.juli.FileHandler
+
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].level = INFO
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].handlers = 3manager.org.apache.juli.FileHandler
+
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].level = INFO
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].handlers = 4host-manager.org.apache.juli.FileHandler
+
+# For example, set the com.xyz.foo logger to only log SEVERE
+# messages:
+#org.apache.catalina.startup.ContextConfig.level = FINE
+#org.apache.catalina.startup.HostConfig.level = FINE
+#org.apache.catalina.session.ManagerBase.level = FINE
+#org.apache.catalina.core.AprLifecycleListener.level=FINE

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml Thu Dec  8 19:25:28 2011
@@ -0,0 +1,150 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!--
+
+   All Rights Reserved.
+
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!-- Note:  A "Server" is not itself a "Container", so you may not
+     define subcomponents such as "Valves" at this level.
+     Documentation at /docs/config/server.html
+ -->
+<Server port="${httpfs.admin.port}" shutdown="SHUTDOWN">
+
+  <!--APR library loader. Documentation at /docs/apr.html -->
+  <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on"/>
+  <!--Initialize Jasper before webapps are loaded. Documentation at /docs/jasper-howto.html -->
+  <Listener className="org.apache.catalina.core.JasperListener"/>
+  <!-- Prevent memory leaks due to use of particular java/javax APIs-->
+  <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener"/>
+  <!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
+  <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener"/>
+  <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"/>
+
+  <!-- Global JNDI resources
+       Documentation at /docs/jndi-resources-howto.html
+  -->
+  <GlobalNamingResources>
+    <!-- Editable user database that can also be used by
+         UserDatabaseRealm to authenticate users
+    -->
+    <Resource name="UserDatabase" auth="Container"
+              type="org.apache.catalina.UserDatabase"
+              description="User database that can be updated and saved"
+              factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
+              pathname="conf/tomcat-users.xml"/>
+  </GlobalNamingResources>
+
+  <!-- A "Service" is a collection of one or more "Connectors" that share
+       a single "Container" Note:  A "Service" is not itself a "Container",
+       so you may not define subcomponents such as "Valves" at this level.
+       Documentation at /docs/config/service.html
+   -->
+  <Service name="Catalina">
+
+    <!--The connectors can use a shared executor, you can define one or more named thread pools-->
+    <!--
+    <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
+        maxThreads="150" minSpareThreads="4"/>
+    -->
+
+
+    <!-- A "Connector" represents an endpoint by which requests are received
+         and responses are returned. Documentation at :
+         Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
+         Java AJP  Connector: /docs/config/ajp.html
+         APR (HTTP/AJP) Connector: /docs/apr.html
+         Define a non-SSL HTTP/1.1 Connector on port ${httpfs.http.port}
+    -->
+    <Connector port="${httpfs.http.port}" protocol="HTTP/1.1"
+               connectionTimeout="20000"
+               redirectPort="8443"/>
+    <!-- A "Connector" using the shared thread pool-->
+    <!--
+    <Connector executor="tomcatThreadPool"
+               port="${httpfs.http.port}" protocol="HTTP/1.1"
+               connectionTimeout="20000"
+               redirectPort="8443" />
+    -->
+    <!-- Define a SSL HTTP/1.1 Connector on port 8443
+         This connector uses the JSSE configuration, when using APR, the
+         connector should be using the OpenSSL style configuration
+         described in the APR documentation -->
+    <!--
+    <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
+               maxThreads="150" scheme="https" secure="true"
+               clientAuth="false" sslProtocol="TLS" />
+    -->
+
+    <!-- Define an AJP 1.3 Connector on port 8009 -->
+
+
+    <!-- An Engine represents the entry point (within Catalina) that processes
+         every request.  The Engine implementation for Tomcat stand alone
+         analyzes the HTTP headers included with the request, and passes them
+         on to the appropriate Host (virtual host).
+         Documentation at /docs/config/engine.html -->
+
+    <!-- You should set jvmRoute to support load-balancing via AJP ie :
+    <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
+    -->
+    <Engine name="Catalina" defaultHost="localhost">
+
+      <!--For clustering, please take a look at documentation at:
+          /docs/cluster-howto.html  (simple how to)
+          /docs/config/cluster.html (reference documentation) -->
+      <!--
+      <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
+      -->
+
+      <!-- The request dumper valve dumps useful debugging information about
+           the request and response data received and sent by Tomcat.
+           Documentation at: /docs/config/valve.html -->
+      <!--
+      <Valve className="org.apache.catalina.valves.RequestDumperValve"/>
+      -->
+
+      <!-- This Realm uses the UserDatabase configured in the global JNDI
+           resources under the key "UserDatabase".  Any edits
+           that are performed against this UserDatabase are immediately
+           available for use by the Realm.  -->
+      <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
+             resourceName="UserDatabase"/>
+
+      <!-- Define the default virtual host
+           Note: XML Schema validation will not work with Xerces 2.2.
+       -->
+      <Host name="localhost" appBase="webapps"
+            unpackWARs="true" autoDeploy="true"
+            xmlValidation="false" xmlNamespaceAware="false">
+
+        <!-- SingleSignOn valve, share authentication between web applications
+             Documentation at: /docs/config/valve.html -->
+        <!--
+        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
+        -->
+
+        <!-- Access log processes all example.
+             Documentation at: /docs/config/valve.html -->
+        <!--
+        <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
+               prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
+        -->
+
+      </Host>
+    </Engine>
+  </Service>
+</Server>

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/webapp/WEB-INF/web.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/webapp/WEB-INF/web.xml?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/webapp/WEB-INF/web.xml (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/webapp/WEB-INF/web.xml Thu Dec  8 19:25:28 2011
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee">
+
+  <listener>
+    <listener-class>org.apache.hadoop.fs.http.server.HttpFSServerWebApp</listener-class>
+  </listener>
+
+  <servlet>
+    <servlet-name>webservices-driver</servlet-name>
+    <servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
+    <init-param>
+      <param-name>com.sun.jersey.config.property.packages</param-name>
+      <param-value>org.apache.hadoop.fs.http.server,org.apache.hadoop.lib.wsrs</param-value>
+    </init-param>
+
+    <!-- Enables detailed Jersey request/response logging -->
+    <!--
+            <init-param>
+                <param-name>com.sun.jersey.spi.container.ContainerRequestFilters</param-name>
+                <param-value>com.sun.jersey.api.container.filter.LoggingFilter</param-value>
+            </init-param>
+            <init-param>
+                <param-name>com.sun.jersey.spi.container.ContainerResponseFilters</param-name>
+                <param-value>com.sun.jersey.api.container.filter.LoggingFilter</param-value>
+            </init-param>
+    -->
+    <load-on-startup>1</load-on-startup>
+  </servlet>
+
+  <servlet-mapping>
+    <servlet-name>webservices-driver</servlet-name>
+    <url-pattern>/*</url-pattern>
+  </servlet-mapping>
+
+  <filter>
+    <filter-name>authFilter</filter-name>
+    <filter-class>org.apache.hadoop.fs.http.server.AuthFilter</filter-class>
+  </filter>
+
+  <filter>
+    <filter-name>MDCFilter</filter-name>
+    <filter-class>org.apache.hadoop.lib.servlet.MDCFilter</filter-class>
+  </filter>
+
+  <filter>
+    <filter-name>hostnameFilter</filter-name>
+    <filter-class>org.apache.hadoop.lib.servlet.HostnameFilter</filter-class>
+  </filter>
+
+  <filter>
+    <filter-name>fsReleaseFilter</filter-name>
+    <filter-class>org.apache.hadoop.fs.http.server.HttpFSReleaseFilter</filter-class>
+  </filter>
+
+  <filter-mapping>
+    <filter-name>authFilter</filter-name>
+    <url-pattern>*</url-pattern>
+  </filter-mapping>
+
+  <filter-mapping>
+    <filter-name>MDCFilter</filter-name>
+    <url-pattern>*</url-pattern>
+  </filter-mapping>
+
+  <filter-mapping>
+    <filter-name>hostnameFilter</filter-name>
+    <url-pattern>*</url-pattern>
+  </filter-mapping>
+
+  <filter-mapping>
+    <filter-name>fsReleaseFilter</filter-name>
+    <url-pattern>*</url-pattern>
+  </filter-mapping>
+
+</web-app>

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm Thu Dec  8 19:25:28 2011
@@ -0,0 +1,121 @@
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+
+  ---
+  Hadoop HDFS over HTTP ${project.version} - Server Setup
+  ---
+  ---
+  ${maven.build.timestamp}
+
+Hadoop HDFS over HTTP ${project.version} - Server Setup
+
+  \[ {{{./index.html}Go Back}} \]
+
+  This page explains how to quickly set up HttpFS with Pseudo authentication
+  against a Hadoop cluster with Pseudo authentication.
+
+* Requirements
+
+    * Java 6+
+
+    * Maven 3+
+
+* Install HttpFS
+
++---+
+~ $ tar xzf  httpfs-${project.version}.tar.gz
++---+
+
+* Configure HttpFS
+
+  Edit the <<<httpfs-${project.version}/conf/httpfs-site.xml>>> file and
+  set the <<<httpfs.fsAccess.conf:fs.default.name>>> property to the HDFS
+  Namenode URI. For example:
+
++---+
+httpfs.fsAccess.conf:fs.default.name=hdfs://localhost:8021
++---+
+
+* Configure Hadoop
+
+  Edit Hadoop <<<core-site.xml>>> and define the Unix user that will
+  run the HttpFS server as a proxyuser. For example:
+
++---+
+  ...
+  <property>
+    <name>fsAccess.proxyuser.#HTTPFSUSER#.hosts</name>
+    <value>httpfs-host.foo.com</value>
+  </property>
+  <property>
+    <name>fsAccess.proxyuser.#HTTPFSUSER#.groups</name>
+    <value>*</value>
+  </property>
+  ...
++---+
+
+  IMPORTANT: Replace <<<#HTTPFSUSER#>>> with the Unix user that will
+  start the HttpFS server.
+
+* Restart Hadoop
+
+  You need to restart Hadoop for the proxyuser configuration to become
+  active.
+
+* Start/Stop HttpFS
+
+  To start/stop HttpFS, use HttpFS's <<<bin/httpfs.sh>>> script. For example:
+
++---+
+httpfs-${project.version} $ bin/httpfs.sh start
++---+
+
+  NOTE: Invoking the script without any parameters lists all possible
+  parameters (start, stop, run, etc.). The <<<httpfs.sh>>> script is a wrapper
+  for Tomcat's <<<catalina.sh>>> script that sets the environment variables
+  and Java System properties required to run the HttpFS server.
+
+* Test HttpFS is working
+
++---+
+~ $ curl -i "http://<HTTPFSHOSTNAME>:14000?user.name=babu&op=homedir"
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{"homeDir":"http:\/\/<HTTPFS_HOST>:14000\/user\/babu"}
++---+
+
+* Embedded Tomcat Configuration
+
+  To configure the embedded Tomcat, go to the <<<tomcat/conf>>> directory.
+
+  HttpFS preconfigures the HTTP and Admin ports in Tomcat's <<<server.xml>>> to
+  14000 and 14001.
+
+  Tomcat logs are also preconfigured to go to HttpFS's <<<logs/>>> directory.
+
+  The following environment variables (which can be set in HttpFS's
+  <<<conf/httpfs-env.sh>>> script) can be used to alter those values:
+
+  * HTTPFS_HTTP_PORT
+
+  * HTTPFS_ADMIN_PORT
+
+  * HTTPFS_LOG
+
+* HttpFS Configuration
+
+  HttpFS supports the following {{{./httpfs-default.html}configuration properties}}
+  in HttpFS's <<<conf/httpfs-site.xml>>> configuration file.
+
+  \[ {{{./index.html}Go Back}} \]
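
For reference, the same check can also be issued from plain Java using the JDK's
HttpURLConnection. This is only a sketch (it is not part of the committed files);
the host name, port, and user name are the placeholder values from the curl
example above.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class HttpFSSmokeTest {
      public static void main(String[] args) throws Exception {
        // Same request as the curl test above; "localhost" and "babu" are placeholders.
        URL url = new URL("http://localhost:14000/webhdfs/v1?op=homedir&user.name=babu");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        System.out.println("HTTP status: " + conn.getResponseCode());
        BufferedReader reader =
          new BufferedReader(new InputStreamReader(conn.getInputStream()));
        System.out.println(reader.readLine()); // e.g. {"homeDir":"http://...:14000/user/babu"}
        reader.close();
      }
    }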

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm Thu Dec  8 19:25:28 2011
@@ -0,0 +1,91 @@
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+
+  ---
+  Hadoop HDFS over HTTP ${project.version} - Using HTTP Tools
+  ---
+  ---
+  ${maven.build.timestamp}
+
+Hadoop HDFS over HTTP ${project.version} - Using HTTP Tools
+
+  \[ {{{./index.html}Go Back}} \]
+
+* Security
+
+  Out of the box HttpFS supports both pseudo authentication and Kerberos HTTP
+  SPNEGO authentication.
+
+** Pseudo Authentication
+
+  With pseudo authentication the user name must be specified in the
+  <<<user.name=\<USERNAME\>>>> query string parameter of an HttpFS URL.
+  For example:
+
++---+
+$ curl "http://<HTTPFS_HOST>:14000/webhdfs/v1?op=homedir&user.name=babu"
++---+
+
+** Kerberos HTTP SPNEGO Authentication
+
+  Kerberos HTTP SPNEGO authentication requires a tool or library supporting
+  the Kerberos HTTP SPNEGO protocol.
+
+  IMPORTANT: If using <<<curl>>>, the <<<curl>>> version being used must support
+  GSS (<<<curl -V>>> prints out 'GSS' if it supports it).
+
+  For example:
+
++---+
+$ kinit
+Please enter the password for tucu@LOCALHOST:
+$ curl --negotiate -u foo "http://<HTTPFS_HOST>:14000/webhdfs/v1?op=homedir"
+Enter host password for user 'foo':
++---+
+
+  NOTE: the <<<-u USER>>> option is required by the <<<--negotiate>>> option but
+  it is not used. Use any value as <<<USER>>> and, when asked for the password,
+  press [ENTER]; the password value is ignored.
+
+** {Remembering Who I Am} (Establishing an Authenticated Session)
+
+  As with most authentication mechanisms, Hadoop HTTP authentication authenticates
+  users once and issues a short-lived authentication token to be presented in
+  subsequent requests. This authentication token is a signed HTTP Cookie.
+
+  When using tools like <<<curl>>>, the authentication token must be saved on
+  the first request that performs authentication and submitted in subsequent
+  requests. To do this with <<<curl>>>, use the <<<-b>>> and <<<-c>>> options
+  to save and send HTTP cookies.
+
+  For example, the first request doing authentication should save the received
+  HTTP Cookies.
+
+    Using Pseudo Authentication:
+
++---+
+$ curl -c ~/.httpfsauth "http://<HTTPFS_HOST>:14000/webhdfs/v1?op=homedir&user.name=babu"
++---+
+
+    Using Kerberos HTTP SPNEGO authentication:
+
++---+
+$ curl --negotiate -u foo -c ~/.httpfsauth "http://<HTTPFS_HOST>:14000/webhdfs/v1?op=homedir"
++---+
+
+  Then, subsequent requests forward the previously received HTTP Cookie:
+
++---+
+$ curl -b ~/.httpfsauth "http://<HTTPFS_HOST>:14000/webhdfs/v1?op=liststatus"
++---+
+
+  \[ {{{./index.html}Go Back}} \]
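
The curl <<<-c>>>/<<<-b>>> pattern above can be reproduced from Java by installing a
default CookieManager, which makes HttpURLConnection keep the signed authentication
cookie and resend it on subsequent requests. A minimal sketch (not part of the
committed files), assuming pseudo authentication and the placeholder host and user
from the examples above:

    import java.io.InputStream;
    import java.net.CookieHandler;
    import java.net.CookieManager;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class HttpFSSession {
      public static void main(String[] args) throws Exception {
        // In-memory cookie store; plays the role of curl's -c/-b options.
        CookieHandler.setDefault(new CookieManager());

        // First request authenticates (pseudo auth) and receives the signed cookie.
        get("http://localhost:14000/webhdfs/v1?op=homedir&user.name=babu");

        // Subsequent requests reuse the cookie, so user.name is no longer needed.
        get("http://localhost:14000/webhdfs/v1?op=liststatus");
      }

      private static void get(String spec) throws Exception {
        HttpURLConnection conn = (HttpURLConnection) new URL(spec).openConnection();
        System.out.println(spec + " -> HTTP " + conn.getResponseCode());
        InputStream in = conn.getInputStream();
        in.close();
      }
    }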

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm Thu Dec  8 19:25:28 2011
@@ -0,0 +1,88 @@
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+
+  ---
+  Hadoop HDFS over HTTP - Documentation Sets ${project.version}
+  ---
+  ---
+  ${maven.build.timestamp}
+
+Hadoop HDFS over HTTP - Documentation Sets ${project.version}
+
+  HttpFS is a server that provides a REST HTTP gateway supporting all HDFS
+  file system operations (read and write). It is interoperable with the
+  <<webhdfs>> REST HTTP API.
+
+  HttpFS can be used to transfer data between clusters running different
+  versions of Hadoop (overcoming RPC versioning issues), for example using
+  Hadoop DistCP.
+
+  HttpFS can be used to access data in HDFS on a cluster behind a firewall
+  (the HttpFS server acts as a gateway and is the only system that is allowed
+  to cross the firewall into the cluster).
+
+  HttpFS can be used to access data in HDFS using HTTP utilities (such as curl
+  and wget) and HTTP libraries from languages other than Java (such as Perl).
+
+  The <<webhdfs>> client FileSystem implementation can be used to access HttpFS
+  using the Hadoop file system command line tool (<<<hadoop fs>>>) as well as
+  from Java applications using the Hadoop FileSystem Java API.
+
+  HttpFS has built-in security supporting Hadoop pseudo authentication and
+  Kerberos HTTP SPNEGO and other pluggable authentication mechanisms. It also
+  provides Hadoop proxy user support.
+
+* How Does HttpFS Work?
+
+  HttpFS is a separate service from the Hadoop NameNode.
+
+  HttpFS itself is a Java web application and it runs using a preconfigured Tomcat
+  bundled with the HttpFS binary distribution.
+
+  HttpFS HTTP web-service API calls are HTTP REST calls that map to an HDFS file
+  system operation. For example, using the <<<curl>>> Unix command:
+
+  * <<<$ curl http://httpfs-host:14000/webhdfs/v1/user/foo/README.txt>>> returns
+  the contents of the HDFS <<</user/foo/README.txt>>> file.
+
+  * <<<$ curl http://httpfs-host:14000/webhdfs/v1/user/foo?op=list>>> returns the
+  contents of the HDFS <<</user/foo>>> directory in JSON format.
+
+  * <<<$ curl -X POST http://httpfs-host:14000/webhdfs/v1/user/foo/bar?op=mkdirs>>>
+  creates the HDFS <<</user/foo/bar>>> directory.
+
+* How Do HttpFS and Hadoop HDFS Proxy Differ?
+
+  HttpFS was inspired by Hadoop HDFS proxy.
+
+  HttpFS can be seen as a full rewrite of Hadoop HDFS proxy.
+
+  Hadoop HDFS proxy provides a subset of file system operations (read only),
+  while HttpFS provides support for all file system operations.
+
+  HttpFS uses a clean HTTP REST API making its use with HTTP tools more
+  intuitive.
+
+  HttpFS supports Hadoop pseudo authentication, Kerberos SPNEGO authentication
+  and Hadoop proxy users. Hadoop HDFS proxy did not.
+
+* User and Developer Documentation
+
+  * {{{./ServerSetup.html}HttpFS Server Setup}}
+
+  * {{{./UsingHttpTools.html}Using HTTP Tools}}
+
+* Current Limitations
+
+  <<<GETDELEGATIONTOKEN, RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN>>>
+  operations are not supported.
+
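
As noted above, HttpFS can also be reached through the Hadoop FileSystem Java API.
The following sketch (not part of the committed files) mirrors the wiring used by
TestHttpFSFileSystem later in this commit, binding the http:// scheme to
HttpFSFileSystem via fs.http.impl; "httpfs-host" is a placeholder host name.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.http.client.HttpFSFileSystem;

    public class HttpFSJavaClient {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Bind the http:// scheme to the HttpFS client, as the new tests do.
        conf.set("fs.http.impl", HttpFSFileSystem.class.getName());
        // "httpfs-host" is a placeholder for the HttpFS server host name.
        FileSystem fs = FileSystem.get(new URI("http://httpfs-host:14000"), conf);
        for (FileStatus status : fs.listStatus(new Path("/user/foo"))) {
          System.out.println(status.getPath());
        }
        fs.close();
      }
    }

The equivalent webhdfs:// binding (fs.webhdfs.impl with WebHdfsFileSystem) is shown
in TestWebhdfsFileSystem later in this commit.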

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/configuration.xsl
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/configuration.xsl?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/configuration.xsl (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/configuration.xsl Thu Dec  8 19:25:28 2011
@@ -0,0 +1,49 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+
+-->
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+  <xsl:output method="html"/>
+  <xsl:template match="configuration">
+    <html>
+      <body>
+        <h2>Configuration Properties</h2>
+        <table border="1">
+          <tr>
+            <th>name</th>
+            <th>value</th>
+            <th>description</th>
+          </tr>
+          <xsl:for-each select="property">
+            <tr>
+              <td>
+                <a name="{name}">
+                  <xsl:value-of select="name"/>
+                </a>
+              </td>
+              <td>
+                <xsl:value-of select="value"/>
+              </td>
+              <td>
+                <xsl:value-of select="description"/>
+              </td>
+            </tr>
+          </xsl:for-each>
+        </table>
+      </body>
+    </html>
+  </xsl:template>
+</xsl:stylesheet>

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml Thu Dec  8 19:25:28 2011
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project name="HttpFS">
+
+  <version position="right"/>
+
+  <bannerLeft>
+    <name>&nbsp;</name>
+  </bannerLeft>
+
+  <skin>
+    <groupId>org.apache.maven.skins</groupId>
+    <artifactId>maven-stylus-skin</artifactId>
+    <version>1.2</version>
+  </skin>
+
+  <body>
+    <links>
+    </links>
+  </body>
+
+</project>

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,485 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.HFSTestCase;
+import org.apache.hadoop.test.HadoopUsersConfTestHelper;
+import org.apache.hadoop.test.TestDir;
+import org.apache.hadoop.test.TestDirHelper;
+import org.apache.hadoop.test.TestHdfs;
+import org.apache.hadoop.test.TestHdfsHelper;
+import org.apache.hadoop.test.TestJetty;
+import org.apache.hadoop.test.TestJettyHelper;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.webapp.WebAppContext;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URL;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Collection;
+
+@RunWith(value = Parameterized.class)
+public class TestHttpFSFileSystem extends HFSTestCase {
+
+  private void createHttpFSServer() throws Exception {
+    File homeDir = TestDirHelper.getTestDir();
+    Assert.assertTrue(new File(homeDir, "conf").mkdir());
+    Assert.assertTrue(new File(homeDir, "log").mkdir());
+    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
+
+    String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name");
+    Configuration conf = new Configuration(false);
+    conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName);
+    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", HadoopUsersConfTestHelper
+      .getHadoopProxyUserGroups());
+    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", HadoopUsersConfTestHelper
+      .getHadoopProxyUserHosts());
+    File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
+    OutputStream os = new FileOutputStream(hoopSite);
+    conf.writeXml(os);
+    os.close();
+
+    ClassLoader cl = Thread.currentThread().getContextClassLoader();
+    URL url = cl.getResource("webapp");
+    WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
+    Server server = TestJettyHelper.getJettyServer();
+    server.addHandler(context);
+    server.start();
+  }
+
+  protected FileSystem getHttpFileSystem() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set("fs.http.impl", HttpFSFileSystem.class.getName());
+    return FileSystem.get(TestJettyHelper.getJettyURL().toURI(), conf);
+  }
+
+  protected void testGet() throws Exception {
+    FileSystem fs = getHttpFileSystem();
+    Assert.assertNotNull(fs);
+    Assert.assertEquals(fs.getUri(), TestJettyHelper.getJettyURL().toURI());
+    fs.close();
+  }
+
+  private void testOpen() throws Exception {
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
+    OutputStream os = fs.create(path);
+    os.write(1);
+    os.close();
+    fs.close();
+    fs = getHttpFileSystem();
+    InputStream is = fs.open(new Path(path.toUri().getPath()));
+    Assert.assertEquals(is.read(), 1);
+    is.close();
+    fs.close();
+  }
+
+  private void testCreate(Path path, boolean override) throws Exception {
+    FileSystem fs = getHttpFileSystem();
+    FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
+    OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
+                                (short) 2, 100 * 1024 * 1024, null);
+    os.write(1);
+    os.close();
+    fs.close();
+
+    fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    FileStatus status = fs.getFileStatus(path);
+    Assert.assertEquals(status.getReplication(), 2);
+    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
+    Assert.assertEquals(status.getPermission(), permission);
+    InputStream is = fs.open(path);
+    Assert.assertEquals(is.read(), 1);
+    is.close();
+    fs.close();
+  }
+
+  private void testCreate() throws Exception {
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
+    testCreate(path, false);
+    testCreate(path, true);
+    try {
+      testCreate(path, false);
+      Assert.fail();
+    } catch (IOException ex) {
+
+    } catch (Exception ex) {
+      Assert.fail();
+    }
+  }
+
+  private void testAppend() throws Exception {
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
+    OutputStream os = fs.create(path);
+    os.write(1);
+    os.close();
+    fs.close();
+    fs = getHttpFileSystem();
+    os = fs.append(new Path(path.toUri().getPath()));
+    os.write(2);
+    os.close();
+    fs.close();
+    fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    InputStream is = fs.open(path);
+    Assert.assertEquals(is.read(), 1);
+    Assert.assertEquals(is.read(), 2);
+    Assert.assertEquals(is.read(), -1);
+    is.close();
+    fs.close();
+  }
+
+  private void testRename() throws Exception {
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo");
+    fs.mkdirs(path);
+    fs.close();
+    fs = getHttpFileSystem();
+    Path oldPath = new Path(path.toUri().getPath());
+    Path newPath = new Path(path.getParent(), "bar");
+    fs.rename(oldPath, newPath);
+    fs.close();
+    fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Assert.assertFalse(fs.exists(oldPath));
+    Assert.assertTrue(fs.exists(newPath));
+    fs.close();
+  }
+
+  private void testDelete() throws Exception {
+    Path foo = new Path(TestHdfsHelper.getHdfsTestDir(), "foo");
+    Path bar = new Path(TestHdfsHelper.getHdfsTestDir(), "bar");
+    Path foe = new Path(TestHdfsHelper.getHdfsTestDir(), "foe");
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(foo);
+    fs.mkdirs(new Path(bar, "a"));
+    fs.mkdirs(foe);
+
+    FileSystem hoopFs = getHttpFileSystem();
+    Assert.assertTrue(hoopFs.delete(new Path(foo.toUri().getPath()), false));
+    Assert.assertFalse(fs.exists(foo));
+    try {
+      hoopFs.delete(new Path(bar.toUri().getPath()), false);
+      Assert.fail();
+    } catch (IOException ex) {
+    } catch (Exception ex) {
+      Assert.fail();
+    }
+    Assert.assertTrue(fs.exists(bar));
+    Assert.assertTrue(hoopFs.delete(new Path(bar.toUri().getPath()), true));
+    Assert.assertFalse(fs.exists(bar));
+
+    Assert.assertTrue(fs.exists(foe));
+    Assert.assertTrue(hoopFs.delete(foe, true));
+    Assert.assertFalse(fs.exists(foe));
+
+    hoopFs.close();
+    fs.close();
+  }
+
+  private void testListStatus() throws Exception {
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
+    OutputStream os = fs.create(path);
+    os.write(1);
+    os.close();
+    FileStatus status1 = fs.getFileStatus(path);
+    fs.close();
+
+    fs = getHttpFileSystem();
+    FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
+    fs.close();
+
+    Assert.assertEquals(status2.getPermission(), status1.getPermission());
+    Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
+    Assert.assertEquals(status2.getReplication(), status1.getReplication());
+    Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
+    Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
+    Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
+    Assert.assertEquals(status2.getOwner(), status1.getOwner());
+    Assert.assertEquals(status2.getGroup(), status1.getGroup());
+    Assert.assertEquals(status2.getLen(), status1.getLen());
+
+    FileStatus[] stati = fs.listStatus(path.getParent());
+    Assert.assertEquals(stati.length, 1);
+    Assert.assertEquals(stati[0].getPath().getName(), path.getName());
+  }
+
+  private void testWorkingdirectory() throws Exception {
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Path workingDir = fs.getWorkingDirectory();
+    fs.close();
+
+    fs = getHttpFileSystem();
+    Path hoopWorkingDir = fs.getWorkingDirectory();
+    fs.close();
+    Assert.assertEquals(hoopWorkingDir.toUri().getPath(), workingDir.toUri().getPath());
+
+    fs = getHttpFileSystem();
+    fs.setWorkingDirectory(new Path("/tmp"));
+    workingDir = fs.getWorkingDirectory();
+    fs.close();
+    Assert.assertEquals(workingDir.toUri().getPath(), new Path("/tmp").toUri().getPath());
+  }
+
+  private void testMkdirs() throws Exception {
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo");
+    FileSystem fs = getHttpFileSystem();
+    fs.mkdirs(path);
+    fs.close();
+    fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Assert.assertTrue(fs.exists(path));
+    fs.close();
+  }
+
+  private void testSetTimes() throws Exception {
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
+    OutputStream os = fs.create(path);
+    os.write(1);
+    os.close();
+    FileStatus status1 = fs.getFileStatus(path);
+    fs.close();
+    long at = status1.getAccessTime();
+    long mt = status1.getModificationTime();
+
+    fs = getHttpFileSystem();
+    fs.setTimes(path, mt + 10, at + 20);
+    fs.close();
+
+    fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    status1 = fs.getFileStatus(path);
+    fs.close();
+    long atNew = status1.getAccessTime();
+    long mtNew = status1.getModificationTime();
+    Assert.assertEquals(mtNew, mt + 10);
+    Assert.assertEquals(atNew, at + 20);
+  }
+
+  private void testSetPermission() throws Exception {
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
+    OutputStream os = fs.create(path);
+    os.write(1);
+    os.close();
+    fs.close();
+
+    fs = getHttpFileSystem();
+    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
+    fs.setPermission(path, permission1);
+    fs.close();
+
+    fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    FileStatus status1 = fs.getFileStatus(path);
+    fs.close();
+    FsPermission permission2 = status1.getPermission();
+    Assert.assertEquals(permission2, permission1);
+  }
+
+  private void testSetOwner() throws Exception {
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
+    OutputStream os = fs.create(path);
+    os.write(1);
+    os.close();
+    fs.close();
+
+    fs = getHttpFileSystem();
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[1];
+    String group = HadoopUsersConfTestHelper.getHadoopUserGroups(user)[0];
+    fs.setOwner(path, user, group);
+    fs.close();
+
+    fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    FileStatus status1 = fs.getFileStatus(path);
+    fs.close();
+    Assert.assertEquals(status1.getOwner(), user);
+    Assert.assertEquals(status1.getGroup(), group);
+  }
+
+  private void testSetReplication() throws Exception {
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
+    OutputStream os = fs.create(path);
+    os.write(1);
+    os.close();
+    fs.close();
+    fs.setReplication(path, (short) 2);
+
+    fs = getHttpFileSystem();
+    fs.setReplication(path, (short) 1);
+    fs.close();
+
+    fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    FileStatus status1 = fs.getFileStatus(path);
+    fs.close();
+    Assert.assertEquals(status1.getReplication(), (short) 1);
+  }
+
+  private void testChecksum() throws Exception {
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
+    OutputStream os = fs.create(path);
+    os.write(1);
+    os.close();
+    FileChecksum hdfsChecksum = fs.getFileChecksum(path);
+    fs.close();
+    fs = getHttpFileSystem();
+    FileChecksum httpChecksum = fs.getFileChecksum(path);
+    fs.close();
+    Assert.assertEquals(httpChecksum.getAlgorithmName(), hdfsChecksum.getAlgorithmName());
+    Assert.assertEquals(httpChecksum.getLength(), hdfsChecksum.getLength());
+    Assert.assertArrayEquals(httpChecksum.getBytes(), hdfsChecksum.getBytes());
+  }
+
+  private void testContentSummary() throws Exception {
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
+    OutputStream os = fs.create(path);
+    os.write(1);
+    os.close();
+    ContentSummary hdfsContentSummary = fs.getContentSummary(path);
+    fs.close();
+    fs = getHttpFileSystem();
+    ContentSummary httpContentSummary = fs.getContentSummary(path);
+    fs.close();
+    Assert.assertEquals(httpContentSummary.getDirectoryCount(), hdfsContentSummary.getDirectoryCount());
+    Assert.assertEquals(httpContentSummary.getFileCount(), hdfsContentSummary.getFileCount());
+    Assert.assertEquals(httpContentSummary.getLength(), hdfsContentSummary.getLength());
+    Assert.assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
+    Assert.assertEquals(httpContentSummary.getSpaceConsumed(), hdfsContentSummary.getSpaceConsumed());
+    Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
+  }
+
+  protected enum Operation {
+    GET, OPEN, CREATE, APPEND, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
+    SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY
+  }
+
+  private void operation(Operation op) throws Exception {
+    switch (op) {
+      case GET:
+        testGet();
+        break;
+      case OPEN:
+        testOpen();
+        break;
+      case CREATE:
+        testCreate();
+        break;
+      case APPEND:
+        testAppend();
+        break;
+      case RENAME:
+        testRename();
+        break;
+      case DELETE:
+        testDelete();
+        break;
+      case LIST_STATUS:
+        testListStatus();
+        break;
+      case WORKING_DIRECTORY:
+        testWorkingdirectory();
+        break;
+      case MKDIRS:
+        testMkdirs();
+        break;
+      case SET_TIMES:
+        testSetTimes();
+        break;
+      case SET_PERMISSION:
+        testSetPermission();
+        break;
+      case SET_OWNER:
+        testSetOwner();
+        break;
+      case SET_REPLICATION:
+        testSetReplication();
+        break;
+      case CHECKSUM:
+        testChecksum();
+        break;
+      case CONTENT_SUMMARY:
+        testContentSummary();
+        break;
+    }
+  }
+
+  @Parameterized.Parameters
+  public static Collection operations() {
+    Object[][] ops = new Object[Operation.values().length][];
+    for (int i = 0; i < Operation.values().length; i++) {
+      ops[i] = new Object[]{Operation.values()[i]};
+    }
+    return Arrays.asList(ops);
+  }
+
+  private Operation operation;
+
+  public TestHttpFSFileSystem(Operation operation) {
+    this.operation = operation;
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testOperation() throws Exception {
+    createHttpFSServer();
+    operation(operation);
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testOperationDoAs() throws Exception {
+    createHttpFSServer();
+    UserGroupInformation ugi = UserGroupInformation.createProxyUser(HadoopUsersConfTestHelper.getHadoopUsers()[0],
+                                                                    UserGroupInformation.getCurrentUser());
+    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        operation(operation);
+        return null;
+      }
+    });
+  }
+
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.test.TestJettyHelper;
+import org.junit.Assert;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.net.URI;
+
+@RunWith(value = Parameterized.class)
+public class TestWebhdfsFileSystem extends TestHttpFSFileSystem {
+
+  public TestWebhdfsFileSystem(TestHttpFSFileSystem.Operation operation) {
+    super(operation);
+  }
+
+  @Override
+  protected FileSystem getHttpFileSystem() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set("fs.webhdfs.impl", WebHdfsFileSystem.class.getName());
+    URI uri = new URI("webhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority());
+    return FileSystem.get(uri, conf);
+  }
+
+  @Override
+  protected void testGet() throws Exception {
+    FileSystem fs = getHttpFileSystem();
+    Assert.assertNotNull(fs);
+    URI uri = new URI("webhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority());
+    Assert.assertEquals(fs.getUri(), uri);
+    fs.close();
+  }
+
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http.server;
+
+import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.HFSTestCase;
+import org.apache.hadoop.test.HadoopUsersConfTestHelper;
+import org.apache.hadoop.test.TestDir;
+import org.apache.hadoop.test.TestDirHelper;
+import org.apache.hadoop.test.TestHdfs;
+import org.apache.hadoop.test.TestHdfsHelper;
+import org.apache.hadoop.test.TestJetty;
+import org.apache.hadoop.test.TestJettyHelper;
+import org.junit.Test;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.webapp.WebAppContext;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.text.MessageFormat;
+
+public class TestHttpFSServer extends HFSTestCase {
+
+  @Test
+  @TestDir
+  @TestJetty
+  public void server() throws Exception {
+    String dir = TestDirHelper.getTestDir().getAbsolutePath();
+    Configuration hoopConf = new Configuration(false);
+    HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, hoopConf);
+    server.init();
+    server.destroy();
+  }
+
+  private void createHttpFSServer() throws Exception {
+    File homeDir = TestDirHelper.getTestDir();
+    Assert.assertTrue(new File(homeDir, "conf").mkdir());
+    Assert.assertTrue(new File(homeDir, "log").mkdir());
+    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
+
+    String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name");
+    Configuration conf = new Configuration(false);
+    conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName);
+    File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
+    OutputStream os = new FileOutputStream(hoopSite);
+    conf.writeXml(os);
+    os.close();
+
+    ClassLoader cl = Thread.currentThread().getContextClassLoader();
+    URL url = cl.getResource("webapp");
+    WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
+    Server server = TestJettyHelper.getJettyServer();
+    server.addHandler(context);
+    server.start();
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void instrumentation() throws Exception {
+    createHttpFSServer();
+
+    URL url = new URL(TestJettyHelper.getJettyURL(),
+                      MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "root"));
+    conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
+    String line = reader.readLine();
+    reader.close();
+    Assert.assertTrue(line.contains("\"counters\":{"));
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation", "root"));
+    conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testHdfsAccess() throws Exception {
+    createHttpFSServer();
+
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    URL url = new URL(TestJettyHelper.getJettyURL(),
+                      MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user));
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
+    reader.readLine();
+    reader.close();
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testGlobFilter() throws Exception {
+    createHttpFSServer();
+
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path("/tmp"));
+    fs.create(new Path("/tmp/foo.txt")).close();
+
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    URL url = new URL(TestJettyHelper.getJettyURL(),
+                      MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
+    reader.readLine();
+    reader.close();
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testPutNoOperation() throws Exception {
+    createHttpFSServer();
+
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    URL url = new URL(TestJettyHelper.getJettyURL(),
+                      MessageFormat.format("/webhdfs/v1/foo?user.name={0}", user));
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setDoInput(true);
+    conn.setDoOutput(true);
+    conn.setRequestMethod("PUT");
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
+  }
+
+}

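Reviewer note: the REST calls exercised by TestHttpFSServer above can be reproduced against a running HttpFS instance with any HTTP client. The standalone snippet below is a minimal sketch, not part of this commit; the host, port (14000 is the usual HttpFS default) and the user.name value are assumptions to adapt to the target deployment.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class HttpFSListStatusExample {
  public static void main(String[] args) throws Exception {
    // Assumed HttpFS endpoint and proxy user; adjust to your deployment.
    URL url = new URL("http://localhost:14000/webhdfs/v1/tmp?user.name=hdfs&op=liststatus");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
      BufferedReader reader =
          new BufferedReader(new InputStreamReader(conn.getInputStream()));
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line); // JSON FileStatuses payload
      }
      reader.close();
    }
  }
}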
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.lang;
+
+
+import junit.framework.Assert;
+import org.apache.hadoop.test.HTestCase;
+import org.junit.Test;
+
+import java.util.concurrent.Callable;
+
+public class TestRunnableCallable extends HTestCase {
+
+  public static class R implements Runnable {
+    boolean RUN;
+
+    @Override
+    public void run() {
+      RUN = true;
+    }
+  }
+
+  public static class C implements Callable {
+    boolean RUN;
+
+    @Override
+    public Object call() throws Exception {
+      RUN = true;
+      return null;
+    }
+  }
+
+  public static class CEx implements Callable {
+
+    @Override
+    public Object call() throws Exception {
+      throw new Exception();
+    }
+  }
+
+  @Test
+  public void runnable() throws Exception {
+    R r = new R();
+    RunnableCallable rc = new RunnableCallable(r);
+    rc.run();
+    Assert.assertTrue(r.RUN);
+
+    r = new R();
+    rc = new RunnableCallable(r);
+    rc.call();
+    Assert.assertTrue(r.RUN);
+
+    Assert.assertEquals(rc.toString(), "R");
+  }
+
+  @Test
+  public void callable() throws Exception {
+    C c = new C();
+    RunnableCallable rc = new RunnableCallable(c);
+    rc.run();
+    Assert.assertTrue(c.RUN);
+
+    c = new C();
+    rc = new RunnableCallable(c);
+    rc.call();
+    Assert.assertTrue(c.RUN);
+
+    Assert.assertEquals(rc.toString(), "C");
+  }
+
+  @Test(expected = RuntimeException.class)
+  public void callableExRun() throws Exception {
+    CEx c = new CEx();
+    RunnableCallable rc = new RunnableCallable(c);
+    rc.run();
+  }
+
+}

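Reviewer note: TestRunnableCallable pins down the adapter contract: wrap either a Runnable or a Callable, expose both run() and call(), rethrow a checked exception thrown from run() as a RuntimeException, and have toString() return the wrapped object's simple class name. The sketch below is inferred from those assertions only; it is an illustration, not the RunnableCallable class added elsewhere in this commit.

import java.util.concurrent.Callable;

// Rough sketch of a Runnable/Callable adapter matching the tested behaviour.
public class RunnableCallableSketch implements Runnable, Callable<Object> {
  private final Runnable runnable;
  private final Callable<?> callable;

  public RunnableCallableSketch(Runnable runnable) {
    this.runnable = runnable;
    this.callable = null;
  }

  public RunnableCallableSketch(Callable<?> callable) {
    this.runnable = null;
    this.callable = callable;
  }

  @Override
  public void run() {
    if (runnable != null) {
      runnable.run();
    } else {
      try {
        callable.call();
      } catch (Exception ex) {
        // run() cannot throw checked exceptions, so wrap them.
        throw new RuntimeException(ex);
      }
    }
  }

  @Override
  public Object call() throws Exception {
    if (runnable != null) {
      runnable.run();
      return null;
    }
    return callable.call();
  }

  @Override
  public String toString() {
    // The tests expect the simple class name of the wrapped object.
    return (runnable != null)
        ? runnable.getClass().getSimpleName()
        : callable.getClass().getSimpleName();
  }
}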
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.lang;
+
+
+import junit.framework.Assert;
+import org.apache.hadoop.test.HTestCase;
+import org.junit.Test;
+
+public class TestXException extends HTestCase {
+
+  public static enum TestERROR implements XException.ERROR {
+    TC;
+
+    @Override
+    public String getTemplate() {
+      return "{0}";
+    }
+  }
+
+  @Test
+  public void testXException() throws Exception {
+    XException ex = new XException(TestERROR.TC);
+    Assert.assertEquals(ex.getError(), TestERROR.TC);
+    Assert.assertEquals(ex.getMessage(), "TC: {0}");
+    Assert.assertNull(ex.getCause());
+
+    ex = new XException(TestERROR.TC, "msg");
+    Assert.assertEquals(ex.getError(), TestERROR.TC);
+    Assert.assertEquals(ex.getMessage(), "TC: msg");
+    Assert.assertNull(ex.getCause());
+
+    Exception cause = new Exception();
+    ex = new XException(TestERROR.TC, cause);
+    Assert.assertEquals(ex.getError(), TestERROR.TC);
+    Assert.assertEquals(ex.getMessage(), "TC: " + cause.toString());
+    Assert.assertEquals(ex.getCause(), cause);
+
+    XException xcause = ex;
+    ex = new XException(xcause);
+    Assert.assertEquals(ex.getError(), TestERROR.TC);
+    Assert.assertEquals(ex.getMessage(), xcause.getMessage());
+    Assert.assertEquals(ex.getCause(), xcause);
+  }
+
+}

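Reviewer note: TestXException shows the error-template pattern: each enum constant implementing XException.ERROR supplies a java.text.MessageFormat template, and the exception message becomes the constant name plus the formatted template (e.g. "TC: msg"). A hypothetical enum using that pattern could look like the following; the DemoError name, codes and templates are illustrative only and not part of this commit.

import org.apache.hadoop.lib.lang.XException;

// Hypothetical error enum; templates are MessageFormat patterns.
public enum DemoError implements XException.ERROR {
  D01("file [{0}] not found"),
  D02("operation [{0}] not supported");

  private final String template;

  DemoError(String template) {
    this.template = template;
  }

  @Override
  public String getTemplate() {
    return template;
  }
}

// At a throwing site, new XException(DemoError.D01, "/user/foo") would carry
// the message "D01: file [/user/foo] not found".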
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java?rev=1212060&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java Thu Dec  8 19:25:28 2011
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.server;
+
+import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.test.HTestCase;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class TestBaseService extends HTestCase {
+
+  public static class MyService extends BaseService {
+    static Boolean INIT;
+
+    public MyService() {
+      super("myservice");
+    }
+
+    @Override
+    protected void init() throws ServiceException {
+      INIT = true;
+    }
+
+    @Override
+    public Class getInterface() {
+      return null;
+    }
+  }
+
+  @Test
+  public void baseService() throws Exception {
+    BaseService service = new MyService();
+    Assert.assertNull(service.getInterface());
+    Assert.assertEquals(service.getPrefix(), "myservice");
+    Assert.assertEquals(service.getServiceDependencies().length, 0);
+
+    Server server = Mockito.mock(Server.class);
+    Configuration conf = new Configuration(false);
+    conf.set("server.myservice.foo", "FOO");
+    conf.set("server.myservice1.bar", "BAR");
+    Mockito.when(server.getConfig()).thenReturn(conf);
+    Mockito.when(server.getPrefixedName("myservice.foo")).thenReturn("server.myservice.foo");
+    Mockito.when(server.getPrefixedName("myservice.")).thenReturn("server.myservice.");
+
+    service.init(server);
+    Assert.assertEquals(service.getPrefixedName("foo"), "server.myservice.foo");
+    Assert.assertEquals(service.getServiceConfig().size(), 1);
+    Assert.assertEquals(service.getServiceConfig().get("foo"), "FOO");
+    Assert.assertTrue(MyService.INIT);
+  }
+}

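Reviewer note: TestBaseService verifies the configuration scoping contract: a service constructed with prefix "myservice" sees only the server.myservice.* keys, prefix stripped, through getServiceConfig(). A concrete service relying on that contract might look like the sketch below; the "cache" prefix and "ttl" property are illustrative assumptions, not part of this commit.

import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;

// Illustrative service; the prefix and property names are assumptions.
public class CacheService extends BaseService {
  private long ttlMillis;

  public CacheService() {
    super("cache"); // configuration is read from server.cache.* keys
  }

  @Override
  protected void init() throws ServiceException {
    // getServiceConfig() exposes server.cache.* entries with the prefix
    // stripped, so server.cache.ttl is read here simply as "ttl".
    String ttl = getServiceConfig().get("ttl");
    ttlMillis = (ttl != null) ? Long.parseLong(ttl) : 60000L;
  }

  @Override
  public Class getInterface() {
    return CacheService.class;
  }
}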

