hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From xkro...@apache.org
Subject [39/50] [abbrv] hadoop git commit: HDDS-177. Create a releasable ozonefs artifact. Contributed by Marton, Elek.
Date Tue, 26 Jun 2018 05:33:17 GMT
HDDS-177. Create a releasable ozonefs artifact
Contributed by Marton, Elek.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e16e5b30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e16e5b30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e16e5b30

Branch: refs/heads/HDFS-12943
Commit: e16e5b307d6c4404db0698b9d128e5bf4aa16a8a
Parents: ca14fec
Author: Anu Engineer <aengineer@apache.org>
Authored: Sun Jun 24 01:05:04 2018 -0700
Committer: Anu Engineer <aengineer@apache.org>
Committed: Sun Jun 24 01:05:04 2018 -0700

----------------------------------------------------------------------
 dev-support/bin/ozone-dist-layout-stitching     |   4 +-
 hadoop-dist/pom.xml                             |   5 +
 .../test/acceptance/ozonefs/docker-compose.yaml |  71 ++
 .../src/test/acceptance/ozonefs/docker-config   |  39 ++
 .../src/test/acceptance/ozonefs/ozonefs.robot   |  39 ++
 hadoop-ozone/ozonefs/pom.xml                    | 211 ++++++
 .../org/apache/hadoop/fs/ozone/Constants.java   |  42 ++
 .../java/org/apache/hadoop/fs/ozone/OzFs.java   |  44 ++
 .../hadoop/fs/ozone/OzoneFSInputStream.java     |  79 +++
 .../hadoop/fs/ozone/OzoneFSOutputStream.java    |  59 ++
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java | 689 +++++++++++++++++++
 .../apache/hadoop/fs/ozone/package-info.java    |  30 +
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java | 157 +++++
 .../fs/ozone/TestOzoneFileInterfaces.java       | 231 +++++++
 .../contract/ITestOzoneContractCreate.java      |  48 ++
 .../contract/ITestOzoneContractDelete.java      |  48 ++
 .../contract/ITestOzoneContractDistCp.java      |  50 ++
 .../ITestOzoneContractGetFileStatus.java        |  61 ++
 .../ozone/contract/ITestOzoneContractMkdir.java |  48 ++
 .../ozone/contract/ITestOzoneContractOpen.java  |  47 ++
 .../contract/ITestOzoneContractRename.java      |  49 ++
 .../contract/ITestOzoneContractRootDir.java     |  51 ++
 .../ozone/contract/ITestOzoneContractSeek.java  |  47 ++
 .../hadoop/fs/ozone/contract/OzoneContract.java | 123 ++++
 .../src/test/resources/contract/ozone.xml       | 113 +++
 .../ozonefs/src/test/resources/log4j.properties |  23 +
 hadoop-ozone/pom.xml                            |   1 +
 hadoop-project/pom.xml                          |   6 +-
 hadoop-tools/hadoop-ozone/pom.xml               | 174 -----
 .../org/apache/hadoop/fs/ozone/Constants.java   |  42 --
 .../java/org/apache/hadoop/fs/ozone/OzFs.java   |  44 --
 .../hadoop/fs/ozone/OzoneFSInputStream.java     |  79 ---
 .../hadoop/fs/ozone/OzoneFSOutputStream.java    |  59 --
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java | 689 -------------------
 .../apache/hadoop/fs/ozone/package-info.java    |  30 -
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java | 157 -----
 .../fs/ozone/TestOzoneFileInterfaces.java       | 231 -------
 .../contract/ITestOzoneContractCreate.java      |  48 --
 .../contract/ITestOzoneContractDelete.java      |  48 --
 .../contract/ITestOzoneContractDistCp.java      |  50 --
 .../ITestOzoneContractGetFileStatus.java        |  61 --
 .../ozone/contract/ITestOzoneContractMkdir.java |  48 --
 .../ozone/contract/ITestOzoneContractOpen.java  |  47 --
 .../contract/ITestOzoneContractRename.java      |  49 --
 .../contract/ITestOzoneContractRootDir.java     |  51 --
 .../ozone/contract/ITestOzoneContractSeek.java  |  47 --
 .../hadoop/fs/ozone/contract/OzoneContract.java | 123 ----
 .../src/test/resources/contract/ozone.xml       | 113 ---
 .../src/test/resources/log4j.properties         |  23 -
 hadoop-tools/hadoop-tools-dist/pom.xml          |  15 -
 hadoop-tools/pom.xml                            |  11 -
 51 files changed, 2413 insertions(+), 2241 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/dev-support/bin/ozone-dist-layout-stitching
----------------------------------------------------------------------
diff --git a/dev-support/bin/ozone-dist-layout-stitching b/dev-support/bin/ozone-dist-layout-stitching
index ad8abe2..be330d5 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -145,6 +145,8 @@ run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-$
 run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" .
 run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
 run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
+mkdir -p "./share/hadoop/ozonefs"
+cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem.jar"
 # Optional documentation, could be missing
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ksm/
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/
@@ -153,5 +155,5 @@ cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdd
 mkdir -p ./share/hadoop/mapreduce
 mkdir -p ./share/hadoop/yarn
 echo
-echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone-${HDDS_VERSION}"
+echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone"
 echo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index dfbf818..5de6759 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -265,6 +265,11 @@
           <artifactId>hadoop-ozone-docs</artifactId>
           <scope>provided</scope>
         </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-ozone-filesystem</artifactId>
+          <scope>provided</scope>
+        </dependency>
       </dependencies>
       <build>
         <plugins>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
new file mode 100644
index 0000000..3323557
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
@@ -0,0 +1,71 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+   namenode:
+      image: apache/hadoop-runner
+      hostname: namenode
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9870
+      environment:
+          ENSURE_NAMENODE_DIR: /data/namenode
+      env_file:
+         - ./docker-config
+      command: ["/opt/hadoop/bin/hdfs","namenode"]
+   datanode:
+      image: apache/hadoop-runner
+      volumes:
+        - ${OZONEDIR}:/opt/hadoop
+      ports:
+        - 9864
+      command: ["/opt/hadoop/bin/ozone","datanode"]
+      env_file:
+        - ./docker-config
+   ksm:
+      image: apache/hadoop-runner
+      hostname: ksm
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9874
+      environment:
+         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+      env_file:
+          - ./docker-config
+      command: ["/opt/hadoop/bin/ozone","ksm"]
+   scm:
+      image: apache/hadoop-runner
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9876
+      env_file:
+          - ./docker-config
+      environment:
+          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+      command: ["/opt/hadoop/bin/ozone","scm"]
+   hadooplast:
+      image: flokkr/hadoop:3.1.0
+      volumes:
+        - ${OZONEDIR}:/opt/ozone
+      env_file:
+        - ./docker-config
+      environment:
+         HADOOP_CLASSPATH: /opt/ozone/share/hadoop/ozonefs/hadoop-ozone-filesystem.jar
+      command: ["watch","-n","100000","ls"]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
new file mode 100644
index 0000000..dec863e
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
@@ -0,0 +1,39 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
+CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
+OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874
+OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.enabled=True
+OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
+OZONE-SITE.XML_ozone.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
+OZONE-SITE.XML_ozone.handler.type=distributed
+OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s
+HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
+HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
+HDFS-SITE.XML_rpc.metrics.quantile.enable=true
+HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
+LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
+LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+LOG4J.PROPERTIES_log4j.category.org.apache.hadoop.util.NativeCodeLoader=ERROR

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
new file mode 100644
index 0000000..9e8a5d2
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
@@ -0,0 +1,39 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Ozonefs test
+Library             OperatingSystem
+Suite Setup         Startup Ozone cluster with size          5
+Suite Teardown      Teardown Ozone cluster
+Resource            ../commonlib.robot
+
+*** Variables ***
+${COMPOSEFILE}          ${CURDIR}/docker-compose.yaml
+${PROJECTDIR}           ${CURDIR}/../../../../../..
+
+
+*** Test Cases ***
+Create volume and bucket
+    Execute on          datanode        ozone oz -createVolume http://ksm/fstest -user bilbo -quota 100TB -root
+    Execute on          datanode        ozone oz -createBucket http://ksm/fstest/bucket1
+
+Check volume from ozonefs
+    ${result} =         Execute on          hadooplast        hdfs dfs -ls o3://bucket1.fstest/
+
+Create directory from ozonefs
+                        Execute on          hadooplast        hdfs dfs -mkdir -p o3://bucket1.fstest/testdir/deep
+    ${result} =         Execute on          ksm               ozone oz -listKey o3://ksm/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+                                            Should contain    ${result}         testdir/deep

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml
new file mode 100644
index 0000000..c3de4d1
--- /dev/null
+++ b/hadoop-ozone/ozonefs/pom.xml
@@ -0,0 +1,211 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-ozone</artifactId>
+    <version>0.2.1-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-ozone-filesystem</artifactId>
+  <name>Apache Hadoop Ozone FileSystem</name>
+  <packaging>jar</packaging>
+  <version>0.2.1-SNAPSHOT</version>
+  <properties>
+    <file.encoding>UTF-8</file.encoding>
+    <downloadSources>true</downloadSources>
+  </properties>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-shade-plugin</artifactId>
+        <version>3.1.1</version>
+        <configuration>
+          <artifactSet>
+            <includes>
+              <include>com.google.guava:guava:jar</include>
+              <include>org.slf4j:slf4j-api:jar</include>
+              <include>com.google.protobuf:protobuf-java</include>
+              <include>com.nimbusds:nimbus-jose-jwt:jar</include>
+              <include>com.github.stephenc.jcip:jcip-annotations</include>
+              <include>com.google.code.findbugs:jsr305:jar</include>
+              <include>org.apache.hadoop:hadoop-ozone-client</include>
+              <include>org.apache.hadoop:hadoop-hdds-client</include>
+              <include>org.apache.hadoop:hadoop-hdds-common</include>
+              <include>org.fusesource.leveldbjni:leveldbjni-all</include>
+              <include>org.apache.ratis:ratis-server</include>
+              <include>org.apache.ratis:ratis-proto-shaded:jar</include>
+              <include>com.google.auto.value:auto-value-annotations</include>
+              <include>com.squareup:javapoet:jar</include>
+              <include>org.jctools:jctools-core</include>
+              <include>org.apache.ratis:ratis-common</include>
+              <include>org.apache.ratis:ratis-client</include>
+              <include>org.apache.ratis:ratis-netty</include>
+              <include>org.apache.ratis:ratis-grpc</include>
+              <include>org.rocksdb:rocksdbjni</include>
+              <include>org.apache.hadoop:hadoop-ozone-common</include>
+            </includes>
+          </artifactSet>
+        </configuration>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>deplist</id>
+            <phase>compile</phase>
+            <goals>
+              <goal>list</goal>
+            </goals>
+            <configuration>
+              <!-- build a shellprofile -->
+              <outputFile>
+                ${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-optional.txt
+              </outputFile>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-server-scm</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-server-framework</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-ozone-manager</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-container-service</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-objectstore-service</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-integration-test</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
new file mode 100644
index 0000000..832a0cb
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+/**
+ * Constants for Ozone FileSystem implementation.
+ */
+public final class Constants {
+
+  public static final String OZONE_DEFAULT_USER = "hdfs";
+
+  public static final String OZONE_USER_DIR = "/user";
+
+  /** Local buffer directory. */
+  public static final String BUFFER_DIR_KEY = "fs.ozone.buffer.dir";
+
+  /** Temporary directory. */
+  public static final String BUFFER_TMP_KEY = "hadoop.tmp.dir";
+
+  /** Page size for Ozone listing operation. */
+  public static final int LISTING_PAGE_SIZE = 1024;
+
+  private Constants() {
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
new file mode 100644
index 0000000..4163c13
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DelegateToFileSystem;
+import org.apache.hadoop.ozone.OzoneConsts;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+/**
+ * ozone implementation of AbstractFileSystem.
+ * This impl delegates to the OzoneFileSystem
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class OzFs extends DelegateToFileSystem {
+
+  public OzFs(URI theUri, Configuration conf)
+      throws IOException, URISyntaxException {
+    super(theUri, new OzoneFileSystem(), conf,
+        OzoneConsts.OZONE_URI_SCHEME, false);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
new file mode 100644
index 0000000..4c5c0c8
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * The input stream for Ozone file system.
+ *
+ * TODO: Make inputStream generic for both rest and rpc clients
+ * This class is not thread safe.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class OzoneFSInputStream extends FSInputStream {
+
+  private final ChunkGroupInputStream inputStream;
+
+  public OzoneFSInputStream(InputStream inputStream) {
+    this.inputStream = (ChunkGroupInputStream)inputStream;
+  }
+
+  @Override
+  public int read() throws IOException {
+    return inputStream.read();
+  }
+
+  @Override
+  public int read(byte[] b, int off, int len) throws IOException {
+    return inputStream.read(b, off, len);
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    inputStream.close();
+  }
+
+  @Override
+  public void seek(long pos) throws IOException {
+    inputStream.seek(pos);
+  }
+
+  @Override
+  public long getPos() throws IOException {
+    return inputStream.getPos();
+  }
+
+  @Override
+  public boolean seekToNewSource(long targetPos) throws IOException {
+    return false;
+  }
+
+  @Override
+  public int available() throws IOException {
+    return inputStream.available();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java
new file mode 100644
index 0000000..faa3628
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
+
+
+/**
+ * The output stream for Ozone file system.
+ *
+ * TODO: Make outputStream generic for both rest and rpc clients
+ * This class is not thread safe.
+ */
+public class OzoneFSOutputStream extends OutputStream {
+
+  private final ChunkGroupOutputStream outputStream;
+
+  public OzoneFSOutputStream(OutputStream outputStream) {
+    this.outputStream = (ChunkGroupOutputStream)outputStream;
+  }
+
+  @Override
+  public void write(int b) throws IOException {
+    outputStream.write(b);
+  }
+
+  @Override
+  public void write(byte[] b, int off, int len) throws IOException {
+    outputStream.write(b, off, len);
+  }
+
+  @Override
+  public synchronized void flush() throws IOException {
+    outputStream.flush();
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    outputStream.close();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
new file mode 100644
index 0000000..6906a9d
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -0,0 +1,689 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Iterator;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.http.client.utils.URIBuilder;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+
+import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
+import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
+
+/**
+ * The Ozone Filesystem implementation.
+ *
+ * This subclass is marked as private as code should not be creating it
+ * directly; use {@link FileSystem#get(Configuration)} and variants to create
+ * one. If cast to {@link OzoneFileSystem}, extra methods and features may be
+ * accessed. Consider those private and unstable.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class OzoneFileSystem extends FileSystem {
+  static final Logger LOG = LoggerFactory.getLogger(OzoneFileSystem.class);
+
+  /** The Ozone client for connecting to Ozone server. */
+  private OzoneClient ozoneClient;
+  // Entry point for volume/bucket lookups on the Ozone cluster.
+  private ObjectStore objectStore;
+  // Volume and bucket resolved from the "bucket.volume" URI authority.
+  private OzoneVolume volume;
+  private OzoneBucket bucket;
+  // Fully qualified o3:// URI identifying this filesystem instance.
+  private URI uri;
+  // Short user name of the caller; used to derive the working directory.
+  private String userName;
+  private Path workingDir;
+  // Replication settings applied to every key created through this FS.
+  private ReplicationType replicationType;
+  private ReplicationFactor replicationFactor;
+
+  // Splits an authority of the form "bucket.volume": group(1) is the
+  // bucket (everything up to the last dot), group(2) is the volume.
+  private static final Pattern URL_SCHEMA_PATTERN =
+      Pattern.compile("(.+)\\.([^\\.]+)");
+
+  /**
+   * Initializes this filesystem from a URI of the form o3://bucket.volume
+   * and the given configuration: resolves the volume and bucket through an
+   * RPC Ozone client, reads the replication settings, and sets the working
+   * directory to the per-user home directory.
+   *
+   * @param name filesystem URI whose authority must be "bucket.volume"
+   * @param conf configuration supplying replication type and factor
+   * @throws IOException if the endpoint is invalid or the client fails
+   */
+  @Override
+  public void initialize(URI name, Configuration conf) throws IOException {
+    super.initialize(name, conf);
+    setConf(conf);
+    Objects.requireNonNull(name.getScheme(), "No scheme provided in " + name);
+    assert getScheme().equals(name.getScheme());
+
+    String authority = name.getAuthority();
+
+    Matcher matcher = URL_SCHEMA_PATTERN.matcher(authority);
+
+    if (!matcher.matches()) {
+      throw new IllegalArgumentException("Ozone file system url should be "
+          + "in the form o3://bucket.volume");
+    }
+    String bucketStr = matcher.group(1);
+    String volumeStr = matcher.group(2);
+
+    try {
+      uri = new URIBuilder().setScheme(OZONE_URI_SCHEME)
+          .setHost(authority).build();
+      LOG.trace("Ozone URI for ozfs initialization is " + uri);
+      this.ozoneClient = OzoneClientFactory.getRpcClient(conf);
+      objectStore = ozoneClient.getObjectStore();
+      this.volume = objectStore.getVolume(volumeStr);
+      this.bucket = volume.getBucket(bucketStr);
+      this.replicationType = ReplicationType.valueOf(
+          conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
+              OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT));
+      this.replicationFactor = ReplicationFactor.valueOf(
+          conf.getInt(OzoneConfigKeys.OZONE_REPLICATION,
+              OzoneConfigKeys.OZONE_REPLICATION_DEFAULT));
+      // Fall back to a fixed default user when the current UGI lookup fails
+      // so initialization itself never fails on an identity problem.
+      try {
+        this.userName =
+            UserGroupInformation.getCurrentUser().getShortUserName();
+      } catch (IOException e) {
+        this.userName = OZONE_DEFAULT_USER;
+      }
+      this.workingDir = new Path(OZONE_USER_DIR, this.userName)
+              .makeQualified(this.uri, this.workingDir);
+    } catch (URISyntaxException ue) {
+      final String msg = "Invalid Ozone endpoint " + name;
+      LOG.error(msg, ue);
+      throw new IOException(msg, ue);
+    }
+  }
+
+  /** Closes the Ozone client, then the superclass resources. */
+  @Override
+  public void close() throws IOException {
+    try {
+      ozoneClient.close();
+    } finally {
+      super.close();
+    }
+  }
+
+  @Override
+  public URI getUri() {
+    return uri;
+  }
+
+  @Override
+  public String getScheme() {
+    return OZONE_URI_SCHEME;
+  }
+
+  /**
+   * Opens the given file for reading.
+   *
+   * @param f path of the file to open
+   * @param bufferSize ignored; the Ozone stream manages its own buffering
+   * @throws FileNotFoundException if f does not exist or is a directory
+   */
+  @Override
+  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+    LOG.trace("open() path:{}", f);
+    final FileStatus fileStatus = getFileStatus(f);
+    final String key = pathToKey(f);
+    // Directories are zero-byte marker keys; opening one is an error.
+    if (fileStatus.isDirectory()) {
+      throw new FileNotFoundException("Can't open directory " + f + " to read");
+    }
+
+    return new FSDataInputStream(
+        new OzoneFSInputStream(bucket.readKey(key).getInputStream()));
+  }
+
+  /**
+   * Creates a file at the given path, deleting an existing key first when
+   * overwrite is requested and creating missing parent directories.
+   *
+   * @throws FileAlreadyExistsException if f is a directory, or exists as a
+   *         file and overwrite is false
+   */
+  @Override
+  public FSDataOutputStream create(Path f, FsPermission permission,
+                                   boolean overwrite, int bufferSize,
+                                   short replication, long blockSize,
+                                   Progressable progress) throws IOException {
+    LOG.trace("create() path:{}", f);
+    final String key = pathToKey(f);
+    final FileStatus status;
+    try {
+      status = getFileStatus(f);
+      if (status.isDirectory()) {
+        throw new FileAlreadyExistsException(f + " is a directory");
+      } else {
+        if (!overwrite) {
+          // path references a file and overwrite is disabled
+          throw new FileAlreadyExistsException(f + " already exists");
+        }
+        LOG.trace("Overwriting file {}", f);
+        deleteObject(key);
+      }
+    } catch (FileNotFoundException ignored) {
+      // check if the parent directory needs to be created
+      Path parent = f.getParent();
+      try {
+        // create all the directories for the parent
+        FileStatus parentStatus = getFileStatus(parent);
+        // NOTE(review): logs the file's key, not the parent's key.
+        LOG.trace("parent key:{} status:{}", key, parentStatus);
+      } catch (FileNotFoundException e) {
+        mkdirs(parent);
+      }
+      // This exception needs to ignored as this means that the file currently
+      // does not exists and a new file can thus be created.
+    }
+
+    OzoneOutputStream ozoneOutputStream =
+        bucket.createKey(key, 0, replicationType, replicationFactor);
+    // We pass null to FSDataOutputStream so it won't count writes that
+    // are being buffered to a file
+    return new FSDataOutputStream(
+        new OzoneFSOutputStream(ozoneOutputStream.getOutputStream()), null);
+  }
+
+  /**
+   * Creates a file without creating missing parents: the parent must
+   * already exist and be a directory.
+   */
+  @Override
+  public FSDataOutputStream createNonRecursive(Path path,
+      FsPermission permission,
+      EnumSet<CreateFlag> flags,
+      int bufferSize,
+      short replication,
+      long blockSize,
+      Progressable progress) throws IOException {
+    final Path parent = path.getParent();
+    if (parent != null) {
+      // expect this to raise an exception if there is no parent
+      if (!getFileStatus(parent).isDirectory()) {
+        throw new FileAlreadyExistsException("Not a directory: " + parent);
+      }
+    }
+    return create(path, permission, flags.contains(CreateFlag.OVERWRITE),
+        bufferSize, replication, blockSize, progress);
+  }
+
+  /** Append is not supported by the Ozone filesystem. */
+  @Override
+  public FSDataOutputStream append(Path f, int bufferSize,
+      Progressable progress) throws IOException {
+    throw new UnsupportedOperationException("append() Not implemented by the "
+        + getClass().getSimpleName() + " FileSystem implementation");
+  }
+
+  /**
+   * Iterator that renames every key under a source path by swapping the
+   * source-key prefix for the destination-key prefix.
+   */
+  private class RenameIterator extends OzoneListingIterator {
+    private final String srcKey;
+    private final String dstKey;
+
+    RenameIterator(Path srcPath, Path dstPath)
+        throws IOException {
+      super(srcPath);
+      srcKey = pathToKey(srcPath);
+      dstKey = pathToKey(dstPath);
+      LOG.trace("rename from:{} to:{}", srcKey, dstKey);
+    }
+
+    /** Renames one key: dstKey + (key minus the srcKey prefix). */
+    boolean processKey(String key) throws IOException {
+      String newKeyName = dstKey.concat(key.substring(srcKey.length()));
+      bucket.renameKey(key, newKeyName);
+      return true;
+    }
+  }
+
+  /**
+   * Check whether the source and destination path are valid and then perform
+   * rename from source path to destination path.
+   *
+   * The rename operation is performed by renaming the keys with src as prefix.
+   * For such keys the prefix is changed from src to dst.
+   *
+   * @param src source path for rename
+   * @param dst destination path for rename
+   * @return true if rename operation succeeded or
+   * if the src and dst have the same path and are of the same type
+   * @throws IOException on I/O errors or if the src/dst paths are invalid.
+   */
+  @Override
+  public boolean rename(Path src, Path dst) throws IOException {
+    if (src.equals(dst)) {
+      return true;
+    }
+
+    LOG.trace("rename() from:{} to:{}", src, dst);
+    if (src.isRoot()) {
+      // Cannot rename root of file system
+      LOG.trace("Cannot rename the root of a filesystem");
+      return false;
+    }
+
+    // Cannot rename a directory to its own subdirectory
+    // (walk dst's ancestors; if src is found among them, reject).
+    Path dstParent = dst.getParent();
+    while (dstParent != null && !src.equals(dstParent)) {
+      dstParent = dstParent.getParent();
+    }
+    Preconditions.checkArgument(dstParent == null,
+        "Cannot rename a directory to its own subdirectory");
+    // Check if the source exists
+    FileStatus srcStatus;
+    try {
+      srcStatus = getFileStatus(src);
+    } catch (FileNotFoundException fnfe) {
+      // source doesn't exist, return
+      return false;
+    }
+
+    // Check if the destination exists
+    FileStatus dstStatus;
+    try {
+      dstStatus = getFileStatus(dst);
+    } catch (FileNotFoundException fnde) {
+      dstStatus = null;
+    }
+
+    if (dstStatus == null) {
+      // If dst doesn't exist, check whether dst parent dir exists or not
+      // if the parent exists, the source can still be renamed to dst path
+      dstStatus = getFileStatus(dst.getParent());
+      if (!dstStatus.isDirectory()) {
+        throw new IOException(String.format(
+            "Failed to rename %s to %s, %s is a file", src, dst,
+            dst.getParent()));
+      }
+    } else {
+      // if dst exists and source and destination are same,
+      // check both the src and dst are of same type
+      if (srcStatus.getPath().equals(dstStatus.getPath())) {
+        return !srcStatus.isDirectory();
+      } else if (dstStatus.isDirectory()) {
+        // If dst is a directory, rename source as subpath of it.
+        // for example rename /source to /dst will lead to /dst/source
+        dst = new Path(dst, src.getName());
+        FileStatus[] statuses;
+        try {
+          statuses = listStatus(dst);
+        } catch (FileNotFoundException fnde) {
+          statuses = null;
+        }
+
+        if (statuses != null && statuses.length > 0) {
+          // If dst exists and not a directory not empty
+          throw new FileAlreadyExistsException(String.format(
+              "Failed to rename %s to %s, file already exists or not empty!",
+              src, dst));
+        }
+      } else {
+        // If dst is not a directory
+        throw new FileAlreadyExistsException(String.format(
+            "Failed to rename %s to %s, file already exists!", src, dst));
+      }
+    }
+
+    if (srcStatus.isDirectory()) {
+      // NOTE(review): string-prefix check; complements the ancestor walk
+      // above after dst may have been re-pointed into the directory.
+      if (dst.toString().startsWith(src.toString())) {
+        LOG.trace("Cannot rename a directory to a subdirectory of self");
+        return false;
+      }
+    }
+    RenameIterator iterator = new RenameIterator(src, dst);
+    return iterator.iterate();
+  }
+
+  /**
+   * Iterator that deletes every key under a path; refuses to start on a
+   * non-empty directory unless recursive deletion was requested.
+   */
+  private class DeleteIterator extends OzoneListingIterator {
+    private boolean recursive;
+    DeleteIterator(Path f, boolean recursive)
+        throws IOException {
+      super(f);
+      this.recursive = recursive;
+      if (getStatus().isDirectory()
+          && !this.recursive
+          && listStatus(f).length != 0) {
+        throw new PathIsNotEmptyDirectoryException(f.toString());
+      }
+    }
+
+    /** Deletes one key; the root directory key ("") is never deleted. */
+    boolean processKey(String key) throws IOException {
+      if (key.equals("")) {
+        LOG.trace("Skipping deleting root directory");
+        return true;
+      } else {
+        LOG.trace("deleting key:" + key);
+        boolean succeed = deleteObject(key);
+        // if recursive delete is requested ignore the return value of
+        // deleteObject and issue deletes for other keys.
+        return recursive || succeed;
+      }
+    }
+  }
+
+  /**
+   * Deletes a file, or a directory (recursively when requested).
+   *
+   * @return true if deletion succeeded, false if f does not exist
+   * @throws PathIsNotEmptyDirectoryException for a non-empty directory
+   *         when recursive is false
+   */
+  @Override
+  public boolean delete(Path f, boolean recursive) throws IOException {
+    LOG.trace("delete() path:{} recursive:{}", f, recursive);
+    try {
+      DeleteIterator iterator = new DeleteIterator(f, recursive);
+      return iterator.iterate();
+    } catch (FileNotFoundException e) {
+      LOG.debug("Couldn't delete {} - does not exist", f);
+      return false;
+    }
+  }
+
+  /**
+   * Iterator that collects the FileStatus of the immediate children of a
+   * path, skipping keys that live in deeper subdirectories.
+   */
+  private class ListStatusIterator extends OzoneListingIterator {
+    private  List<FileStatus> statuses = new ArrayList<>(LISTING_PAGE_SIZE);
+    private Path f;
+
+    ListStatusIterator(Path f) throws IOException  {
+      super(f);
+      this.f = f;
+    }
+
+    /** Records a status for keys that are direct children of f. */
+    boolean processKey(String key) throws IOException {
+      Path keyPath = new Path(OZONE_URI_DELIMITER + key);
+      if (key.equals(getPathKey())) {
+        if (pathIsDirectory()) {
+          return true;
+        } else {
+          statuses.add(getFileStatus(keyPath));
+          return true;
+        }
+      }
+      // left with only subkeys now
+      if (pathToKey(keyPath.getParent()).equals(pathToKey(f))) {
+        // skip keys which are for subdirectories of the directory
+        statuses.add(getFileStatus(keyPath));
+      }
+      return true;
+    }
+
+    FileStatus[] getStatuses() {
+      return statuses.toArray(new FileStatus[statuses.size()]);
+    }
+  }
+
+  /**
+   * Lists the immediate children of a path.
+   *
+   * @throws FileNotFoundException if f does not exist
+   */
+  @Override
+  public FileStatus[] listStatus(Path f) throws IOException {
+    LOG.trace("listStatus() path:{}", f);
+    ListStatusIterator iterator = new ListStatusIterator(f);
+    iterator.iterate();
+    return iterator.getStatuses();
+  }
+
+  @Override
+  public void setWorkingDirectory(Path newDir) {
+    workingDir = newDir;
+  }
+
+  @Override
+  public Path getWorkingDirectory() {
+    return workingDir;
+  }
+
+  /**
+   * Check whether the path is valid and then create directories.
+   * Directory is represented using a key with no value.
+   * All the non-existent parent directories are also created.
+   *
+   * @param path directory path to be created
+   * @return true if directory exists or created successfully.
+   * @throws IOException
+   */
+  private boolean mkdir(Path path) throws IOException {
+    // Walks from the leaf up towards the root, creating a marker key at
+    // each missing level; stops at the first existing directory. On any
+    // failure the directories created so far are rolled back.
+    Path fPart = path;
+    Path prevfPart = null;
+    do {
+      LOG.trace("validating path:{}", fPart);
+      try {
+        FileStatus fileStatus = getFileStatus(fPart);
+        if (fileStatus.isDirectory()) {
+          // If path exists and a directory, exit
+          break;
+        } else {
+          // Found a file here, rollback and delete newly created directories
+          LOG.trace("Found a file with same name as directory, path:{}", fPart);
+          if (prevfPart != null) {
+            delete(prevfPart, true);
+          }
+          throw new FileAlreadyExistsException(String.format(
+              "Can't make directory for path '%s', it is a file.", fPart));
+        }
+      } catch (FileNotFoundException fnfe) {
+        LOG.trace("creating directory for fpart:{}", fPart);
+        String key = pathToKey(fPart);
+        String dirKey = addTrailingSlashIfNeeded(key);
+        if (!createDirectory(dirKey)) {
+          // Directory creation failed here,
+          // rollback and delete newly created directories
+          LOG.trace("Directory creation failed, path:{}", fPart);
+          if (prevfPart != null) {
+            delete(prevfPart, true);
+          }
+          return false;
+        }
+      }
+      prevfPart = fPart;
+      fPart = fPart.getParent();
+    } while (fPart != null);
+    return true;
+  }
+
+  /**
+   * Creates a directory and its missing parents; permissions are ignored.
+   *
+   * @return false when f maps to the empty key (the bucket root)
+   */
+  @Override
+  public boolean mkdirs(Path f, FsPermission permission) throws IOException {
+    LOG.trace("mkdir() path:{} ", f);
+    String key = pathToKey(f);
+    if (StringUtils.isEmpty(key)) {
+      return false;
+    }
+    return mkdir(f);
+  }
+
+  /**
+   * Returns the status of a path. The bucket root is always a directory;
+   * otherwise the key is looked up first as a file and then, with a
+   * trailing slash appended, as a directory marker.
+   *
+   * @throws FileNotFoundException if neither lookup finds a key
+   */
+  @Override
+  public FileStatus getFileStatus(Path f) throws IOException {
+    LOG.trace("getFileStatus() path:{}", f);
+    Path qualifiedPath = f.makeQualified(uri, workingDir);
+    String key = pathToKey(qualifiedPath);
+
+    if (key.length() == 0) {
+      return new FileStatus(0, true, 1, 0,
+          bucket.getCreationTime(), qualifiedPath);
+    }
+
+    // consider this a file and get key status
+    OzoneKey meta = getKeyInfo(key);
+    if (meta == null) {
+      // not found as a file; retry as a directory marker key ("key/")
+      key = addTrailingSlashIfNeeded(key);
+      meta = getKeyInfo(key);
+    }
+
+    if (meta == null) {
+      LOG.trace("File:{} not found", f);
+      throw new FileNotFoundException(f + ": No such file or directory!");
+    } else if (isDirectory(meta)) {
+      return new FileStatus(0, true, 1, 0,
+          meta.getModificationTime(), qualifiedPath);
+    } else {
+      //TODO: Fetch replication count from ratis config
+      return new FileStatus(meta.getDataSize(), false, 1,
+            getDefaultBlockSize(f), meta.getModificationTime(), qualifiedPath);
+    }
+  }
+
+  /**
+   * Helper method to fetch the key metadata info.
+   * @param key key whose metadata information needs to be fetched
+   * @return metadata info of the key, or null when the lookup fails
+   */
+  private OzoneKey getKeyInfo(String key) {
+    try {
+      return bucket.getKey(key);
+    } catch (IOException e) {
+      // any lookup failure is treated as "key does not exist"
+      LOG.trace("Key:{} does not exists", key);
+      return null;
+    }
+  }
+
+  /**
+   * Helper method to check if an Ozone key is representing a directory.
+   * A directory is a zero-byte key whose name ends with the delimiter.
+   * @param key key to be checked as a directory
+   * @return true if key is a directory, false otherwise
+   */
+  private boolean isDirectory(OzoneKey key) {
+    LOG.trace("key name:{} size:{}", key.getName(),
+        key.getDataSize());
+    return key.getName().endsWith(OZONE_URI_DELIMITER)
+        && (key.getDataSize() == 0);
+  }
+
+  /**
+   * Helper method to create an directory specified by key name in bucket.
+   * @param keyName key name to be created as directory
+   * @return true if the key is created, false otherwise
+   */
+  private boolean createDirectory(String keyName) {
+    try {
+      LOG.trace("creating dir for key:{}", keyName);
+      bucket.createKey(keyName, 0, replicationType, replicationFactor).close();
+      return true;
+    } catch (IOException ioe) {
+      LOG.error("create key failed for key:{}", keyName, ioe);
+      return false;
+    }
+  }
+
+  /**
+   * Helper method to delete an object specified by key name in bucket.
+   * @param keyName key name to be deleted
+   * @return true if the key is deleted, false otherwise
+   */
+  private boolean deleteObject(String keyName) {
+    LOG.trace("issuing delete for key" + keyName);
+    try {
+      bucket.deleteKey(keyName);
+      return true;
+    } catch (IOException ioe) {
+      LOG.error("delete key failed " + ioe.getMessage());
+      return false;
+    }
+  }
+
+  /**
+   * Turn a path (relative or otherwise) into an Ozone key.
+   *
+   * @param path the path of the file.
+   * @return the key of the object that represents the file.
+   */
+  public String pathToKey(Path path) {
+    Objects.requireNonNull(path, "Path can not be null!");
+    if (!path.isAbsolute()) {
+      path = new Path(workingDir, path);
+    }
+    // removing leading '/' char
+    String key = path.toUri().getPath().substring(1);
+    LOG.trace("path for key:{} is:{}", key, path);
+    return key;
+  }
+
+  /**
+   * Add trailing delimiter to path if it is already not present.
+   *
+   * @param key the ozone Key which needs to be appended
+   * @return delimiter appended key
+   */
+  private String addTrailingSlashIfNeeded(String key) {
+    if (StringUtils.isNotEmpty(key) && !key.endsWith(OZONE_URI_DELIMITER)) {
+      return key + OZONE_URI_DELIMITER;
+    } else {
+      return key;
+    }
+  }
+
+  @Override
+  public String toString() {
+    return "OzoneFileSystem{URI=" + uri + ", "
+        + "workingDir=" + workingDir + ", "
+        + "userName=" + userName + ", "
+        + "statistics=" + statistics
+        + "}";
+  }
+
+  /**
+   * Base class for operations that visit every key under a path: lists the
+   * keys once in the constructor, then feeds each key name to processKey().
+   * For a file path only that single key is processed.
+   */
+  private abstract class OzoneListingIterator {
+    private final Path path;
+    private final FileStatus status;
+    private String pathKey;
+    private Iterator<OzoneKey> keyIterator;
+
+    OzoneListingIterator(Path path)
+        throws IOException {
+      this.path = path;
+      this.status = getFileStatus(path);
+      this.pathKey = pathToKey(path);
+      if (status.isDirectory()) {
+        this.pathKey = addTrailingSlashIfNeeded(pathKey);
+      }
+      keyIterator = bucket.listKeys(pathKey);
+    }
+
+    /**
+     * Handles one key; returning false aborts the iteration.
+     */
+    abstract boolean processKey(String key) throws IOException;
+
+    // iterates all the keys in the particular path
+    boolean iterate() throws IOException {
+      LOG.trace("Iterating path {}", path);
+      if (status.isDirectory()) {
+        LOG.trace("Iterating directory:{}", pathKey);
+        while (keyIterator.hasNext()) {
+          OzoneKey key = keyIterator.next();
+          LOG.trace("iterating key:{}", key.getName());
+          if (!processKey(key.getName())) {
+            return false;
+          }
+        }
+        return true;
+      } else {
+        LOG.trace("iterating file:{}", path);
+        return processKey(pathKey);
+      }
+    }
+
+    String getPathKey() {
+      return pathKey;
+    }
+
+    boolean pathIsDirectory() {
+      return status.isDirectory();
+    }
+
+    FileStatus getStatus() {
+      return status;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
new file mode 100644
index 0000000..93e82c3
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * Ozone Filesystem.
+ *
+ * Except for the exceptions, it should all be hidden as implementation details.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
new file mode 100644
index 0000000..ad21f28
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * Test OzoneFSInputStream by reading through multiple interfaces.
+ */
+public class TestOzoneFSInputStream {
+  private static MiniOzoneCluster cluster = null;
+  private static FileSystem fs;
+  private static StorageHandler storageHandler;
+  // Path of the test file written in init().
+  private static Path filePath = null;
+  // Exact bytes written to filePath; every read is verified against this.
+  private static byte[] data = null;
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, 10);
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(10)
+        .build();
+    cluster.waitForClusterToBeReady();
+    storageHandler =
+        new ObjectStoreHandler(conf).getStorageHandler();
+
+    // create a volume and a bucket to be used by OzoneFileSystem
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+
+    // Fetch the host and port for File System init
+    DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
+        .getDatanodeDetails();
+    int port = datanodeDetails
+        .getPort(DatanodeDetails.Port.Name.REST).getValue();
+    String host = datanodeDetails.getHostName();
+
+    // Set the fs.defaultFS and start the filesystem
+    String uri = String.format("%s://%s.%s/",
+        OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
+    fs =  FileSystem.get(conf);
+    int fileLen = 100 * 1024 * 1024;
+    data = DFSUtil.string2Bytes(RandomStringUtils.randomAlphanumeric(fileLen));
+    filePath = new Path("/" + RandomStringUtils.randomAlphanumeric(5));
+    try (FSDataOutputStream stream = fs.create(filePath)) {
+      stream.write(data);
+    }
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @AfterClass
+  public static void shutdown() throws IOException {
+    fs.close();
+    storageHandler.close();
+    cluster.shutdown();
+  }
+
+  /**
+   * Reads the whole file one byte at a time and verifies each byte
+   * against the written data.
+   */
+  @Test
+  public void testO3FSSingleByteRead() throws IOException {
+    FSDataInputStream inputStream = fs.open(filePath);
+    byte[] value = new byte[data.length];
+    int i = 0;
+    while (true) {
+      int val = inputStream.read();
+      if (val == -1) {
+        break;
+      }
+      value[i] = (byte) val;
+      // assertEquals takes (expected, actual): the written byte is expected.
+      Assert.assertEquals("value mismatch at:" + i, data[i], value[i]);
+      i++;
+    }
+    Assert.assertEquals(data.length, i);
+    Assert.assertTrue(Arrays.equals(value, data));
+    inputStream.close();
+  }
+
+  /**
+   * Reads the whole file through a 1 MB buffer and verifies the content.
+   */
+  @Test
+  public void testO3FSMultiByteRead() throws IOException {
+    FSDataInputStream inputStream = fs.open(filePath);
+    byte[] value = new byte[data.length];
+    byte[] tmp = new byte[1 * 1024 * 1024];
+    int total = 0;
+    while (true) {
+      int val = inputStream.read(tmp);
+      if (val == -1) {
+        break;
+      }
+      // read(byte[]) may return fewer bytes than the buffer size; copy only
+      // the bytes actually read. The original copied tmp.length bytes at
+      // offset i * tmp.length, which corrupts the result (and can overflow
+      // the array) whenever a read comes back short.
+      System.arraycopy(tmp, 0, value, total, val);
+      total += val;
+    }
+    Assert.assertEquals(data.length, total);
+    Assert.assertTrue(Arrays.equals(value, data));
+    inputStream.close();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
new file mode 100644
index 0000000..a225702
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.junit.After;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
+
+import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test OzoneFileSystem Interfaces.
+ *
+ * This test will test the various interfaces i.e.
+ * create, read, write, getFileStatus
+ */
+@RunWith(Parameterized.class)
+public class TestOzoneFileInterfaces {
+
+  private String rootPath;
+  private String userName;
+
+  /**
+   * Parameter class to set absolute url/defaultFS handling.
+   * <p>
+   * Hadoop file systems could be used in multiple ways: Using the defaultfs
+   * and file path without the schema, or use absolute url-s even with
+   * different defaultFS. This parameter matrix would test both the use cases.
+   */
+  @Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] {{false, true}, {true, false}});
+  }
+
+  // When true, init() points fs.defaultFS at the Ozone bucket.
+  private boolean setDefaultFs;
+
+  // When true, createPath() builds absolute o3:// URLs; otherwise plain
+  // schema-less paths resolved against the default filesystem.
+  private boolean useAbsolutePath;
+
+  private static MiniOzoneCluster cluster = null;
+
+  private static FileSystem fs;
+
+  private static StorageHandler storageHandler;
+
+  public TestOzoneFileInterfaces(boolean setDefaultFs,
+      boolean useAbsolutePath) {
+    this.setDefaultFs = setDefaultFs;
+    this.useAbsolutePath = useAbsolutePath;
+  }
+
+  /**
+   * Starts a 3-datanode MiniOzoneCluster, creates a fresh volume/bucket
+   * pair, and initializes {@link #fs} either via fs.defaultFS or via an
+   * explicit o3:// URI depending on the parameter matrix.
+   */
+  @Before
+  public void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+    storageHandler =
+        new ObjectStoreHandler(conf).getStorageHandler();
+
+    // create a volume and a bucket to be used by OzoneFileSystem
+    userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+
+    rootPath = String
+        .format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucketName,
+            volumeName);
+    if (setDefaultFs) {
+      // Set the fs.defaultFS and start the filesystem
+      conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+      fs = FileSystem.get(conf);
+    } else {
+      fs = FileSystem.get(new URI(rootPath + "/test.txt"), conf);
+    }
+  }
+
+  @After
+  public void teardown() throws IOException {
+    // Close the client-side resources before tearing down the cluster so
+    // close() does not have to talk to already-stopped datanodes.
+    IOUtils.closeQuietly(fs);
+    IOUtils.closeQuietly(storageHandler);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testFileSystemInit() throws IOException {
+    if (setDefaultFs) {
+      assertTrue(
+          "The initialized file system is not OzoneFileSystem but " +
+              fs.getClass(),
+          fs instanceof OzoneFileSystem);
+      assertEquals(OzoneConsts.OZONE_URI_SCHEME, fs.getUri().getScheme());
+    }
+  }
+
+  @Test
+  public void testOzFsReadWrite() throws IOException {
+    long currentTime = Time.now();
+    int stringLen = 20;
+    String data = RandomStringUtils.randomAlphanumeric(stringLen);
+    String filePath = RandomStringUtils.randomAlphanumeric(5);
+    Path path = createPath("/" + filePath);
+    try (FSDataOutputStream stream = fs.create(path)) {
+      stream.writeBytes(data);
+    }
+
+    FileStatus status = fs.getFileStatus(path);
+    // The timestamp of the newly created file should always be greater than
+    // the time when the test was started
+    assertTrue("Modification time has not been recorded: " + status,
+        status.getModificationTime() > currentTime);
+
+    try (FSDataInputStream inputStream = fs.open(path)) {
+      byte[] buffer = new byte[stringLen];
+      inputStream.readFully(0, buffer);
+      String out = new String(buffer, 0, buffer.length);
+      assertEquals(data, out);
+    }
+  }
+
+
+  @Test
+  public void testDirectory() throws IOException {
+    String dirPath = RandomStringUtils.randomAlphanumeric(5);
+    Path path = createPath("/" + dirPath);
+    assertTrue("Makedirs returned with false for the path " + path,
+        fs.mkdirs(path));
+
+    FileStatus status = fs.getFileStatus(path);
+    assertTrue("The created path is not directory.", status.isDirectory());
+
+    assertEquals(0, status.getLen());
+
+    FileStatus[] statusList = fs.listStatus(createPath("/"));
+    assertEquals(1, statusList.length);
+    assertEquals(status, statusList[0]);
+
+    // Verify the root status itself, not the child directory's status.
+    FileStatus statusRoot = fs.getFileStatus(createPath("/"));
+    assertTrue("Root dir (/) is not a directory.", statusRoot.isDirectory());
+    assertEquals(0, statusRoot.getLen());
+  }
+
+  @Test
+  public void testPathToKey() throws Exception {
+    OzoneFileSystem ozoneFs = (OzoneFileSystem) TestOzoneFileInterfaces.fs;
+
+    assertEquals("a/b/1", ozoneFs.pathToKey(new Path("/a/b/1")));
+
+    // Relative paths are resolved against the user's home directory.
+    assertEquals("user/" + getCurrentUser() + "/key1/key2",
+        ozoneFs.pathToKey(new Path("key1/key2")));
+
+    assertEquals("key1/key2",
+        ozoneFs.pathToKey(new Path("o3://test1/key1/key2")));
+  }
+
+  private String getCurrentUser() {
+    try {
+      return UserGroupInformation.getCurrentUser().getShortUserName();
+    } catch (IOException e) {
+      // Fall back to the Ozone default when the local user is unavailable.
+      return OZONE_DEFAULT_USER;
+    }
+  }
+
+  /** Builds either an absolute o3:// path or a relative one (see matrix). */
+  private Path createPath(String relativePath) {
+    if (useAbsolutePath) {
+      return new Path(
+          rootPath + (relativePath.startsWith("/") ? "" : "/") + relativePath);
+    } else {
+      return new Path(relativePath);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
new file mode 100644
index 0000000..dd54315
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+
+/**
+ * Contract tests for file creation against OzoneFileSystem, backed by a
+ * shared mini cluster that lives for the duration of the test class.
+ */
+public class ITestOzoneContractCreate extends AbstractContractCreateTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new OzoneContract(conf);
+  }
+
+  @BeforeClass
+  public static void createCluster() throws IOException {
+    OzoneContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+    OzoneContract.destroyCluster();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
new file mode 100644
index 0000000..f0a3d8d
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+
+/**
+ * Contract tests for delete operations against OzoneFileSystem, backed by
+ * a shared mini cluster that lives for the duration of the test class.
+ */
+public class ITestOzoneContractDelete extends AbstractContractDeleteTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new OzoneContract(conf);
+  }
+
+  @BeforeClass
+  public static void createCluster() throws IOException {
+    OzoneContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+    OzoneContract.destroyCluster();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
new file mode 100644
index 0000000..134a9ad
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+
+
+/**
+ * Contract test suite covering OzoneFileSystem integration with DistCp.
+ */
+public class ITestOzoneContractDistCp extends AbstractContractDistCpTest {
+
+  @BeforeClass
+  public static void createCluster() throws IOException {
+    OzoneContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+    OzoneContract.destroyCluster();
+  }
+
+  @Override
+  protected OzoneContract createContract(Configuration conf) {
+    return new OzoneContract(conf);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
new file mode 100644
index 0000000..98bbb14
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+
+/**
+ * Ozone contract tests covering getFileStatus.
+ */
+public class ITestOzoneContractGetFileStatus
+    extends AbstractContractGetFileStatusTest {
+
+
+  @BeforeClass
+  public static void createCluster() throws IOException {
+    OzoneContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+    OzoneContract.destroyCluster();
+  }
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new OzoneContract(conf);
+  }
+
+  /** Logs the filesystem state before running the standard teardown. */
+  @Override
+  public void teardown() throws Exception {
+    getLog().info("FS details {}", getFileSystem());
+    super.teardown();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
new file mode 100644
index 0000000..bc0de5d
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+
+/**
+ * Contract tests for directory operations against OzoneFileSystem, backed
+ * by a shared mini cluster that lives for the duration of the test class.
+ */
+public class ITestOzoneContractMkdir extends AbstractContractMkdirTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new OzoneContract(conf);
+  }
+
+  @BeforeClass
+  public static void createCluster() throws IOException {
+    OzoneContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+    OzoneContract.destroyCluster();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
new file mode 100644
index 0000000..0bc57d4
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+
+/**
+ * Contract tests for opening files against OzoneFileSystem, backed by a
+ * shared mini cluster that lives for the duration of the test class.
+ */
+public class ITestOzoneContractOpen extends AbstractContractOpenTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new OzoneContract(conf);
+  }
+
+  @BeforeClass
+  public static void createCluster() throws IOException {
+    OzoneContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+    OzoneContract.destroyCluster();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
new file mode 100644
index 0000000..8ce1d1b
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+
+/**
+ * Contract tests for rename operations against OzoneFileSystem, backed by
+ * a shared mini cluster that lives for the duration of the test class.
+ */
+public class ITestOzoneContractRename extends AbstractContractRenameTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new OzoneContract(conf);
+  }
+
+  @BeforeClass
+  public static void createCluster() throws IOException {
+    OzoneContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+    OzoneContract.destroyCluster();
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message