hadoop-common-commits mailing list archives

From: a..@apache.org
Subject: [17/41] git commit: HADOOP-11074. Move s3-related FS connector code to hadoop-aws. (David S. Wang via Colin Patrick McCabe)
Date: Fri, 12 Sep 2014 18:47:00 GMT
HADOOP-11074. Move s3-related FS connector code to hadoop-aws. (David S. Wang via Colin Patrick McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ec7fcd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ec7fcd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ec7fcd9

Branch: refs/heads/HDFS-6581
Commit: 5ec7fcd9dd6bb86858c6e2583321bb9a615bd392
Parents: 7f80e14
Author: Colin Patrick Mccabe <cmccabe@cloudera.com>
Authored: Wed Sep 10 16:14:08 2014 -0700
Committer: Colin Patrick Mccabe <cmccabe@cloudera.com>
Committed: Wed Sep 10 16:14:53 2014 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/s3/Block.java     |  52 --
 .../apache/hadoop/fs/s3/FileSystemStore.java    |  67 --
 .../java/org/apache/hadoop/fs/s3/INode.java     | 128 ---
 .../hadoop/fs/s3/Jets3tFileSystemStore.java     | 445 -----------
 .../org/apache/hadoop/fs/s3/MigrationTool.java  | 291 -------
 .../org/apache/hadoop/fs/s3/S3Credentials.java  | 103 ---
 .../org/apache/hadoop/fs/s3/S3Exception.java    |  39 -
 .../org/apache/hadoop/fs/s3/S3FileSystem.java   | 486 -----------
 .../hadoop/fs/s3/S3FileSystemConfigKeys.java    |  47 --
 .../hadoop/fs/s3/S3FileSystemException.java     |  36 -
 .../org/apache/hadoop/fs/s3/S3InputStream.java  | 215 -----
 .../org/apache/hadoop/fs/s3/S3OutputStream.java | 235 ------
 .../hadoop/fs/s3/VersionMismatchException.java  |  37 -
 .../java/org/apache/hadoop/fs/s3/package.html   |  55 --
 .../apache/hadoop/fs/s3native/FileMetadata.java |  59 --
 .../s3native/Jets3tNativeFileSystemStore.java   | 483 -----------
 .../fs/s3native/NativeFileSystemStore.java      |  67 --
 .../hadoop/fs/s3native/NativeS3FileSystem.java  | 796 -------------------
 .../hadoop/fs/s3native/PartialListing.java      |  64 --
 .../s3native/S3NativeFileSystemConfigKeys.java  |  47 --
 .../org/apache/hadoop/fs/s3native/package.html  |  32 -
 .../services/org.apache.hadoop.fs.FileSystem    |   2 -
 .../fs/contract/s3n/NativeS3Contract.java       |  43 -
 .../fs/contract/s3n/TestS3NContractCreate.java  |  38 -
 .../fs/contract/s3n/TestS3NContractDelete.java  |  31 -
 .../fs/contract/s3n/TestS3NContractMkdir.java   |  34 -
 .../fs/contract/s3n/TestS3NContractOpen.java    |  31 -
 .../fs/contract/s3n/TestS3NContractRename.java  |  32 -
 .../fs/contract/s3n/TestS3NContractRootDir.java |  35 -
 .../fs/contract/s3n/TestS3NContractSeek.java    |  31 -
 .../hadoop/fs/s3/InMemoryFileSystemStore.java   | 200 -----
 .../fs/s3/Jets3tS3FileSystemContractTest.java   |  31 -
 .../fs/s3/S3FileSystemContractBaseTest.java     |  63 --
 .../hadoop/fs/s3/S3InMemoryFileSystem.java      |  32 -
 .../java/org/apache/hadoop/fs/s3/TestINode.java |  60 --
 .../fs/s3/TestInMemoryS3FileSystemContract.java |  31 -
 .../apache/hadoop/fs/s3/TestS3Credentials.java  |  36 -
 .../apache/hadoop/fs/s3/TestS3FileSystem.java   |  50 --
 .../hadoop/fs/s3/TestS3InMemoryFileSystem.java  |  67 --
 .../s3native/InMemoryNativeFileSystemStore.java | 206 -----
 .../Jets3tNativeS3FileSystemContractTest.java   |  30 -
 .../NativeS3FileSystemContractBaseTest.java     | 233 ------
 .../fs/s3native/S3NInMemoryFileSystem.java      |  32 -
 .../TestInMemoryNativeS3FileSystemContract.java |  30 -
 .../TestJets3tNativeFileSystemStore.java        | 126 ---
 .../fs/s3native/TestS3NInMemoryFileSystem.java  |  69 --
 .../src/test/resources/contract/s3n.xml         |  95 ---
 hadoop-project/pom.xml                          |  12 +
 .../hadoop-aws/dev-support/findbugs-exclude.xml | 358 ++++++++-
 .../java/org/apache/hadoop/fs/s3/Block.java     |  52 ++
 .../apache/hadoop/fs/s3/FileSystemStore.java    |  67 ++
 .../java/org/apache/hadoop/fs/s3/INode.java     | 128 +++
 .../hadoop/fs/s3/Jets3tFileSystemStore.java     | 445 +++++++++++
 .../org/apache/hadoop/fs/s3/MigrationTool.java  | 291 +++++++
 .../org/apache/hadoop/fs/s3/S3Credentials.java  | 103 +++
 .../org/apache/hadoop/fs/s3/S3Exception.java    |  39 +
 .../org/apache/hadoop/fs/s3/S3FileSystem.java   | 486 +++++++++++
 .../hadoop/fs/s3/S3FileSystemConfigKeys.java    |  47 ++
 .../hadoop/fs/s3/S3FileSystemException.java     |  36 +
 .../org/apache/hadoop/fs/s3/S3InputStream.java  | 215 +++++
 .../org/apache/hadoop/fs/s3/S3OutputStream.java | 235 ++++++
 .../hadoop/fs/s3/VersionMismatchException.java  |  37 +
 .../java/org/apache/hadoop/fs/s3/package.html   |  55 ++
 .../apache/hadoop/fs/s3native/FileMetadata.java |  59 ++
 .../s3native/Jets3tNativeFileSystemStore.java   | 483 +++++++++++
 .../fs/s3native/NativeFileSystemStore.java      |  67 ++
 .../hadoop/fs/s3native/NativeS3FileSystem.java  | 796 +++++++++++++++++++
 .../hadoop/fs/s3native/PartialListing.java      |  64 ++
 .../s3native/S3NativeFileSystemConfigKeys.java  |  47 ++
 .../org/apache/hadoop/fs/s3native/package.html  |  32 +
 .../services/org.apache.hadoop.fs.FileSystem    |  17 +
 .../fs/contract/s3n/NativeS3Contract.java       |  43 +
 .../fs/contract/s3n/TestS3NContractCreate.java  |  38 +
 .../fs/contract/s3n/TestS3NContractDelete.java  |  31 +
 .../fs/contract/s3n/TestS3NContractMkdir.java   |  34 +
 .../fs/contract/s3n/TestS3NContractOpen.java    |  31 +
 .../fs/contract/s3n/TestS3NContractRename.java  |  32 +
 .../fs/contract/s3n/TestS3NContractRootDir.java |  35 +
 .../fs/contract/s3n/TestS3NContractSeek.java    |  31 +
 .../hadoop/fs/s3/InMemoryFileSystemStore.java   | 200 +++++
 .../fs/s3/Jets3tS3FileSystemContractTest.java   |  31 +
 .../fs/s3/S3FileSystemContractBaseTest.java     |  54 ++
 .../hadoop/fs/s3/S3InMemoryFileSystem.java      |  32 +
 .../java/org/apache/hadoop/fs/s3/TestINode.java |  60 ++
 .../fs/s3/TestInMemoryS3FileSystemContract.java |  31 +
 .../apache/hadoop/fs/s3/TestS3Credentials.java  |  36 +
 .../apache/hadoop/fs/s3/TestS3FileSystem.java   |  50 ++
 .../hadoop/fs/s3/TestS3InMemoryFileSystem.java  |  67 ++
 .../s3native/InMemoryNativeFileSystemStore.java | 206 +++++
 .../Jets3tNativeS3FileSystemContractTest.java   |  30 +
 .../NativeS3FileSystemContractBaseTest.java     | 233 ++++++
 .../fs/s3native/S3NInMemoryFileSystem.java      |  32 +
 .../TestInMemoryNativeS3FileSystemContract.java |  30 +
 .../TestJets3tNativeFileSystemStore.java        | 126 +++
 .../fs/s3native/TestS3NInMemoryFileSystem.java  |  69 ++
 .../src/test/resources/contract/s3n.xml         |  95 +++
 hadoop-tools/hadoop-tools-dist/pom.xml          |   6 +
 97 files changed, 5833 insertions(+), 5453 deletions(-)
----------------------------------------------------------------------
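
Not part of the patch itself, but a minimal usage sketch of what the move amounts to for callers: the relocated classes keep their packages (org.apache.hadoop.fs.s3 and org.apache.hadoop.fs.s3native), so code that reaches them through the generic FileSystem API is unchanged; only the Maven module that ships them becomes hadoop-aws. The bucket name and credential strings below are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class S3NUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Credentials may also be embedded in the URI; see S3Credentials in the diff below.
    conf.set("fs.s3n.awsAccessKeyId", "ACCESS_KEY_ID");         // placeholder
    conf.set("fs.s3n.awsSecretAccessKey", "SECRET_ACCESS_KEY"); // placeholder
    // s3n:// resolves to NativeS3FileSystem, now provided by the hadoop-aws module.
    FileSystem fs = FileSystem.get(URI.create("s3n://example-bucket/"), conf);
    for (FileStatus status : fs.listStatus(new Path("/"))) {
      System.out.println(status.getPath());
    }
    fs.close();
  }
}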


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Block.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Block.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Block.java
deleted file mode 100644
index 6926f17..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Block.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Holds metadata about a block of data being stored in a {@link FileSystemStore}.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class Block {
-  private long id;
-
-  private long length;
-
-  public Block(long id, long length) {
-    this.id = id;
-    this.length = length;
-  }
-
-  public long getId() {
-    return id;
-  }
-
-  public long getLength() {
-    return length;
-  }
-
-  @Override
-  public String toString() {
-    return "Block[" + id + ", " + length + "]";
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java
deleted file mode 100644
index 07e456b..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Set;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-
-/**
- * A facility for storing and retrieving {@link INode}s and {@link Block}s.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public interface FileSystemStore {
-  
-  void initialize(URI uri, Configuration conf) throws IOException;
-  String getVersion() throws IOException;
-
-  void storeINode(Path path, INode inode) throws IOException;
-  void storeBlock(Block block, File file) throws IOException;
-  
-  boolean inodeExists(Path path) throws IOException;
-  boolean blockExists(long blockId) throws IOException;
-
-  INode retrieveINode(Path path) throws IOException;
-  File retrieveBlock(Block block, long byteRangeStart) throws IOException;
-
-  void deleteINode(Path path) throws IOException;
-  void deleteBlock(Block block) throws IOException;
-
-  Set<Path> listSubPaths(Path path) throws IOException;
-  Set<Path> listDeepSubPaths(Path path) throws IOException;
-
-  /**
-   * Delete everything. Used for testing.
-   * @throws IOException
-   */
-  void purge() throws IOException;
-  
-  /**
-   * Diagnostic method to dump all INodes to the console.
-   * @throws IOException
-   */
-  void dump() throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/INode.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/INode.java
deleted file mode 100644
index 5d08b77..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/INode.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.IOUtils;
-
-/**
- * Holds file metadata including type (regular file, or directory),
- * and the list of blocks that are pointers to the data.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class INode {
-	
-  enum FileType {
-    DIRECTORY, FILE
-  }
-  
-  public static final FileType[] FILE_TYPES = {
-    FileType.DIRECTORY,
-    FileType.FILE
-  };
-
-  public static final INode DIRECTORY_INODE = new INode(FileType.DIRECTORY, null);
-  
-  private FileType fileType;
-  private Block[] blocks;
-
-  public INode(FileType fileType, Block[] blocks) {
-    this.fileType = fileType;
-    if (isDirectory() && blocks != null) {
-      throw new IllegalArgumentException("A directory cannot contain blocks.");
-    }
-    this.blocks = blocks;
-  }
-
-  public Block[] getBlocks() {
-    return blocks;
-  }
-  
-  public FileType getFileType() {
-    return fileType;
-  }
-
-  public boolean isDirectory() {
-    return fileType == FileType.DIRECTORY;
-  }  
-
-  public boolean isFile() {
-    return fileType == FileType.FILE;
-  }
-  
-  public long getSerializedLength() {
-    return 1L + (blocks == null ? 0 : 4 + blocks.length * 16);
-  }
-  
-
-  public InputStream serialize() throws IOException {
-    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
-    DataOutputStream out = new DataOutputStream(bytes);
-    try {
-      out.writeByte(fileType.ordinal());
-      if (isFile()) {
-        out.writeInt(blocks.length);
-        for (int i = 0; i < blocks.length; i++) {
-          out.writeLong(blocks[i].getId());
-          out.writeLong(blocks[i].getLength());
-        }
-      }
-      out.close();
-      out = null;
-    } finally {
-      IOUtils.closeStream(out);
-    }
-    return new ByteArrayInputStream(bytes.toByteArray());
-  }
-  
-  public static INode deserialize(InputStream in) throws IOException {
-    if (in == null) {
-      return null;
-    }
-    DataInputStream dataIn = new DataInputStream(in);
-    FileType fileType = INode.FILE_TYPES[dataIn.readByte()];
-    switch (fileType) {
-    case DIRECTORY:
-      in.close();
-      return INode.DIRECTORY_INODE;
-    case FILE:
-      int numBlocks = dataIn.readInt();
-      Block[] blocks = new Block[numBlocks];
-      for (int i = 0; i < numBlocks; i++) {
-        long id = dataIn.readLong();
-        long length = dataIn.readLong();
-        blocks[i] = new Block(id, length);
-      }
-      in.close();
-      return new INode(fileType, blocks);
-    default:
-      throw new IllegalArgumentException("Cannot deserialize inode.");
-    }    
-  }  
-  
-}

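A small worked sketch (hypothetical, placed in the same package as the classes above since FileType is package-private) of the block-metadata format that INode.serialize() writes: one byte for the file type, then for regular files a 4-byte block count followed by two 8-byte longs (id, length) per block.

package org.apache.hadoop.fs.s3;

public class INodeFormatSketch {
  public static void main(String[] args) throws Exception {
    Block[] blocks = { new Block(1L, 64L), new Block(2L, 32L) };
    INode inode = new INode(INode.FileType.FILE, blocks);
    // 1 (type byte) + 4 (block count) + 2 blocks * 16 bytes = 37
    System.out.println(inode.getSerializedLength());
    // Round-trips through the stream form used to store the inode in S3.
    INode copy = INode.deserialize(inode.serialize());
    System.out.println(copy.getBlocks().length); // 2
  }
}
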
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
deleted file mode 100644
index 241ec0f..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
+++ /dev/null
@@ -1,445 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.s3.INode.FileType;
-import org.jets3t.service.S3Service;
-import org.jets3t.service.S3ServiceException;
-import org.jets3t.service.ServiceException;
-import org.jets3t.service.impl.rest.httpclient.RestS3Service;
-import org.jets3t.service.model.S3Bucket;
-import org.jets3t.service.model.S3Object;
-import org.jets3t.service.security.AWSCredentials;
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-class Jets3tFileSystemStore implements FileSystemStore {
-  
-  private static final String FILE_SYSTEM_NAME = "fs";
-  private static final String FILE_SYSTEM_VALUE = "Hadoop";
-
-  private static final String FILE_SYSTEM_TYPE_NAME = "fs-type";
-  private static final String FILE_SYSTEM_TYPE_VALUE = "block";
-
-  private static final String FILE_SYSTEM_VERSION_NAME = "fs-version";
-  private static final String FILE_SYSTEM_VERSION_VALUE = "1";
-  
-  private static final Map<String, Object> METADATA =
-    new HashMap<String, Object>();
-  
-  static {
-    METADATA.put(FILE_SYSTEM_NAME, FILE_SYSTEM_VALUE);
-    METADATA.put(FILE_SYSTEM_TYPE_NAME, FILE_SYSTEM_TYPE_VALUE);
-    METADATA.put(FILE_SYSTEM_VERSION_NAME, FILE_SYSTEM_VERSION_VALUE);
-  }
-
-  private static final String PATH_DELIMITER = Path.SEPARATOR;
-  private static final String BLOCK_PREFIX = "block_";
-
-  private Configuration conf;
-  
-  private S3Service s3Service;
-
-  private S3Bucket bucket;
-  
-  private int bufferSize;
-  
-  private static final Log LOG = 
-    LogFactory.getLog(Jets3tFileSystemStore.class.getName());
-  
-  @Override
-  public void initialize(URI uri, Configuration conf) throws IOException {
-    
-    this.conf = conf;
-    
-    S3Credentials s3Credentials = new S3Credentials();
-    s3Credentials.initialize(uri, conf);
-    try {
-      AWSCredentials awsCredentials =
-        new AWSCredentials(s3Credentials.getAccessKey(),
-            s3Credentials.getSecretAccessKey());
-      this.s3Service = new RestS3Service(awsCredentials);
-    } catch (S3ServiceException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      throw new S3Exception(e);
-    }
-    bucket = new S3Bucket(uri.getHost());
-
-    this.bufferSize = conf.getInt(
-                       S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_KEY,
-                       S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_DEFAULT
-		      );
-  }
-
-  @Override
-  public String getVersion() throws IOException {
-    return FILE_SYSTEM_VERSION_VALUE;
-  }
-
-  private void delete(String key) throws IOException {
-    try {
-      s3Service.deleteObject(bucket, key);
-    } catch (S3ServiceException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      throw new S3Exception(e);
-    }
-  }
-
-  @Override
-  public void deleteINode(Path path) throws IOException {
-    delete(pathToKey(path));
-  }
-
-  @Override
-  public void deleteBlock(Block block) throws IOException {
-    delete(blockToKey(block));
-  }
-
-  @Override
-  public boolean inodeExists(Path path) throws IOException {
-    String key = pathToKey(path);
-    InputStream in = get(key, true);
-    if (in == null) {
-      if (isRoot(key)) {
-        storeINode(path, INode.DIRECTORY_INODE);
-        return true;
-      } else {
-        return false;
-      }
-    }
-    in.close();
-    return true;
-  }
-  
-  @Override
-  public boolean blockExists(long blockId) throws IOException {
-    InputStream in = get(blockToKey(blockId), false);
-    if (in == null) {
-      return false;
-    }
-    in.close();
-    return true;
-  }
-
-  private InputStream get(String key, boolean checkMetadata)
-      throws IOException {
-    
-    try {
-      S3Object object = s3Service.getObject(bucket.getName(), key);
-      if (checkMetadata) {
-        checkMetadata(object);
-      }
-      return object.getDataInputStream();
-    } catch (S3ServiceException e) {
-      if ("NoSuchKey".equals(e.getS3ErrorCode())) {
-        return null;
-      }
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      throw new S3Exception(e);
-    } catch (ServiceException e) {
-      handleServiceException(e);
-      return null;
-    }
-  }
-
-  private InputStream get(String key, long byteRangeStart) throws IOException {
-    try {
-      S3Object object = s3Service.getObject(bucket, key, null, null, null,
-                                            null, byteRangeStart, null);
-      return object.getDataInputStream();
-    } catch (S3ServiceException e) {
-      if ("NoSuchKey".equals(e.getS3ErrorCode())) {
-        return null;
-      }
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      throw new S3Exception(e);
-    } catch (ServiceException e) {
-      handleServiceException(e);
-      return null;
-    }
-  }
-
-  private void checkMetadata(S3Object object) throws S3FileSystemException,
-      S3ServiceException {
-    
-    String name = (String) object.getMetadata(FILE_SYSTEM_NAME);
-    if (!FILE_SYSTEM_VALUE.equals(name)) {
-      throw new S3FileSystemException("Not a Hadoop S3 file.");
-    }
-    String type = (String) object.getMetadata(FILE_SYSTEM_TYPE_NAME);
-    if (!FILE_SYSTEM_TYPE_VALUE.equals(type)) {
-      throw new S3FileSystemException("Not a block file.");
-    }
-    String dataVersion = (String) object.getMetadata(FILE_SYSTEM_VERSION_NAME);
-    if (!FILE_SYSTEM_VERSION_VALUE.equals(dataVersion)) {
-      throw new VersionMismatchException(FILE_SYSTEM_VERSION_VALUE,
-          dataVersion);
-    }
-  }
-
-  @Override
-  public INode retrieveINode(Path path) throws IOException {
-    String key = pathToKey(path);
-    InputStream in = get(key, true);
-    if (in == null && isRoot(key)) {
-      storeINode(path, INode.DIRECTORY_INODE);
-      return INode.DIRECTORY_INODE;
-    }
-    return INode.deserialize(in);
-  }
-
-  @Override
-  public File retrieveBlock(Block block, long byteRangeStart)
-    throws IOException {
-    File fileBlock = null;
-    InputStream in = null;
-    OutputStream out = null;
-    try {
-      fileBlock = newBackupFile();
-      in = get(blockToKey(block), byteRangeStart);
-      out = new BufferedOutputStream(new FileOutputStream(fileBlock));
-      byte[] buf = new byte[bufferSize];
-      int numRead;
-      while ((numRead = in.read(buf)) >= 0) {
-        out.write(buf, 0, numRead);
-      }
-      return fileBlock;
-    } catch (IOException e) {
-      // close output stream to file then delete file
-      closeQuietly(out);
-      out = null; // to prevent a second close
-      if (fileBlock != null) {
-        boolean b = fileBlock.delete();
-        if (!b) {
-          LOG.warn("Ignoring failed delete");
-        }
-      }
-      throw e;
-    } finally {
-      closeQuietly(out);
-      closeQuietly(in);
-    }
-  }
-  
-  private File newBackupFile() throws IOException {
-    File dir = new File(conf.get("fs.s3.buffer.dir"));
-    if (!dir.exists() && !dir.mkdirs()) {
-      throw new IOException("Cannot create S3 buffer directory: " + dir);
-    }
-    File result = File.createTempFile("input-", ".tmp", dir);
-    result.deleteOnExit();
-    return result;
-  }
-
-  @Override
-  public Set<Path> listSubPaths(Path path) throws IOException {
-    try {
-      String prefix = pathToKey(path);
-      if (!prefix.endsWith(PATH_DELIMITER)) {
-        prefix += PATH_DELIMITER;
-      }
-      S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, PATH_DELIMITER);
-      Set<Path> prefixes = new TreeSet<Path>();
-      for (int i = 0; i < objects.length; i++) {
-        prefixes.add(keyToPath(objects[i].getKey()));
-      }
-      prefixes.remove(path);
-      return prefixes;
-    } catch (S3ServiceException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      throw new S3Exception(e);
-    }
-  }
-  
-  @Override
-  public Set<Path> listDeepSubPaths(Path path) throws IOException {
-    try {
-      String prefix = pathToKey(path);
-      if (!prefix.endsWith(PATH_DELIMITER)) {
-        prefix += PATH_DELIMITER;
-      }
-      S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
-      Set<Path> prefixes = new TreeSet<Path>();
-      for (int i = 0; i < objects.length; i++) {
-        prefixes.add(keyToPath(objects[i].getKey()));
-      }
-      prefixes.remove(path);
-      return prefixes;
-    } catch (S3ServiceException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      throw new S3Exception(e);
-    }    
-  }
-
-  private void put(String key, InputStream in, long length, boolean storeMetadata)
-      throws IOException {
-    
-    try {
-      S3Object object = new S3Object(key);
-      object.setDataInputStream(in);
-      object.setContentType("binary/octet-stream");
-      object.setContentLength(length);
-      if (storeMetadata) {
-        object.addAllMetadata(METADATA);
-      }
-      s3Service.putObject(bucket, object);
-    } catch (S3ServiceException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      throw new S3Exception(e);
-    }
-  }
-
-  @Override
-  public void storeINode(Path path, INode inode) throws IOException {
-    put(pathToKey(path), inode.serialize(), inode.getSerializedLength(), true);
-  }
-
-  @Override
-  public void storeBlock(Block block, File file) throws IOException {
-    BufferedInputStream in = null;
-    try {
-      in = new BufferedInputStream(new FileInputStream(file));
-      put(blockToKey(block), in, block.getLength(), false);
-    } finally {
-      closeQuietly(in);
-    }    
-  }
-
-  private void closeQuietly(Closeable closeable) {
-    if (closeable != null) {
-      try {
-        closeable.close();
-      } catch (IOException e) {
-        // ignore
-      }
-    }
-  }
-
-  private String pathToKey(Path path) {
-    if (!path.isAbsolute()) {
-      throw new IllegalArgumentException("Path must be absolute: " + path);
-    }
-    return path.toUri().getPath();
-  }
-
-  private Path keyToPath(String key) {
-    return new Path(key);
-  }
-  
-  private String blockToKey(long blockId) {
-    return BLOCK_PREFIX + blockId;
-  }
-
-  private String blockToKey(Block block) {
-    return blockToKey(block.getId());
-  }
-
-  private boolean isRoot(String key) {
-    return key.isEmpty() || key.equals("/");
-  }
-
-  @Override
-  public void purge() throws IOException {
-    try {
-      S3Object[] objects = s3Service.listObjects(bucket.getName());
-      for (int i = 0; i < objects.length; i++) {
-        s3Service.deleteObject(bucket, objects[i].getKey());
-      }
-    } catch (S3ServiceException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      throw new S3Exception(e);
-    }
-  }
-
-  @Override
-  public void dump() throws IOException {
-    StringBuilder sb = new StringBuilder("S3 Filesystem, ");
-    sb.append(bucket.getName()).append("\n");
-    try {
-      S3Object[] objects = s3Service.listObjects(bucket.getName(), PATH_DELIMITER, null);
-      for (int i = 0; i < objects.length; i++) {
-        Path path = keyToPath(objects[i].getKey());
-        sb.append(path).append("\n");
-        INode m = retrieveINode(path);
-        sb.append("\t").append(m.getFileType()).append("\n");
-        if (m.getFileType() == FileType.DIRECTORY) {
-          continue;
-        }
-        for (int j = 0; j < m.getBlocks().length; j++) {
-          sb.append("\t").append(m.getBlocks()[j]).append("\n");
-        }
-      }
-    } catch (S3ServiceException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      throw new S3Exception(e);
-    }
-    System.out.println(sb);
-  }
-
-  private void handleServiceException(ServiceException e) throws IOException {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      else {
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("Got ServiceException with Error code: " + e.getErrorCode() + ";and Error message: " + e.getErrorMessage());
-        }
-      }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
deleted file mode 100644
index 429c272..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
+++ /dev/null
@@ -1,291 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.UnsupportedEncodingException;
-import java.net.URI;
-import java.net.URLDecoder;
-import java.net.URLEncoder;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.jets3t.service.S3Service;
-import org.jets3t.service.S3ServiceException;
-import org.jets3t.service.ServiceException;
-import org.jets3t.service.impl.rest.httpclient.RestS3Service;
-import org.jets3t.service.model.S3Bucket;
-import org.jets3t.service.model.S3Object;
-import org.jets3t.service.security.AWSCredentials;
-
-/**
- * <p>
- * This class is a tool for migrating data from an older to a newer version
- * of an S3 filesystem.
- * </p>
- * <p>
- * All files in the filesystem are migrated by re-writing the block metadata
- * - no datafiles are touched.
- * </p>
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public class MigrationTool extends Configured implements Tool {
-  
-  private S3Service s3Service;
-  private S3Bucket bucket;
-  
-  public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(new MigrationTool(), args);
-    System.exit(res);
-  }
-  
-  @Override
-  public int run(String[] args) throws Exception {
-    
-    if (args.length == 0) {
-      System.err.println("Usage: MigrationTool <S3 file system URI>");
-      System.err.println("\t<S3 file system URI>\tfilesystem to migrate");
-      ToolRunner.printGenericCommandUsage(System.err);
-      return -1;
-    }
-    
-    URI uri = URI.create(args[0]);
-    
-    initialize(uri);
-    
-    FileSystemStore newStore = new Jets3tFileSystemStore();
-    newStore.initialize(uri, getConf());
-    
-    if (get("%2F") != null) { 
-      System.err.println("Current version number is [unversioned].");
-      System.err.println("Target version number is " +
-          newStore.getVersion() + ".");
-      Store oldStore = new UnversionedStore();
-      migrate(oldStore, newStore);
-      return 0;
-    } else {
-      S3Object root = get("/");
-      if (root != null) {
-        String version = (String) root.getMetadata("fs-version");
-        if (version == null) {
-          System.err.println("Can't detect version - exiting.");
-        } else {
-          String newVersion = newStore.getVersion();
-          System.err.println("Current version number is " + version + ".");
-          System.err.println("Target version number is " + newVersion + ".");
-          if (version.equals(newStore.getVersion())) {
-            System.err.println("No migration required.");
-            return 0;
-          }
-          // use version number to create Store
-          //Store oldStore = ... 
-          //migrate(oldStore, newStore);
-          System.err.println("Not currently implemented.");
-          return 0;
-        }
-      }
-      System.err.println("Can't detect version - exiting.");
-      return 0;
-    }
-    
-  }
-  
-  public void initialize(URI uri) throws IOException {
-    
-    
-    
-    try {
-      String accessKey = null;
-      String secretAccessKey = null;
-      String userInfo = uri.getUserInfo();
-      if (userInfo != null) {
-        int index = userInfo.indexOf(':');
-        if (index != -1) {
-          accessKey = userInfo.substring(0, index);
-          secretAccessKey = userInfo.substring(index + 1);
-        } else {
-          accessKey = userInfo;
-        }
-      }
-      if (accessKey == null) {
-        accessKey = getConf().get("fs.s3.awsAccessKeyId");
-      }
-      if (secretAccessKey == null) {
-        secretAccessKey = getConf().get("fs.s3.awsSecretAccessKey");
-      }
-      if (accessKey == null && secretAccessKey == null) {
-        throw new IllegalArgumentException("AWS " +
-                                           "Access Key ID and Secret Access Key " +
-                                           "must be specified as the username " +
-                                           "or password (respectively) of a s3 URL, " +
-                                           "or by setting the " +
-                                           "fs.s3.awsAccessKeyId or " +                         
-                                           "fs.s3.awsSecretAccessKey properties (respectively).");
-      } else if (accessKey == null) {
-        throw new IllegalArgumentException("AWS " +
-                                           "Access Key ID must be specified " +
-                                           "as the username of a s3 URL, or by setting the " +
-                                           "fs.s3.awsAccessKeyId property.");
-      } else if (secretAccessKey == null) {
-        throw new IllegalArgumentException("AWS " +
-                                           "Secret Access Key must be specified " +
-                                           "as the password of a s3 URL, or by setting the " +
-                                           "fs.s3.awsSecretAccessKey property.");         
-      }
-      AWSCredentials awsCredentials =
-        new AWSCredentials(accessKey, secretAccessKey);
-      this.s3Service = new RestS3Service(awsCredentials);
-    } catch (S3ServiceException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      throw new S3Exception(e);
-    }
-    bucket = new S3Bucket(uri.getHost());
-  }
-  
-  private void migrate(Store oldStore, FileSystemStore newStore)
-      throws IOException {
-    for (Path path : oldStore.listAllPaths()) {
-      INode inode = oldStore.retrieveINode(path);
-      oldStore.deleteINode(path);
-      newStore.storeINode(path, inode);
-    }
-  }
-  
-  private S3Object get(String key) {
-    try {
-      return s3Service.getObject(bucket.getName(), key);
-    } catch (S3ServiceException e) {
-      if ("NoSuchKey".equals(e.getS3ErrorCode())) {
-        return null;
-      }
-    }
-    return null;
-  }
-  
-  interface Store {
-
-    Set<Path> listAllPaths() throws IOException;
-    INode retrieveINode(Path path) throws IOException;
-    void deleteINode(Path path) throws IOException;
-    
-  }
-  
-  class UnversionedStore implements Store {
-
-    @Override
-    public Set<Path> listAllPaths() throws IOException {
-      try {
-        String prefix = urlEncode(Path.SEPARATOR);
-        S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
-        Set<Path> prefixes = new TreeSet<Path>();
-        for (int i = 0; i < objects.length; i++) {
-          prefixes.add(keyToPath(objects[i].getKey()));
-        }
-        return prefixes;
-      } catch (S3ServiceException e) {
-        if (e.getCause() instanceof IOException) {
-          throw (IOException) e.getCause();
-        }
-        throw new S3Exception(e);
-      }   
-    }
-
-    @Override
-    public void deleteINode(Path path) throws IOException {
-      delete(pathToKey(path));
-    }
-    
-    private void delete(String key) throws IOException {
-      try {
-        s3Service.deleteObject(bucket, key);
-      } catch (S3ServiceException e) {
-        if (e.getCause() instanceof IOException) {
-          throw (IOException) e.getCause();
-        }
-        throw new S3Exception(e);
-      }
-    }
-    
-    @Override
-    public INode retrieveINode(Path path) throws IOException {
-      return INode.deserialize(get(pathToKey(path)));
-    }
-
-    private InputStream get(String key) throws IOException {
-      try {
-        S3Object object = s3Service.getObject(bucket.getName(), key);
-        return object.getDataInputStream();
-      } catch (S3ServiceException e) {
-        if ("NoSuchKey".equals(e.getS3ErrorCode())) {
-          return null;
-        }
-        if (e.getCause() instanceof IOException) {
-          throw (IOException) e.getCause();
-        }
-        throw new S3Exception(e);
-      } catch (ServiceException e) {
-        return null;
-      }
-    }
-    
-    private String pathToKey(Path path) {
-      if (!path.isAbsolute()) {
-        throw new IllegalArgumentException("Path must be absolute: " + path);
-      }
-      return urlEncode(path.toUri().getPath());
-    }
-    
-    private Path keyToPath(String key) {
-      return new Path(urlDecode(key));
-    }
-
-    private String urlEncode(String s) {
-      try {
-        return URLEncoder.encode(s, "UTF-8");
-      } catch (UnsupportedEncodingException e) {
-        // Should never happen since every implementation of the Java Platform
-        // is required to support UTF-8.
-        // See http://java.sun.com/j2se/1.5.0/docs/api/java/nio/charset/Charset.html
-        throw new IllegalStateException(e);
-      }
-    }
-    
-    private String urlDecode(String s) {
-      try {
-        return URLDecoder.decode(s, "UTF-8");
-      } catch (UnsupportedEncodingException e) {
-        // Should never happen since every implementation of the Java Platform
-        // is required to support UTF-8.
-        // See http://java.sun.com/j2se/1.5.0/docs/api/java/nio/charset/Charset.html
-        throw new IllegalStateException(e);
-      }
-    }
-    
-  }
-  
-}

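As a hedged sketch of kicking off the migration that MigrationTool's javadoc above describes (re-writing block metadata only), this drives the tool through ToolRunner with an explicitly built Configuration; the bucket URI and credential values are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3.MigrationTool;
import org.apache.hadoop.util.ToolRunner;

public class MigrationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder credentials; they could equally be supplied in the URI.
    conf.set("fs.s3.awsAccessKeyId", "ACCESS_KEY_ID");
    conf.set("fs.s3.awsSecretAccessKey", "SECRET_ACCESS_KEY");
    // Placeholder bucket; mirrors what MigrationTool.main() does with its arguments.
    int exit = ToolRunner.run(conf, new MigrationTool(),
        new String[] { "s3://example-bucket/" });
    System.exit(exit);
  }
}
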
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
deleted file mode 100644
index 312bf65..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.net.URI;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * <p>
- * Extracts AWS credentials from the filesystem URI or configuration.
- * </p>
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class S3Credentials {
-  
-  private String accessKey;
-  private String secretAccessKey; 
-
-  /**
-   * @throws IllegalArgumentException if credentials for S3 cannot be
-   * determined.
-   */
-  public void initialize(URI uri, Configuration conf) {
-    if (uri.getHost() == null) {
-      throw new IllegalArgumentException("Invalid hostname in URI " + uri);
-    }
-    
-    String userInfo = uri.getUserInfo();
-    if (userInfo != null) {
-      int index = userInfo.indexOf(':');
-      if (index != -1) {
-        accessKey = userInfo.substring(0, index);
-        secretAccessKey = userInfo.substring(index + 1);
-      } else {
-        accessKey = userInfo;
-      }
-    }
-    
-    String scheme = uri.getScheme();
-    String accessKeyProperty = String.format("fs.%s.awsAccessKeyId", scheme);
-    String secretAccessKeyProperty =
-      String.format("fs.%s.awsSecretAccessKey", scheme);
-    if (accessKey == null) {
-      accessKey = conf.get(accessKeyProperty);
-    }
-    if (secretAccessKey == null) {
-      secretAccessKey = conf.get(secretAccessKeyProperty);
-    }
-    if (accessKey == null && secretAccessKey == null) {
-      throw new IllegalArgumentException("AWS " +
-                                         "Access Key ID and Secret Access " +
-                                         "Key must be specified as the " +
-                                         "username or password " +
-                                         "(respectively) of a " + scheme +
-                                         " URL, or by setting the " +
-                                         accessKeyProperty + " or " +
-                                         secretAccessKeyProperty +
-                                         " properties (respectively).");
-    } else if (accessKey == null) {
-      throw new IllegalArgumentException("AWS " +
-                                         "Access Key ID must be specified " +
-                                         "as the username of a " + scheme +
-                                         " URL, or by setting the " +
-                                         accessKeyProperty + " property.");
-    } else if (secretAccessKey == null) {
-      throw new IllegalArgumentException("AWS " +
-                                         "Secret Access Key must be " +
-                                         "specified as the password of a " +
-                                         scheme + " URL, or by setting the " +
-                                         secretAccessKeyProperty +
-                                         " property.");       
-    }
-
-  }
-  
-  public String getAccessKey() {
-    return accessKey;
-  }
-  
-  public String getSecretAccessKey() {
-    return secretAccessKey;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3Exception.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3Exception.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3Exception.java
deleted file mode 100644
index 4f07c4e..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3Exception.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Thrown if there is a problem communicating with Amazon S3.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class S3Exception extends IOException {
-
-  private static final long serialVersionUID = 1L;
-
-  public S3Exception(Throwable t) {
-    super(t);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
deleted file mode 100644
index dda3cf6..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
+++ /dev/null
@@ -1,486 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.s3native.NativeS3FileSystem;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
-import org.apache.hadoop.util.Progressable;
-
-/**
- * <p>
- * A block-based {@link FileSystem} backed by
- * <a href="http://aws.amazon.com/s3">Amazon S3</a>.
- * </p>
- * @see NativeS3FileSystem
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class S3FileSystem extends FileSystem {
-
-  private URI uri;
-
-  private FileSystemStore store;
-
-  private Path workingDir;
-
-  public S3FileSystem() {
-    // set store in initialize()
-  }
-  
-  public S3FileSystem(FileSystemStore store) {
-    this.store = store;
-  }
-
-  /**
-   * Return the protocol scheme for the FileSystem.
-   * <p/>
-   *
-   * @return <code>s3</code>
-   */
-  @Override
-  public String getScheme() {
-    return "s3";
-  }
-
-  @Override
-  public URI getUri() {
-    return uri;
-  }
-
-  @Override
-  public void initialize(URI uri, Configuration conf) throws IOException {
-    super.initialize(uri, conf);
-    if (store == null) {
-      store = createDefaultStore(conf);
-    }
-    store.initialize(uri, conf);
-    setConf(conf);
-    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());    
-    this.workingDir =
-      new Path("/user", System.getProperty("user.name")).makeQualified(this);
-  }  
-
-  private static FileSystemStore createDefaultStore(Configuration conf) {
-    FileSystemStore store = new Jets3tFileSystemStore();
-    
-    RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-                                                                               conf.getInt("fs.s3.maxRetries", 4),
-                                                                               conf.getLong("fs.s3.sleepTimeSeconds", 10), TimeUnit.SECONDS);
-    Map<Class<? extends Exception>,RetryPolicy> exceptionToPolicyMap =
-      new HashMap<Class<? extends Exception>, RetryPolicy>();
-    exceptionToPolicyMap.put(IOException.class, basePolicy);
-    exceptionToPolicyMap.put(S3Exception.class, basePolicy);
-    
-    RetryPolicy methodPolicy = RetryPolicies.retryByException(
-                                                              RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
-    Map<String,RetryPolicy> methodNameToPolicyMap = new HashMap<String,RetryPolicy>();
-    methodNameToPolicyMap.put("storeBlock", methodPolicy);
-    methodNameToPolicyMap.put("retrieveBlock", methodPolicy);
-    
-    return (FileSystemStore) RetryProxy.create(FileSystemStore.class,
-                                               store, methodNameToPolicyMap);
-  }
-
-  @Override
-  public Path getWorkingDirectory() {
-    return workingDir;
-  }
-
-  @Override
-  public void setWorkingDirectory(Path dir) {
-    workingDir = makeAbsolute(dir);
-  }
-
-  private Path makeAbsolute(Path path) {
-    if (path.isAbsolute()) {
-      return path;
-    }
-    return new Path(workingDir, path);
-  }
-
-  /**
-   * @param permission Currently ignored.
-   */
-  @Override
-  public boolean mkdirs(Path path, FsPermission permission) throws IOException {
-    Path absolutePath = makeAbsolute(path);
-    List<Path> paths = new ArrayList<Path>();
-    do {
-      paths.add(0, absolutePath);
-      absolutePath = absolutePath.getParent();
-    } while (absolutePath != null);
-    
-    boolean result = true;
-    for (Path p : paths) {
-      result &= mkdir(p);
-    }
-    return result;
-  }
-  
-  private boolean mkdir(Path path) throws IOException {
-    Path absolutePath = makeAbsolute(path);
-    INode inode = store.retrieveINode(absolutePath);
-    if (inode == null) {
-      store.storeINode(absolutePath, INode.DIRECTORY_INODE);
-    } else if (inode.isFile()) {
-      throw new IOException(String.format(
-          "Can't make directory for path %s since it is a file.",
-          absolutePath));
-    }
-    return true;
-  }
-
-  @Override
-  public boolean isFile(Path path) throws IOException {
-    INode inode = store.retrieveINode(makeAbsolute(path));
-    if (inode == null) {
-      return false;
-    }
-    return inode.isFile();
-  }
-
-  private INode checkFile(Path path) throws IOException {
-    INode inode = store.retrieveINode(makeAbsolute(path));
-    if (inode == null) {
-      throw new IOException("No such file.");
-    }
-    if (inode.isDirectory()) {
-      throw new IOException("Path " + path + " is a directory.");
-    }
-    return inode;
-  }
-
-  @Override
-  public FileStatus[] listStatus(Path f) throws IOException {
-    Path absolutePath = makeAbsolute(f);
-    INode inode = store.retrieveINode(absolutePath);
-    if (inode == null) {
-      throw new FileNotFoundException("File " + f + " does not exist.");
-    }
-    if (inode.isFile()) {
-      return new FileStatus[] {
-        new S3FileStatus(f.makeQualified(this), inode)
-      };
-    }
-    ArrayList<FileStatus> ret = new ArrayList<FileStatus>();
-    for (Path p : store.listSubPaths(absolutePath)) {
-      ret.add(getFileStatus(p.makeQualified(this)));
-    }
-    return ret.toArray(new FileStatus[0]);
-  }
-
-  /** This optional operation is not yet supported. */
-  @Override
-  public FSDataOutputStream append(Path f, int bufferSize,
-      Progressable progress) throws IOException {
-    throw new IOException("Not supported");
-  }
-
-  /**
-   * @param permission Currently ignored.
-   */
-  @Override
-  public FSDataOutputStream create(Path file, FsPermission permission,
-      boolean overwrite, int bufferSize,
-      short replication, long blockSize, Progressable progress)
-    throws IOException {
-
-    INode inode = store.retrieveINode(makeAbsolute(file));
-    if (inode != null) {
-      if (overwrite) {
-        delete(file, true);
-      } else {
-        throw new FileAlreadyExistsException("File already exists: " + file);
-      }
-    } else {
-      Path parent = file.getParent();
-      if (parent != null) {
-        if (!mkdirs(parent)) {
-          throw new IOException("Mkdirs failed to create " + parent.toString());
-        }
-      }      
-    }
-    return new FSDataOutputStream
-        (new S3OutputStream(getConf(), store, makeAbsolute(file),
-                            blockSize, progress, bufferSize),
-         statistics);
-  }
-
-  @Override
-  public FSDataInputStream open(Path path, int bufferSize) throws IOException {
-    INode inode = checkFile(path);
-    return new FSDataInputStream(new S3InputStream(getConf(), store, inode,
-                                                   statistics));
-  }
-
-  @Override
-  public boolean rename(Path src, Path dst) throws IOException {
-    Path absoluteSrc = makeAbsolute(src);
-    final String debugPreamble = "Renaming '" + src + "' to '" + dst + "' - ";
-    INode srcINode = store.retrieveINode(absoluteSrc);
-    boolean debugEnabled = LOG.isDebugEnabled();
-    if (srcINode == null) {
-      // src path doesn't exist
-      if (debugEnabled) {
-        LOG.debug(debugPreamble + "returning false as src does not exist");
-      }
-      return false; 
-    }
-
-    Path absoluteDst = makeAbsolute(dst);
-
-    //validate the parent dir of the destination
-    Path dstParent = absoluteDst.getParent();
-    if (dstParent != null) {
-      //if the dst parent is not root, make sure it exists
-      INode dstParentINode = store.retrieveINode(dstParent);
-      if (dstParentINode == null) {
-        // dst parent doesn't exist
-        if (debugEnabled) {
-          LOG.debug(debugPreamble +
-                    "returning false as dst parent does not exist");
-        }
-        return false;
-      }
-      if (dstParentINode.isFile()) {
-        // dst parent exists but is a file
-        if (debugEnabled) {
-          LOG.debug(debugPreamble +
-                    "returning false as dst parent exists and is a file");
-        }
-        return false;
-      }
-    }
-
-    //get status of source
-    boolean srcIsFile = srcINode.isFile();
-
-    INode dstINode = store.retrieveINode(absoluteDst);
-    boolean destExists = dstINode != null;
-    boolean destIsDir = destExists && !dstINode.isFile();
-    if (srcIsFile) {
-
-      //source is a simple file
-      if (destExists) {
-        if (destIsDir) {
-          //outcome #1 dest exists and is dir -filename to subdir of dest
-          if (debugEnabled) {
-            LOG.debug(debugPreamble +
-                      "copying src file under dest dir to " + absoluteDst);
-          }
-          absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
-        } else {
-          //outcome #2 dest is a file: fail unless it is the same path as src
-          boolean renamingOnToSelf = absoluteSrc.equals(absoluteDst);
-          if (debugEnabled) {
-            LOG.debug(debugPreamble +
-                      "copying file onto file, outcome is " + renamingOnToSelf);
-          }
-          return renamingOnToSelf;
-        }
-      } else {
-        // #3 dest does not exist: use dest as path for rename
-        if (debugEnabled) {
-          LOG.debug(debugPreamble +
-                    "copying file onto file");
-        }
-      }
-    } else {
-      //here the source exists and is a directory
-      // outcomes (given we know the parent dir exists if we get this far)
-      // #1 destination is a file: fail
-      // #2 destination is a directory: create a new dir under that one
-      // #3 destination doesn't exist: create a new dir with that name
-      // #2 and #3 are only allowed if the dest path is not == or under src
-
-      if (destExists) {
-        if (!destIsDir) {
-          // #1 destination is a file: fail
-          if (debugEnabled) {
-            LOG.debug(debugPreamble +
-                      "returning false as src is a directory, but not dest");
-          }
-          return false;
-        } else {
-          // the destination dir exists
-          // destination for rename becomes a subdir of the target name
-          absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
-          if (debugEnabled) {
-            LOG.debug(debugPreamble +
-                      "copying src dir under dest dir to " + absoluteDst);
-          }
-        }
-      }
-      //the final destination directory is now known, so validate it for
-      //illegal moves
-
-      if (absoluteSrc.equals(absoluteDst)) {
-        //you can't rename a directory onto itself
-        if (debugEnabled) {
-          LOG.debug(debugPreamble +
-                    "Dest==source && isDir -failing");
-        }
-        return false;
-      }
-      if (absoluteDst.toString().startsWith(absoluteSrc.toString() + "/")) {
-        //you can't move a directory under itself
-        if (debugEnabled) {
-          LOG.debug(debugPreamble +
-                    "dst is equal to or under src dir -failing");
-        }
-        return false;
-      }
-    }
-    //here the dest path is set up -so rename
-    return renameRecursive(absoluteSrc, absoluteDst);
-  }
-
-  private boolean renameRecursive(Path src, Path dst) throws IOException {
-    INode srcINode = store.retrieveINode(src);
-    store.storeINode(dst, srcINode);
-    store.deleteINode(src);
-    if (srcINode.isDirectory()) {
-      for (Path oldSrc : store.listDeepSubPaths(src)) {
-        INode inode = store.retrieveINode(oldSrc);
-        if (inode == null) {
-          return false;
-        }
-        String oldSrcPath = oldSrc.toUri().getPath();
-        String srcPath = src.toUri().getPath();
-        String dstPath = dst.toUri().getPath();
-        Path newDst = new Path(oldSrcPath.replaceFirst(srcPath, dstPath));
-        store.storeINode(newDst, inode);
-        store.deleteINode(oldSrc);
-      }
-    }
-    return true;
-  }
-
-  @Override
-  public boolean delete(Path path, boolean recursive) throws IOException {
-   Path absolutePath = makeAbsolute(path);
-   INode inode = store.retrieveINode(absolutePath);
-   if (inode == null) {
-     return false;
-   }
-   if (inode.isFile()) {
-     store.deleteINode(absolutePath);
-     for (Block block: inode.getBlocks()) {
-       store.deleteBlock(block);
-     }
-   } else {
-     FileStatus[] contents = null; 
-     try {
-       contents = listStatus(absolutePath);
-     } catch(FileNotFoundException fnfe) {
-       return false;
-     }
-
-     if ((contents.length !=0) && (!recursive)) {
-       throw new IOException("Directory " + path.toString() 
-           + " is not empty.");
-     }
-     for (FileStatus p:contents) {
-       if (!delete(p.getPath(), recursive)) {
-         return false;
-       }
-     }
-     store.deleteINode(absolutePath);
-   }
-   return true;
-  }
-  
-  /**
-   * Returns the {@link FileStatus} for the given path in this S3 file system.
-   */
-  @Override
-  public FileStatus getFileStatus(Path f)  throws IOException {
-    INode inode = store.retrieveINode(makeAbsolute(f));
-    if (inode == null) {
-      throw new FileNotFoundException(f + ": No such file or directory.");
-    }
-    return new S3FileStatus(f.makeQualified(this), inode);
-  }
-  
-  @Override
-  public long getDefaultBlockSize() {
-    return getConf().getLong("fs.s3.block.size", 64 * 1024 * 1024);
-  }
-
-  @Override
-  public String getCanonicalServiceName() {
-    // Does not support Token
-    return null;
-  }
-
-  // diagnostic methods
-
-  void dump() throws IOException {
-    store.dump();
-  }
-
-  void purge() throws IOException {
-    store.purge();
-  }
-
-  private static class S3FileStatus extends FileStatus {
-
-    S3FileStatus(Path f, INode inode) throws IOException {
-      super(findLength(inode), inode.isDirectory(), 1,
-            findBlocksize(inode), 0, f);
-    }
-
-    private static long findLength(INode inode) {
-      if (!inode.isDirectory()) {
-        long length = 0L;
-        for (Block block : inode.getBlocks()) {
-          length += block.getLength();
-        }
-        return length;
-      }
-      return 0;
-    }
-
-    private static long findBlocksize(INode inode) {
-      final Block[] ret = inode.getBlocks();
-      return ret == null ? 0L : ret[0].getLength();
-    }
-  }
-}
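
The rename() implementation removed above follows the usual FileSystem contract: renaming a
file into an existing directory moves it under that directory, renaming onto an existing file
only succeeds when source and destination are the same path, and a directory can never be
renamed onto or beneath itself. A minimal caller-side sketch of those outcomes, assuming an
"example-bucket" bucket and that the s3:// scheme still resolves to this block-based
filesystem (neither assumption is part of this patch), would look like:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RenameSketch {
      public static void main(String[] args) throws Exception {
        // "s3://example-bucket/" is a placeholder URI, not taken from this patch.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("s3://example-bucket/"), conf);

        Path file = new Path("/dir1/file1");
        Path dir = new Path("/dir2");
        fs.create(file).close();   // create an empty source file
        fs.mkdirs(dir);

        // Destination exists and is a directory: the file moves under it.
        boolean moved = fs.rename(file, dir);                    // true, now /dir2/file1

        // A directory cannot be renamed onto itself or beneath itself.
        boolean ontoSelf = fs.rename(dir, dir);                  // false
        boolean underSelf = fs.rename(dir, new Path(dir, "x"));  // false

        System.out.println(moved + " " + ontoSelf + " " + underSelf);
      }
    }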

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemConfigKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemConfigKeys.java
deleted file mode 100644
index 8172a46..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemConfigKeys.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-
-/** 
- * This class contains constants for configuration keys used
- * in the s3 file system. 
- *
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class S3FileSystemConfigKeys extends CommonConfigurationKeys {
-  public static final String  S3_BLOCK_SIZE_KEY = "s3.blocksize";
-  public static final long    S3_BLOCK_SIZE_DEFAULT = 64*1024*1024;
-  public static final String  S3_REPLICATION_KEY = "s3.replication";
-  public static final short   S3_REPLICATION_DEFAULT = 1;
-  public static final String  S3_STREAM_BUFFER_SIZE_KEY = 
-                                                    "s3.stream-buffer-size";
-  public static final int     S3_STREAM_BUFFER_SIZE_DEFAULT = 4096;
-  public static final String  S3_BYTES_PER_CHECKSUM_KEY = 
-                                                    "s3.bytes-per-checksum";
-  public static final int     S3_BYTES_PER_CHECKSUM_DEFAULT = 512;
-  public static final String  S3_CLIENT_WRITE_PACKET_SIZE_KEY =
-                                                    "s3.client-write-packet-size";
-  public static final int     S3_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
-}
-  
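
The keys removed here follow the standard Hadoop pattern of a string key paired with a
compile-time default, resolved against a Configuration. A hedged usage sketch (illustrative
only, not code from this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3.S3FileSystemConfigKeys;

    public class S3ConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        long blockSize = conf.getLong(S3FileSystemConfigKeys.S3_BLOCK_SIZE_KEY,
            S3FileSystemConfigKeys.S3_BLOCK_SIZE_DEFAULT);
        short replication = (short) conf.getInt(S3FileSystemConfigKeys.S3_REPLICATION_KEY,
            S3FileSystemConfigKeys.S3_REPLICATION_DEFAULT);
        System.out.println("block size " + blockSize + ", replication " + replication);
      }
    }

Note that S3FileSystem.getDefaultBlockSize() above reads "fs.s3.block.size" rather than
"s3.blocksize", so the block-size default is resolved from a different key than the one
defined in this class.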

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemException.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemException.java
deleted file mode 100644
index cc1b463..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Thrown when there is a fatal exception while using {@link S3FileSystem}.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class S3FileSystemException extends IOException {
-  private static final long serialVersionUID = 1L;
-
-  public S3FileSystemException(String message) {
-    super(message);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3InputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3InputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3InputStream.java
deleted file mode 100644
index 5af57e6..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3InputStream.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.fs.FileSystem;
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-class S3InputStream extends FSInputStream {
-
-  private FileSystemStore store;
-
-  private Block[] blocks;
-
-  private boolean closed;
-
-  private long fileLength;
-
-  private long pos = 0;
-
-  private File blockFile;
-  
-  private DataInputStream blockStream;
-
-  private long blockEnd = -1;
-  
-  private FileSystem.Statistics stats;
-  
-  private static final Log LOG = 
-    LogFactory.getLog(S3InputStream.class.getName());
-
-
-  @Deprecated
-  public S3InputStream(Configuration conf, FileSystemStore store,
-                       INode inode) {
-    this(conf, store, inode, null);
-  }
-
-  public S3InputStream(Configuration conf, FileSystemStore store,
-                       INode inode, FileSystem.Statistics stats) {
-    
-    this.store = store;
-    this.stats = stats;
-    this.blocks = inode.getBlocks();
-    for (Block block : blocks) {
-      this.fileLength += block.getLength();
-    }
-  }
-
-  @Override
-  public synchronized long getPos() throws IOException {
-    return pos;
-  }
-
-  @Override
-  public synchronized int available() throws IOException {
-    return (int) (fileLength - pos);
-  }
-
-  @Override
-  public synchronized void seek(long targetPos) throws IOException {
-    if (targetPos > fileLength) {
-      throw new IOException("Cannot seek after EOF");
-    }
-    pos = targetPos;
-    blockEnd = -1;
-  }
-
-  @Override
-  public synchronized boolean seekToNewSource(long targetPos) throws IOException {
-    return false;
-  }
-
-  @Override
-  public synchronized int read() throws IOException {
-    if (closed) {
-      throw new IOException("Stream closed");
-    }
-    int result = -1;
-    if (pos < fileLength) {
-      if (pos > blockEnd) {
-        blockSeekTo(pos);
-      }
-      result = blockStream.read();
-      if (result >= 0) {
-        pos++;
-      }
-    }
-    if (stats != null && result >= 0) {
-      stats.incrementBytesRead(1);
-    }
-    return result;
-  }
-
-  @Override
-  public synchronized int read(byte buf[], int off, int len) throws IOException {
-    if (closed) {
-      throw new IOException("Stream closed");
-    }
-    if (pos < fileLength) {
-      if (pos > blockEnd) {
-        blockSeekTo(pos);
-      }
-      int realLen = (int) Math.min((long) len, (blockEnd - pos + 1L));
-      int result = blockStream.read(buf, off, realLen);
-      if (result >= 0) {
-        pos += result;
-      }
-      if (stats != null && result > 0) {
-        stats.incrementBytesRead(result);
-      }
-      return result;
-    }
-    return -1;
-  }
-
-  private synchronized void blockSeekTo(long target) throws IOException {
-    //
-    // Compute desired block
-    //
-    int targetBlock = -1;
-    long targetBlockStart = 0;
-    long targetBlockEnd = 0;
-    for (int i = 0; i < blocks.length; i++) {
-      long blockLength = blocks[i].getLength();
-      targetBlockEnd = targetBlockStart + blockLength - 1;
-
-      if (target >= targetBlockStart && target <= targetBlockEnd) {
-        targetBlock = i;
-        break;
-      } else {
-        targetBlockStart = targetBlockEnd + 1;
-      }
-    }
-    if (targetBlock < 0) {
-      throw new IOException(
-                            "Impossible situation: could not find target position " + target);
-    }
-    long offsetIntoBlock = target - targetBlockStart;
-
-    // read block blocks[targetBlock] from position offsetIntoBlock
-
-    this.blockFile = store.retrieveBlock(blocks[targetBlock], offsetIntoBlock);
-
-    this.pos = target;
-    this.blockEnd = targetBlockEnd;
-    this.blockStream = new DataInputStream(new FileInputStream(blockFile));
-
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (closed) {
-      return;
-    }
-    if (blockStream != null) {
-      blockStream.close();
-      blockStream = null;
-    }
-    if (blockFile != null) {
-      boolean b = blockFile.delete();
-      if (!b) {
-        LOG.warn("Ignoring failed delete");
-      }
-    }
-    super.close();
-    closed = true;
-  }
-
-  /**
-   * We don't support marks.
-   */
-  @Override
-  public boolean markSupported() {
-    return false;
-  }
-
-  @Override
-  public void mark(int readLimit) {
-    // Do nothing
-  }
-
-  @Override
-  public void reset() throws IOException {
-    throw new IOException("Mark not supported");
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3OutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3OutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3OutputStream.java
deleted file mode 100644
index 761f2ce..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3OutputStream.java
+++ /dev/null
@@ -1,235 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.s3.INode.FileType;
-import org.apache.hadoop.util.Progressable;
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-class S3OutputStream extends OutputStream {
-
-  private Configuration conf;
-  
-  private int bufferSize;
-
-  private FileSystemStore store;
-
-  private Path path;
-
-  private long blockSize;
-
-  private File backupFile;
-
-  private OutputStream backupStream;
-
-  private Random r = new Random();
-
-  private boolean closed;
-
-  private int pos = 0;
-
-  private long filePos = 0;
-
-  private int bytesWrittenToBlock = 0;
-
-  private byte[] outBuf;
-
-  private List<Block> blocks = new ArrayList<Block>();
-
-  private Block nextBlock;
-  
-  private static final Log LOG = 
-    LogFactory.getLog(S3OutputStream.class.getName());
-
-
-  public S3OutputStream(Configuration conf, FileSystemStore store,
-                        Path path, long blockSize, Progressable progress,
-                        int buffersize) throws IOException {
-    
-    this.conf = conf;
-    this.store = store;
-    this.path = path;
-    this.blockSize = blockSize;
-    this.backupFile = newBackupFile();
-    this.backupStream = new FileOutputStream(backupFile);
-    this.bufferSize = buffersize;
-    this.outBuf = new byte[bufferSize];
-
-  }
-
-  private File newBackupFile() throws IOException {
-    File dir = new File(conf.get("fs.s3.buffer.dir"));
-    if (!dir.exists() && !dir.mkdirs()) {
-      throw new IOException("Cannot create S3 buffer directory: " + dir);
-    }
-    File result = File.createTempFile("output-", ".tmp", dir);
-    result.deleteOnExit();
-    return result;
-  }
-
-  public long getPos() throws IOException {
-    return filePos;
-  }
-
-  @Override
-  public synchronized void write(int b) throws IOException {
-    if (closed) {
-      throw new IOException("Stream closed");
-    }
-
-    if ((bytesWrittenToBlock + pos == blockSize) || (pos >= bufferSize)) {
-      flush();
-    }
-    outBuf[pos++] = (byte) b;
-    filePos++;
-  }
-
-  @Override
-  public synchronized void write(byte b[], int off, int len) throws IOException {
-    if (closed) {
-      throw new IOException("Stream closed");
-    }
-    while (len > 0) {
-      int remaining = bufferSize - pos;
-      int toWrite = Math.min(remaining, len);
-      System.arraycopy(b, off, outBuf, pos, toWrite);
-      pos += toWrite;
-      off += toWrite;
-      len -= toWrite;
-      filePos += toWrite;
-
-      if ((bytesWrittenToBlock + pos >= blockSize) || (pos == bufferSize)) {
-        flush();
-      }
-    }
-  }
-
-  @Override
-  public synchronized void flush() throws IOException {
-    if (closed) {
-      throw new IOException("Stream closed");
-    }
-
-    if (bytesWrittenToBlock + pos >= blockSize) {
-      flushData((int) blockSize - bytesWrittenToBlock);
-    }
-    if (bytesWrittenToBlock == blockSize) {
-      endBlock();
-    }
-    flushData(pos);
-  }
-
-  private synchronized void flushData(int maxPos) throws IOException {
-    int workingPos = Math.min(pos, maxPos);
-
-    if (workingPos > 0) {
-      //
-      // To the local block backup, write just the bytes
-      //
-      backupStream.write(outBuf, 0, workingPos);
-
-      //
-      // Track position
-      //
-      bytesWrittenToBlock += workingPos;
-      System.arraycopy(outBuf, workingPos, outBuf, 0, pos - workingPos);
-      pos -= workingPos;
-    }
-  }
-
-  private synchronized void endBlock() throws IOException {
-    //
-    // Done with local copy
-    //
-    backupStream.close();
-
-    //
-    // Send it to S3
-    //
-    // TODO: Use passed in Progressable to report progress.
-    nextBlockOutputStream();
-    store.storeBlock(nextBlock, backupFile);
-    internalClose();
-
-    //
-    // Delete local backup, start new one
-    //
-    boolean b = backupFile.delete();
-    if (!b) {
-      LOG.warn("Ignoring failed delete");
-    }
-    backupFile = newBackupFile();
-    backupStream = new FileOutputStream(backupFile);
-    bytesWrittenToBlock = 0;
-  }
-
-  private synchronized void nextBlockOutputStream() throws IOException {
-    long blockId = r.nextLong();
-    while (store.blockExists(blockId)) {
-      blockId = r.nextLong();
-    }
-    nextBlock = new Block(blockId, bytesWrittenToBlock);
-    blocks.add(nextBlock);
-    bytesWrittenToBlock = 0;
-  }
-
-  private synchronized void internalClose() throws IOException {
-    INode inode = new INode(FileType.FILE, blocks.toArray(new Block[blocks
-                                                                    .size()]));
-    store.storeINode(path, inode);
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    if (closed) {
-      return;
-    }
-
-    flush();
-    if (filePos == 0 || bytesWrittenToBlock != 0) {
-      endBlock();
-    }
-
-    backupStream.close();
-    boolean b = backupFile.delete();
-    if (!b) {
-      LOG.warn("Ignoring failed delete");
-    }
-
-    super.close();
-
-    closed = true;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/VersionMismatchException.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/VersionMismatchException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/VersionMismatchException.java
deleted file mode 100644
index ccc8969..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/VersionMismatchException.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Thrown when Hadoop cannot read the version of the data stored
- * in {@link S3FileSystem}.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class VersionMismatchException extends S3FileSystemException {
-  private static final long serialVersionUID = 1L;
-
-  public VersionMismatchException(String clientVersion, String dataVersion) {
-    super("Version mismatch: client expects version " + clientVersion +
-        ", but data has version " +
-        (dataVersion == null ? "[unversioned]" : dataVersion));
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/package.html
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/package.html b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/package.html
deleted file mode 100644
index dd601e1..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/package.html
+++ /dev/null
@@ -1,55 +0,0 @@
-<html>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<body>
-
-<p>A distributed, block-based implementation of {@link
-org.apache.hadoop.fs.FileSystem} that uses <a href="http://aws.amazon.com/s3">Amazon S3</a>
-as a backing store.</p>
-
-<p>
-Files are stored in S3 as blocks (represented by 
-{@link org.apache.hadoop.fs.s3.Block}), which have an ID and a length.
-Block metadata is stored in S3 as a small record (represented by 
-{@link org.apache.hadoop.fs.s3.INode}) using the URL-encoded
-path string as a key. Inodes record the file type (regular file or directory) and the list of blocks.
-This design makes it easy to seek to any given position in a file by reading the inode data to compute
-which block to access, then using S3's support for 
-<a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.2">HTTP Range</a> headers
-to start streaming from the correct position.
-Renames are also efficient since only the inode is moved (by a DELETE followed by a PUT since 
-S3 does not support renames).
-</p>
-<p>
-For a single file <i>/dir1/file1</i> which takes two blocks of storage, the file structure in S3
-would be something like this:
-</p>
-<pre>
-/
-/dir1
-/dir1/file1
-block-6415776850131549260
-block-3026438247347758425
-</pre>
-<p>
-Inodes start with a leading <code>/</code>, while blocks are prefixed with <code>block-</code>.
-</p>
-
-</body>
-</html>
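
The layout described in this package overview reduces a seek to a small computation: walk the
block lengths recorded in the inode until the target position falls inside one, then retrieve
that block from S3 and continue reading from the offset within it. A standalone sketch of that
computation, mirroring the blockSeekTo() logic in the S3InputStream removed earlier in this
patch (illustrative only; the class and method names are placeholders):

    public class BlockLocatorSketch {
      /**
       * Given the per-block lengths stored in an inode, return the index of the
       * block containing target and the offset of target within that block.
       */
      static long[] locateBlock(long[] blockLengths, long target) {
        long blockStart = 0;
        for (int i = 0; i < blockLengths.length; i++) {
          long blockEnd = blockStart + blockLengths[i] - 1;
          if (target >= blockStart && target <= blockEnd) {
            return new long[] { i, target - blockStart };
          }
          blockStart = blockEnd + 1;
        }
        throw new IllegalArgumentException("Position " + target + " is past EOF");
      }

      public static void main(String[] args) {
        // Two 64 MB blocks; position 70 MB falls 6 MB into the second block.
        long[] lengths = { 64L << 20, 64L << 20 };
        long[] hit = locateBlock(lengths, 70L << 20);
        System.out.println("block " + hit[0] + ", offset " + hit[1]);  // block 1, offset 6291456
      }
    }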

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/FileMetadata.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/FileMetadata.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/FileMetadata.java
deleted file mode 100644
index 2746af4..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/FileMetadata.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3native;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * <p>
- * Holds basic metadata for a file stored in a {@link NativeFileSystemStore}.
- * </p>
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-class FileMetadata {
-  private final String key;
-  private final long length;
-  private final long lastModified;
-  
-  public FileMetadata(String key, long length, long lastModified) {
-    this.key = key;
-    this.length = length;
-    this.lastModified = lastModified;
-  }
-  
-  public String getKey() {
-    return key;
-  }
-  
-  public long getLength() {
-    return length;
-  }
-
-  public long getLastModified() {
-    return lastModified;
-  }
-  
-  @Override
-  public String toString() {
-    return "FileMetadata[" + key + ", " + length + ", " + lastModified + "]";
-  }
-  
-}

