hadoop-common-commits mailing list archives

From cmcc...@apache.org
Subject [4/6] HADOOP-11074. Move s3-related FS connector code to hadoop-aws. (David S. Wang via Colin Patrick McCabe) (cherry picked from commit 5ec7fcd9dd6bb86858c6e2583321bb9a615bd392)
Date Thu, 11 Sep 2014 22:01:02 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3Credentials.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3Credentials.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3Credentials.java
deleted file mode 100644
index bcbf0dc..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3Credentials.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3;
-
-import java.net.URI;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.conf.Configuration;
-
-public class TestS3Credentials extends TestCase {
-  public void testInvalidHostnameWithUnderscores() throws Exception {
-    S3Credentials s3Credentials = new S3Credentials();
-    try {
-      s3Credentials.initialize(new URI("s3://a:b@c_d"), new Configuration());
-      fail("Should throw IllegalArgumentException");
-    } catch (IllegalArgumentException e) {
-      assertEquals("Invalid hostname in URI s3://a:b@c_d", e.getMessage());
-    }
-  }
-}

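The deleted test exercised only the failure path of S3Credentials.initialize. For orientation, a minimal happy-path sketch (test-class scaffolding and imports elided; the accessor names getAccessKey() and getSecretAccessKey() are assumptions, since they do not appear in the diff above):

    // Hypothetical usage sketch of S3Credentials; accessor names are assumed.
    S3Credentials credentials = new S3Credentials();
    credentials.initialize(new URI("s3://ID:SECRET@bucket"), new Configuration());
    String accessKey = credentials.getAccessKey();         // "ID"
    String secretKey = credentials.getSecretAccessKey();   // "SECRET"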
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3FileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3FileSystem.java
deleted file mode 100644
index f21989c..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3FileSystem.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.IOException;
-import java.net.URI;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.conf.Configuration;
-
-public class TestS3FileSystem extends TestCase {
-
-  public void testInitialization() throws IOException {
-    initializationTest("s3://a:b@c", "s3://a:b@c");
-    initializationTest("s3://a:b@c/", "s3://a:b@c");
-    initializationTest("s3://a:b@c/path", "s3://a:b@c");
-    initializationTest("s3://a@c", "s3://a@c");
-    initializationTest("s3://a@c/", "s3://a@c");
-    initializationTest("s3://a@c/path", "s3://a@c");
-    initializationTest("s3://c", "s3://c");
-    initializationTest("s3://c/", "s3://c");
-    initializationTest("s3://c/path", "s3://c");
-  }
-  
-  private void initializationTest(String initializationUri, String expectedUri)
-    throws IOException {
-    
-    S3FileSystem fs = new S3FileSystem(new InMemoryFileSystemStore());
-    fs.initialize(URI.create(initializationUri), new Configuration());
-    assertEquals(URI.create(expectedUri), fs.getUri());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3InMemoryFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3InMemoryFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3InMemoryFileSystem.java
deleted file mode 100644
index fbdcd68..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/TestS3InMemoryFileSystem.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.net.URI;
-import junit.framework.TestCase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-
-public class TestS3InMemoryFileSystem extends TestCase {
-
-  private static final String TEST_PATH = "s3://test/data.txt";
-  
-  private static final String TEST_DATA = "Sample data for testing.";
-  
-  private S3InMemoryFileSystem fs;
-  
-  @Override
-  public void setUp() throws IOException {
-    fs = new S3InMemoryFileSystem();
-    fs.initialize(URI.create("s3://test/"), new Configuration());
-  }
- 
-  public void testBasicReadWriteIO() throws IOException {
-    FSDataOutputStream writeStream = fs.create(new Path(TEST_PATH));
-    writeStream.write(TEST_DATA.getBytes());
-    writeStream.flush();
-    writeStream.close();
-    
-    FSDataInputStream readStream = fs.open(new Path(TEST_PATH));
-    BufferedReader br = new BufferedReader(new InputStreamReader(readStream));
-    String line = "";
-    StringBuffer stringBuffer = new StringBuffer();
-    while ((line = br.readLine()) != null) {
-        stringBuffer.append(line);
-    }
-    br.close();
-    
-    assertTrue(TEST_DATA.equals(stringBuffer.toString()));
-  }
-  
-  @Override
-  public void tearDown() throws IOException {
-    fs.close();  
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java
deleted file mode 100644
index ac572aa..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3native;
-
-import static org.apache.hadoop.fs.s3native.NativeS3FileSystem.PATH_DELIMITER;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.Time;
-
-/**
- * <p>
- * A stub implementation of {@link NativeFileSystemStore} for testing
- * {@link NativeS3FileSystem} without actually connecting to S3.
- * </p>
- */
-public class InMemoryNativeFileSystemStore implements NativeFileSystemStore {
-  
-  private Configuration conf;
-  
-  private SortedMap<String, FileMetadata> metadataMap =
-    new TreeMap<String, FileMetadata>();
-  private SortedMap<String, byte[]> dataMap = new TreeMap<String, byte[]>();
-
-  @Override
-  public void initialize(URI uri, Configuration conf) throws IOException {
-    this.conf = conf;
-  }
-
-  @Override
-  public void storeEmptyFile(String key) throws IOException {
-    metadataMap.put(key, new FileMetadata(key, 0, Time.now()));
-    dataMap.put(key, new byte[0]);
-  }
-
-  @Override
-  public void storeFile(String key, File file, byte[] md5Hash)
-    throws IOException {
-    
-    ByteArrayOutputStream out = new ByteArrayOutputStream();
-    byte[] buf = new byte[8192];
-    int numRead;
-    BufferedInputStream in = null;
-    try {
-      in = new BufferedInputStream(new FileInputStream(file));
-      while ((numRead = in.read(buf)) >= 0) {
-        out.write(buf, 0, numRead);
-      }
-    } finally {
-      if (in != null) {
-        in.close();
-      }
-    }
-    metadataMap.put(key,
-        new FileMetadata(key, file.length(), Time.now()));
-    dataMap.put(key, out.toByteArray());
-  }
-
-  @Override
-  public InputStream retrieve(String key) throws IOException {
-    return retrieve(key, 0);
-  }
-  
-  @Override
-  public InputStream retrieve(String key, long byteRangeStart)
-    throws IOException {
-    
-    byte[] data = dataMap.get(key);
-    File file = createTempFile();
-    BufferedOutputStream out = null;
-    try {
-      out = new BufferedOutputStream(new FileOutputStream(file));
-      out.write(data, (int) byteRangeStart,
-          data.length - (int) byteRangeStart);
-    } finally {
-      if (out != null) {
-        out.close();
-      }
-    }
-    return new FileInputStream(file);
-  }
-  
-  private File createTempFile() throws IOException {
-    File dir = new File(conf.get("fs.s3.buffer.dir"));
-    if (!dir.exists() && !dir.mkdirs()) {
-      throw new IOException("Cannot create S3 buffer directory: " + dir);
-    }
-    File result = File.createTempFile("test-", ".tmp", dir);
-    result.deleteOnExit();
-    return result;
-  }
-
-  @Override
-  public FileMetadata retrieveMetadata(String key) throws IOException {
-    return metadataMap.get(key);
-  }
-
-  @Override
-  public PartialListing list(String prefix, int maxListingLength)
-      throws IOException {
-    return list(prefix, maxListingLength, null, false);
-  }
-
-  @Override
-  public PartialListing list(String prefix, int maxListingLength,
-      String priorLastKey, boolean recursive) throws IOException {
-
-    return list(prefix, recursive ? null : PATH_DELIMITER, maxListingLength, priorLastKey);
-  }
-
-  private PartialListing list(String prefix, String delimiter,
-      int maxListingLength, String priorLastKey) throws IOException {
-
-    if (prefix.length() > 0 && !prefix.endsWith(PATH_DELIMITER)) {
-      prefix += PATH_DELIMITER;
-    }
-    
-    List<FileMetadata> metadata = new ArrayList<FileMetadata>();
-    SortedSet<String> commonPrefixes = new TreeSet<String>();
-    for (String key : dataMap.keySet()) {
-      if (key.startsWith(prefix)) {
-        if (delimiter == null) {
-          metadata.add(retrieveMetadata(key));
-        } else {
-          int delimIndex = key.indexOf(delimiter, prefix.length());
-          if (delimIndex == -1) {
-            metadata.add(retrieveMetadata(key));
-          } else {
-            String commonPrefix = key.substring(0, delimIndex);
-            commonPrefixes.add(commonPrefix);
-          }
-        }
-      }
-      if (metadata.size() + commonPrefixes.size() == maxListingLength) {
-        // return the capped listing; without "return" this result was discarded
-        return new PartialListing(key, metadata.toArray(new FileMetadata[0]),
-            commonPrefixes.toArray(new String[0]));
-      }
-    }
-    return new PartialListing(null, metadata.toArray(new FileMetadata[0]),
-        commonPrefixes.toArray(new String[0]));
-  }
-
-  @Override
-  public void delete(String key) throws IOException {
-    metadataMap.remove(key);
-    dataMap.remove(key);
-  }
-
-  @Override
-  public void copy(String srcKey, String dstKey) throws IOException {
-    metadataMap.put(dstKey, metadataMap.get(srcKey));
-    dataMap.put(dstKey, dataMap.get(srcKey));
-  }
-  
-  @Override
-  public void purge(String prefix) throws IOException {
-    Iterator<Entry<String, FileMetadata>> i =
-      metadataMap.entrySet().iterator();
-    while (i.hasNext()) {
-      Entry<String, FileMetadata> entry = i.next();
-      if (entry.getKey().startsWith(prefix)) {
-        dataMap.remove(entry.getKey());
-        i.remove();
-      }
-    }
-  }
-
-  @Override
-  public void dump() throws IOException {
-    System.out.println(metadataMap.values());
-    System.out.println(dataMap.keySet());
-  }
-}

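The stub above is what lets the s3n tests run without network access: NativeS3FileSystem takes a NativeFileSystemStore in its constructor, so a test can inject this map-backed implementation. A minimal sketch of that wiring, mirroring NativeS3FileSystemContractBaseTest below (imports and test-class scaffolding elided):

    public void testAgainstInMemoryStore() throws Exception {
      NativeFileSystemStore store = new InMemoryNativeFileSystemStore();
      NativeS3FileSystem fs = new NativeS3FileSystem(store);
      fs.initialize(URI.create("s3n://bucket/"), new Configuration());
      fs.create(new Path("/dir/file")).close();  // served from the in-memory maps
      store.dump();                              // prints stored metadata and keys
    }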
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java
deleted file mode 100644
index 6516c83..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3native;
-
-import java.io.IOException;
-
-public class Jets3tNativeS3FileSystemContractTest
-  extends NativeS3FileSystemContractBaseTest {
-  
-  @Override
-  NativeFileSystemStore getNativeFileSystemStore() throws IOException {
-    return new Jets3tNativeFileSystemStore();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
deleted file mode 100644
index ac6b9ec..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3native;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URI;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.s3native.NativeS3FileSystem.NativeS3FsInputStream;
-
-public abstract class NativeS3FileSystemContractBaseTest
-  extends FileSystemContractBaseTest {
-  
-  private NativeFileSystemStore store;
-  
-  abstract NativeFileSystemStore getNativeFileSystemStore() throws IOException;
-
-  @Override
-  protected void setUp() throws Exception {
-    Configuration conf = new Configuration();
-    store = getNativeFileSystemStore();
-    fs = new NativeS3FileSystem(store);
-    fs.initialize(URI.create(conf.get("test.fs.s3n.name")), conf);
-  }
-  
-  @Override
-  protected void tearDown() throws Exception {
-    store.purge("test");
-    super.tearDown();
-  }
-
-  public void testCanonicalName() throws Exception {
-    assertNull("s3n doesn't support security token and shouldn't have canonical name",
-               fs.getCanonicalServiceName());
-  }
-
-  public void testListStatusForRoot() throws Exception {
-    FileStatus[] paths = fs.listStatus(path("/"));
-    assertEquals(0, paths.length);
-    
-    Path testDir = path("/test");
-    assertTrue(fs.mkdirs(testDir));
-    
-    paths = fs.listStatus(path("/"));
-    assertEquals(1, paths.length);
-    assertEquals(path("/test"), paths[0].getPath());
-  }
-
-  public void testNoTrailingBackslashOnBucket() throws Exception {
-    assertTrue(fs.getFileStatus(new Path(fs.getUri().toString())).isDirectory());
-  }
-
-  private void createTestFiles(String base) throws IOException {
-    store.storeEmptyFile(base + "/file1");
-    store.storeEmptyFile(base + "/dir/file2");
-    store.storeEmptyFile(base + "/dir/file3");
-  }
-
-  public void testDirWithDifferentMarkersWorks() throws Exception {
-
-    // run through i == 3 so the "both markers" case below is exercised
-    for (int i = 0; i <= 3; i++) {
-      String base = "test/hadoop" + i;
-      Path path = path("/" + base);
-
-      createTestFiles(base);
-
-      if (i == 0) {
-        //do nothing, we are testing correctness with no markers
-      }
-      else if (i == 1) {
-        // test for _$folder$ marker
-        store.storeEmptyFile(base + "_$folder$");
-        store.storeEmptyFile(base + "/dir_$folder$");
-      }
-      else if (i == 2) {
-        // test the end slash file marker
-        store.storeEmptyFile(base + "/");
-        store.storeEmptyFile(base + "/dir/");
-      }
-      else if (i == 3) {
-        // test both markers
-        store.storeEmptyFile(base + "_$folder$");
-        store.storeEmptyFile(base + "/dir_$folder$");
-        store.storeEmptyFile(base + "/");
-        store.storeEmptyFile(base + "/dir/");
-      }
-
-      assertTrue(fs.getFileStatus(path).isDirectory());
-      assertEquals(2, fs.listStatus(path).length);
-    }
-  }
-
-  public void testDeleteWithNoMarker() throws Exception {
-    String base = "test/hadoop";
-    Path path = path("/" + base);
-
-    createTestFiles(base);
-
-    fs.delete(path, true);
-
-    path = path("/test");
-    assertTrue(fs.getFileStatus(path).isDirectory());
-    assertEquals(0, fs.listStatus(path).length);
-  }
-
-  public void testRenameWithNoMarker() throws Exception {
-    String base = "test/hadoop";
-    Path dest = path("/test/hadoop2");
-
-    createTestFiles(base);
-
-    fs.rename(path("/" + base), dest);
-
-    Path path = path("/test");
-    assertTrue(fs.getFileStatus(path).isDirectory());
-    assertEquals(1, fs.listStatus(path).length);
-    assertTrue(fs.getFileStatus(dest).isDirectory());
-    assertEquals(2, fs.listStatus(dest).length);
-  }
-
-  public void testEmptyFile() throws Exception {
-    store.storeEmptyFile("test/hadoop/file1");
-    fs.open(path("/test/hadoop/file1")).close();
-  }
-  
-  public void testBlockSize() throws Exception {
-    Path file = path("/test/hadoop/file");
-    createFile(file);
-    assertEquals("Default block size", fs.getDefaultBlockSize(file),
-    fs.getFileStatus(file).getBlockSize());
-
-    // Block size is determined at read time
-    long newBlockSize = fs.getDefaultBlockSize(file) * 2;
-    fs.getConf().setLong("fs.s3n.block.size", newBlockSize);
-    assertEquals("Double default block size", newBlockSize,
-    fs.getFileStatus(file).getBlockSize());
-  }
-  
-  public void testRetryOnIoException() throws Exception {
-    class TestInputStream extends InputStream {
-      boolean shouldThrow = false;
-      int throwCount = 0;
-      int pos = 0;
-      byte[] bytes;
-      
-      public TestInputStream() {
-        bytes = new byte[256];
-        for (int i = 0; i < 256; i++) {
-          bytes[i] = (byte)i;
-        }
-      }
-      
-      @Override
-      public int read() throws IOException {
-        shouldThrow = !shouldThrow;
-        if (shouldThrow) {
-          throwCount++;
-          throw new IOException();
-        }
-        return pos++;
-      }
-      
-      @Override
-      public int read(byte[] b, int off, int len) throws IOException {
-        shouldThrow = !shouldThrow;
-        if (shouldThrow) {
-          throwCount++;
-          throw new IOException();
-        }
-        
-        int sizeToRead = Math.min(len, 256 - pos);
-        for (int i = 0; i < sizeToRead; i++) {
-          b[off + i] = bytes[pos + i];
-        }
-        pos += sizeToRead;
-        return sizeToRead;
-      }
-    }
-    
-    final InputStream is = new TestInputStream();
-    
-    class MockNativeFileSystemStore extends Jets3tNativeFileSystemStore {
-      @Override
-      public InputStream retrieve(String key, long byteRangeStart) throws IOException {
-        return is;
-      }
-    }
-    
-    NativeS3FsInputStream stream = new NativeS3FsInputStream(new MockNativeFileSystemStore(), null, is, "");
-    
-    // Test reading methods.
-    byte[] result = new byte[256];
-    for (int i = 0; i < 128; i++) {
-      result[i] = (byte)stream.read();
-    }
-    for (int i = 128; i < 256; i += 8) {
-      byte[] temp = new byte[8];
-      int read = stream.read(temp, 0, 8);
-      assertEquals(8, read);
-      System.arraycopy(temp, 0, result, i, 8);
-    }
-    
-    // Assert correct
-    for (int i = 0; i < 256; i++) {
-      assertEquals((byte)i, result[i]);
-    }
-    
-    // Test to make sure the throw path was exercised.
-    // 144 = 128 + (128 / 8)
-    assertEquals(144, ((TestInputStream)is).throwCount);
-  }
-
-}

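testRetryOnIoException above works because NativeS3FsInputStream retries a failed read by re-opening the stream from the store at the current offset; each injected IOException is therefore followed by a successful retry. A rough sketch of that retry shape, with illustrative names rather than the actual implementation:

    // Illustrative retry-once-on-IOException read; field names are assumptions.
    private int readWithRetry() throws IOException {
      try {
        return in.read();
      } catch (IOException e) {
        in = store.retrieve(key, pos);  // reopen at the current position
        return in.read();               // retry the read once
      }
    }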
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/S3NInMemoryFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/S3NInMemoryFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/S3NInMemoryFileSystem.java
deleted file mode 100644
index c0ea85b..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/S3NInMemoryFileSystem.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3native;
-
-import org.apache.hadoop.fs.s3native.NativeS3FileSystem;
-import org.apache.hadoop.fs.s3native.InMemoryNativeFileSystemStore;
-
-/**
- * A helper implementation of {@link NativeS3FileSystem}
- * without actually connecting to S3 for unit testing.
- */
-public class S3NInMemoryFileSystem extends NativeS3FileSystem {
-    public S3NInMemoryFileSystem() {
-        super(new InMemoryNativeFileSystemStore());
-    }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java
deleted file mode 100644
index 664d39e..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3native;
-
-import java.io.IOException;
-
-public class TestInMemoryNativeS3FileSystemContract
-  extends NativeS3FileSystemContractBaseTest {
-
-  @Override
-  NativeFileSystemStore getNativeFileSystemStore() throws IOException {
-    return new InMemoryNativeFileSystemStore();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
deleted file mode 100644
index b1078a4..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3native;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-
-import static org.junit.Assert.*;
-import static org.junit.Assume.*;
-
-import org.junit.Before;
-import org.junit.After;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.security.DigestInputStream;
-import java.security.DigestOutputStream;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-
-
-public class TestJets3tNativeFileSystemStore {
-  private Configuration conf;
-  private Jets3tNativeFileSystemStore store;
-  private NativeS3FileSystem fs;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new Configuration();
-    store = new Jets3tNativeFileSystemStore();
-    fs = new NativeS3FileSystem(store);
-    conf.setBoolean("fs.s3n.multipart.uploads.enabled", true);
-    conf.setLong("fs.s3n.multipart.uploads.block.size", 64 * 1024 * 1024);
-    fs.initialize(URI.create(conf.get("test.fs.s3n.name")), conf);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    try {
-      store.purge("test");
-    } catch (Exception e) {}
-  }
-
-  @BeforeClass
-  public static void checkSettings() throws Exception {
-    Configuration conf = new Configuration();
-    assumeNotNull(conf.get("fs.s3n.awsAccessKeyId"));
-    assumeNotNull(conf.get("fs.s3n.awsSecretAccessKey"));
-    assumeNotNull(conf.get("test.fs.s3n.name"));
-  }
-
-  protected void writeRenameReadCompare(Path path, long len)
-      throws IOException, NoSuchAlgorithmException {
-    // If len > fs.s3n.multipart.uploads.block.size,
-    // we'll use a multipart upload copy
-    MessageDigest digest = MessageDigest.getInstance("MD5");
-    OutputStream out = new BufferedOutputStream(
-        new DigestOutputStream(fs.create(path, false), digest));
-    for (long i = 0; i < len; i++) {
-      out.write('Q');
-    }
-    out.flush();
-    out.close();
-
-    assertTrue("Exists", fs.exists(path));
-
-    // Depending on if this file is over 5 GB or not,
-    // rename will cause a multipart upload copy
-    Path copyPath = path.suffix(".copy");
-    fs.rename(path, copyPath);
-
-    assertTrue("Copy exists", fs.exists(copyPath));
-
-    // Download file from S3 and compare the digest against the original
-    MessageDigest digest2 = MessageDigest.getInstance("MD5");
-    InputStream in = new BufferedInputStream(
-        new DigestInputStream(fs.open(copyPath), digest2));
-    long copyLen = 0;
-    while (in.read() != -1) {copyLen++;}
-    in.close();
-
-    assertEquals("Copy length matches original", len, copyLen);
-    assertArrayEquals("Digests match", digest.digest(), digest2.digest());
-  }
-
-  @Test
-  public void testSmallUpload() throws IOException, NoSuchAlgorithmException {
-    // Regular upload, regular copy
-    writeRenameReadCompare(new Path("/test/small"), 16384);
-  }
-
-  @Test
-  public void testMediumUpload() throws IOException, NoSuchAlgorithmException {
-    // Multipart upload, regular copy
-    writeRenameReadCompare(new Path("/test/medium"), 33554432);    // 100 MB
-  }
-
-  @Test
-  public void testExtraLargeUpload()
-      throws IOException, NoSuchAlgorithmException {
-    // Multipart upload, multipart copy
-    writeRenameReadCompare(new Path("/test/xlarge"), 5368709121L); // 5GB+1byte
-  }
-}

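These are live tests: checkSettings() skips the whole class unless credentials and a target bucket are configured. A sketch of the test configuration they expect, using the property names read above (values are placeholders):

    <property>
      <name>test.fs.s3n.name</name>
      <value>s3n://your-test-bucket/</value>
    </property>
    <property>
      <name>fs.s3n.awsAccessKeyId</name>
      <value>YOUR_ACCESS_KEY</value>
    </property>
    <property>
      <name>fs.s3n.awsSecretAccessKey</name>
      <value>YOUR_SECRET_KEY</value>
    </property>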
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestS3NInMemoryFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestS3NInMemoryFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestS3NInMemoryFileSystem.java
deleted file mode 100644
index b457df2..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/TestS3NInMemoryFileSystem.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3native;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.net.URI;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-
-public class TestS3NInMemoryFileSystem extends TestCase {
-
-  private static final String TEST_PATH = "s3n://test/data.txt";
-  
-  private static final String TEST_DATA = "Sample data for testing.";
-  
-  private S3NInMemoryFileSystem fs;
-  
-  @Override
-  public void setUp() throws IOException {
-    fs = new S3NInMemoryFileSystem();
-    fs.initialize(URI.create("s3n://test/"), new Configuration());
-  }
- 
-  public void testBasicReadWriteIO() throws IOException {
-    FSDataOutputStream writeData = fs.create(new Path(TEST_PATH));
-    writeData.write(TEST_DATA.getBytes());
-    writeData.flush();
-    writeData.close();
-    
-    FSDataInputStream readData = fs.open(new Path(TEST_PATH));
-    BufferedReader br = new BufferedReader(new InputStreamReader(readData));
-    String line = "";
-    StringBuffer stringBuffer = new StringBuffer();
-    while ((line = br.readLine()) != null) {
-        stringBuffer.append(line);
-    }
-    br.close();
-    
-    assertTrue(TEST_DATA.equals(stringBuffer.toString()));
-  }
-  
-  @Override
-  public void tearDown() throws IOException {
-    fs.close();  
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-common-project/hadoop-common/src/test/resources/contract/s3n.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/contract/s3n.xml b/hadoop-common-project/hadoop-common/src/test/resources/contract/s3n.xml
deleted file mode 100644
index ab46178..0000000
--- a/hadoop-common-project/hadoop-common/src/test/resources/contract/s3n.xml
+++ /dev/null
@@ -1,95 +0,0 @@
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~  or more contributor license agreements.  See the NOTICE file
-  ~  distributed with this work for additional information
-  ~  regarding copyright ownership.  The ASF licenses this file
-  ~  to you under the Apache License, Version 2.0 (the
-  ~  "License"); you may not use this file except in compliance
-  ~  with the License.  You may obtain a copy of the License at
-  ~
-  ~       http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~  Unless required by applicable law or agreed to in writing, software
-  ~  distributed under the License is distributed on an "AS IS" BASIS,
-  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~  See the License for the specific language governing permissions and
-  ~  limitations under the License.
-  -->
-
-<configuration>
-  <!--
-  S3N is a blobstore, with very different behavior from a
-  classic filesystem.
-  -->
-
-  <property>
-    <name>fs.contract.test.root-tests-enabled</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.test.random-seek-count</name>
-    <value>10</value>
-  </property>
-
-  <property>
-    <name>fs.contract.is-blobstore</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.is-case-sensitive</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.rename-returns-false-if-source-missing</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-append</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-atomic-directory-delete</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-atomic-rename</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-block-locality</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-concat</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-seek</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.rejects-seek-past-eof</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-strict-exceptions</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-unix-permissions</name>
-    <value>false</value>
-  </property>
-
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 0c1f50d..db32cd4 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -321,6 +321,12 @@
       </dependency>
 
       <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-aws</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+
+      <dependency>
         <groupId>com.google.guava</groupId>
         <artifactId>guava</artifactId>
         <version>11.0.2</version>
@@ -599,6 +605,12 @@
         <groupId>com.amazonaws</groupId>
         <artifactId>aws-java-sdk</artifactId>
         <version>1.7.2</version>
+        <exclusions>
+          <exclusion>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-core</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <dependency>
         <groupId>org.apache.mina</groupId>

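With hadoop-aws registered in hadoop-project's dependencyManagement, downstream modules inherit the managed version and only name the artifact. A sketch of a consuming module's pom.xml entry (the version is deliberately omitted, as it comes from the parent):

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-aws</artifactId>
    </dependency>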
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml
index 74e4923..204e6ab 100644
--- a/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml
@@ -15,5 +15,361 @@
    limitations under the License.
 -->
 <FindBugsFilter>
+     <Match>
+       <Package name="org.apache.hadoop.security.proto" />
+     </Match>
+     <Match>
+       <Package name="org.apache.hadoop.tools.proto" />
+     </Match>
+     <Match>
+       <Bug pattern="EI_EXPOSE_REP" />
+     </Match>
+     <Match>
+       <Bug pattern="EI_EXPOSE_REP2" />
+     </Match>
+     <Match>
+       <Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
+     </Match>
+     <Match>
+       <Class name="~.*_jsp" />
+       <Bug pattern="DLS_DEAD_LOCAL_STORE" />
+     </Match>
+     <Match>
+       <Field name="_jspx_dependants" />
+       <Bug pattern="UWF_UNWRITTEN_FIELD" />
+     </Match>
+     <!-- 
+       Inconsistent synchronization for Client.Connection.out is
+       intentional, so that a connection can be closed instantly. 
+     --> 
+     <Match>
+       <Class name="org.apache.hadoop.ipc.Client$Connection" />
+       <Field name="out" />
+       <Bug pattern="IS2_INCONSISTENT_SYNC" />
+     </Match>
+     <!-- 
+       A further SaslException should be ignored during cleanup and
+       the original exception re-thrown.
+     --> 
+     <Match>
+       <Class name="org.apache.hadoop.security.SaslRpcClient" />
+       <Bug pattern="DE_MIGHT_IGNORE" />
+     </Match>
+     <!-- 
+       Ignore Cross Scripting Vulnerabilities
+     -->
+     <Match>
+       <Package name="~org.apache.hadoop.mapred.*" />
+       <Bug code="XSS" />
+     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.mapred.taskdetails_jsp" />
+       <Bug code="HRS" />
+     </Match>
+     <!--
+       Ignore warnings where a child class has the same name as its
+       superclass. Classes based on the old API shadow names from the
+       new API. Should go away after HADOOP-1.0.
+     -->
+     <Match>
+       <Class name="~org.apache.hadoop.mapred.*" />
+       <Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS" />
+     </Match>
+     <!--
+       Ignore warnings for usage of System.exit. This is
+       required and has been well thought out.
+     -->
+     <Match>
+       <Class name="org.apache.hadoop.mapred.Child$2" />
+       <Method name="run" />
+       <Bug pattern="DM_EXIT" />
+     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.mapred.JobTracker" />
+       <Method name="addHostToNodeMapping" />
+       <Bug pattern="DM_EXIT" />
+     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.mapred.Task" />
+       <Or>
+       <Method name="done" />
+       <Method name="commit" />
+       <Method name="statusUpdate" />
+       </Or>
+       <Bug pattern="DM_EXIT" />
+     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.mapred.Task$TaskReporter" />
+       <Method name="run" />
+       <Bug pattern="DM_EXIT" />
+     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.util.ProgramDriver" />
+       <Method name="driver" />
+       <Bug pattern="DM_EXIT" />
+     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.util.RunJar" />
+       <Method name="run" />
+       <Bug pattern="DM_EXIT" />
+     </Match>
+     <!--
+       We need to cast objects between old and new api objects
+     -->
+     <Match>
+       <Class name="org.apache.hadoop.mapred.OutputCommitter" />
+       <Bug pattern="BC_UNCONFIRMED_CAST" />
+     </Match>
+     <!--
+       We intentionally get the name from the inner class.
+     -->
+     <Match>
+       <Class name="org.apache.hadoop.mapred.TaskTracker$MapEventsFetcherThread" />
+       <Method name="run" />
+       <Bug pattern="IA_AMBIGUOUS_INVOCATION_OF_INHERITED_OR_OUTER_METHOD" />
+     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.mapred.FileOutputCommitter" />
+       <Bug pattern="NM_WRONG_PACKAGE_INTENTIONAL" />
+     </Match>
+     <!--
+       Ignoring this warning, as resolving it would need a non-trivial code change.
+     -->
+     <Match>
+       <Class name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor" />
+       <Method name="configure" />
+       <Field name="maxNumItems" />
+       <Bug pattern="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD" />
+     </Match>
+     <!--
+       Comes from org.apache.jasper.runtime.ResourceInjector. Cannot do much.
+     -->
+     <Match>
+       <Class name="org.apache.hadoop.mapred.jobqueue_005fdetails_jsp" />
+       <Field name="_jspx_resourceInjector" />
+       <Bug pattern="SE_BAD_FIELD" />
+     </Match>
+     <!--
+       Storing textInputFormat and then passing it as a parameter. Safe to ignore.
+     -->
+     <Match>
+       <Class name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob" />
+       <Method name="createValueAggregatorJob" />
+       <Bug pattern="DLS_DEAD_STORE_OF_CLASS_LITERAL" />
+     </Match>
+     <!--
+       Can remove this after the upgrade to findbugs1.3.8
+     -->
+     <Match>
+       <Class name="org.apache.hadoop.mapred.lib.db.DBInputFormat" />
+       <Method name="getSplits" />
+       <Bug pattern="DLS_DEAD_LOCAL_STORE" />
+     </Match>
+    <!--
+      This is a spurious warning. Just ignore
+    -->
+     <Match>
+       <Class name="org.apache.hadoop.mapred.MapTask$MapOutputBuffer" />
+       <Field name="kvindex" />
+       <Bug pattern="IS2_INCONSISTENT_SYNC" />
+     </Match>
 
- </FindBugsFilter>
+     <!-- 
+        core changes 
+     -->
+     <Match>
+       <Class name="~org.apache.hadoop.*" />
+       <Bug code="MS" />
+     </Match>
+
+     <Match>
+       <Class name="org.apache.hadoop.fs.FileSystem" />
+       <Method name="checkPath" />
+       <Bug pattern="ES_COMPARING_STRINGS_WITH_EQ" />
+     </Match>
+
+     <Match>
+       <Class name="org.apache.hadoop.io.Closeable" />
+       <Bug pattern="NM_SAME_SIMPLE_NAME_AS_INTERFACE" />
+     </Match>
+
+     <Match>
+       <Class name="org.apache.hadoop.security.AccessControlException" />
+       <Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS" />
+     </Match>
+
+     <Match>
+       <Class name="org.apache.hadoop.util.ProcfsBasedProcessTree" />
+       <Bug pattern="DMI_HARDCODED_ABSOLUTE_FILENAME" />
+     </Match>
+
+     <!--
+       Streaming, Examples
+     -->
+     <Match>
+       <Class name="org.apache.hadoop.streaming.StreamUtil$TaskId" />
+       <Bug pattern="URF_UNREAD_FIELD" />
+     </Match>
+
+     <Match>
+       <Class name="org.apache.hadoop.examples.DBCountPageView" />
+       <Method name="verify" />
+       <Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
+     </Match>
+
+     <Match>
+       <Class name="org.apache.hadoop.examples.ContextFactory" />
+       <Method name="setAttributes" />
+       <Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
+     </Match>
+
+     <!--
+       TFile
+     -->
+      <Match>
+       <Class name="org.apache.hadoop.io.file.tfile.Chunk$ChunkDecoder" />
+       <Method name="close" />
+       <Bug pattern="SR_NOT_CHECKED" />
+      </Match>
+    <!--
+      The purpose of skip() is to drain remaining bytes of the chunk-encoded
+	  stream (one chunk at a time). The termination condition is checked by
+	  checkEOF().
+    -->
+     <Match>
+       <Class name="org.apache.hadoop.io.file.tfile.Utils" />
+       <Method name="writeVLong" />
+       <Bug pattern="SF_SWITCH_FALLTHROUGH" />
+     </Match>
+    <!--
+	  The switch condition fall through is intentional and for performance
+	  purposes.
+    -->
+
+    <Match>
+      <Class name="org.apache.hadoop.log.EventCounter"/>
+      <!-- backward compatibility -->
+      <Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS"/>
+    </Match>
+    <Match>
+      <Class name="org.apache.hadoop.metrics.jvm.EventCounter"/>
+      <!-- backward compatibility -->
+      <Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS"/>
+    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.ProtobufRpcEngineProtos.*"/>
+    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.ProtocolInfoProtos.*"/>
+    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.IpcConnectionContextProtos.*"/>
+    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.RpcHeaderProtos.*"/>
+    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ha\.proto\.HAServiceProtocolProtos.*"/>
+    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ha\.proto\.ZKFCProtocolProtos.*"/>
+    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.security\.proto\.SecurityProtos.*"/>
+    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.TestProtos.*"/>
+    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.proto\.RefreshCallQueueProtocolProtos.*"/>
+    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.proto\.GenericRefreshProtocolProtos.*"/>
+    </Match>
+
+    <!--
+       Manually checked, misses child thread manually syncing on parent's intrinsic lock.
+    -->
+     <Match>
+       <Class name="org.apache.hadoop.metrics2.lib.MutableQuantiles" />
+       <Field name="previousSnapshot" />
+       <Bug pattern="IS2_INCONSISTENT_SYNC" />
+     </Match>
+     <!--
+       The method uses a generic type T that extends two other types
+       T1 and T2. Findbugs complains of a cast from T1 to T2.
+     -->
+     <Match>
+       <Class name="org.apache.hadoop.fs.DelegationTokenRenewer" />
+       <Method name="removeRenewAction" />
+       <Bug pattern="BC_UNCONFIRMED_CAST" />
+     </Match>
+     
+     <!-- Inconsistent synchronization flagged by findbugs is not valid. -->
+     <Match>
+       <Class name="org.apache.hadoop.ipc.Client$Connection" />
+       <Field name="in" />
+       <Bug pattern="IS2_INCONSISTENT_SYNC" />
+     </Match>
+     <!-- 
+       The switch condition for INITIATE is expected to fallthru to RESPONSE
+       to process initial sasl response token included in the INITIATE
+     -->
+     <Match>
+       <Class name="org.apache.hadoop.ipc.Server$Connection" />
+       <Method name="processSaslMessage" />
+       <Bug pattern="SF_SWITCH_FALLTHROUGH" />
+     </Match>
+
+     <!-- Synchronization performed on util.concurrent instance. -->
+     <Match>
+       <Class name="org.apache.hadoop.service.AbstractService" />
+       <Method name="stop" />
+       <Bug code="JLM" />
+     </Match>
+
+     <Match>
+       <Class name="org.apache.hadoop.service.AbstractService" />
+       <Method name="waitForServiceToStop" />
+       <Bug code="JLM" />
+     </Match>
+
+  <!--
+  OpenStack Swift FS module -closes streams in a different method
+  from where they are opened.
+  -->
+    <Match>
+      <Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
+      <Method name="uploadFileAttempt"/>
+      <Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
+    </Match>
+    <Match>
+      <Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
+      <Method name="uploadFilePartAttempt"/>
+      <Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
+    </Match>
+
+     <!-- code from maven source, null value is checked at callee side. -->
+     <Match>
+       <Class name="org.apache.hadoop.util.ComparableVersion$ListItem" />
+       <Method name="compareTo" />
+       <Bug code="NP" />
+     </Match>
+
+  <Match>
+    <Class name="org.apache.hadoop.util.HttpExceptionUtils"/>
+    <Method name="validateResponse"/>
+    <Bug pattern="REC_CATCH_EXCEPTION"/>
+  </Match>
+
+</FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Block.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Block.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Block.java
new file mode 100644
index 0000000..6926f17
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Block.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Holds metadata about a block of data being stored in a {@link FileSystemStore}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class Block {
+  private long id;
+
+  private long length;
+
+  public Block(long id, long length) {
+    this.id = id;
+    this.length = length;
+  }
+
+  public long getId() {
+    return id;
+  }
+
+  public long getLength() {
+    return length;
+  }
+
+  @Override
+  public String toString() {
+    return "Block[" + id + ", " + length + "]";
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java
new file mode 100644
index 0000000..07e456b
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * A facility for storing and retrieving {@link INode}s and {@link Block}s.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface FileSystemStore {
+  
+  void initialize(URI uri, Configuration conf) throws IOException;
+  String getVersion() throws IOException;
+
+  void storeINode(Path path, INode inode) throws IOException;
+  void storeBlock(Block block, File file) throws IOException;
+  
+  boolean inodeExists(Path path) throws IOException;
+  boolean blockExists(long blockId) throws IOException;
+
+  INode retrieveINode(Path path) throws IOException;
+  File retrieveBlock(Block block, long byteRangeStart) throws IOException;
+
+  void deleteINode(Path path) throws IOException;
+  void deleteBlock(Block block) throws IOException;
+
+  Set<Path> listSubPaths(Path path) throws IOException;
+  Set<Path> listDeepSubPaths(Path path) throws IOException;
+
+  /**
+   * Delete everything. Used for testing.
+   * @throws IOException if the store cannot be purged
+   */
+  void purge() throws IOException;
+  
+  /**
+   * Diagnostic method to dump all INodes to the console.
+   * @throws IOException if listing or retrieving the inodes fails
+   */
+  void dump() throws IOException;
+}
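
A minimal usage sketch of this contract (an illustration, not part of the
patch: it assumes caller code in the same org.apache.hadoop.fs.s3 package,
since FileType and Jets3tFileSystemStore are package-private, a placeholder
bucket with credentials configured, and a hypothetical local file blockFile
holding the block's bytes):

  FileSystemStore store = new Jets3tFileSystemStore();
  store.initialize(URI.create("s3://bucket"), new Configuration());

  File blockFile = new File("/tmp/block-data");   // hypothetical staging file
  Block block = new Block(1L, blockFile.length());
  store.storeBlock(block, blockFile);             // data first...
  store.storeINode(new Path("/dir/file"),         // ...then metadata
      new INode(INode.FileType.FILE, new Block[] { block }));

  INode inode = store.retrieveINode(new Path("/dir/file"));
  File local = store.retrieveBlock(inode.getBlocks()[0], 0L);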

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/INode.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/INode.java
new file mode 100644
index 0000000..5d08b77
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/INode.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.IOUtils;
+
+/**
+ * Holds file metadata, including the type (regular file or directory)
+ * and the list of blocks that point to the data.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class INode {
+
+  enum FileType {
+    DIRECTORY, FILE
+  }
+  
+  /** Indexed by the {@link FileType} ordinal; order must match the enum. */
+  public static final FileType[] FILE_TYPES = {
+    FileType.DIRECTORY,
+    FileType.FILE
+  };
+
+  public static final INode DIRECTORY_INODE = new INode(FileType.DIRECTORY, null);
+  
+  private final FileType fileType;
+  private final Block[] blocks;
+
+  public INode(FileType fileType, Block[] blocks) {
+    this.fileType = fileType;
+    if (isDirectory() && blocks != null) {
+      throw new IllegalArgumentException("A directory cannot contain blocks.");
+    }
+    this.blocks = blocks;
+  }
+
+  public Block[] getBlocks() {
+    return blocks;
+  }
+  
+  public FileType getFileType() {
+    return fileType;
+  }
+
+  public boolean isDirectory() {
+    return fileType == FileType.DIRECTORY;
+  }  
+
+  public boolean isFile() {
+    return fileType == FileType.FILE;
+  }
+  
+  public long getSerializedLength() {
+    // One byte for the file type, then a 4-byte count and 16 bytes per block.
+    return 1L + (blocks == null ? 0 : 4 + 16L * blocks.length);
+  }
+
+  public InputStream serialize() throws IOException {
+    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(bytes);
+    try {
+      out.writeByte(fileType.ordinal());
+      if (isFile()) {
+        out.writeInt(blocks.length);
+        for (int i = 0; i < blocks.length; i++) {
+          out.writeLong(blocks[i].getId());
+          out.writeLong(blocks[i].getLength());
+        }
+      }
+      out.close();
+      out = null;
+    } finally {
+      IOUtils.closeStream(out);
+    }
+    return new ByteArrayInputStream(bytes.toByteArray());
+  }
+  
+  public static INode deserialize(InputStream in) throws IOException {
+    if (in == null) {
+      return null;
+    }
+    DataInputStream dataIn = new DataInputStream(in);
+    FileType fileType = INode.FILE_TYPES[dataIn.readByte()];
+    switch (fileType) {
+    case DIRECTORY:
+      in.close();
+      return INode.DIRECTORY_INODE;
+    case FILE:
+      int numBlocks = dataIn.readInt();
+      Block[] blocks = new Block[numBlocks];
+      for (int i = 0; i < numBlocks; i++) {
+        long id = dataIn.readLong();
+        long length = dataIn.readLong();
+        blocks[i] = new Block(id, length);
+      }
+      in.close();
+      return new INode(fileType, blocks);
+    default:
+      throw new IllegalArgumentException("Cannot deserialize inode.");
+    }    
+  }  
+  
+}
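
A hedged round-trip sketch of the wire format implemented above: one byte
for the FileType ordinal, then, for regular files, a 4-byte block count
followed by 16 bytes (two longs: id, length) per block. Same-package code
is again assumed, since FileType is package-private:

  INode file = new INode(INode.FileType.FILE,
      new Block[] { new Block(42L, 1024L) });
  InputStream in = file.serialize();
  INode copy = INode.deserialize(in);
  assert copy.isFile() && copy.getBlocks().length == 1;
  assert copy.getBlocks()[0].getId() == 42L;
  assert file.getSerializedLength() == 21L;   // 1 + 4 + 1 * 16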

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
new file mode 100644
index 0000000..7f07bbb
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
@@ -0,0 +1,429 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3.INode.FileType;
+import org.jets3t.service.S3Service;
+import org.jets3t.service.S3ServiceException;
+import org.jets3t.service.ServiceException;
+import org.jets3t.service.impl.rest.httpclient.RestS3Service;
+import org.jets3t.service.model.S3Bucket;
+import org.jets3t.service.model.S3Object;
+import org.jets3t.service.security.AWSCredentials;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+class Jets3tFileSystemStore implements FileSystemStore {
+  
+  private static final String FILE_SYSTEM_NAME = "fs";
+  private static final String FILE_SYSTEM_VALUE = "Hadoop";
+
+  private static final String FILE_SYSTEM_TYPE_NAME = "fs-type";
+  private static final String FILE_SYSTEM_TYPE_VALUE = "block";
+
+  private static final String FILE_SYSTEM_VERSION_NAME = "fs-version";
+  private static final String FILE_SYSTEM_VERSION_VALUE = "1";
+  
+  private static final Map<String, Object> METADATA =
+    new HashMap<String, Object>();
+  
+  static {
+    METADATA.put(FILE_SYSTEM_NAME, FILE_SYSTEM_VALUE);
+    METADATA.put(FILE_SYSTEM_TYPE_NAME, FILE_SYSTEM_TYPE_VALUE);
+    METADATA.put(FILE_SYSTEM_VERSION_NAME, FILE_SYSTEM_VERSION_VALUE);
+  }
+
+  private static final String PATH_DELIMITER = Path.SEPARATOR;
+  private static final String BLOCK_PREFIX = "block_";
+
+  private Configuration conf;
+  
+  private S3Service s3Service;
+
+  private S3Bucket bucket;
+  
+  private int bufferSize;
+  
+  private static final Log LOG = 
+    LogFactory.getLog(Jets3tFileSystemStore.class.getName());
+  
+  @Override
+  public void initialize(URI uri, Configuration conf) throws IOException {
+    
+    this.conf = conf;
+    
+    S3Credentials s3Credentials = new S3Credentials();
+    s3Credentials.initialize(uri, conf);
+    try {
+      AWSCredentials awsCredentials =
+        new AWSCredentials(s3Credentials.getAccessKey(),
+            s3Credentials.getSecretAccessKey());
+      this.s3Service = new RestS3Service(awsCredentials);
+    } catch (S3ServiceException e) {
+      if (e.getCause() instanceof IOException) {
+        throw (IOException) e.getCause();
+      }
+      throw new S3Exception(e);
+    }
+    bucket = new S3Bucket(uri.getHost());
+
+    this.bufferSize = conf.getInt(
+        S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_KEY,
+        S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_DEFAULT);
+  }
+
+  @Override
+  public String getVersion() throws IOException {
+    return FILE_SYSTEM_VERSION_VALUE;
+  }
+
+  private void delete(String key) throws IOException {
+    try {
+      s3Service.deleteObject(bucket, key);
+    } catch (S3ServiceException e) {
+      if (e.getCause() instanceof IOException) {
+        throw (IOException) e.getCause();
+      }
+      throw new S3Exception(e);
+    }
+  }
+
+  @Override
+  public void deleteINode(Path path) throws IOException {
+    delete(pathToKey(path));
+  }
+
+  @Override
+  public void deleteBlock(Block block) throws IOException {
+    delete(blockToKey(block));
+  }
+
+  @Override
+  public boolean inodeExists(Path path) throws IOException {
+    InputStream in = get(pathToKey(path), true);
+    if (in == null) {
+      return false;
+    }
+    in.close();
+    return true;
+  }
+  
+  @Override
+  public boolean blockExists(long blockId) throws IOException {
+    InputStream in = get(blockToKey(blockId), false);
+    if (in == null) {
+      return false;
+    }
+    in.close();
+    return true;
+  }
+
+  private InputStream get(String key, boolean checkMetadata)
+      throws IOException {
+    
+    try {
+      S3Object object = s3Service.getObject(bucket.getName(), key);
+      if (checkMetadata) {
+        checkMetadata(object);
+      }
+      return object.getDataInputStream();
+    } catch (S3ServiceException e) {
+      if ("NoSuchKey".equals(e.getS3ErrorCode())) {
+        return null;
+      }
+      if (e.getCause() instanceof IOException) {
+        throw (IOException) e.getCause();
+      }
+      throw new S3Exception(e);
+    } catch (ServiceException e) {
+      handleServiceException(e);
+      return null;
+    }
+  }
+
+  private InputStream get(String key, long byteRangeStart) throws IOException {
+    try {
+      S3Object object = s3Service.getObject(bucket, key, null, null, null,
+                                            null, byteRangeStart, null);
+      return object.getDataInputStream();
+    } catch (S3ServiceException e) {
+      if ("NoSuchKey".equals(e.getS3ErrorCode())) {
+        return null;
+      }
+      if (e.getCause() instanceof IOException) {
+        throw (IOException) e.getCause();
+      }
+      throw new S3Exception(e);
+    } catch (ServiceException e) {
+      handleServiceException(e);
+      return null;
+    }
+  }
+
+  private void checkMetadata(S3Object object) throws S3FileSystemException {
+
+    String name = (String) object.getMetadata(FILE_SYSTEM_NAME);
+    if (!FILE_SYSTEM_VALUE.equals(name)) {
+      throw new S3FileSystemException("Not a Hadoop S3 file.");
+    }
+    String type = (String) object.getMetadata(FILE_SYSTEM_TYPE_NAME);
+    if (!FILE_SYSTEM_TYPE_VALUE.equals(type)) {
+      throw new S3FileSystemException("Not a block file.");
+    }
+    String dataVersion = (String) object.getMetadata(FILE_SYSTEM_VERSION_NAME);
+    if (!FILE_SYSTEM_VERSION_VALUE.equals(dataVersion)) {
+      throw new VersionMismatchException(FILE_SYSTEM_VERSION_VALUE,
+          dataVersion);
+    }
+  }
+
+  @Override
+  public INode retrieveINode(Path path) throws IOException {
+    return INode.deserialize(get(pathToKey(path), true));
+  }
+
+  @Override
+  public File retrieveBlock(Block block, long byteRangeStart)
+    throws IOException {
+    File fileBlock = null;
+    InputStream in = null;
+    OutputStream out = null;
+    try {
+      fileBlock = newBackupFile();
+      in = get(blockToKey(block), byteRangeStart);
+      if (in == null) {
+        throw new IOException("Block missing from S3: " + blockToKey(block));
+      }
+      out = new BufferedOutputStream(new FileOutputStream(fileBlock));
+      byte[] buf = new byte[bufferSize];
+      int numRead;
+      while ((numRead = in.read(buf)) >= 0) {
+        out.write(buf, 0, numRead);
+      }
+      return fileBlock;
+    } catch (IOException e) {
+      // close output stream to file then delete file
+      closeQuietly(out);
+      out = null; // to prevent a second close
+      if (fileBlock != null) {
+        boolean b = fileBlock.delete();
+        if (!b) {
+          LOG.warn("Ignoring failed delete");
+        }
+      }
+      throw e;
+    } finally {
+      closeQuietly(out);
+      closeQuietly(in);
+    }
+  }
+  
+  private File newBackupFile() throws IOException {
+    String bufferDir = conf.get("fs.s3.buffer.dir");
+    if (bufferDir == null) {
+      throw new IOException("fs.s3.buffer.dir is not set");
+    }
+    File dir = new File(bufferDir);
+    if (!dir.exists() && !dir.mkdirs()) {
+      throw new IOException("Cannot create S3 buffer directory: " + dir);
+    }
+    File result = File.createTempFile("input-", ".tmp", dir);
+    result.deleteOnExit();
+    return result;
+  }
+
+  @Override
+  public Set<Path> listSubPaths(Path path) throws IOException {
+    try {
+      String prefix = pathToKey(path);
+      if (!prefix.endsWith(PATH_DELIMITER)) {
+        prefix += PATH_DELIMITER;
+      }
+      S3Object[] objects =
+          s3Service.listObjects(bucket.getName(), prefix, PATH_DELIMITER);
+      Set<Path> prefixes = new TreeSet<Path>();
+      for (int i = 0; i < objects.length; i++) {
+        prefixes.add(keyToPath(objects[i].getKey()));
+      }
+      prefixes.remove(path);
+      return prefixes;
+    } catch (S3ServiceException e) {
+      if (e.getCause() instanceof IOException) {
+        throw (IOException) e.getCause();
+      }
+      throw new S3Exception(e);
+    }
+  }
+  
+  @Override
+  public Set<Path> listDeepSubPaths(Path path) throws IOException {
+    try {
+      String prefix = pathToKey(path);
+      if (!prefix.endsWith(PATH_DELIMITER)) {
+        prefix += PATH_DELIMITER;
+      }
+      S3Object[] objects =
+          s3Service.listObjects(bucket.getName(), prefix, null);
+      Set<Path> prefixes = new TreeSet<Path>();
+      for (int i = 0; i < objects.length; i++) {
+        prefixes.add(keyToPath(objects[i].getKey()));
+      }
+      prefixes.remove(path);
+      return prefixes;
+    } catch (S3ServiceException e) {
+      if (e.getCause() instanceof IOException) {
+        throw (IOException) e.getCause();
+      }
+      throw new S3Exception(e);
+    }    
+  }
+
+  private void put(String key, InputStream in, long length, boolean storeMetadata)
+      throws IOException {
+    
+    try {
+      S3Object object = new S3Object(key);
+      object.setDataInputStream(in);
+      object.setContentType("binary/octet-stream");
+      object.setContentLength(length);
+      if (storeMetadata) {
+        object.addAllMetadata(METADATA);
+      }
+      s3Service.putObject(bucket, object);
+    } catch (S3ServiceException e) {
+      if (e.getCause() instanceof IOException) {
+        throw (IOException) e.getCause();
+      }
+      throw new S3Exception(e);
+    }
+  }
+
+  @Override
+  public void storeINode(Path path, INode inode) throws IOException {
+    put(pathToKey(path), inode.serialize(), inode.getSerializedLength(), true);
+  }
+
+  @Override
+  public void storeBlock(Block block, File file) throws IOException {
+    BufferedInputStream in = null;
+    try {
+      in = new BufferedInputStream(new FileInputStream(file));
+      put(blockToKey(block), in, block.getLength(), false);
+    } finally {
+      closeQuietly(in);
+    }    
+  }
+
+  private void closeQuietly(Closeable closeable) {
+    if (closeable != null) {
+      try {
+        closeable.close();
+      } catch (IOException e) {
+        // ignore
+      }
+    }
+  }
+
+  private String pathToKey(Path path) {
+    if (!path.isAbsolute()) {
+      throw new IllegalArgumentException("Path must be absolute: " + path);
+    }
+    return path.toUri().getPath();
+  }
+
+  private Path keyToPath(String key) {
+    return new Path(key);
+  }
+  
+  private String blockToKey(long blockId) {
+    return BLOCK_PREFIX + blockId;
+  }
+
+  private String blockToKey(Block block) {
+    return blockToKey(block.getId());
+  }
+
+  @Override
+  public void purge() throws IOException {
+    try {
+      S3Object[] objects = s3Service.listObjects(bucket.getName());
+      for (int i = 0; i < objects.length; i++) {
+        s3Service.deleteObject(bucket, objects[i].getKey());
+      }
+    } catch (S3ServiceException e) {
+      if (e.getCause() instanceof IOException) {
+        throw (IOException) e.getCause();
+      }
+      throw new S3Exception(e);
+    }
+  }
+
+  @Override
+  public void dump() throws IOException {
+    StringBuilder sb = new StringBuilder("S3 Filesystem, ");
+    sb.append(bucket.getName()).append("\n");
+    try {
+      S3Object[] objects =
+          s3Service.listObjects(bucket.getName(), PATH_DELIMITER, null);
+      for (int i = 0; i < objects.length; i++) {
+        Path path = keyToPath(objects[i].getKey());
+        sb.append(path).append("\n");
+        INode m = retrieveINode(path);
+        sb.append("\t").append(m.getFileType()).append("\n");
+        if (m.getFileType() == FileType.DIRECTORY) {
+          continue;
+        }
+        for (int j = 0; j < m.getBlocks().length; j++) {
+          sb.append("\t").append(m.getBlocks()[j]).append("\n");
+        }
+      }
+    } catch (S3ServiceException e) {
+      if (e.getCause() instanceof IOException) {
+        throw (IOException) e.getCause();
+      }
+      throw new S3Exception(e);
+    }
+    System.out.println(sb);
+  }
+
+  private void handleServiceException(ServiceException e) throws IOException {
+    if (e.getCause() instanceof IOException) {
+      throw (IOException) e.getCause();
+    } else if (LOG.isDebugEnabled()) {
+      LOG.debug("Got ServiceException with error code: " + e.getErrorCode()
+          + " and error message: " + e.getErrorMessage());
+    }
+  }
+
+}
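
For configuration, the keys this store actually reads in the patch are the
credential properties consumed via S3Credentials (the same ones MigrationTool
below falls back on) and the local staging directory used by newBackupFile().
A sketch with placeholder values:

  Configuration conf = new Configuration();
  conf.set("fs.s3.awsAccessKeyId", "ACCESS_KEY_ID");    // placeholder
  conf.set("fs.s3.awsSecretAccessKey", "SECRET_KEY");   // placeholder
  conf.set("fs.s3.buffer.dir", "/tmp/hadoop-s3");       // where retrieveBlock stages data
  FileSystemStore store = new Jets3tFileSystemStore();
  store.initialize(URI.create("s3://bucket"), conf);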

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4cb3d0/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
new file mode 100644
index 0000000..429c272
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.s3;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.net.URLDecoder;
+import java.net.URLEncoder;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.jets3t.service.S3Service;
+import org.jets3t.service.S3ServiceException;
+import org.jets3t.service.ServiceException;
+import org.jets3t.service.impl.rest.httpclient.RestS3Service;
+import org.jets3t.service.model.S3Bucket;
+import org.jets3t.service.model.S3Object;
+import org.jets3t.service.security.AWSCredentials;
+
+/**
+ * <p>
+ * This class is a tool for migrating data from an older to a newer version
+ * of an S3 filesystem.
+ * </p>
+ * <p>
+ * All files in the filesystem are migrated by re-writing the block metadata;
+ * no datafiles are touched.
+ * </p>
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class MigrationTool extends Configured implements Tool {
+  
+  private S3Service s3Service;
+  private S3Bucket bucket;
+  
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(new MigrationTool(), args);
+    System.exit(res);
+  }
+  
+  @Override
+  public int run(String[] args) throws Exception {
+    
+    if (args.length == 0) {
+      System.err.println("Usage: MigrationTool <S3 file system URI>");
+      System.err.println("\t<S3 file system URI>\tfilesystem to migrate");
+      ToolRunner.printGenericCommandUsage(System.err);
+      return -1;
+    }
+    
+    URI uri = URI.create(args[0]);
+    
+    initialize(uri);
+    
+    FileSystemStore newStore = new Jets3tFileSystemStore();
+    newStore.initialize(uri, getConf());
+    
+    if (get("%2F") != null) { // "%2F" is the URL-encoded root key of an unversioned store
+      System.err.println("Current version number is [unversioned].");
+      System.err.println("Target version number is " +
+          newStore.getVersion() + ".");
+      Store oldStore = new UnversionedStore();
+      migrate(oldStore, newStore);
+      return 0;
+    } else {
+      S3Object root = get("/");
+      if (root != null) {
+        String version = (String) root.getMetadata("fs-version");
+        if (version != null) {
+          String newVersion = newStore.getVersion();
+          System.err.println("Current version number is " + version + ".");
+          System.err.println("Target version number is " + newVersion + ".");
+          if (version.equals(newStore.getVersion())) {
+            System.err.println("No migration required.");
+            return 0;
+          }
+          // use version number to create Store
+          //Store oldStore = ... 
+          //migrate(oldStore, newStore);
+          System.err.println("Not currently implemented.");
+          return 0;
+        }
+      }
+      System.err.println("Can't detect version - exiting.");
+      return 0;
+    }
+    
+  }
+  
+  public void initialize(URI uri) throws IOException {
+    try {
+      String accessKey = null;
+      String secretAccessKey = null;
+      String userInfo = uri.getUserInfo();
+      if (userInfo != null) {
+        int index = userInfo.indexOf(':');
+        if (index != -1) {
+          accessKey = userInfo.substring(0, index);
+          secretAccessKey = userInfo.substring(index + 1);
+        } else {
+          accessKey = userInfo;
+        }
+      }
+      if (accessKey == null) {
+        accessKey = getConf().get("fs.s3.awsAccessKeyId");
+      }
+      if (secretAccessKey == null) {
+        secretAccessKey = getConf().get("fs.s3.awsSecretAccessKey");
+      }
+      if (accessKey == null && secretAccessKey == null) {
+        throw new IllegalArgumentException("AWS " +
+                                           "Access Key ID and Secret Access Key " +
+                                           "must be specified as the username " +
+                                           "or password (respectively) of an s3 URL, " +
+                                           "or by setting the " +
+                                           "fs.s3.awsAccessKeyId or " +
+                                           "fs.s3.awsSecretAccessKey properties (respectively).");
+      } else if (accessKey == null) {
+        throw new IllegalArgumentException("AWS " +
+                                           "Access Key ID must be specified " +
+                                           "as the username of an s3 URL, or by setting the " +
+                                           "fs.s3.awsAccessKeyId property.");
+      } else if (secretAccessKey == null) {
+        throw new IllegalArgumentException("AWS " +
+                                           "Secret Access Key must be specified " +
+                                           "as the password of an s3 URL, or by setting the " +
+                                           "fs.s3.awsSecretAccessKey property.");
+      }
+      AWSCredentials awsCredentials =
+        new AWSCredentials(accessKey, secretAccessKey);
+      this.s3Service = new RestS3Service(awsCredentials);
+    } catch (S3ServiceException e) {
+      if (e.getCause() instanceof IOException) {
+        throw (IOException) e.getCause();
+      }
+      throw new S3Exception(e);
+    }
+    bucket = new S3Bucket(uri.getHost());
+  }
+  
+  private void migrate(Store oldStore, FileSystemStore newStore)
+      throws IOException {
+    for (Path path : oldStore.listAllPaths()) {
+      INode inode = oldStore.retrieveINode(path);
+      oldStore.deleteINode(path);
+      newStore.storeINode(path, inode);
+    }
+  }
+  
+  private S3Object get(String key) throws IOException {
+    try {
+      return s3Service.getObject(bucket.getName(), key);
+    } catch (S3ServiceException e) {
+      if ("NoSuchKey".equals(e.getS3ErrorCode())) {
+        return null;
+      }
+      if (e.getCause() instanceof IOException) {
+        throw (IOException) e.getCause();
+      }
+      throw new S3Exception(e);
+    }
+  }
+  
+  interface Store {
+
+    Set<Path> listAllPaths() throws IOException;
+    INode retrieveINode(Path path) throws IOException;
+    void deleteINode(Path path) throws IOException;
+    
+  }
+  
+  class UnversionedStore implements Store {
+
+    @Override
+    public Set<Path> listAllPaths() throws IOException {
+      try {
+        String prefix = urlEncode(Path.SEPARATOR);
+        S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
+        Set<Path> prefixes = new TreeSet<Path>();
+        for (int i = 0; i < objects.length; i++) {
+          prefixes.add(keyToPath(objects[i].getKey()));
+        }
+        return prefixes;
+      } catch (S3ServiceException e) {
+        if (e.getCause() instanceof IOException) {
+          throw (IOException) e.getCause();
+        }
+        throw new S3Exception(e);
+      }   
+    }
+
+    @Override
+    public void deleteINode(Path path) throws IOException {
+      delete(pathToKey(path));
+    }
+    
+    private void delete(String key) throws IOException {
+      try {
+        s3Service.deleteObject(bucket, key);
+      } catch (S3ServiceException e) {
+        if (e.getCause() instanceof IOException) {
+          throw (IOException) e.getCause();
+        }
+        throw new S3Exception(e);
+      }
+    }
+    
+    @Override
+    public INode retrieveINode(Path path) throws IOException {
+      return INode.deserialize(get(pathToKey(path)));
+    }
+
+    private InputStream get(String key) throws IOException {
+      try {
+        S3Object object = s3Service.getObject(bucket.getName(), key);
+        return object.getDataInputStream();
+      } catch (S3ServiceException e) {
+        if ("NoSuchKey".equals(e.getS3ErrorCode())) {
+          return null;
+        }
+        if (e.getCause() instanceof IOException) {
+          throw (IOException) e.getCause();
+        }
+        throw new S3Exception(e);
+      } catch (ServiceException e) {
+        // Treat any other service-level failure as a missing object.
+        return null;
+      }
+    }
+    
+    private String pathToKey(Path path) {
+      if (!path.isAbsolute()) {
+        throw new IllegalArgumentException("Path must be absolute: " + path);
+      }
+      return urlEncode(path.toUri().getPath());
+    }
+    
+    private Path keyToPath(String key) {
+      return new Path(urlDecode(key));
+    }
+
+    private String urlEncode(String s) {
+      try {
+        return URLEncoder.encode(s, "UTF-8");
+      } catch (UnsupportedEncodingException e) {
+        // Should never happen since every implementation of the Java Platform
+        // is required to support UTF-8.
+        // See http://java.sun.com/j2se/1.5.0/docs/api/java/nio/charset/Charset.html
+        throw new IllegalStateException(e);
+      }
+    }
+    
+    private String urlDecode(String s) {
+      try {
+        return URLDecoder.decode(s, "UTF-8");
+      } catch (UnsupportedEncodingException e) {
+        // Should never happen since every implementation of the Java Platform
+        // is required to support UTF-8.
+        // See http://java.sun.com/j2se/1.5.0/docs/api/java/nio/charset/Charset.html
+        throw new IllegalStateException(e);
+      }
+    }
+    
+  }
+  
+}
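
A hedged invocation sketch, mirroring main() above; the bucket name is a
placeholder, and credentials may be embedded in the URI
(s3://ID:SECRET@bucket) or supplied through the fs.s3.awsAccessKeyId /
fs.s3.awsSecretAccessKey properties that initialize() falls back on:

  int rc = ToolRunner.run(new MigrationTool(),
      new String[] { "s3://bucket" });
  System.exit(rc);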

