hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From aengin...@apache.org
Subject [23/50] [abbrv] hadoop git commit: HADOOP-14553. Add (parallelized) integration tests to hadoop-azure. Contributed by Steve Loughran
Date Mon, 18 Sep 2017 22:12:06 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
index a3f2843..4bf6f04 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
 import org.junit.Assume;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 
@@ -49,8 +48,8 @@ public class TestNativeAzureFileSystemAuthorization
   protected MockWasbAuthorizerImpl authorizer;
 
   @Override
-  public Configuration getConfiguration() {
-    Configuration conf = super.getConfiguration();
+  public Configuration createConfiguration() {
+    Configuration conf = super.createConfiguration();
     conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
     conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost/");
     conf.set(NativeAzureFileSystem.AZURE_CHOWN_USERLIST_PROPERTY_NAME, "user1 , user2");
@@ -59,13 +58,12 @@ public class TestNativeAzureFileSystemAuthorization
 
   @Override
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
-    Configuration conf = getConfiguration();
-    return AzureBlobStorageTestAccount.create(conf);
+    return AzureBlobStorageTestAccount.create(createConfiguration());
   }
 
-
-  @Before
-  public void beforeMethod() {
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
     boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false);
     boolean useAuthorization = fs.getConf().getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false);
     Assume.assumeTrue("Test valid when both SecureMode and Authorization are enabled .. skipping",
@@ -76,7 +74,6 @@ public class TestNativeAzureFileSystemAuthorization
     fs.updateWasbAuthorizer(authorizer);
   }
 
-
   @Rule
   public ExpectedException expectedEx = ExpectedException.none();
 
@@ -95,7 +92,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Setup the expected exception class, and exception message that the test is supposed to fail with
+   * Setup the expected exception class, and exception message that the test is supposed to fail with.
    */
   protected void setExpectedFailureMessage(String operation, Path path) {
     expectedEx.expect(WasbAuthorizationException.class);
@@ -104,7 +101,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Positive test to verify Create access check
+   * Positive test to verify Create access check.
    * The file is created directly under an existing folder.
    * No intermediate folders need to be created.
    * @throws Throwable
@@ -128,7 +125,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Positive test to verify Create access check
+   * Positive test to verify Create access check.
    * The test tries to create a file whose parent is non-existent to ensure that
    * the intermediate folders between ancestor and direct parent are being created
    * when proper ranger policies are configured.
@@ -155,7 +152,7 @@ public class TestNativeAzureFileSystemAuthorization
 
 
   /**
-   * Negative test to verify that create fails when trying to overwrite an existing file
+   * Negative test to verify that create fails when trying to overwrite an existing file.
    * without proper write permissions on the file being overwritten.
    * @throws Throwable
    */
@@ -181,7 +178,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Positive test to verify that create succeeds when trying to overwrite an existing file
+   * Positive test to verify that create succeeds when trying to overwrite an existing file.
    * when proper write permissions on the file being overwritten are provided.
    * @throws Throwable
    */
@@ -232,7 +229,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Positive test to verify listStatus access check
+   * Positive test to verify listStatus access check.
    * @throws Throwable
    */
   @Test
@@ -257,7 +254,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Negative test to verify listStatus access check
+   * Negative test to verify listStatus access check.
    * @throws Throwable
    */
 
@@ -342,7 +339,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Negative test to verify rename access check - the dstFolder disallows rename
+   * Negative test to verify rename access check - the dstFolder disallows rename.
    * @throws Throwable
    */
   @Test //(expected=WasbAuthorizationException.class)
@@ -373,7 +370,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Positive test to verify rename access check - the dstFolder allows rename
+   * Positive test to verify rename access check - the dstFolder allows rename.
    * @throws Throwable
    */
   @Test
@@ -484,7 +481,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Positive test to verify file delete access check
+   * Positive test to verify file delete access check.
    * @throws Throwable
    */
   @Test
@@ -506,7 +503,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Negative test to verify file delete access check
+   * Negative test to verify file delete access check.
    * @throws Throwable
    */
   @Test //(expected=WasbAuthorizationException.class)
@@ -544,7 +541,7 @@ public class TestNativeAzureFileSystemAuthorization
 
   /**
    * Positive test to verify file delete access check, with intermediate folders
-   * Uses wildcard recursive permissions
+   * Uses wildcard recursive permissions.
    * @throws Throwable
    */
   @Test
@@ -582,7 +579,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Positive test for mkdirs access check
+   * Positive test for mkdirs access check.
    * @throws Throwable
    */
   @Test
@@ -668,7 +665,7 @@ public class TestNativeAzureFileSystemAuthorization
     }
   }
   /**
-   * Negative test for mkdirs access check
+   * Negative test for mkdirs access check.
    * @throws Throwable
    */
   @Test //(expected=WasbAuthorizationException.class)
@@ -692,7 +689,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Positive test triple slash format (wasb:///) access check
+   * Positive test triple slash format (wasb:///) access check.
    * @throws Throwable
    */
   @Test
@@ -708,7 +705,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
-   * Negative test for setOwner when Authorization is enabled
+   * Negative test for setOwner when Authorization is enabled.
    */
   @Test
   public void testSetOwnerThrowsForUnauthorisedUsers() throws Throwable {
@@ -744,7 +741,7 @@ public class TestNativeAzureFileSystemAuthorization
 
   /**
    * Test for setOwner when Authorization is enabled and
-   * the user is specified in chown allowed user list
+   * the user is specified in chown allowed user list.
    * */
   @Test
   public void testSetOwnerSucceedsForAuthorisedUsers() throws Throwable {
@@ -785,7 +782,7 @@ public class TestNativeAzureFileSystemAuthorization
 
   /**
    * Test for setOwner when Authorization is enabled and
-   * the userlist is specified as '*'
+   * the userlist is specified as '*'.
    * */
   @Test
   public void testSetOwnerSucceedsForAnyUserWhenWildCardIsSpecified() throws Throwable {
@@ -829,7 +826,7 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /** Test for setOwner  throws for illegal setup of chown
-   * allowed testSetOwnerSucceedsForAuthorisedUsers
+   * allowed testSetOwnerSucceedsForAuthorisedUsers.
    */
   @Test
   public void testSetOwnerFailsForIllegalSetup() throws Throwable {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
deleted file mode 100644
index 4bd4633..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import java.security.PrivilegedExceptionAction;
-
-import org.apache.hadoop.fs.Path;
-import org.junit.Test;
-import org.junit.Before;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test class that runs wasb authorization tests with owner check enabled.
- */
-public class TestNativeAzureFileSystemAuthorizationWithOwner
-  extends TestNativeAzureFileSystemAuthorization {
-
-  @Before
-  public void beforeMethod() {
-    super.beforeMethod();
-    authorizer.init(fs.getConf(), true);
-  }
-
-  /**
-   * Test case when owner matches current user
-   */
-  @Test
-  public void testOwnerPermissionPositive() throws Throwable {
-
-    Path parentDir = new Path("/testOwnerPermissionPositive");
-    Path testPath = new Path(parentDir, "test.data");
-
-    authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
-    authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
-    authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
-    // additional rule used for assertPathExists
-    authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.READ.toString(), true);
-    fs.updateWasbAuthorizer(authorizer);
-
-    try {
-      // creates parentDir with owner as current user
-      fs.mkdirs(parentDir);
-      ContractTestUtils.assertPathExists(fs, "parentDir does not exist", parentDir);
-
-      fs.create(testPath);
-      fs.getFileStatus(testPath);
-      ContractTestUtils.assertPathExists(fs, "testPath does not exist", testPath);
-
-    } finally {
-      allowRecursiveDelete(fs, parentDir.toString());
-      fs.delete(parentDir, true);
-    }
-  }
-
-  /**
-   * Negative test case for owner does not match current user
-   */
-  @Test
-  public void testOwnerPermissionNegative() throws Throwable {
-    expectedEx.expect(WasbAuthorizationException.class);
-
-    Path parentDir = new Path("/testOwnerPermissionNegative");
-    Path childDir = new Path(parentDir, "childDir");
-
-    setExpectedFailureMessage("mkdirs", childDir);
-
-    authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
-    authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
-
-    fs.updateWasbAuthorizer(authorizer);
-
-    try{
-      fs.mkdirs(parentDir);
-      UserGroupInformation ugiSuperUser = UserGroupInformation.createUserForTesting(
-          "testuser", new String[] {});
-
-      ugiSuperUser.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-          fs.mkdirs(childDir);
-          return null;
-        }
-      });
-
-    } finally {
-       allowRecursiveDelete(fs, parentDir.toString());
-       fs.delete(parentDir, true);
-    }
-  }
-
-  /**
-   * Test to verify that retrieving owner information does not
-   * throw when file/folder does not exist
-   */
-  @Test
-  public void testRetrievingOwnerDoesNotFailWhenFileDoesNotExist() throws Throwable {
-
-    Path testdirectory = new Path("/testDirectory123454565");
-
-    String owner = fs.getOwnerForPath(testdirectory);
-    assertEquals("", owner);
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
index b2660bb..b280cac 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.fs.azure;
 
-import static org.junit.Assert.assertEquals;
-
 import java.io.OutputStream;
 
 import org.apache.hadoop.conf.Configuration;
@@ -29,7 +27,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.junit.Test;
 
-public class TestNativeAzureFileSystemBlockLocations {
+/**
+ * Test block location logic.
+ */
+public class TestNativeAzureFileSystemBlockLocations
+    extends AbstractWasbTestWithTimeout {
   @Test
   public void testNumberOfBlocks() throws Exception {
     Configuration conf = new Configuration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
deleted file mode 100644
index 4114e60..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.net.URI;
-import java.util.StringTokenizer;
-
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.log4j.Logger;
-import org.junit.Test;
-
-/**
- * Test to validate Azure storage client side logging. Tests works only when
- * testing with Live Azure storage because Emulator does not have support for
- * client-side logging.
- *
- */
-public class TestNativeAzureFileSystemClientLogging
-    extends AbstractWasbTestBase {
-
-  private AzureBlobStorageTestAccount testAccount;
-
-  // Core-site config controlling Azure Storage Client logging
-  private static final String KEY_LOGGING_CONF_STRING = "fs.azure.storage.client.logging";
-
-  // Temporary directory created using WASB.
-  private static final String TEMP_DIR = "tempDir";
-
-  /*
-   * Helper method to verify the client logging is working. This check primarily
-   * checks to make sure we see a line in the logs corresponding to the entity
-   * that is created during test run.
-   */
-  private boolean verifyStorageClientLogs(String capturedLogs, String entity)
-      throws Exception {
-
-    URI uri = testAccount.getRealAccount().getBlobEndpoint();
-    String container = testAccount.getRealContainer().getName();
-    String validateString = uri + Path.SEPARATOR + container + Path.SEPARATOR
-        + entity;
-    boolean entityFound = false;
-
-    StringTokenizer tokenizer = new StringTokenizer(capturedLogs, "\n");
-
-    while (tokenizer.hasMoreTokens()) {
-      String token = tokenizer.nextToken();
-      if (token.contains(validateString)) {
-        entityFound = true;
-        break;
-      }
-    }
-    return entityFound;
-  }
-
-  /*
-   * Helper method that updates the core-site config to enable/disable logging.
-   */
-  private void updateFileSystemConfiguration(Boolean loggingFlag)
-      throws Exception {
-
-    Configuration conf = fs.getConf();
-    conf.set(KEY_LOGGING_CONF_STRING, loggingFlag.toString());
-    URI uri = fs.getUri();
-    fs.initialize(uri, conf);
-  }
-
-  // Using WASB code to communicate with Azure Storage.
-  private void performWASBOperations() throws Exception {
-
-    Path tempDir = new Path(Path.SEPARATOR + TEMP_DIR);
-    fs.mkdirs(tempDir);
-    fs.delete(tempDir, true);
-  }
-
-  @Test
-  public void testLoggingEnabled() throws Exception {
-
-    LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
-        .getRootLogger()));
-
-    // Update configuration based on the Test.
-    updateFileSystemConfiguration(true);
-
-    performWASBOperations();
-
-    String output = getLogOutput(logs);
-    assertTrue("Log entry " + TEMP_DIR + " not found  in " + output,
-        verifyStorageClientLogs(output, TEMP_DIR));
-  }
-
-  protected String getLogOutput(LogCapturer logs) {
-    String output = logs.getOutput();
-    assertTrue("No log created/captured", !output.isEmpty());
-    return output;
-  }
-
-  @Test
-  public void testLoggingDisabled() throws Exception {
-
-    LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
-        .getRootLogger()));
-
-    // Update configuration based on the Test.
-    updateFileSystemConfiguration(false);
-
-    performWASBOperations();
-    String output = getLogOutput(logs);
-
-    assertFalse("Log entry " + TEMP_DIR + " found  in " + output,
-        verifyStorageClientLogs(output, TEMP_DIR));
-  }
-
-  @Override
-  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
-    testAccount = AzureBlobStorageTestAccount.create();
-    return testAccount;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
index cbfc563..655ae90 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
@@ -18,11 +18,6 @@
 
 package org.apache.hadoop.fs.azure;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
 import java.io.OutputStream;
 import java.io.PrintWriter;
 import java.io.StringWriter;
@@ -33,32 +28,30 @@ import java.util.concurrent.ConcurrentLinkedQueue;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 
-public class TestNativeAzureFileSystemConcurrency {
-  private AzureBlobStorageTestAccount testAccount;
-  private FileSystem fs;
+public class TestNativeAzureFileSystemConcurrency extends AbstractWasbTestBase {
   private InMemoryBlockBlobStore backingStore;
 
-  @Before
+  @Override
   public void setUp() throws Exception {
-    testAccount = AzureBlobStorageTestAccount.createMock();
-    fs = testAccount.getFileSystem();
-    backingStore = testAccount.getMockStorage().getBackingStore();
+    super.setUp();
+    backingStore = getTestAccount().getMockStorage().getBackingStore();
   }
 
-  @After
+  @Override
   public void tearDown() throws Exception {
-    testAccount.cleanup();
-    fs = null;
+    super.tearDown();
     backingStore = null;
   }
 
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    return AzureBlobStorageTestAccount.createMock();
+  }
+
   @Test
   public void testLinkBlobs() throws Exception {
     Path filePath = new Path("/inProgress");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
deleted file mode 100644
index 7c5899d..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-/***
- * Test class to hold all Live Azure storage concurrency tests.
- */
-public class TestNativeAzureFileSystemConcurrencyLive
-    extends AbstractWasbTestBase {
-
-  private static final int THREAD_COUNT = 102;
-  private static final int TEST_EXECUTION_TIMEOUT = 5000;
-  @Override
-  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
-    return AzureBlobStorageTestAccount.create();
-  }
-
-  /**
-   * Validate contract for FileSystem.create when overwrite is true and there
-   * are concurrent callers of FileSystem.delete.  An existing file should be
-   * overwritten, even if the original destination exists but is deleted by an
-   * external agent during the create operation.
-   */
-  @Test(timeout = TEST_EXECUTION_TIMEOUT)
-  public void testConcurrentCreateDeleteFile() throws Exception {
-    Path testFile = new Path("test.dat");
-
-    List<CreateFileTask> tasks = new ArrayList<>(THREAD_COUNT);
-
-    for (int i = 0; i < THREAD_COUNT; i++) {
-      tasks.add(new CreateFileTask(fs, testFile));
-    }
-
-    ExecutorService es = null;
-
-    try {
-      es = Executors.newFixedThreadPool(THREAD_COUNT);
-
-      List<Future<Void>> futures = es.invokeAll(tasks);
-
-      for (Future<Void> future : futures) {
-        Assert.assertTrue(future.isDone());
-
-        // we are using Callable<V>, so if an exception
-        // occurred during the operation, it will be thrown
-        // when we call get
-        Assert.assertEquals(null, future.get());
-      }
-    } finally {
-      if (es != null) {
-        es.shutdownNow();
-      }
-    }
-  }
-
-  /**
-   * Validate contract for FileSystem.delete when invoked concurrently.
-   * One of the threads should successfully delete the file and return true;
-   * all other threads should return false.
-   */
-  @Test(timeout = TEST_EXECUTION_TIMEOUT)
-  public void testConcurrentDeleteFile() throws Exception {
-    Path testFile = new Path("test.dat");
-    fs.create(testFile).close();
-
-    List<DeleteFileTask> tasks = new ArrayList<>(THREAD_COUNT);
-
-    for (int i = 0; i < THREAD_COUNT; i++) {
-      tasks.add(new DeleteFileTask(fs, testFile));
-    }
-
-    ExecutorService es = null;
-    try {
-      es = Executors.newFixedThreadPool(THREAD_COUNT);
-
-      List<Future<Boolean>> futures = es.invokeAll(tasks);
-
-      int successCount = 0;
-      for (Future<Boolean> future : futures) {
-        Assert.assertTrue(future.isDone());
-
-        // we are using Callable<V>, so if an exception
-        // occurred during the operation, it will be thrown
-        // when we call get
-        Boolean success = future.get();
-        if (success) {
-          successCount++;
-        }
-      }
-
-      Assert.assertEquals(
-          "Exactly one delete operation should return true.",
-          1,
-          successCount);
-    } finally {
-      if (es != null) {
-        es.shutdownNow();
-      }
-    }
-  }
-}
-
-abstract class FileSystemTask<V> implements Callable<V> {
-  private final FileSystem fileSystem;
-  private final Path path;
-
-  protected FileSystem getFileSystem() {
-    return this.fileSystem;
-  }
-
-  protected Path getFilePath() {
-    return this.path;
-  }
-
-  FileSystemTask(FileSystem fs, Path p) {
-    this.fileSystem = fs;
-    this.path = p;
-  }
-
-  public abstract V call() throws Exception;
-}
-
-class DeleteFileTask extends FileSystemTask<Boolean> {
-
-  DeleteFileTask(FileSystem fs, Path p) {
-    super(fs, p);
-  }
-
-  @Override
-  public Boolean call() throws Exception {
-    return this.getFileSystem().delete(this.getFilePath(), false);
-  }
-}
-
-class CreateFileTask extends FileSystemTask<Void> {
-  CreateFileTask(FileSystem fs, Path p) {
-    super(fs, p);
-  }
-
-  public Void call() throws Exception {
-    FileSystem fs = getFileSystem();
-    Path p = getFilePath();
-
-    // Create an empty file and close the stream.
-    FSDataOutputStream stream = fs.create(p, true);
-    stream.close();
-
-    // Delete the file.  We don't care if delete returns true or false.
-    // We just want to ensure the file does not exist.
-    this.getFileSystem().delete(this.getFilePath(), false);
-
-    return null;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java
deleted file mode 100644
index 217ca81..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assume.assumeNotNull;
-
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.junit.After;
-import org.junit.Before;
-
-public class TestNativeAzureFileSystemContractEmulator extends
-    FileSystemContractBaseTest {
-  private AzureBlobStorageTestAccount testAccount;
-
-  @Before
-  public void setUp() throws Exception {
-    testAccount = AzureBlobStorageTestAccount.createForEmulator();
-    if (testAccount != null) {
-      fs = testAccount.getFileSystem();
-    }
-    assumeNotNull(fs);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (testAccount != null) {
-      testAccount.cleanup();
-      testAccount = null;
-      fs = null;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java
deleted file mode 100644
index b546009..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assume.assumeNotNull;
-
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-public class TestNativeAzureFileSystemContractLive extends
-    FileSystemContractBaseTest {
-  private AzureBlobStorageTestAccount testAccount;
-
-  @Before
-  public void setUp() throws Exception {
-    testAccount = AzureBlobStorageTestAccount.create();
-    if (testAccount != null) {
-      fs = testAccount.getFileSystem();
-    }
-    assumeNotNull(fs);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (testAccount != null) {
-      testAccount.cleanup();
-      testAccount = null;
-      fs = null;
-    }
-  }
-
-  /**
-   * The following tests are failing on Azure and the Azure 
-   * file system code needs to be modified to make them pass.
-   * A separate work item has been opened for this.
-   */
-  @Ignore
-  @Test
-  public void testMoveFileUnderParent() throws Throwable {
-  }
-
-  @Ignore
-  @Test
-  public void testRenameFileToSelf() throws Throwable {
-  }
-
-  @Ignore
-  @Test
-  public void testRenameChildDirForbidden() throws Exception {
-  }
-
-  @Ignore
-  @Test
-  public void testMoveDirUnderParent() throws Throwable {
-  }
-
-  @Ignore
-  @Test
-  public void testRenameDirToSelf() throws Throwable {
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
index f458bb3..2809260 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
@@ -23,6 +23,9 @@ import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 
+/**
+ * Mocked testing of FileSystemContractBaseTest.
+ */
 public class TestNativeAzureFileSystemContractMocked extends
     FileSystemContractBaseTest {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java
deleted file mode 100644
index 2a88ad2..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.junit.After;
-import static org.junit.Assume.assumeNotNull;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-public class TestNativeAzureFileSystemContractPageBlobLive extends
-    FileSystemContractBaseTest {
-  private AzureBlobStorageTestAccount testAccount;
-
-  private AzureBlobStorageTestAccount createTestAccount()
-      throws Exception {
-    Configuration conf = new Configuration();
-
-    // Configure the page blob directories key so every file created is a page blob.
-    conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
-
-    // Configure the atomic rename directories key so every folder will have
-    // atomic rename applied.
-    conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
-    return AzureBlobStorageTestAccount.create(conf);
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    testAccount = createTestAccount();
-    if (testAccount != null) {
-      fs = testAccount.getFileSystem();
-    }
-    assumeNotNull(fs);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (testAccount != null) {
-      testAccount.cleanup();
-      testAccount = null;
-      fs = null;
-    }
-  }
-
-  /**
-   * The following tests are failing on Azure and the Azure 
-   * file system code needs to be modified to make them pass.
-   * A separate work item has been opened for this.
-   */
-  @Ignore
-  @Test
-  public void testMoveFileUnderParent() throws Throwable {
-  }
-
-  @Ignore
-  @Test
-  public void testRenameFileToSelf() throws Throwable {
-  }
-  
-  @Ignore
-  @Test
-  public void testRenameChildDirForbidden() throws Exception {
-  }
-  
-  @Ignore
-  @Test
-  public void testMoveDirUnderParent() throws Throwable {
-  }
-  
-  @Ignore
-  @Test
-  public void testRenameDirToSelf() throws Throwable {
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
index 82eabaa..0dfbb37 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
@@ -18,17 +18,11 @@
 
 package org.apache.hadoop.fs.azure;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
 import java.util.HashMap;
 
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Before;
+
 import org.junit.Test;
 
 /**
@@ -38,24 +32,18 @@ import org.junit.Test;
  * creation/rename of files/directories through WASB that have colons in the
  * names.
  */
-public class TestNativeAzureFileSystemFileNameCheck {
-  private FileSystem fs = null;
-  private AzureBlobStorageTestAccount testAccount = null;
+public class TestNativeAzureFileSystemFileNameCheck extends AbstractWasbTestBase {
   private String root = null;
 
-  @Before
+  @Override
   public void setUp() throws Exception {
-    testAccount = AzureBlobStorageTestAccount.createMock();
-    fs = testAccount.getFileSystem();
+    super.setUp();
     root = fs.getUri().toString();
   }
 
-  @After
-  public void tearDown() throws Exception {
-    testAccount.cleanup();
-    root = null;
-    fs = null;
-    testAccount = null;
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    return AzureBlobStorageTestAccount.createMock();
   }
 
   @Test
@@ -138,4 +126,4 @@ public class TestNativeAzureFileSystemFileNameCheck {
     fsck.run(new String[] { p.toString() });
     return fsck.getPathNameWarning();
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
deleted file mode 100644
index 6baba33..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.concurrent.CountDownLatch;
-
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-import org.junit.Test;
-
-import com.microsoft.azure.storage.StorageException;
-
-/*
- * Tests the Native Azure file system (WASB) against an actual blob store if
- * provided in the environment.
- */
-public class TestNativeAzureFileSystemLive extends
-    NativeAzureFileSystemBaseTest {
-
-  @Override
-  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
-    return AzureBlobStorageTestAccount.create();
-  }
-
-  @Test
-  public void testLazyRenamePendingCanOverwriteExistingFile()
-    throws Exception {
-    final String SRC_FILE_KEY = "srcFile";
-    final String DST_FILE_KEY = "dstFile";
-    Path srcPath = new Path(SRC_FILE_KEY);
-    FSDataOutputStream srcStream = fs.create(srcPath);
-    assertTrue(fs.exists(srcPath));
-    Path dstPath = new Path(DST_FILE_KEY);
-    FSDataOutputStream dstStream = fs.create(dstPath);
-    assertTrue(fs.exists(dstPath));
-    NativeAzureFileSystem nfs = (NativeAzureFileSystem)fs;
-    final String fullSrcKey = nfs.pathToKey(nfs.makeAbsolute(srcPath));
-    final String fullDstKey = nfs.pathToKey(nfs.makeAbsolute(dstPath));
-    nfs.getStoreInterface().rename(fullSrcKey, fullDstKey, true, null);
-    assertTrue(fs.exists(dstPath));
-    assertFalse(fs.exists(srcPath));
-    IOUtils.cleanup(null, srcStream);
-    IOUtils.cleanup(null, dstStream);
-  }
-  /**
-   * Tests fs.delete() function to delete a blob when another blob is holding a
-   * lease on it. Delete if called without a lease should fail if another process
-   * is holding a lease and throw appropriate exception
-   * This is a scenario that would happen in HMaster startup when it tries to
-   * clean up the temp dirs while the HMaster process which was killed earlier
-   * held lease on the blob when doing some DDL operation
-   */
-  @Test
-  public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage()
-      throws Exception {
-    LOG.info("Starting test");
-    final String FILE_KEY = "fileWithLease";
-    // Create the file
-    Path path = new Path(FILE_KEY);
-    fs.create(path);
-    assertTrue(fs.exists(path));
-    NativeAzureFileSystem nfs = (NativeAzureFileSystem)fs;
-    final String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
-    final AzureNativeFileSystemStore store = nfs.getStore();
-
-    // Acquire the lease on the file in a background thread
-    final CountDownLatch leaseAttemptComplete = new CountDownLatch(1);
-    final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1);
-    Thread t = new Thread() {
-      @Override
-      public void run() {
-        // Acquire the lease and then signal the main test thread.
-        SelfRenewingLease lease = null;
-        try {
-          lease = store.acquireLease(fullKey);
-          LOG.info("Lease acquired: " + lease.getLeaseID());
-        } catch (AzureException e) {
-          LOG.warn("Lease acqusition thread unable to acquire lease", e);
-        } finally {
-          leaseAttemptComplete.countDown();
-        }
-
-        // Wait for the main test thread to signal it will attempt the delete.
-        try {
-          beginningDeleteAttempt.await();
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-        }
-
-        // Keep holding the lease past the lease acquisition retry interval, so
-        // the test covers the case of delete retrying to acquire the lease.
-        try {
-          Thread.sleep(SelfRenewingLease.LEASE_ACQUIRE_RETRY_INTERVAL * 3);
-        } catch (InterruptedException ex) {
-          Thread.currentThread().interrupt();
-        }
-
-        try {
-          if (lease != null){
-            LOG.info("Freeing lease");
-            lease.free();
-          }
-        } catch (StorageException se) {
-          LOG.warn("Unable to free lease.", se);
-        }
-      }
-    };
-
-    // Start the background thread and wait for it to signal the lease is held.
-    t.start();
-    try {
-      leaseAttemptComplete.await();
-    } catch (InterruptedException ex) {
-      Thread.currentThread().interrupt();
-    }
-
-    // Try to delete the same file
-    beginningDeleteAttempt.countDown();
-    store.delete(fullKey);
-
-    // At this point file SHOULD BE DELETED
-    assertFalse(fs.exists(path));
-  }
-
-  /**
-   * Check that isPageBlobKey works as expected. This assumes that
-   * in the test configuration, the list of supported page blob directories
-   * only includes "pageBlobs". That's why this test is made specific
-   * to this subclass.
-   */
-  @Test
-  public void testIsPageBlobKey() {
-    AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
-
-    // Use literal strings so it's easier to understand the tests.
-    // In case the constant changes, we want to know about it so we can update this test.
-    assertEquals(AzureBlobStorageTestAccount.DEFAULT_PAGE_BLOB_DIRECTORY, "pageBlobs");
-
-    // URI prefix for test environment.
-    String uriPrefix = "file:///";
-
-    // negative tests
-    String[] negativeKeys = { "", "/", "bar", "bar/", "bar/pageBlobs", "bar/pageBlobs/foo",
-        "bar/pageBlobs/foo/", "/pageBlobs/", "/pageBlobs", "pageBlobs", "pageBlobsxyz/" };
-    for (String s : negativeKeys) {
-      assertFalse(store.isPageBlobKey(s));
-      assertFalse(store.isPageBlobKey(uriPrefix + s));
-    }
-
-    // positive tests
-    String[] positiveKeys = { "pageBlobs/", "pageBlobs/foo/", "pageBlobs/foo/bar/" };
-    for (String s : positiveKeys) {
-      assertTrue(store.isPageBlobKey(s));
-      assertTrue(store.isPageBlobKey(uriPrefix + s));
-    }
-  }
-
-  /**
-   * Test that isAtomicRenameKey() works as expected.
-   */
-  @Test
-  public void testIsAtomicRenameKey() {
-
-    AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
-
-    // We want to know if the default configuration changes so we can fix
-    // this test.
-    assertEquals(AzureBlobStorageTestAccount.DEFAULT_ATOMIC_RENAME_DIRECTORIES,
-        "/atomicRenameDir1,/atomicRenameDir2");
-
-    // URI prefix for test environment.
-    String uriPrefix = "file:///";
-
-    // negative tests
-    String[] negativeKeys = { "", "/", "bar", "bar/", "bar/hbase",
-        "bar/hbase/foo", "bar/hbase/foo/", "/hbase/", "/hbase", "hbase",
-        "hbasexyz/", "foo/atomicRenameDir1/"};
-    for (String s : negativeKeys) {
-      assertFalse(store.isAtomicRenameKey(s));
-      assertFalse(store.isAtomicRenameKey(uriPrefix + s));
-    }
-
-    // Positive tests. The directories for atomic rename are /hbase
-    // plus the ones in the configuration (DEFAULT_ATOMIC_RENAME_DIRECTORIES
-    // for this test).
-    String[] positiveKeys = { "hbase/", "hbase/foo/", "hbase/foo/bar/",
-        "atomicRenameDir1/foo/", "atomicRenameDir2/bar/"};
-    for (String s : positiveKeys) {
-      assertTrue(store.isAtomicRenameKey(s));
-      assertTrue(store.isAtomicRenameKey(uriPrefix + s));
-    }
-  }
-
-  /**
-   * Tests fs.mkdir() function to create a target blob while another thread
-   * is holding the lease on the blob. mkdir should not fail since the blob
-   * already exists.
-   * This is a scenario that would happen in HBase distributed log splitting.
-   * Multiple threads will try to create and update "recovered.edits" folder
-   * under the same path.
-   */
-  @Test
-  public void testMkdirOnExistingFolderWithLease() throws Exception {
-    SelfRenewingLease lease;
-    final String FILE_KEY = "folderWithLease";
-    // Create the folder
-    fs.mkdirs(new Path(FILE_KEY));
-    NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs;
-    String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(FILE_KEY)));
-    AzureNativeFileSystemStore store = nfs.getStore();
-    // Acquire the lease on the folder
-    lease = store.acquireLease(fullKey);
-    assertTrue(lease.getLeaseID() != null);
-    // Try to create the same folder
-    store.storeEmptyFolder(fullKey,
-      nfs.createPermissionStatus(FsPermission.getDirDefault()));
-    lease.free();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
index aa1e4f7..20d45b2 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
@@ -21,6 +21,10 @@ package org.apache.hadoop.fs.azure;
 import java.io.IOException;
 import org.junit.Ignore;
 
+/**
+ * Run {@link NativeAzureFileSystemBaseTest} tests against a mocked store,
+ * skipping tests of unsupported features
+ */
 public class TestNativeAzureFileSystemMocked extends
     NativeAzureFileSystemBaseTest {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
index 4c2df8d..7f63295 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
@@ -18,41 +18,27 @@
 
 package org.apache.hadoop.fs.azure;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
 import java.io.ByteArrayInputStream;
 import java.io.InputStream;
 import java.io.OutputStream;
 
 import org.apache.hadoop.fs.Path;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 
 /**
  * Tests for the upload, buffering and flush logic in WASB.
  */
-public class TestNativeAzureFileSystemUploadLogic {
-  private AzureBlobStorageTestAccount testAccount;
+public class TestNativeAzureFileSystemUploadLogic extends AbstractWasbTestBase {
 
   // Just an arbitrary number so that the values I write have a predictable
   // pattern: 0, 1, 2, .. , 45, 46, 0, 1, 2, ...
   static final int byteValuePeriod = 47;
 
-  @Before
-  public void setUp() throws Exception {
-    testAccount = AzureBlobStorageTestAccount.createMock();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (testAccount != null) {
-      testAccount.cleanup();
-      testAccount = null;
-    }
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    return AzureBlobStorageTestAccount.createMock();
   }
 
   /**
@@ -126,9 +112,9 @@ public class TestNativeAzureFileSystemUploadLogic {
    * @param expectedSize The expected size of the data in there.
    */
   private void assertDataInFile(Path file, int expectedSize) throws Exception {
-    InputStream inStream = testAccount.getFileSystem().open(file);
-    assertDataInStream(inStream, expectedSize);
-    inStream.close();
+    try(InputStream inStream = getFileSystem().open(file)) {
+      assertDataInStream(inStream, expectedSize);
+    }
   }
 
   /**
@@ -139,7 +125,7 @@ public class TestNativeAzureFileSystemUploadLogic {
   private void assertDataInTempBlob(int expectedSize) throws Exception {
     // Look for the temporary upload blob in the backing store.
     InMemoryBlockBlobStore backingStore =
-        testAccount.getMockStorage().getBackingStore();
+        getTestAccount().getMockStorage().getBackingStore();
     String tempKey = null;
     for (String key : backingStore.getKeys()) {
       if (key.contains(NativeAzureFileSystem.AZURE_TEMP_FOLDER)) {
@@ -149,9 +135,10 @@ public class TestNativeAzureFileSystemUploadLogic {
       }
     }
     assertNotNull(tempKey);
-    InputStream inStream = new ByteArrayInputStream(backingStore.getContent(tempKey));
-    assertDataInStream(inStream, expectedSize);
-    inStream.close();
+    try (InputStream inStream = new ByteArrayInputStream(
+        backingStore.getContent(tempKey))) {
+      assertDataInStream(inStream, expectedSize);
+    }
   }
 
   /**
@@ -162,25 +149,30 @@ public class TestNativeAzureFileSystemUploadLogic {
    */
   private void testConsistencyAfterManyFlushes(FlushFrequencyVariation variation)
       throws Exception {
-    Path uploadedFile = new Path("/uploadedFile");
-    OutputStream outStream = testAccount.getFileSystem().create(uploadedFile);
-    final int totalSize = 9123;
-    int flushPeriod;
-    switch (variation) {
-      case BeforeSingleBufferFull: flushPeriod = 300; break;
-      case AfterSingleBufferFull: flushPeriod = 600; break;
-      case AfterAllRingBufferFull: flushPeriod = 1600; break;
-      default:
-        throw new IllegalArgumentException("Unknown variation: " + variation);
-    }
-    for (int i = 0; i < totalSize; i++) {
-      outStream.write(i % byteValuePeriod);
-      if ((i + 1) % flushPeriod == 0) {
-        outStream.flush();
-        assertDataInTempBlob(i + 1);
+    Path uploadedFile = methodPath();
+    try {
+      OutputStream outStream = getFileSystem().create(uploadedFile);
+      final int totalSize = 9123;
+      int flushPeriod;
+      switch (variation) {
+        case BeforeSingleBufferFull: flushPeriod = 300; break;
+        case AfterSingleBufferFull: flushPeriod = 600; break;
+        case AfterAllRingBufferFull: flushPeriod = 1600; break;
+        default:
+          throw new IllegalArgumentException("Unknown variation: " + variation);
       }
+      for (int i = 0; i < totalSize; i++) {
+        outStream.write(i % byteValuePeriod);
+        if ((i + 1) % flushPeriod == 0) {
+          outStream.flush();
+          assertDataInTempBlob(i + 1);
+        }
+      }
+      outStream.close();
+      assertDataInFile(uploadedFile, totalSize);
+    } finally {
+      getFileSystem().delete(uploadedFile, false);
+
     }
-    outStream.close();
-    assertDataInFile(uploadedFile, totalSize);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
index 544d6ab..303a89a 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
@@ -18,11 +18,6 @@
 
 package org.apache.hadoop.fs.azure;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
 import java.util.HashMap;
 
 import org.apache.hadoop.fs.FileStatus;
@@ -37,7 +32,8 @@ import org.junit.Test;
  * Tests that WASB handles things gracefully when users add blobs to the Azure
  * Storage container from outside WASB's control.
  */
-public class TestOutOfBandAzureBlobOperations {
+public class TestOutOfBandAzureBlobOperations
+    extends AbstractWasbTestWithTimeout {
   private AzureBlobStorageTestAccount testAccount;
   private FileSystem fs;
   private InMemoryBlockBlobStore backingStore;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
deleted file mode 100644
index 60b01c6..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.microsoft.azure.storage.blob.BlobOutputStream;
-import com.microsoft.azure.storage.blob.CloudBlockBlob;
-
-public class TestOutOfBandAzureBlobOperationsLive {
-  private FileSystem fs;
-  private AzureBlobStorageTestAccount testAccount;
-
-  @Before
-  public void setUp() throws Exception {
-    testAccount = AzureBlobStorageTestAccount.create();
-    if (testAccount != null) {
-      fs = testAccount.getFileSystem();
-    }
-    assumeNotNull(testAccount);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (testAccount != null) {
-      testAccount.cleanup();
-      testAccount = null;
-      fs = null;
-    }
-  }
-
-  // scenario for this particular test described at MONARCH-HADOOP-764
-  // creating a file out-of-band would confuse mkdirs("<oobfilesUncleFolder>")
-  // eg oob creation of "user/<name>/testFolder/a/input/file"
-  // Then wasb creation of "user/<name>/testFolder/a/output" fails
-  @Test
-  public void outOfBandFolder_uncleMkdirs() throws Exception {
-
-    // NOTE: manual use of CloubBlockBlob targets working directory explicitly.
-    // WASB driver methods prepend working directory implicitly.
-    String workingDir = "user/"
-        + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
-
-    CloudBlockBlob blob = testAccount.getBlobReference(workingDir
-        + "testFolder1/a/input/file");
-    BlobOutputStream s = blob.openOutputStream();
-    s.close();
-    assertTrue(fs.exists(new Path("testFolder1/a/input/file")));
-
-    Path targetFolder = new Path("testFolder1/a/output");
-    assertTrue(fs.mkdirs(targetFolder));
-  }
-
-  // scenario for this particular test described at MONARCH-HADOOP-764
-  @Test
-  public void outOfBandFolder_parentDelete() throws Exception {
-
-    // NOTE: manual use of CloubBlockBlob targets working directory explicitly.
-    // WASB driver methods prepend working directory implicitly.
-    String workingDir = "user/"
-        + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
-    CloudBlockBlob blob = testAccount.getBlobReference(workingDir
-        + "testFolder2/a/input/file");
-    BlobOutputStream s = blob.openOutputStream();
-    s.close();
-    assertTrue(fs.exists(new Path("testFolder2/a/input/file")));
-
-    Path targetFolder = new Path("testFolder2/a/input");
-    assertTrue(fs.delete(targetFolder, true));
-  }
-
-  @Test
-  public void outOfBandFolder_rootFileDelete() throws Exception {
-
-    CloudBlockBlob blob = testAccount.getBlobReference("fileY");
-    BlobOutputStream s = blob.openOutputStream();
-    s.close();
-    assertTrue(fs.exists(new Path("/fileY")));
-    assertTrue(fs.delete(new Path("/fileY"), true));
-  }
-
-  @Test
-  public void outOfBandFolder_firstLevelFolderDelete() throws Exception {
-
-    CloudBlockBlob blob = testAccount.getBlobReference("folderW/file");
-    BlobOutputStream s = blob.openOutputStream();
-    s.close();
-    assertTrue(fs.exists(new Path("/folderW")));
-    assertTrue(fs.exists(new Path("/folderW/file")));
-    assertTrue(fs.delete(new Path("/folderW"), true));
-  }
-
-  // scenario for this particular test described at MONARCH-HADOOP-764
-  @Test
-  public void outOfBandFolder_siblingCreate() throws Exception {
-
-    // NOTE: manual use of CloubBlockBlob targets working directory explicitly.
-    // WASB driver methods prepend working directory implicitly.
-    String workingDir = "user/"
-        + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
-    CloudBlockBlob blob = testAccount.getBlobReference(workingDir
-        + "testFolder3/a/input/file");
-    BlobOutputStream s = blob.openOutputStream();
-    s.close();
-    assertTrue(fs.exists(new Path("testFolder3/a/input/file")));
-
-    Path targetFile = new Path("testFolder3/a/input/file2");
-    FSDataOutputStream s2 = fs.create(targetFile);
-    s2.close();
-  }
-
-  // scenario for this particular test described at MONARCH-HADOOP-764
-  // creating a new file in the root folder
-  @Test
-  public void outOfBandFolder_create_rootDir() throws Exception {
-    Path targetFile = new Path("/newInRoot");
-    FSDataOutputStream s2 = fs.create(targetFile);
-    s2.close();
-  }
-
-  // scenario for this particular test described at MONARCH-HADOOP-764
-  @Test
-  public void outOfBandFolder_rename() throws Exception {
-
-    // NOTE: manual use of CloubBlockBlob targets working directory explicitly.
-    // WASB driver methods prepend working directory implicitly.
-    String workingDir = "user/"
-        + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
-    CloudBlockBlob blob = testAccount.getBlobReference(workingDir
-        + "testFolder4/a/input/file");
-    BlobOutputStream s = blob.openOutputStream();
-    s.close();
-
-    Path srcFilePath = new Path("testFolder4/a/input/file");
-    assertTrue(fs.exists(srcFilePath));
-
-    Path destFilePath = new Path("testFolder4/a/input/file2");
-    fs.rename(srcFilePath, destFilePath);
-  }
-
-  // Verify that you can rename a file which is the only file in an implicit folder in the
-  // WASB file system.
-  // scenario for this particular test described at MONARCH-HADOOP-892
-  @Test
-  public void outOfBandSingleFile_rename() throws Exception {
-
-    //NOTE: manual use of CloubBlockBlob targets working directory explicitly.
-    //       WASB driver methods prepend working directory implicitly.
-    String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
-    CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder5/a/input/file");
-    BlobOutputStream s = blob.openOutputStream();
-    s.close();
-
-    Path srcFilePath = new Path("testFolder5/a/input/file");
-    assertTrue(fs.exists(srcFilePath));
-
-    Path destFilePath = new Path("testFolder5/file2");
-    fs.rename(srcFilePath, destFilePath);
-  }
-
-  // WASB must force explicit parent directories in create, delete, mkdirs, rename.
-  // scenario for this particular test described at MONARCH-HADOOP-764
-  @Test
-  public void outOfBandFolder_rename_rootLevelFiles() throws Exception {
-
-    // NOTE: manual use of CloubBlockBlob targets working directory explicitly.
-    // WASB driver methods prepend working directory implicitly.
-    CloudBlockBlob blob = testAccount.getBlobReference("fileX");
-    BlobOutputStream s = blob.openOutputStream();
-    s.close();
-
-    Path srcFilePath = new Path("/fileX");
-    assertTrue(fs.exists(srcFilePath));
-
-    Path destFilePath = new Path("/fileXrename");
-    fs.rename(srcFilePath, destFilePath);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
deleted file mode 100644
index 41b8386..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
+++ /dev/null
@@ -1,355 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.AzureException;
-import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Write data into a page blob and verify you can read back all of it
- * or just a part of it.
- */
-public class TestReadAndSeekPageBlobAfterWrite {
-  private static final Log LOG = LogFactory.getLog(TestReadAndSeekPageBlobAfterWrite.class);
-
-  private FileSystem fs;
-  private AzureBlobStorageTestAccount testAccount;
-  private byte[] randomData;
-
-  // Page blob physical page size
-  private static final int PAGE_SIZE = PageBlobFormatHelpers.PAGE_SIZE;
-
-  // Size of data on page (excluding header)
-  private static final int PAGE_DATA_SIZE = PAGE_SIZE - PageBlobFormatHelpers.PAGE_HEADER_SIZE;
-  private static final int MAX_BYTES = 33554432; // maximum bytes in a file that we'll test
-  private static final int MAX_PAGES = MAX_BYTES / PAGE_SIZE; // maximum number of pages we'll test
-  private Random rand = new Random();
-
-  // A key with a prefix under /pageBlobs, which for the test file system will
-  // force use of a page blob.
-  private static final String KEY = "/pageBlobs/file.dat";
-  private static final Path PATH = new Path(KEY); // path of page blob file to read and write
-
-  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
-    return AzureBlobStorageTestAccount.create();
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    testAccount = createTestAccount();
-    if (testAccount != null) {
-      fs = testAccount.getFileSystem();
-    }
-    assumeNotNull(testAccount);
-
-    // Make sure we are using an integral number of pages.
-    assertEquals(0, MAX_BYTES % PAGE_SIZE);
-
-    // load an in-memory array of random data
-    randomData = new byte[PAGE_SIZE * MAX_PAGES];
-    rand.nextBytes(randomData);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (testAccount != null) {
-      testAccount.cleanup();
-      testAccount = null;
-      fs = null;
-    }
-  }
-
-  /**
-   * Make sure the file name (key) is a page blob file name. If anybody changes that,
-   * we need to come back and update this test class.
-   */
-  @Test
-  public void testIsPageBlobFileName() {
-    AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
-    String[] a = KEY.split("/");
-    String key2 = a[1] + "/";
-    assertTrue(store.isPageBlobKey(key2));
-  }
-
-  /**
-   * For a set of different file sizes, write some random data to a page blob,
-   * read it back, and compare that what was read is the same as what was written.
-   */
-  @Test
-  public void testReadAfterWriteRandomData() throws IOException {
-
-    // local shorthand
-    final int PDS = PAGE_DATA_SIZE;
-
-    // Test for sizes at and near page boundaries
-    int[] dataSizes = {
-
-        // on first page
-        0, 1, 2, 3,
-
-        // Near first physical page boundary (because the implementation
-        // stores PDS + the page header size bytes on each page).
-        PDS - 1, PDS, PDS + 1, PDS + 2, PDS + 3,
-
-        // near second physical page boundary
-        (2 * PDS) - 1, (2 * PDS), (2 * PDS) + 1, (2 * PDS) + 2, (2 * PDS) + 3,
-
-        // near tenth physical page boundary
-        (10 * PDS) - 1, (10 * PDS), (10 * PDS) + 1, (10 * PDS) + 2, (10 * PDS) + 3,
-
-        // test one big size, >> 4MB (an internal buffer size in the code)
-        MAX_BYTES
-    };
-
-    for (int i : dataSizes) {
-      testReadAfterWriteRandomData(i);
-    }
-  }
-
-  private void testReadAfterWriteRandomData(int size) throws IOException {
-    writeRandomData(size);
-    readRandomDataAndVerify(size);
-  }
-
-  /**
-   * Read "size" bytes of data and verify that what was read and what was written
-   * are the same.
-   */
-  private void readRandomDataAndVerify(int size) throws AzureException, IOException {
-    byte[] b = new byte[size];
-    FSDataInputStream stream = fs.open(PATH);
-    int bytesRead = stream.read(b);
-    stream.close();
-    assertEquals(bytesRead, size);
-
-    // compare the data read to the data written
-    assertTrue(comparePrefix(randomData, b, size));
-  }
-
-  // return true if the beginning "size" values of the arrays are the same
-  private boolean comparePrefix(byte[] a, byte[] b, int size) {
-    if (a.length < size || b.length < size) {
-      return false;
-    }
-    for (int i = 0; i < size; i++) {
-      if (a[i] != b[i]) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  // Write a specified amount of random data to the file path for this test class.
-  private void writeRandomData(int size) throws IOException {
-    OutputStream output = fs.create(PATH);
-    output.write(randomData, 0, size);
-    output.close();
-  }
-
-  /**
-   * Write data to a page blob, open it, seek, and then read a range of data.
-   * Then compare that the data read from that range is the same as the data originally written.
-   */
-  @Test
-  public void testPageBlobSeekAndReadAfterWrite() throws IOException {
-    writeRandomData(PAGE_SIZE * MAX_PAGES);
-    int recordSize = 100;
-    byte[] b = new byte[recordSize];
-    FSDataInputStream stream = fs.open(PATH);
-
-    // Seek to a boundary around the middle of the 6th page
-    int seekPosition = 5 * PAGE_SIZE + 250;
-    stream.seek(seekPosition);
-
-    // Read a record's worth of bytes and verify results
-    int bytesRead = stream.read(b);
-    verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
-
-    // Seek to another spot and read a record greater than a page
-    seekPosition = 10 * PAGE_SIZE + 250;
-    stream.seek(seekPosition);
-    recordSize = 1000;
-    b = new byte[recordSize];
-    bytesRead = stream.read(b);
-    verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
-
-    // Read the last 100 bytes of the file
-    recordSize = 100;
-    seekPosition = PAGE_SIZE * MAX_PAGES - recordSize;
-    stream.seek(seekPosition);
-    b = new byte[recordSize];
-    bytesRead = stream.read(b);
-    verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
-
-    // Read past the end of the file and we should get only partial data.
-    recordSize = 100;
-    seekPosition = PAGE_SIZE * MAX_PAGES - recordSize + 50;
-    stream.seek(seekPosition);
-    b = new byte[recordSize];
-    bytesRead = stream.read(b);
-    assertEquals(50, bytesRead);
-
-    // compare last 50 bytes written with those read
-    byte[] tail = Arrays.copyOfRange(randomData, seekPosition, randomData.length);
-    assertTrue(comparePrefix(tail, b, 50));
-  }
-
-  // Verify that reading a record of data after seeking gives the expected data.
-  private void verifyReadRandomData(byte[] b, int bytesRead, int seekPosition, int recordSize) {
-    byte[] originalRecordData =
-        Arrays.copyOfRange(randomData, seekPosition, seekPosition + recordSize + 1);
-    assertEquals(recordSize, bytesRead);
-    assertTrue(comparePrefix(originalRecordData, b, recordSize));
-  }
-
-  // Test many small flushed writes interspersed with periodic hflush calls.
-  // For manual testing, increase NUM_WRITES to a large number.
-  // The goal for a long-running manual test is to make sure that it finishes
-  // and the close() call does not time out. It also facilitates debugging into
-  // hflush/hsync.
-  @Test
-  public void testManySmallWritesWithHFlush() throws IOException {
-    writeAndReadOneFile(50, 100, 20);
-  }
-
-  /**
-   * Write a total of numWrites * recordLength data to a file, read it back,
-   * and check to make sure what was read is the same as what was written.
-   * The syncInterval is the number of writes after which to call hflush to
-   * force the data to storage.
-   */
-  private void writeAndReadOneFile(int numWrites, int recordLength, int syncInterval) throws IOException {
-    final int NUM_WRITES = numWrites;
-    final int RECORD_LENGTH = recordLength;
-    final int SYNC_INTERVAL = syncInterval;
-
-    // A lower bound on the minimum time we think it will take to do
-    // a write to Azure storage.
-    final long MINIMUM_EXPECTED_TIME = 20;
-    LOG.info("Writing " + NUM_WRITES * RECORD_LENGTH + " bytes to " + PATH.getName());
-    FSDataOutputStream output = fs.create(PATH);
-    int writesSinceHFlush = 0;
-    try {
-
-      // Do a flush and hflush to exercise case for empty write queue in PageBlobOutputStream,
-      // to test concurrent execution gates.
-      output.flush();
-      output.hflush();
-      for (int i = 0; i < NUM_WRITES; i++) {
-        output.write(randomData, i * RECORD_LENGTH, RECORD_LENGTH);
-        writesSinceHFlush++;
-        output.flush();
-        if ((i % SYNC_INTERVAL) == 0) {
-          output.hflush();
-          writesSinceHFlush = 0;
-        }
-      }
-    } finally {
-      long start = Time.monotonicNow();
-      output.close();
-      long end = Time.monotonicNow();
-      LOG.debug("close duration = " + (end - start) + " msec.");
-      if (writesSinceHFlush > 0) {
-        assertTrue(String.format(
-            "close duration with >= 1 pending write is %d, less than minimum expected of %d",
-            end - start, MINIMUM_EXPECTED_TIME),
-            end - start >= MINIMUM_EXPECTED_TIME);
-        }
-    }
-
-    // Read the data back and check it.
-    FSDataInputStream stream = fs.open(PATH);
-    int SIZE = NUM_WRITES * RECORD_LENGTH;
-    byte[] b = new byte[SIZE];
-    try {
-      stream.seek(0);
-      stream.read(b, 0, SIZE);
-      verifyReadRandomData(b, SIZE, 0, SIZE);
-    } finally {
-      stream.close();
-    }
-
-    // delete the file
-    fs.delete(PATH, false);
-  }
-
-  // Test writing to a large file repeatedly as a stress test.
-  // Set the repetitions to a larger number for manual testing
-  // for a longer stress run.
-  @Test
-  public void testLargeFileStress() throws IOException {
-    int numWrites = 32;
-    int recordSize = 1024 * 1024;
-    int syncInterval = 10;
-    int repetitions = 1;
-    for (int i = 0; i < repetitions; i++) {
-      writeAndReadOneFile(numWrites, recordSize, syncInterval);
-    }
-  }
-  
-  // Write to a file repeatedly to verify that it extends.
-  // The page blob file should start out at 128MB and finish at 256MB.
-  @Test(timeout=300000)
-  public void testFileSizeExtension() throws IOException {
-    final int writeSize = 1024 * 1024;
-    final int numWrites = 129;
-    final byte dataByte = 5;
-    byte[] data = new byte[writeSize];
-    Arrays.fill(data, dataByte);
-    FSDataOutputStream output = fs.create(PATH);
-    try {
-      for (int i = 0; i < numWrites; i++) {
-        output.write(data);
-        output.hflush();
-        LOG.debug("total writes = " + (i + 1));
-      }
-    } finally {
-      output.close();
-    }
-
-    // Show that we wrote more than the default page blob file size.
-    assertTrue(numWrites * writeSize > PageBlobOutputStream.PAGE_BLOB_MIN_SIZE);
-
-    // Verify we can list the new size. That will prove we expanded the file.
-    FileStatus[] status = fs.listStatus(PATH);
-    assertTrue(status[0].getLen() == numWrites * writeSize);
-    LOG.debug("Total bytes written to " + PATH + " = " + status[0].getLen());
-    fs.delete(PATH, false);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
index 0bf33d8..0334c39 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
@@ -19,20 +19,23 @@
 package org.apache.hadoop.fs.azure;
 
 import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
-import static org.junit.Assert.assertEquals;
 
 import java.io.File;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-public class TestShellDecryptionKeyProvider {
-  public static final Log LOG = LogFactory
-      .getLog(TestShellDecryptionKeyProvider.class);
+/**
+ * Windows only tests of shell scripts to provide decryption keys.
+ */
+public class TestShellDecryptionKeyProvider
+    extends AbstractWasbTestWithTimeout {
+  public static final Logger LOG = LoggerFactory
+      .getLogger(TestShellDecryptionKeyProvider.class);
   private static File TEST_ROOT_DIR = new File(System.getProperty(
       "test.build.data", "/tmp"), "TestShellDecryptionKeyProvider");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
index 467424b..9d32fb2 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
@@ -18,10 +18,6 @@
 
 package org.apache.hadoop.fs.azure;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -32,7 +28,10 @@ import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 
-public class TestWasbFsck {
+/**
+ * Tests which look at fsck recovery.
+ */
+public class TestWasbFsck extends AbstractWasbTestWithTimeout {
   private AzureBlobStorageTestAccount testAccount;
   private FileSystem fs;
   private InMemoryBlockBlobStore backingStore;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message