hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cnaur...@apache.org
Subject svn commit: r1601781 [5/6] - in /hadoop/common/trunk: hadoop-project/ hadoop-tools/ hadoop-tools/hadoop-azure/ hadoop-tools/hadoop-azure/dev-support/ hadoop-tools/hadoop-azure/src/ hadoop-tools/hadoop-azure/src/config/ hadoop-tools/hadoop-azure/src/mai...
Date Tue, 10 Jun 2014 22:26:47 GMT
Added: hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java?rev=1601781&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java (added)
+++ hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java Tue Jun 10 22:26:45 2014
@@ -0,0 +1,433 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.TimeZone;
+
+import org.apache.commons.httpclient.URIException;
+import org.apache.commons.httpclient.util.URIUtil;
+import org.apache.commons.io.output.ByteArrayOutputStream;
+
+import com.microsoft.windowsazure.storage.CloudStorageAccount;
+import com.microsoft.windowsazure.storage.OperationContext;
+import com.microsoft.windowsazure.storage.RetryPolicyFactory;
+import com.microsoft.windowsazure.storage.StorageCredentials;
+import com.microsoft.windowsazure.storage.StorageException;
+import com.microsoft.windowsazure.storage.StorageUri;
+import com.microsoft.windowsazure.storage.blob.BlobListingDetails;
+import com.microsoft.windowsazure.storage.blob.BlobProperties;
+import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
+import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
+import com.microsoft.windowsazure.storage.blob.CloudBlobDirectory;
+import com.microsoft.windowsazure.storage.blob.CopyState;
+import com.microsoft.windowsazure.storage.blob.ListBlobItem;
+
+/**
+ * A mock implementation of the Azure Storage interaction layer for unit tests.
+ * Just does in-memory storage.
+ */
+public class MockStorageInterface extends StorageInterface {
+  // The in-memory "storage service"; (re)created by each createBlobClient().
+  private InMemoryBlockBlobStore backingStore;
+  // Containers registered via addPreExistingContainer(), consulted by
+  // getContainerReference() to simulate containers created outside WASB.
+  private final ArrayList<PreExistingContainer> preExistingContainers = new ArrayList<MockStorageInterface.PreExistingContainer>();
+  private String baseUriString;
+
+  public InMemoryBlockBlobStore getBackingStore() {
+    return backingStore;
+  }
+
+  /**
+   * Mocks the situation where a container already exists before WASB comes in,
+   * i.e. the situation where a user creates a container then mounts WASB on the
+   * pre-existing container.
+   * 
+   * @param uri
+   *          The URI of the container.
+   * @param metadata
+   *          The metadata on the container.
+   */
+  public void addPreExistingContainer(String uri,
+      HashMap<String, String> metadata) {
+    preExistingContainers.add(new PreExistingContainer(uri, metadata));
+  }
+
+  @Override
+  public void setRetryPolicyFactory(final RetryPolicyFactory retryPolicyFactory) {
+    // Retries are irrelevant for in-memory storage; deliberately a no-op.
+  }
+
+  @Override
+  public void setTimeoutInMs(int timeoutInMs) {
+    // Timeouts are irrelevant for in-memory storage; deliberately a no-op.
+  }
+
+  @Override
+  public void createBlobClient(CloudStorageAccount account) {
+    backingStore = new InMemoryBlockBlobStore();
+  }
+
+  @Override
+  public void createBlobClient(URI baseUri) {
+    // NOTE(review): unlike the credentialed overload below, this one does not
+    // record baseUri, so getContainerReference() would build "null/..." URIs
+    // if this overload were used — confirm only the credentialed path is hit.
+    backingStore = new InMemoryBlockBlobStore();
+  }
+
+  @Override
+  public void createBlobClient(URI baseUri, StorageCredentials credentials) {
+    this.baseUriString = baseUri.toString();
+    backingStore = new InMemoryBlockBlobStore();
+  }
+
+  @Override
+  public StorageCredentials getCredentials() {
+    // Not implemented for mock interface.
+    return null;
+  }
+
+  /**
+   * Returns a container wrapper for the given name, priming it as "created"
+   * (with its stored metadata) when the name matches a pre-existing container.
+   */
+  @Override
+  public CloudBlobContainerWrapper getContainerReference(String name)
+      throws URISyntaxException, StorageException {
+    String fullUri;
+    try {
+      fullUri = baseUriString + "/" + URIUtil.encodePath(name);
+    } catch (URIException e) {
+      throw new RuntimeException("problem encoding fullUri", e);
+    }
+
+    MockCloudBlobContainerWrapper container = new MockCloudBlobContainerWrapper(
+        fullUri, name);
+    // Check if we have a pre-existing container with that name, and prime
+    // the wrapper with that knowledge if it's found.
+    for (PreExistingContainer existing : preExistingContainers) {
+      if (fullUri.equalsIgnoreCase(existing.containerUri)) {
+        // We have a pre-existing container. Mark the wrapper as created and
+        // make sure we use the metadata for it.
+        container.created = true;
+        backingStore.setContainerMetadata(existing.containerMetadata);
+        break;
+      }
+    }
+    return container;
+  }
+
+  /**
+   * In-memory mock of a container: existence is a boolean flag, metadata is
+   * delegated to the backing store.
+   */
+  class MockCloudBlobContainerWrapper extends CloudBlobContainerWrapper {
+    private boolean created = false;
+    private HashMap<String, String> metadata;
+    private final String baseUri;
+    private final String name;
+
+    public MockCloudBlobContainerWrapper(String baseUri, String name) {
+      this.baseUri = baseUri;
+      this.name = name;
+    }
+
+    @Override
+    public String getName() {
+      return name;
+    }
+
+    @Override
+    public boolean exists(OperationContext opContext) throws StorageException {
+      return created;
+    }
+
+    @Override
+    public void create(OperationContext opContext) throws StorageException {
+      created = true;
+      backingStore.setContainerMetadata(metadata);
+    }
+
+    @Override
+    public HashMap<String, String> getMetadata() {
+      return metadata;
+    }
+
+    @Override
+    public void setMetadata(HashMap<String, String> metadata) {
+      this.metadata = metadata;
+    }
+
+    @Override
+    public void downloadAttributes(OperationContext opContext)
+        throws StorageException {
+      metadata = backingStore.getContainerMetadata();
+    }
+
+    @Override
+    public void uploadMetadata(OperationContext opContext)
+        throws StorageException {
+      backingStore.setContainerMetadata(metadata);
+    }
+
+    @Override
+    public CloudBlobDirectoryWrapper getDirectoryReference(String relativePath)
+        throws URISyntaxException, StorageException {
+      return new MockCloudBlobDirectoryWrapper(new URI(fullUriString(
+          relativePath, true)));
+    }
+
+    @Override
+    public CloudBlockBlobWrapper getBlockBlobReference(String relativePath)
+        throws URISyntaxException, StorageException {
+      return new MockCloudBlockBlobWrapper(new URI(fullUriString(relativePath,
+          false)), null, 0);
+    }
+
+    // helper to create full URIs for directory and blob.
+    // use withTrailingSlash=true to get a good path for a directory.
+    private String fullUriString(String relativePath, boolean withTrailingSlash) {
+      String fullUri;
+
+      String baseUri = this.baseUri;
+      if (!baseUri.endsWith("/")) {
+        baseUri += "/";
+      }
+      if (withTrailingSlash && !relativePath.equals("")
+          && !relativePath.endsWith("/")) {
+        relativePath += "/";
+      }
+
+      try {
+        fullUri = baseUri + URIUtil.encodePath(relativePath);
+      } catch (URIException e) {
+        throw new RuntimeException("problem encoding fullUri", e);
+      }
+
+      return fullUri;
+    }
+  }
+
+  // Immutable record of a container that "existed" before WASB was mounted.
+  private static class PreExistingContainer {
+    final String containerUri;
+    final HashMap<String, String> containerMetadata;
+
+    public PreExistingContainer(String uri, HashMap<String, String> metadata) {
+      this.containerUri = uri;
+      this.containerMetadata = metadata;
+    }
+  }
+
+  /**
+   * In-memory mock of a blob directory; listBlobs() synthesizes directory
+   * entries from the flat key namespace of the backing store.
+   */
+  class MockCloudBlobDirectoryWrapper extends CloudBlobDirectoryWrapper {
+    private URI uri;
+
+    public MockCloudBlobDirectoryWrapper(URI uri) {
+      this.uri = uri;
+    }
+
+    @Override
+    public CloudBlobContainer getContainer() throws URISyntaxException,
+        StorageException {
+      return null;
+    }
+
+    @Override
+    public CloudBlobDirectory getParent() throws URISyntaxException,
+        StorageException {
+      return null;
+    }
+
+    @Override
+    public URI getUri() {
+      return uri;
+    }
+
+    @Override
+    public Iterable<ListBlobItem> listBlobs(String prefix,
+        boolean useFlatBlobListing, EnumSet<BlobListingDetails> listingDetails,
+        BlobRequestOptions options, OperationContext opContext)
+        throws URISyntaxException, StorageException {
+      ArrayList<ListBlobItem> ret = new ArrayList<ListBlobItem>();
+      String fullPrefix = prefix == null ? uri.toString() : new URI(
+          uri.getScheme(), uri.getAuthority(), uri.getPath() + prefix,
+          uri.getQuery(), uri.getFragment()).toString();
+      boolean includeMetadata = listingDetails
+          .contains(BlobListingDetails.METADATA);
+      HashSet<String> addedDirectories = new HashSet<String>();
+      for (InMemoryBlockBlobStore.ListBlobEntry current : backingStore
+          .listBlobs(fullPrefix, includeMetadata)) {
+        int indexOfSlash = current.getKey().indexOf('/', fullPrefix.length());
+        if (useFlatBlobListing || indexOfSlash < 0) {
+          ret.add(new MockCloudBlockBlobWrapper(new URI(current.getKey()),
+              current.getMetadata(), current.getContentLength()));
+        } else {
+          String directoryName = current.getKey().substring(0, indexOfSlash);
+          if (!addedDirectories.contains(directoryName)) {
+            // Fix: record the directory name (not the full blob key) so the
+            // contains() check above actually dedups and each directory is
+            // emitted at most once in a non-flat listing.
+            addedDirectories.add(directoryName);
+            ret.add(new MockCloudBlobDirectoryWrapper(new URI(directoryName
+                + "/")));
+          }
+        }
+      }
+      return ret;
+    }
+
+    @Override
+    public StorageUri getStorageUri() {
+      throw new UnsupportedOperationException();
+    }
+
+  }
+
+  /**
+   * In-memory mock of a block blob; content and metadata live in the backing
+   * store, keyed by this blob's URI string.
+   */
+  class MockCloudBlockBlobWrapper extends CloudBlockBlobWrapper {
+    private URI uri;
+    private HashMap<String, String> metadata = new HashMap<String, String>();
+    private BlobProperties properties;
+
+    public MockCloudBlockBlobWrapper(URI uri, HashMap<String, String> metadata,
+        int length) {
+      this.uri = uri;
+      // May be null (e.g. from getBlockBlobReference()); replaces the field's
+      // default empty map in that case.
+      this.metadata = metadata;
+      this.properties = new BlobProperties();
+      this.properties.setLength(length);
+      this.properties.setLastModified(Calendar.getInstance(
+          TimeZone.getTimeZone("UTC")).getTime());
+    }
+
+    // Re-reads length/last-modified (and optionally metadata) from the
+    // backing store, if the blob exists there.
+    private void refreshProperties(boolean getMetadata) {
+      if (backingStore.exists(uri.toString())) {
+        byte[] content = backingStore.getContent(uri.toString());
+        properties = new BlobProperties();
+        properties.setLength(content.length);
+        properties.setLastModified(Calendar.getInstance(
+            TimeZone.getTimeZone("UTC")).getTime());
+        if (getMetadata) {
+          metadata = backingStore.getMetadata(uri.toString());
+        }
+      }
+    }
+
+    @Override
+    public CloudBlobContainer getContainer() throws URISyntaxException,
+        StorageException {
+      return null;
+    }
+
+    @Override
+    public CloudBlobDirectory getParent() throws URISyntaxException,
+        StorageException {
+      return null;
+    }
+
+    @Override
+    public URI getUri() {
+      return uri;
+    }
+
+    @Override
+    public HashMap<String, String> getMetadata() {
+      return metadata;
+    }
+
+    @Override
+    public void setMetadata(HashMap<String, String> metadata) {
+      this.metadata = metadata;
+    }
+
+    @Override
+    public void startCopyFromBlob(CloudBlockBlobWrapper sourceBlob,
+        OperationContext opContext) throws StorageException, URISyntaxException {
+      backingStore.copy(sourceBlob.getUri().toString(), uri.toString());
+      // it would be best if backingStore.properties.CopyState were tracked
+      // If implemented, update azureNativeFileSystemStore.waitForCopyToComplete
+    }
+
+    @Override
+    public CopyState getCopyState() {
+      return this.properties.getCopyState();
+    }
+
+    @Override
+    public void delete(OperationContext opContext) throws StorageException {
+      backingStore.delete(uri.toString());
+    }
+
+    @Override
+    public boolean exists(OperationContext opContext) throws StorageException {
+      return backingStore.exists(uri.toString());
+    }
+
+    @Override
+    public void downloadAttributes(OperationContext opContext)
+        throws StorageException {
+      refreshProperties(true);
+    }
+
+    @Override
+    public BlobProperties getProperties() {
+      return properties;
+    }
+
+    @Override
+    public InputStream openInputStream(BlobRequestOptions options,
+        OperationContext opContext) throws StorageException {
+      return new ByteArrayInputStream(backingStore.getContent(uri.toString()));
+    }
+
+    @Override
+    public OutputStream openOutputStream(BlobRequestOptions options,
+        OperationContext opContext) throws StorageException {
+      return backingStore.upload(uri.toString(), metadata);
+    }
+
+    @Override
+    public void upload(InputStream sourceStream, OperationContext opContext)
+        throws StorageException, IOException {
+      // Buffer the whole stream, then commit it to the store in one shot.
+      ByteArrayOutputStream allContent = new ByteArrayOutputStream();
+      allContent.write(sourceStream);
+      backingStore.setContent(uri.toString(), allContent.toByteArray(),
+          metadata);
+      refreshProperties(false);
+      allContent.close();
+    }
+
+    @Override
+    public void uploadMetadata(OperationContext opContext)
+        throws StorageException {
+      // Rewrites the blob with identical content to attach current metadata.
+      backingStore.setContent(uri.toString(),
+          backingStore.getContent(uri.toString()), metadata);
+    }
+
+    @Override
+    public void uploadProperties(OperationContext opContext)
+        throws StorageException {
+      refreshProperties(false);
+    }
+
+    @Override
+    public void setStreamMinimumReadSizeInBytes(int minimumReadSize) {
+      // Read-size tuning has no effect on in-memory storage; no-op.
+    }
+
+    @Override
+    public void setWriteBlockSizeInBytes(int writeBlockSizeInBytes) {
+      // Block-size tuning has no effect on in-memory storage; no-op.
+    }
+
+    @Override
+    public StorageUri getStorageUri() {
+      throw new UnsupportedOperationException();
+    }
+
+  }
+}

Added: hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java?rev=1601781&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java (added)
+++ hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java Tue Jun 10 22:26:45 2014
@@ -0,0 +1,584 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeNotNull;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.TimeZone;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/*
+ * Tests the Native Azure file system (WASB) against an actual blob store if
+ * provided in the environment.
+ * Subclasses implement createTestAccount() to hit local&mock storage with the same test code.
+ * 
+ * For hand-testing: remove "abstract" keyword and copy in an implementation of createTestAccount
+ * from one of the subclasses
+ */
+public abstract class NativeAzureFileSystemBaseTest {
+
+  private FileSystem fs;
+  private AzureBlobStorageTestAccount testAccount;
+  private final long modifiedTimeErrorMargin = 5 * 1000; // Give it +/-5 seconds
+
+  protected abstract AzureBlobStorageTestAccount createTestAccount()
+      throws Exception;
+
+  @Before
+  public void setUp() throws Exception {
+    // Subclasses decide which storage (mock or live) backs the test account.
+    testAccount = createTestAccount();
+    if (testAccount != null) {
+      fs = testAccount.getFileSystem();
+    }
+    // Skip (rather than fail) the whole test when no account is available,
+    // e.g. when live-storage credentials are not configured.
+    assumeNotNull(testAccount);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (testAccount == null) {
+      return;
+    }
+    // Release the account's resources and drop our references.
+    testAccount.cleanup();
+    testAccount = null;
+    fs = null;
+  }
+
+  @Test
+  public void testCheckingNonExistentOneLetterFile() throws Exception {
+    assertFalse(fs.exists(new Path("/a")));
+  }
+
+  @Test
+  public void testStoreRetrieveFile() throws Exception {
+    // Round-trip a small string through the file system.
+    final String contents = "Testing";
+    Path testFile = new Path("unit-test-file");
+    writeString(testFile, contents);
+    assertTrue(fs.exists(testFile));
+    FileStatus status = fs.getFileStatus(testFile);
+    assertNotNull(status);
+    // By default, files should be have masked permissions
+    // that grant RW to user, and R to group/other
+    assertEquals(new FsPermission((short) 0644), status.getPermission());
+    assertEquals(contents, readString(testFile));
+    fs.delete(testFile, true);
+  }
+
+  @Test
+  public void testStoreDeleteFolder() throws Exception {
+    Path folder = new Path("storeDeleteFolder");
+    assertFalse(fs.exists(folder));
+    assertTrue(fs.mkdirs(folder));
+    assertTrue(fs.exists(folder));
+    FileStatus status = fs.getFileStatus(folder);
+    assertNotNull(status);
+    assertTrue(status.isDirectory());
+    // By default, directories should be have masked permissions
+    // that grant RWX to user, and RX to group/other
+    assertEquals(new FsPermission((short) 0755), status.getPermission());
+    // A recursive delete must remove the folder's contents with it.
+    Path innerFile = new Path(folder, "innerFile");
+    assertTrue(fs.createNewFile(innerFile));
+    assertTrue(fs.exists(innerFile));
+    assertTrue(fs.delete(folder, true));
+    assertFalse(fs.exists(innerFile));
+    assertFalse(fs.exists(folder));
+  }
+
+  @Test
+  public void testFileOwnership() throws Exception {
+    // Files should be owned by the user that created them.
+    Path file = new Path("ownershipTestFile");
+    writeString(file, "Testing");
+    testOwnership(file);
+  }
+
+  @Test
+  public void testFolderOwnership() throws Exception {
+    // Directories should be owned by the user that created them.
+    Path folder = new Path("ownershipTestFolder");
+    fs.mkdirs(folder);
+    testOwnership(folder);
+  }
+
+  // Asserts that the given path is owned by the current user, then removes it.
+  private void testOwnership(Path pathUnderTest) throws IOException {
+    UserGroupInformation me = UserGroupInformation.getCurrentUser();
+    FileStatus status = fs.getFileStatus(pathUnderTest);
+    assertEquals(status.getOwner(), me.getShortUserName());
+    fs.delete(pathUnderTest, true);
+  }
+
+  // Returns a copy of the given permission with the sticky bit cleared.
+  private static FsPermission ignoreStickyBit(FsPermission original) {
+    return new FsPermission(
+        original.getUserAction(),
+        original.getGroupAction(),
+        original.getOtherAction());
+  }
+
+  // When FsPermission applies a UMask, it loses sticky bit information.
+  // And since we always apply UMask, we should ignore whether the sticky
+  // bit is equal or not.
+  // Asserts permission equality modulo the sticky bit.
+  private static void assertEqualsIgnoreStickyBit(FsPermission expected,
+      FsPermission actual) {
+    assertEquals(ignoreStickyBit(expected), ignoreStickyBit(actual));
+  }
+
+  @Test
+  public void testFilePermissions() throws Exception {
+    Path testFile = new Path("permissionTestFile");
+    // Fix: use an octal literal — (short) 644 decimal was almost certainly
+    // not the intended rw-r--r-- permission (compare (short) 0644 above).
+    FsPermission permission = FsPermission.createImmutable((short) 0644);
+    createEmptyFile(testFile, permission);
+    // The permission we set must round-trip through getFileStatus.
+    FileStatus ret = fs.getFileStatus(testFile);
+    assertEqualsIgnoreStickyBit(permission, ret.getPermission());
+    fs.delete(testFile, true);
+  }
+
+  @Test
+  public void testFolderPermissions() throws Exception {
+    Path testFolder = new Path("permissionTestFolder");
+    // Fix: use an octal literal — (short) 644 decimal was almost certainly
+    // not the intended rw-r--r-- permission.
+    FsPermission permission = FsPermission.createImmutable((short) 0644);
+    fs.mkdirs(testFolder, permission);
+    // The permission we set must round-trip through getFileStatus.
+    FileStatus ret = fs.getFileStatus(testFolder);
+    assertEqualsIgnoreStickyBit(permission, ret.getPermission());
+    fs.delete(testFolder, true);
+  }
+
+  @Test
+  public void testDeepFileCreation() throws Exception {
+    Path testFile = new Path("deep/file/creation/test");
+    // Fix: use an octal literal — (short) 644 decimal was almost certainly
+    // not the intended rw-r--r-- permission.
+    FsPermission permission = FsPermission.createImmutable((short) 0644);
+    createEmptyFile(testFile, permission);
+    // Creating a deep file must materialize all implicit parent folders.
+    assertTrue(fs.exists(testFile));
+    assertTrue(fs.exists(new Path("deep")));
+    assertTrue(fs.exists(new Path("deep/file/creation")));
+    FileStatus ret = fs.getFileStatus(new Path("deep/file"));
+    assertTrue(ret.isDirectory());
+    assertEqualsIgnoreStickyBit(permission, ret.getPermission());
+    assertTrue(fs.delete(new Path("deep"), true));
+    assertFalse(fs.exists(testFile));
+
+    // An alternative test scenario would've been to delete the file first,
+    // and then check for the existence of the upper folders still. But that
+    // doesn't actually work as expected right now.
+  }
+
+  // The different shapes of source path exercised by testRename().
+  private static enum RenameVariation {
+    NormalFileName, SourceInAFolder, SourceWithSpace, SourceWithPlusAndPercent
+  }
+
+  @Test
+  public void testRename() throws Exception {
+    // Exercise rename for every source-path variation, including names with
+    // spaces and URI-special characters.
+    for (RenameVariation variation : RenameVariation.values()) {
+      System.out.printf("Rename variation: %s\n", variation);
+      final Path originalFile;
+      if (variation == RenameVariation.NormalFileName) {
+        originalFile = new Path("fileToRename");
+      } else if (variation == RenameVariation.SourceInAFolder) {
+        originalFile = new Path("file/to/rename");
+      } else if (variation == RenameVariation.SourceWithSpace) {
+        originalFile = new Path("file to rename");
+      } else if (variation == RenameVariation.SourceWithPlusAndPercent) {
+        originalFile = new Path("file+to%rename");
+      } else {
+        throw new Exception("Unknown variation");
+      }
+      Path destinationFile = new Path("file/resting/destination");
+      assertTrue(fs.createNewFile(originalFile));
+      assertTrue(fs.exists(originalFile));
+      // Rename must fail while the destination's parent does not exist yet.
+      assertFalse(fs.rename(originalFile, destinationFile));
+      assertTrue(fs.mkdirs(destinationFile.getParent()));
+      assertTrue(fs.rename(originalFile, destinationFile));
+      assertTrue(fs.exists(destinationFile));
+      assertFalse(fs.exists(originalFile));
+      fs.delete(destinationFile.getParent(), true);
+    }
+  }
+
+  @Test
+  public void testRenameImplicitFolder() throws Exception {
+    Path testFile = new Path("deep/file/rename/test");
+    // Fix: use an octal literal — (short) 644 decimal was almost certainly
+    // not the intended rw-r--r-- permission.
+    FsPermission permission = FsPermission.createImmutable((short) 0644);
+    createEmptyFile(testFile, permission);
+    // Renaming the implicit folder "deep/file" must carry its contents along.
+    assertTrue(fs.rename(new Path("deep/file"), new Path("deep/renamed")));
+    assertFalse(fs.exists(testFile));
+    FileStatus newStatus = fs
+        .getFileStatus(new Path("deep/renamed/rename/test"));
+    assertNotNull(newStatus);
+    assertEqualsIgnoreStickyBit(permission, newStatus.getPermission());
+    assertTrue(fs.delete(new Path("deep"), true));
+  }
+
+  // Which of folder / inner file exist before the rename in testRenameFolder().
+  private static enum RenameFolderVariation {
+    CreateFolderAndInnerFile, CreateJustInnerFile, CreateJustFolder
+  }
+
+  @Test
+  public void testRenameFolder() throws Exception {
+    for (RenameFolderVariation variation : RenameFolderVariation.values()) {
+      Path originalFolder = new Path("folderToRename");
+      if (variation != RenameFolderVariation.CreateJustInnerFile){
+        assertTrue(fs.mkdirs(originalFolder));
+      }
+      Path innerFile = new Path(originalFolder, "innerFile");
+      if (variation != RenameFolderVariation.CreateJustFolder){
+        assertTrue(fs.createNewFile(innerFile));
+      }
+      Path destination = new Path("renamedFolder");
+      assertTrue(fs.rename(originalFolder, destination));
+      assertTrue(fs.exists(destination));
+      if (variation != RenameFolderVariation.CreateJustFolder){
+        assertTrue(fs.exists(new Path(destination, innerFile.getName())));
+      }
+      assertFalse(fs.exists(originalFolder));
+      assertFalse(fs.exists(innerFile));
+      fs.delete(destination, true);
+    }
+  }
+
+  @Test
+  public void testCopyFromLocalFileSystem() throws Exception {
+    // Source lives on the local file system, rooted at test.build.data
+    // (or "azure_test" when the property is unset).
+    Path localFilePath = new Path(System.getProperty("test.build.data",
+        "azure_test"));
+    FileSystem localFs = FileSystem.get(new Configuration());
+    localFs.delete(localFilePath, true);
+    try {
+      writeString(localFs, localFilePath, "Testing");
+      Path dstPath = new Path("copiedFromLocal");
+      // Copy local -> WASB without deleting the source.
+      assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false,
+          fs.getConf()));
+      assertTrue(fs.exists(dstPath));
+      assertEquals("Testing", readString(fs, dstPath));
+      fs.delete(dstPath, true);
+    } finally {
+      // Always clean up the local scratch file.
+      localFs.delete(localFilePath, true);
+    }
+  }
+
+  @Test
+  public void testListDirectory() throws Exception {
+    Path rootFolder = new Path("testingList");
+    assertTrue(fs.mkdirs(rootFolder));
+    // A freshly created directory lists as empty.
+    FileStatus[] entries = fs.listStatus(rootFolder);
+    assertEquals(0, entries.length);
+    // Add a subdirectory; the root now lists exactly one directory entry.
+    Path innerFolder = new Path(rootFolder, "inner");
+    assertTrue(fs.mkdirs(innerFolder));
+    entries = fs.listStatus(rootFolder);
+    assertEquals(1, entries.length);
+    assertTrue(entries[0].isDirectory());
+    // A file inside the subdirectory must not leak into the root listing.
+    Path innerFile = new Path(innerFolder, "innerFile");
+    writeString(innerFile, "testing");
+    entries = fs.listStatus(rootFolder);
+    assertEquals(1, entries.length);
+    assertTrue(entries[0].isDirectory());
+    entries = fs.listStatus(innerFolder);
+    assertEquals(1, entries.length);
+    assertFalse(entries[0].isDirectory());
+    assertTrue(fs.delete(rootFolder, true));
+  }
+
+  @Test
+  public void testStatistics() throws Exception {
+    // Statistics are global per scheme, so reset before measuring.
+    FileSystem.clearStatistics();
+    FileSystem.Statistics stats = FileSystem.getStatistics("wasb",
+        NativeAzureFileSystem.class);
+    assertEquals(0, stats.getBytesRead());
+    assertEquals(0, stats.getBytesWritten());
+    Path newFile = new Path("testStats");
+    // Writing 8 bytes bumps bytesWritten by 8 and leaves bytesRead at 0.
+    writeString(newFile, "12345678");
+    assertEquals(8, stats.getBytesWritten());
+    assertEquals(0, stats.getBytesRead());
+    String readBack = readString(newFile);
+    assertEquals("12345678", readBack);
+    assertEquals(8, stats.getBytesRead());
+    assertEquals(8, stats.getBytesWritten());
+    // Deleting must not change either counter.
+    assertTrue(fs.delete(newFile, true));
+    assertEquals(8, stats.getBytesRead());
+    assertEquals(8, stats.getBytesWritten());
+  }
+
+  @Test
+  public void testUriEncoding() throws Exception {
+    // "%5F" is the URI encoding of "_"; the store must treat the name
+    // literally and not decode it during list or rename.
+    fs.create(new Path("p/t%5Fe")).close();
+    FileStatus[] listing = fs.listStatus(new Path("p"));
+    assertEquals(1, listing.length);
+    assertEquals("t%5Fe", listing[0].getPath().getName());
+    assertTrue(fs.rename(new Path("p"), new Path("q")));
+    assertTrue(fs.delete(new Path("q"), true));
+  }
+
+  @Test
+  public void testUriEncodingMoreComplexCharacters() throws Exception {
+    // Names built from URI-reserved characters plus the percent sign must
+    // survive create/list/stat/open/delete without mangling.
+    String fileName = "!#$'()*;=[]%";
+    String directoryName = "*;=[]%!#$'()";
+    Path filePath = new Path(directoryName, fileName);
+    fs.create(filePath).close();
+    FileStatus[] listing = fs.listStatus(new Path(directoryName));
+    assertEquals(1, listing.length);
+    assertEquals(fileName, listing[0].getPath().getName());
+    FileStatus status = fs.getFileStatus(filePath);
+    assertEquals(fileName, status.getPath().getName());
+    InputStream stream = fs.open(filePath);
+    assertNotNull(stream);
+    stream.close();
+    assertTrue(fs.delete(filePath, true));
+    assertTrue(fs.delete(new Path(directoryName), true));
+  }
+
+  @Test
+  public void testReadingDirectoryAsFile() throws Exception {
+    Path dir = new Path("/x");
+    assertTrue(fs.mkdirs(dir));
+    try {
+      // Opening a directory as a file must throw FileNotFoundException.
+      fs.open(dir).close();
+      // Fix: fail() states the intent directly, instead of assertTrue(msg,
+      // false). (Its AssertionError is not caught by the catch below.)
+      fail("Should've thrown");
+    } catch (FileNotFoundException ex) {
+      assertEquals("/x is a directory not a file.", ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testCreatingFileOverDirectory() throws Exception {
+    Path dir = new Path("/x");
+    assertTrue(fs.mkdirs(dir));
+    try {
+      // Creating a file on top of an existing directory must throw.
+      fs.create(dir).close();
+      // Fix: fail() states the intent directly, instead of assertTrue(msg,
+      // false). (Its AssertionError is not caught by the catch below.)
+      fail("Should've thrown");
+    } catch (IOException ex) {
+      assertEquals("Cannot create file /x; already exists as a directory.",
+          ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSetPermissionOnFile() throws Exception {
+    Path newFile = new Path("testPermission");
+    OutputStream output = fs.create(newFile);
+    output.write(13);
+    output.close();
+    // Tighten permissions to user-only and verify the change sticks.
+    FsPermission newPermission = new FsPermission((short) 0700);
+    fs.setPermission(newFile, newPermission);
+    FileStatus status = fs.getFileStatus(newFile);
+    assertNotNull(status);
+    assertEquals(newPermission, status.getPermission());
+    // Owner, group and length are untouched by setPermission.
+    assertEquals("supergroup", status.getGroup());
+    assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
+        status.getOwner());
+    assertEquals(1, status.getLen());
+  }
+
+  @Test
+  public void testSetPermissionOnFolder() throws Exception {
+    Path folder = new Path("testPermission");
+    assertTrue(fs.mkdirs(folder));
+    // Tighten to user read/write only and verify it round-trips.
+    FsPermission newPermission = new FsPermission((short) 0600);
+    fs.setPermission(folder, newPermission);
+    FileStatus status = fs.getFileStatus(folder);
+    assertNotNull(status);
+    assertEquals(newPermission, status.getPermission());
+    assertTrue(status.isDirectory());
+  }
+
+  @Test
+  public void testSetOwnerOnFile() throws Exception {
+    // Create a one-byte file, then change owner and group independently;
+    // passing null for one of them should leave that one unchanged.
+    Path newFile = new Path("testOwner");
+    OutputStream output = fs.create(newFile);
+    output.write(13);
+    output.close();
+    fs.setOwner(newFile, "newUser", null);
+    FileStatus newStatus = fs.getFileStatus(newFile);
+    assertNotNull(newStatus);
+    assertEquals("newUser", newStatus.getOwner());
+    // "supergroup" is presumably the filesystem's default group -- TODO
+    // confirm against the NativeAzureFileSystem defaults.
+    assertEquals("supergroup", newStatus.getGroup());
+    assertEquals(1, newStatus.getLen());
+    // Now change only the group; the owner set above must stay intact.
+    fs.setOwner(newFile, null, "newGroup");
+    newStatus = fs.getFileStatus(newFile);
+    assertNotNull(newStatus);
+    assertEquals("newUser", newStatus.getOwner());
+    assertEquals("newGroup", newStatus.getGroup());
+  }
+
+  @Test
+  public void testSetOwnerOnFolder() throws Exception {
+    // Changing the owner of a directory should be reflected in its status.
+    Path folder = new Path("testOwner");
+    assertTrue(fs.mkdirs(folder));
+    fs.setOwner(folder, "newUser", null);
+    FileStatus status = fs.getFileStatus(folder);
+    assertNotNull(status);
+    assertEquals("newUser", status.getOwner());
+    assertTrue(status.isDirectory());
+  }
+
+  @Test
+  public void testModifiedTimeForFile() throws Exception {
+    // A freshly created file should carry a modification time close to "now".
+    Path file = new Path("testFile");
+    fs.create(file).close();
+    testModifiedTime(file);
+  }
+
+  @Test
+  public void testModifiedTimeForFolder() throws Exception {
+    // A freshly created folder should carry a modification time close to "now".
+    Path folder = new Path("testFolder");
+    assertTrue(fs.mkdirs(folder));
+    testModifiedTime(folder);
+  }
+
+  @Test
+  public void testFolderLastModifiedTime() throws Exception {
+    // Verify that creating, renaming and deleting a file inside a folder each
+    // bump the folder's last modified time. The sleeps push the clock past the
+    // comparison error margin so the timestamp change is detectable.
+    Path parentFolder = new Path("testFolder");
+    Path innerFile = new Path(parentFolder, "innerfile");
+    assertTrue(fs.mkdirs(parentFolder));
+
+    // Create file
+    long lastModifiedTime = fs.getFileStatus(parentFolder)
+        .getModificationTime();
+    // Wait at least the error margin
+    Thread.sleep(modifiedTimeErrorMargin + 1);
+    assertTrue(fs.createNewFile(innerFile));
+    // The parent folder last modified time should have changed because we
+    // create an inner file.
+    assertFalse(testModifiedTime(parentFolder, lastModifiedTime));
+    testModifiedTime(parentFolder);
+
+    // Rename file
+    lastModifiedTime = fs.getFileStatus(parentFolder).getModificationTime();
+    Path destFolder = new Path("testDestFolder");
+    assertTrue(fs.mkdirs(destFolder));
+    long destLastModifiedTime = fs.getFileStatus(destFolder)
+        .getModificationTime();
+    Thread.sleep(modifiedTimeErrorMargin + 1);
+    Path destFile = new Path(destFolder, "innerfile");
+    assertTrue(fs.rename(innerFile, destFile));
+    // Both source and destination folder last modified time should have changed
+    // because of renaming.
+    assertFalse(testModifiedTime(parentFolder, lastModifiedTime));
+    assertFalse(testModifiedTime(destFolder, destLastModifiedTime));
+    testModifiedTime(parentFolder);
+    testModifiedTime(destFolder);
+
+    // Delete file
+    destLastModifiedTime = fs.getFileStatus(destFolder).getModificationTime();
+    // Wait at least the error margin
+    Thread.sleep(modifiedTimeErrorMargin + 1);
+    fs.delete(destFile, false);
+    // The parent folder last modified time should have changed because we
+    // delete an inner file.
+    assertFalse(testModifiedTime(destFolder, destLastModifiedTime));
+    testModifiedTime(destFolder);
+  }
+
+  @Test
+  public void testListSlash() throws Exception {
+    // A trailing "/." path component should resolve to the folder itself.
+    Path folder = new Path("/testFolder");
+    Path file = new Path(folder, "testFile");
+    assertTrue(fs.mkdirs(folder));
+    assertTrue(fs.createNewFile(file));
+    FileStatus status = fs.getFileStatus(new Path("/testFolder/."));
+    assertNotNull(status);
+  }
+
+  private boolean testModifiedTime(Path testPath, long time) throws Exception {
+    // True iff the path's modification time is strictly within the error
+    // margin of the given reference time.
+    long lastModified = fs.getFileStatus(testPath).getModificationTime();
+    return Math.abs(lastModified - time) < modifiedTimeErrorMargin;
+  }
+
+  private void testModifiedTime(Path testPath) throws Exception {
+    // Assert that the path's modification time is within the error margin of
+    // the current UTC wall-clock time.
+    Calendar utc = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
+    long currentUtcTime = utc.getTime().getTime();
+    // fileStatus is only needed for the failure message below.
+    FileStatus fileStatus = fs.getFileStatus(testPath);
+    assertTrue("Modification time "
+        + new Date(fileStatus.getModificationTime()) + " is not close to now: "
+        + utc.getTime(), testModifiedTime(testPath, currentUtcTime));
+  }
+
+   private void createEmptyFile(Path testFile, FsPermission permission)
+      throws IOException {
+    // Create (overwriting if present) a zero-length file with the given
+    // permission, 4 KB buffer, replication 1 and 1 KB block size.
+    FSDataOutputStream outputStream = fs.create(testFile, permission, true,
+        4096, (short) 1, 1024, null);
+    outputStream.close();
+  }
+
+  private String readString(Path testFile) throws IOException {
+    // Convenience overload reading from this test's default filesystem.
+    return readString(fs, testFile);
+  }
+
+  private String readString(FileSystem fs, Path testFile) throws IOException {
+    // Read the whole contents of the given file as a string.
+    FSDataInputStream inputStream = fs.open(testFile);
+    try {
+      return readString(inputStream);
+    } finally {
+      // Close in finally so the stream is released even if the read fails.
+      inputStream.close();
+    }
+  }
+
+  private String readString(FSDataInputStream inputStream) throws IOException {
+    // Read up to BUFFER_SIZE characters from the stream and return them as a
+    // string; files at or beyond the buffer size are rejected.
+    BufferedReader reader = new BufferedReader(new InputStreamReader(
+        inputStream));
+    final int BUFFER_SIZE = 1024;
+    char[] buffer = new char[BUFFER_SIZE];
+    int count = reader.read(buffer, 0, BUFFER_SIZE);
+    if (count >= BUFFER_SIZE) {
+      throw new IOException("Exceeded buffer size");
+    }
+    // Closing the reader also closes the underlying input stream.
+    reader.close();
+    // An empty file yields count == -1; treat it as the empty string instead
+    // of letting new String(buffer, 0, -1) throw.
+    return new String(buffer, 0, Math.max(count, 0));
+  }
+
+  private void writeString(Path path, String value) throws IOException {
+    // Convenience overload writing to this test's default filesystem.
+    writeString(fs, path, value);
+  }
+
+  private void writeString(FileSystem fs, Path path, String value)
+      throws IOException {
+    // Overwrite the file at the given path with the given string contents.
+    FSDataOutputStream out = fs.create(path, true);
+    writeString(out, value);
+    out.close();
+  }
+
+  private void writeString(FSDataOutputStream outputStream, String value)
+      throws IOException {
+    // Write the string through a BufferedWriter; closing the writer also
+    // flushes and closes the underlying output stream.
+    BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
+        outputStream));
+    writer.write(value);
+    writer.close();
+  }
+}

Added: hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java?rev=1601781&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java (added)
+++ hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java Tue Jun 10 22:26:45 2014
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeNotNull;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests that reads work correctly while another thread concurrently rewrites
+ * the same blob out of band.
+ */
+public class TestAzureConcurrentOutOfBandIo {
+
+  // Class constants.
+  static final int DOWNLOAD_BLOCK_SIZE = 8 * 1024 * 1024;
+  static final int UPLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
+  static final int BLOB_SIZE = 32 * 1024 * 1024;
+
+  // Number of blocks to be written before flush.
+  private static final int NUMBER_OF_BLOCKS = 2;
+
+  private AzureBlobStorageTestAccount testAccount;
+
+  // Overridden TestCase methods.
+  @Before
+  public void setUp() throws Exception {
+    testAccount = AzureBlobStorageTestAccount.createOutOfBandStore(
+        UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE);
+    // Skip these tests when no live storage test account is configured.
+    assumeNotNull(testAccount);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (testAccount != null) {
+      testAccount.cleanup();
+      testAccount = null;
+    }
+  }
+
+  /**
+   * Continuously rewrites a blob from a background thread until stopped, to
+   * simulate out-of-band writes happening concurrently with reads.
+   */
+  class DataBlockWriter implements Runnable {
+
+    Thread runner;
+    AzureBlobStorageTestAccount writerStorageAccount;
+    String key;
+    // volatile: written by the test thread (stopWriting) and polled by the
+    // writer thread's loop, so cross-thread visibility is required.
+    volatile boolean done = false;
+
+    /**
+     * Constructor captures the test account.
+     * 
+     * @param testAccount
+     */
+    public DataBlockWriter(AzureBlobStorageTestAccount testAccount, String key) {
+      writerStorageAccount = testAccount;
+      this.key = key;
+    }
+
+    /**
+     * Start writing blocks to Azure storage.
+     */
+    public void startWriting() {
+      runner = new Thread(this); // Create the block writer thread.
+      runner.start(); // Start the block writer thread.
+    }
+
+    /**
+     * Stop writing blocks to Azure storage.
+     */
+    public void stopWriting() {
+      done = true;
+    }
+
+    /**
+     * Implementation of the runnable interface. The run method is a tight loop
+     * which repeatedly updates the blob with a 4 MB block.
+     */
+    @Override
+    public void run() {
+      byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
+
+      try {
+        for (int i = 0; !done; i++) {
+          // Write NUMBER_OF_BLOCKS 4 MB blocks to the blob.
+          DataOutputStream outputStream = writerStorageAccount.getStore()
+              .storefile(key,
+                  new PermissionStatus("", "", FsPermission.getDefault()));
+          try {
+            Arrays.fill(dataBlockWrite, (byte) (i % 256));
+            for (int j = 0; j < NUMBER_OF_BLOCKS; j++) {
+              outputStream.write(dataBlockWrite);
+            }
+            outputStream.flush();
+          } finally {
+            // Always release the stream, even when a write fails.
+            outputStream.close();
+          }
+        }
+      } catch (AzureException e) {
+        System.out
+            .println("DatablockWriter thread encountered a storage exception."
+                + e.getMessage());
+      } catch (IOException e) {
+        System.out
+            .println("DatablockWriter thread encountered an I/O exception."
+                + e.getMessage());
+      }
+    }
+  }
+
+  @Test
+  public void testReadOOBWrites() throws Exception {
+
+    byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
+    byte[] dataBlockRead = new byte[UPLOAD_BLOCK_SIZE];
+
+    // Write to blob to make sure it exists.
+    //
+    // Write NUMBER_OF_BLOCKS 4 MB blocks to the blob to ensure there is data
+    // in the blob before reading. This eliminates the race between the reader
+    // and writer threads.
+    DataOutputStream outputStream = testAccount.getStore().storefile(
+        "WASB_String.txt",
+        new PermissionStatus("", "", FsPermission.getDefault()));
+    Arrays.fill(dataBlockWrite, (byte) 255);
+    for (int i = 0; i < NUMBER_OF_BLOCKS; i++) {
+      outputStream.write(dataBlockWrite);
+    }
+
+    outputStream.flush();
+    outputStream.close();
+
+    // Start writing blocks to Azure store using the DataBlockWriter thread.
+    DataBlockWriter writeBlockTask = new DataBlockWriter(testAccount,
+        "WASB_String.txt");
+    writeBlockTask.startWriting();
+    int count = 0;
+    DataInputStream inputStream = null;
+
+    for (int i = 0; i < 5; i++) {
+      try {
+        inputStream = testAccount.getStore().retrieve("WASB_String.txt", 0);
+        count = 0;
+        int c = 0;
+
+        while (c >= 0) {
+          c = inputStream.read(dataBlockRead, 0, UPLOAD_BLOCK_SIZE);
+          if (c < 0) {
+            break;
+          }
+
+          // Counting the number of bytes.
+          count += c;
+        }
+      } catch (IOException e) {
+        // Don't call e.getCause().toString() here: getCause() may be null,
+        // which would turn a test failure into a NullPointerException.
+        System.out.println(e.toString());
+        e.printStackTrace();
+        fail();
+      }
+
+      // Close the stream.
+      if (null != inputStream) {
+        inputStream.close();
+      }
+    }
+
+    // Stop writing blocks.
+    writeBlockTask.stopWriting();
+
+    // Validate that a block was read.
+    assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE, count);
+  }
+}
\ No newline at end of file

Added: hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java?rev=1601781&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java (added)
+++ hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java Tue Jun 10 22:26:45 2014
@@ -0,0 +1,257 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeNotNull;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.HashMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.junit.Test;
+
+import com.microsoft.windowsazure.storage.OperationContext;
+import com.microsoft.windowsazure.storage.SendingRequestEvent;
+import com.microsoft.windowsazure.storage.StorageEvent;
+
+/**
+ * Tests WASB behavior under error conditions: uninitialized store, bad
+ * containers, wrong version metadata, and injected transient network errors.
+ */
+public class TestAzureFileSystemErrorConditions {
+  private static final int ALL_THREE_FILE_SIZE = 1024;
+
+  @Test
+  public void testNoInitialize() throws Exception {
+    // Using the store before initialize() should trip an internal assertion.
+    AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
+    boolean passed = false;
+    try {
+      store.retrieveMetadata("foo");
+      passed = true;
+    } catch (AssertionError e) {
+      // Expected.
+    }
+    assertFalse(
+        "Doing an operation on the store should throw if not initalized.",
+        passed);
+  }
+
+  /**
+   * Try accessing an unauthorized or non-existent (treated the same) container
+   * from WASB.
+   */
+  @Test
+  public void testAccessUnauthorizedPublicContainer() throws Exception {
+    Configuration conf = new Configuration();
+    AzureBlobStorageTestAccount.addWasbToConfiguration(conf);
+    Path noAccessPath = new Path(
+        "wasb://nonExistentContainer@hopefullyNonExistentAccount/someFile");
+    // Suppress retries so the expected failure surfaces quickly.
+    NativeAzureFileSystem.suppressRetryPolicy();
+    try {
+      FileSystem.get(noAccessPath.toUri(), conf).open(noAccessPath);
+      // fail() instead of assertTrue(msg, false): clearer intent, same effect.
+      org.junit.Assert.fail("Should've thrown.");
+    } catch (AzureException ex) {
+      assertTrue("Unexpected message in exception " + ex, ex.getMessage()
+          .contains(
+              "Unable to access container nonExistentContainer in account"
+                  + " hopefullyNonExistentAccount"));
+    } finally {
+      NativeAzureFileSystem.resumeRetryPolicy();
+    }
+  }
+
+  @Test
+  public void testAccessContainerWithWrongVersion() throws Exception {
+    // A container stamped with a future version must be rejected.
+    AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
+    MockStorageInterface mockStorage = new MockStorageInterface();
+    store.setAzureStorageInteractionLayer(mockStorage);
+    FileSystem fs = new NativeAzureFileSystem(store);
+    try {
+      Configuration conf = new Configuration();
+      AzureBlobStorageTestAccount.setMockAccountKey(conf);
+      HashMap<String, String> metadata = new HashMap<String, String>();
+      metadata.put(AzureNativeFileSystemStore.VERSION_METADATA_KEY,
+          "2090-04-05"); // It's from the future!
+      mockStorage.addPreExistingContainer(
+          AzureBlobStorageTestAccount.getMockContainerUri(), metadata);
+
+      boolean passed = false;
+      try {
+        fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), conf);
+        fs.listStatus(new Path("/"));
+        passed = true;
+      } catch (AzureException ex) {
+        assertTrue("Unexpected exception message: " + ex, ex.getMessage()
+            .contains("unsupported version: 2090-04-05."));
+      }
+      assertFalse(
+          "Should've thrown an exception because of the wrong version.", passed);
+    } finally {
+      fs.close();
+    }
+  }
+
+  /** Recognizes the HTTP connections a transient error should be injected on. */
+  private interface ConnectionRecognizer {
+    boolean isTargetConnection(HttpURLConnection connection);
+  }
+
+  /**
+   * Injects a single simulated transient failure (a tiny read timeout plus a
+   * disconnect) into the first recognized request, so a retry can succeed.
+   */
+  private class TransientErrorInjector extends
+      StorageEvent<SendingRequestEvent> {
+    final ConnectionRecognizer connectionRecognizer;
+    private boolean injectedErrorOnce = false;
+
+    public TransientErrorInjector(ConnectionRecognizer connectionRecognizer) {
+      this.connectionRecognizer = connectionRecognizer;
+    }
+
+    @Override
+    public void eventOccurred(SendingRequestEvent eventArg) {
+      HttpURLConnection connection = (HttpURLConnection) eventArg
+          .getConnectionObject();
+      if (!connectionRecognizer.isTargetConnection(connection)) {
+        return;
+      }
+      // Only inject the error once so the retry has a chance to succeed.
+      if (!injectedErrorOnce) {
+        connection.setReadTimeout(1);
+        connection.disconnect();
+        injectedErrorOnce = true;
+      }
+    }
+  }
+
+  private void injectTransientError(NativeAzureFileSystem fs,
+      final ConnectionRecognizer connectionRecognizer) {
+    fs.getStore().addTestHookToOperationContext(new TestHookOperationContext() {
+      @Override
+      public OperationContext modifyOperationContext(OperationContext original) {
+        original.getSendingRequestEventHandler().addListener(
+            new TransientErrorInjector(connectionRecognizer));
+        return original;
+      }
+    });
+  }
+
+  @Test
+  public void testTransientErrorOnDelete() throws Exception {
+    // Need to do this test against a live storage account
+    AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount
+        .create();
+    assumeNotNull(testAccount);
+    try {
+      NativeAzureFileSystem fs = testAccount.getFileSystem();
+      injectTransientError(fs, new ConnectionRecognizer() {
+        @Override
+        public boolean isTargetConnection(HttpURLConnection connection) {
+          return connection.getRequestMethod().equals("DELETE");
+        }
+      });
+      Path testFile = new Path("/a/b");
+      assertTrue(fs.createNewFile(testFile));
+      assertTrue(fs.rename(testFile, new Path("/x")));
+    } finally {
+      testAccount.cleanup();
+    }
+  }
+
+  /** Writes a file of ALL_THREE_FILE_SIZE bytes, each with value 3. */
+  private void writeAllThreeFile(NativeAzureFileSystem fs, Path testFile)
+      throws IOException {
+    byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
+    Arrays.fill(buffer, (byte) 3);
+    OutputStream stream = fs.create(testFile);
+    try {
+      stream.write(buffer);
+    } finally {
+      // Close in finally so the stream is released even if the write fails.
+      stream.close();
+    }
+  }
+
+  /** Reads the file back and checks that every byte has value 3. */
+  private void readAllThreeFile(NativeAzureFileSystem fs, Path testFile)
+      throws IOException {
+    byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
+    InputStream inStream = fs.open(testFile);
+    try {
+      assertEquals(buffer.length, inStream.read(buffer, 0, buffer.length));
+    } finally {
+      inStream.close();
+    }
+    for (int i = 0; i < buffer.length; i++) {
+      assertEquals(3, buffer[i]);
+    }
+  }
+
+  @Test
+  public void testTransientErrorOnCommitBlockList() throws Exception {
+    // Need to do this test against a live storage account
+    AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount
+        .create();
+    assumeNotNull(testAccount);
+    try {
+      NativeAzureFileSystem fs = testAccount.getFileSystem();
+      injectTransientError(fs, new ConnectionRecognizer() {
+        @Override
+        public boolean isTargetConnection(HttpURLConnection connection) {
+          return connection.getRequestMethod().equals("PUT")
+              && connection.getURL().getQuery().contains("blocklist");
+        }
+      });
+      Path testFile = new Path("/a/b");
+      writeAllThreeFile(fs, testFile);
+      readAllThreeFile(fs, testFile);
+    } finally {
+      testAccount.cleanup();
+    }
+  }
+
+  @Test
+  public void testTransientErrorOnRead() throws Exception {
+    // Need to do this test against a live storage account
+    AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount
+        .create();
+    assumeNotNull(testAccount);
+    try {
+      NativeAzureFileSystem fs = testAccount.getFileSystem();
+      Path testFile = new Path("/a/b");
+      writeAllThreeFile(fs, testFile);
+      injectTransientError(fs, new ConnectionRecognizer() {
+        @Override
+        public boolean isTargetConnection(HttpURLConnection connection) {
+          return connection.getRequestMethod().equals("GET");
+        }
+      });
+      readAllThreeFile(fs, testFile);
+    } finally {
+      testAccount.cleanup();
+    }
+  }
+
+  // Tests an error during stream creation (in this case in the seek()
+  // implementation) to verify the close-stream-on-error logic.
+  @Test(expected = AzureException.class)
+  public void testErrorDuringRetrieve() throws Exception {
+    NativeAzureFileSystem fs = AzureBlobStorageTestAccount.createMock().getFileSystem();
+    Path testFile = new Path("/testErrorDuringRetrieve");
+    writeAllThreeFile(fs, testFile);
+
+    FSDataInputStream stream = fs.open(testFile);
+    stream.seek(Integer.MAX_VALUE);
+  }
+}

Added: hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java?rev=1601781&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java (added)
+++ hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java Tue Jun 10 22:26:45 2014
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_CHECK_BLOCK_MD5;
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_STORE_BLOB_MD5;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeNotNull;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.util.Arrays;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
+import org.junit.After;
+import org.junit.Test;
+
+import com.microsoft.windowsazure.storage.Constants;
+import com.microsoft.windowsazure.storage.OperationContext;
+import com.microsoft.windowsazure.storage.ResponseReceivedEvent;
+import com.microsoft.windowsazure.storage.StorageErrorCodeStrings;
+import com.microsoft.windowsazure.storage.StorageEvent;
+import com.microsoft.windowsazure.storage.StorageException;
+import com.microsoft.windowsazure.storage.blob.BlockEntry;
+import com.microsoft.windowsazure.storage.blob.BlockSearchMode;
+import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
+import com.microsoft.windowsazure.storage.core.Base64;
+
+/**
+ * Test that we do proper data integrity validation with MD5 checks as
+ * configured.
+ */
+public class TestBlobDataValidation {
+  private AzureBlobStorageTestAccount testAccount;
+
+  /** Release any live test account resources created by a test. */
+  @After
+  public void tearDown() throws Exception {
+    if (testAccount != null) {
+      testAccount.cleanup();
+      testAccount = null;
+    }
+  }
+
+  /**
+   * Test that by default we don't store the blob-level MD5.
+   */
+  @Test
+  public void testBlobMd5StoreOffByDefault() throws Exception {
+    testAccount = AzureBlobStorageTestAccount.create();
+    testStoreBlobMd5(false);
+  }
+
+  /**
+   * Test that we get blob-level MD5 storage and validation if we specify that
+   * in the configuration.
+   */
+  @Test
+  public void testStoreBlobMd5() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(KEY_STORE_BLOB_MD5, true);
+    testAccount = AzureBlobStorageTestAccount.create(conf);
+    testStoreBlobMd5(true);
+  }
+
+  // Shared body: write a file, check whether the MD5 property was stored as
+  // configured, corrupt the blob out of band, and verify the read-back result.
+  private void testStoreBlobMd5(boolean expectMd5Stored) throws Exception {
+    assumeNotNull(testAccount);
+    // Write a test file.
+    String testFileKey = "testFile";
+    Path testFilePath = new Path("/" + testFileKey);
+    OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
+    outStream.write(new byte[] { 5, 15 });
+    outStream.close();
+
+    // Check that we stored/didn't store the MD5 field as configured.
+    CloudBlockBlob blob = testAccount.getBlobReference(testFileKey);
+    blob.downloadAttributes();
+    String obtainedMd5 = blob.getProperties().getContentMD5();
+    if (expectMd5Stored) {
+      assertNotNull(obtainedMd5);
+    } else {
+      assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
+    }
+
+    // Mess with the content so it doesn't match the MD5.
+    String newBlockId = Base64.encode(new byte[] { 55, 44, 33, 22 });
+    blob.uploadBlock(newBlockId,
+        new ByteArrayInputStream(new byte[] { 6, 45 }), 2);
+    blob.commitBlockList(Arrays.asList(new BlockEntry[] { new BlockEntry(
+        newBlockId, BlockSearchMode.UNCOMMITTED) }));
+
+    // Now read back the content. If we stored the MD5 for the blob content
+    // we should get a data corruption error.
+    InputStream inStream = testAccount.getFileSystem().open(testFilePath);
+    try {
+      byte[] inBuf = new byte[100];
+      while (inStream.read(inBuf) > 0){
+        //nothing;
+      }
+      inStream.close();
+      if (expectMd5Stored) {
+        fail("Should've thrown because of data corruption.");
+      }
+    } catch (IOException ex) {
+      if (!expectMd5Stored) {
+        throw ex;
+      }
+      // The storage-layer MD5 mismatch is expected as the root cause here.
+      StorageException cause = (StorageException) ex.getCause();
+      assertNotNull(cause);
+      assertTrue("Unexpected cause: " + cause,
+          cause.getErrorCode().equals(StorageErrorCodeStrings.INVALID_MD5));
+    }
+  }
+
+  /**
+   * Test that by default we check block-level MD5.
+   */
+  @Test
+  public void testCheckBlockMd5() throws Exception {
+    testAccount = AzureBlobStorageTestAccount.create();
+    testCheckBlockMd5(true);
+  }
+
+  /**
+   * Test that we don't check block-level MD5 if we specify that in the
+   * configuration.
+   */
+  @Test
+  public void testDontCheckBlockMd5() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(KEY_CHECK_BLOCK_MD5, false);
+    testAccount = AzureBlobStorageTestAccount.create(conf);
+    testCheckBlockMd5(false);
+  }
+
+  /**
+   * Connection inspector to check that MD5 fields for content is set/not set as
+   * expected.
+   */
+  private static class ContentMD5Checker extends
+      StorageEvent<ResponseReceivedEvent> {
+    private final boolean expectMd5;
+
+    public ContentMD5Checker(boolean expectMd5) {
+      this.expectMd5 = expectMd5;
+    }
+
+    @Override
+    public void eventOccurred(ResponseReceivedEvent eventArg) {
+      HttpURLConnection connection = (HttpURLConnection) eventArg
+          .getConnectionObject();
+      // Ranged GET responses carry the MD5 as a response header, while block
+      // PUTs carry it as a request property.
+      if (isGetRange(connection)) {
+        checkObtainedMd5(connection
+            .getHeaderField(Constants.HeaderConstants.CONTENT_MD5));
+      } else if (isPutBlock(connection)) {
+        checkObtainedMd5(connection
+            .getRequestProperty(Constants.HeaderConstants.CONTENT_MD5));
+      }
+    }
+
+    private void checkObtainedMd5(String obtainedMd5) {
+      if (expectMd5) {
+        assertNotNull(obtainedMd5);
+      } else {
+        assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
+      }
+    }
+
+    private static boolean isPutBlock(HttpURLConnection connection) {
+      return connection.getRequestMethod().equals("PUT")
+          && connection.getURL().getQuery().contains("blockid");
+    }
+
+    private static boolean isGetRange(HttpURLConnection connection) {
+      return connection.getRequestMethod().equals("GET")
+          && connection
+              .getHeaderField(Constants.HeaderConstants.STORAGE_RANGE_HEADER) != null;
+    }
+  }
+
+  private void testCheckBlockMd5(final boolean expectMd5Checked)
+      throws Exception {
+    assumeNotNull(testAccount);
+    Path testFilePath = new Path("/testFile");
+
+    // Add a hook to check that for GET/PUT requests we set/don't set
+    // the block-level MD5 field as configured. I tried to do clever
+    // testing by also messing with the raw data to see if we actually
+    // validate the data as expected, but the HttpURLConnection wasn't
+    // pluggable enough for me to do that.
+    testAccount.getFileSystem().getStore()
+        .addTestHookToOperationContext(new TestHookOperationContext() {
+          @Override
+          public OperationContext modifyOperationContext(
+              OperationContext original) {
+            original.getResponseReceivedEventHandler().addListener(
+                new ContentMD5Checker(expectMd5Checked));
+            return original;
+          }
+        });
+
+    OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
+    outStream.write(new byte[] { 5, 15 });
+    outStream.close();
+
+    // Drain the file so the checker sees the GET responses.
+    InputStream inStream = testAccount.getFileSystem().open(testFilePath);
+    byte[] inBuf = new byte[100];
+    while (inStream.read(inBuf) > 0){
+      //nothing;
+    }
+    inStream.close();
+  }
+}

Added: hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java?rev=1601781&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java (added)
+++ hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java Tue Jun 10 22:26:45 2014
@@ -0,0 +1,265 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests that we put the correct metadata on blobs created through WASB.
+ */
+public class TestBlobMetadata {
+  private AzureBlobStorageTestAccount testAccount;
+  private FileSystem fs;
+  private InMemoryBlockBlobStore backingStore;
+
+  @Before
+  public void setUp() throws Exception {
+    testAccount = AzureBlobStorageTestAccount.createMock();
+    fs = testAccount.getFileSystem();
+    backingStore = testAccount.getMockStorage().getBackingStore();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    testAccount.cleanup();
+    fs = null;
+    backingStore = null;
+  }
+
+  private static String getExpectedOwner() throws Exception {
+    return UserGroupInformation.getCurrentUser().getShortUserName();
+  }
+
+  private static String getExpectedPermissionString(String permissionString)
+      throws Exception {
+    return String.format(
+        "{\"owner\":\"%s\",\"group\":\"%s\",\"permissions\":\"%s\"}",
+        getExpectedOwner(), NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
+        permissionString);
+  }
+
+  /**
+   * Tests that WASB stamped the version in the container metadata.
+   */
+  @Test
+  public void testContainerVersionMetadata() throws Exception {
+    // Do a write operation to trigger version stamp
+    fs.createNewFile(new Path("/foo"));
+    HashMap<String, String> containerMetadata = backingStore
+        .getContainerMetadata();
+    assertNotNull(containerMetadata);
+    assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
+        containerMetadata.get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
+  }
+
+  private static final class FsWithPreExistingContainer implements Closeable {
+    private final MockStorageInterface mockStorage;
+    private final NativeAzureFileSystem fs;
+
+    private FsWithPreExistingContainer(MockStorageInterface mockStorage,
+        NativeAzureFileSystem fs) {
+      this.mockStorage = mockStorage;
+      this.fs = fs;
+    }
+
+    public NativeAzureFileSystem getFs() {
+      return fs;
+    }
+
+    public HashMap<String, String> getContainerMetadata() {
+      return mockStorage.getBackingStore().getContainerMetadata();
+    }
+
+    public static FsWithPreExistingContainer create() throws Exception {
+      return create(null);
+    }
+
+    public static FsWithPreExistingContainer create(
+        HashMap<String, String> containerMetadata) throws Exception {
+      AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
+      MockStorageInterface mockStorage = new MockStorageInterface();
+      store.setAzureStorageInteractionLayer(mockStorage);
+      NativeAzureFileSystem fs = new NativeAzureFileSystem(store);
+      Configuration conf = new Configuration();
+      AzureBlobStorageTestAccount.setMockAccountKey(conf);
+      mockStorage.addPreExistingContainer(
+          AzureBlobStorageTestAccount.getMockContainerUri(), containerMetadata);
+      fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), conf);
+      return new FsWithPreExistingContainer(mockStorage, fs);
+    }
+
+    @Override
+    public void close() throws IOException {
+      fs.close();
+    }
+  }
+
+  /**
+   * Tests that WASB stamped the version in the container metadata if it does a
+   * write operation to a pre-existing container.
+   */
+  @Test
+  public void testPreExistingContainerVersionMetadata() throws Exception {
+    // Create a mock storage with a pre-existing container that has no
+    // WASB version metadata on it.
+    FsWithPreExistingContainer fsWithContainer = FsWithPreExistingContainer
+        .create();
+
+    // Now, do some read operations (should touch the metadata)
+    assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
+    assertEquals(0, fsWithContainer.getFs().listStatus(new Path("/")).length);
+
+    // Check that no container metadata exists yet
+    assertNull(fsWithContainer.getContainerMetadata());
+
+    // Now do a write operation - should stamp the version
+    fsWithContainer.getFs().mkdirs(new Path("/dir"));
+
+    // Check that now we have the version stamp
+    assertNotNull(fsWithContainer.getContainerMetadata());
+    assertEquals(
+        AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
+        fsWithContainer.getContainerMetadata().get(
+            AzureNativeFileSystemStore.VERSION_METADATA_KEY));
+    fsWithContainer.close();
+  }
+
+  /**
+   * Tests that WASB works well with an older version container with ASV-era
+   * version and metadata.
+   */
+  @Test
+  public void testFirstContainerVersionMetadata() throws Exception {
+    // Create a mock storage with a pre-existing container that has
+    // ASV version metadata on it.
+    HashMap<String, String> containerMetadata = new HashMap<String, String>();
+    containerMetadata.put(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY,
+        AzureNativeFileSystemStore.FIRST_WASB_VERSION);
+    FsWithPreExistingContainer fsWithContainer = FsWithPreExistingContainer
+        .create(containerMetadata);
+
+    // Now, do some read operations (should touch the metadata)
+    assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
+    assertEquals(0, fsWithContainer.getFs().listStatus(new Path("/")).length);
+
+    // Check that no container metadata exists yet
+    assertEquals(
+        AzureNativeFileSystemStore.FIRST_WASB_VERSION,
+        fsWithContainer.getContainerMetadata().get(
+            AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
+    assertNull(fsWithContainer.getContainerMetadata().get(
+        AzureNativeFileSystemStore.VERSION_METADATA_KEY));
+
+    // Now do a write operation - should stamp the version
+    fsWithContainer.getFs().mkdirs(new Path("/dir"));
+
+    // Check that now we have the version stamp
+    assertEquals(
+        AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
+        fsWithContainer.getContainerMetadata().get(
+            AzureNativeFileSystemStore.VERSION_METADATA_KEY));
+    assertNull(fsWithContainer.getContainerMetadata().get(
+        AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
+    fsWithContainer.close();
+  }
+
+  @SuppressWarnings("deprecation")
+  @Test
+  public void testPermissionMetadata() throws Exception {
+    FsPermission justMe = new FsPermission(FsAction.READ_WRITE, FsAction.NONE,
+        FsAction.NONE);
+    Path selfishFile = new Path("/noOneElse");
+    fs.create(selfishFile, justMe, true, 4096, fs.getDefaultReplication(),
+        fs.getDefaultBlockSize(), null).close();
+    HashMap<String, String> metadata = backingStore
+        .getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
+    assertNotNull(metadata);
+    String storedPermission = metadata.get("hdi_permission");
+    assertEquals(getExpectedPermissionString("rw-------"), storedPermission);
+    FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
+    assertNotNull(retrievedStatus);
+    assertEquals(justMe, retrievedStatus.getPermission());
+    assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
+    assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
+        retrievedStatus.getGroup());
+  }
+
+  /**
+   * Tests that WASB understands the old-style ASV metadata and changes it when
+   * it gets the chance.
+   */
+  @Test
+  public void testOldPermissionMetadata() throws Exception {
+    Path selfishFile = new Path("/noOneElse");
+    HashMap<String, String> metadata = new HashMap<String, String>();
+    metadata.put("asv_permission", getExpectedPermissionString("rw-------"));
+    backingStore.setContent(AzureBlobStorageTestAccount.toMockUri(selfishFile),
+        new byte[] {}, metadata);
+    FsPermission justMe = new FsPermission(FsAction.READ_WRITE, FsAction.NONE,
+        FsAction.NONE);
+    FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
+    assertNotNull(retrievedStatus);
+    assertEquals(justMe, retrievedStatus.getPermission());
+    assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
+    assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
+        retrievedStatus.getGroup());
+    FsPermission meAndYou = new FsPermission(FsAction.READ_WRITE,
+        FsAction.READ_WRITE, FsAction.NONE);
+    fs.setPermission(selfishFile, meAndYou);
+    metadata = backingStore.getMetadata(AzureBlobStorageTestAccount
+        .toMockUri(selfishFile));
+    assertNotNull(metadata);
+    String storedPermission = metadata.get("hdi_permission");
+    assertEquals(getExpectedPermissionString("rw-rw----"), storedPermission);
+    assertNull(metadata.get("asv_permission"));
+  }
+
+  @Test
+  public void testFolderMetadata() throws Exception {
+    Path folder = new Path("/folder");
+    FsPermission justRead = new FsPermission(FsAction.READ, FsAction.READ,
+        FsAction.READ);
+    fs.mkdirs(folder, justRead);
+    HashMap<String, String> metadata = backingStore
+        .getMetadata(AzureBlobStorageTestAccount.toMockUri(folder));
+    assertNotNull(metadata);
+    assertEquals("true", metadata.get("hdi_isfolder"));
+    assertEquals(getExpectedPermissionString("r--r--r--"),
+        metadata.get("hdi_permission"));
+  }
+}

Added: hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java?rev=1601781&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java (added)
+++ hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java Tue Jun 10 22:26:45 2014
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeNotNull;
+
+import java.io.FileNotFoundException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
+import org.junit.After;
+import org.junit.Test;
+
+import com.microsoft.windowsazure.storage.blob.BlobOutputStream;
+import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
+import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
+
+/**
+ * Tests that WASB creates containers only if needed.
+ */
+public class TestContainerChecks {
+  private AzureBlobStorageTestAccount testAccount;
+
+  @After
+  public void tearDown() throws Exception {
+    if (testAccount != null) {
+      testAccount.cleanup();
+      testAccount = null;
+    }
+  }
+
+  /**
+   * Tests that read operations against a missing container fail without
+   * creating it, and that content created out-of-band becomes visible.
+   */
+  @Test
+  public void testContainerExistAfterDoesNotExist() throws Exception {
+    testAccount = AzureBlobStorageTestAccount.create("",
+        EnumSet.noneOf(CreateOptions.class));
+    assumeNotNull(testAccount);
+    CloudBlobContainer container = testAccount.getRealContainer();
+    FileSystem fs = testAccount.getFileSystem();
+
+    // Starting off with the container not there
+    assertFalse(container.exists());
+
+    // A list shouldn't create the container and will set file system store
+    // state to DoesNotExist
+    try {
+      fs.listStatus(new Path("/"));
+      fail("Should've thrown.");
+    } catch (FileNotFoundException ex) {
+      assertTrue("Unexpected exception: " + ex,
+          ex.getMessage().contains("does not exist."));
+    }
+    assertFalse(container.exists());
+
+    // Create a container outside of the WASB FileSystem
+    container.create();
+    // Add a file to the container outside of the WASB FileSystem; close the
+    // stream in a finally block so a failed write doesn't leak it.
+    CloudBlockBlob blob = testAccount.getBlobReference("foo");
+    BlobOutputStream outputStream = blob.openOutputStream();
+    try {
+      outputStream.write(new byte[10]);
+    } finally {
+      outputStream.close();
+    }
+
+    // Make sure the file is visible
+    assertTrue(fs.exists(new Path("/foo")));
+    assertTrue(container.exists());
+  }
+
+  /**
+   * Tests that a write succeeds after the container is created out-of-band,
+   * even though an earlier list saw it as missing.
+   */
+  @Test
+  public void testContainerCreateAfterDoesNotExist() throws Exception {
+    testAccount = AzureBlobStorageTestAccount.create("",
+        EnumSet.noneOf(CreateOptions.class));
+    assumeNotNull(testAccount);
+    CloudBlobContainer container = testAccount.getRealContainer();
+    FileSystem fs = testAccount.getFileSystem();
+
+    // Starting off with the container not there
+    assertFalse(container.exists());
+
+    // A list shouldn't create the container and will set file system store
+    // state to DoesNotExist
+    try {
+      fs.listStatus(new Path("/"));
+      fail("Should've thrown.");
+    } catch (FileNotFoundException ex) {
+      assertTrue("Unexpected exception: " + ex,
+          ex.getMessage().contains("does not exist."));
+    }
+    assertFalse(container.exists());
+
+    // Create a container outside of the WASB FileSystem
+    container.create();
+
+    // Write should succeed
+    assertTrue(fs.createNewFile(new Path("/foo")));
+    assertTrue(container.exists());
+  }
+
+  /**
+   * Tests that only a write operation (not list/read/rename) creates the
+   * container.
+   */
+  @Test
+  public void testContainerCreateOnWrite() throws Exception {
+    testAccount = AzureBlobStorageTestAccount.create("",
+        EnumSet.noneOf(CreateOptions.class));
+    assumeNotNull(testAccount);
+    CloudBlobContainer container = testAccount.getRealContainer();
+    FileSystem fs = testAccount.getFileSystem();
+
+    // Starting off with the container not there
+    assertFalse(container.exists());
+
+    // A list shouldn't create the container.
+    try {
+      fs.listStatus(new Path("/"));
+      fail("Should've thrown.");
+    } catch (FileNotFoundException ex) {
+      assertTrue("Unexpected exception: " + ex,
+          ex.getMessage().contains("does not exist."));
+    }
+    assertFalse(container.exists());
+
+    // Neither should a read.
+    try {
+      fs.open(new Path("/foo"));
+      fail("Should've thrown.");
+    } catch (FileNotFoundException ex) {
+      // Expected.
+    }
+    assertFalse(container.exists());
+
+    // Neither should a rename
+    assertFalse(fs.rename(new Path("/foo"), new Path("/bar")));
+    assertFalse(container.exists());
+
+    // But a write should.
+    assertTrue(fs.createNewFile(new Path("/foo")));
+    assertTrue(container.exists());
+  }
+
+  /**
+   * Tests that when connected with a SAS, WASB doesn't (and can't) create
+   * the container on write.
+   */
+  @Test
+  public void testContainerChecksWithSas() throws Exception {
+    testAccount = AzureBlobStorageTestAccount.create("",
+        EnumSet.of(CreateOptions.UseSas));
+    assumeNotNull(testAccount);
+    CloudBlobContainer container = testAccount.getRealContainer();
+    FileSystem fs = testAccount.getFileSystem();
+
+    // The container shouldn't be there
+    assertFalse(container.exists());
+
+    // A write should just fail
+    try {
+      fs.createNewFile(new Path("/foo"));
+      fail("Should've thrown.");
+    } catch (AzureException ex) {
+      // Expected.
+    }
+    assertFalse(container.exists());
+  }
+}

Added: hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java?rev=1601781&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java (added)
+++ hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java Tue Jun 10 22:26:45 2014
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Test;
+
+public class TestNativeAzureFileSystemBlockLocations {
+  /**
+   * Tests that the configured azure block size is reported back as the
+   * file's block size.
+   */
+  @Test
+  public void testNumberOfBlocks() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(NativeAzureFileSystem.AZURE_BLOCK_SIZE_PROPERTY_NAME, "500");
+    AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount
+        .createMock(conf);
+    try {
+      FileSystem fs = testAccount.getFileSystem();
+      Path testFile = createTestFile(fs, 1200);
+      FileStatus stat = fs.getFileStatus(testFile);
+      assertEquals(500, stat.getBlockSize());
+    } finally {
+      // Clean up even if an assertion above fails.
+      testAccount.cleanup();
+    }
+  }
+
+  @Test
+  public void testBlockLocationsTypical() throws Exception {
+    BlockLocation[] locations = getBlockLocationsOutput(210, 50, 0, 210);
+    assertEquals(5, locations.length);
+    assertEquals("localhost", locations[0].getHosts()[0]);
+    assertEquals(50, locations[0].getLength());
+    assertEquals(10, locations[4].getLength());
+    assertEquals(100, locations[2].getOffset());
+  }
+
+  @Test
+  public void testBlockLocationsEmptyFile() throws Exception {
+    BlockLocation[] locations = getBlockLocationsOutput(0, 50, 0, 0);
+    assertEquals(0, locations.length);
+  }
+
+  @Test
+  public void testBlockLocationsSmallFile() throws Exception {
+    BlockLocation[] locations = getBlockLocationsOutput(1, 50, 0, 1);
+    assertEquals(1, locations.length);
+    assertEquals(1, locations[0].getLength());
+  }
+
+  @Test
+  public void testBlockLocationsExactBlockSizeMultiple() throws Exception {
+    BlockLocation[] locations = getBlockLocationsOutput(200, 50, 0, 200);
+    assertEquals(4, locations.length);
+    assertEquals(150, locations[3].getOffset());
+    assertEquals(50, locations[3].getLength());
+  }
+
+  @Test
+  public void testBlockLocationsSubsetOfFile() throws Exception {
+    BlockLocation[] locations = getBlockLocationsOutput(205, 10, 15, 35);
+    assertEquals(4, locations.length);
+    assertEquals(10, locations[0].getLength());
+    assertEquals(15, locations[0].getOffset());
+    assertEquals(5, locations[3].getLength());
+    assertEquals(45, locations[3].getOffset());
+  }
+
+  @Test
+  public void testBlockLocationsOutOfRangeSubsetOfFile() throws Exception {
+    BlockLocation[] locations = getBlockLocationsOutput(205, 10, 300, 10);
+    assertEquals(0, locations.length);
+  }
+
+  @Test
+  public void testBlockLocationsEmptySubsetOfFile() throws Exception {
+    BlockLocation[] locations = getBlockLocationsOutput(205, 10, 0, 0);
+    assertEquals(0, locations.length);
+  }
+
+  @Test
+  public void testBlockLocationsDifferentLocationHost() throws Exception {
+    BlockLocation[] locations = getBlockLocationsOutput(100, 10, 0, 100,
+        "myblobhost");
+    assertEquals(10, locations.length);
+    assertEquals("myblobhost", locations[0].getHosts()[0]);
+  }
+
+  /**
+   * Convenience overload using the default block location host.
+   */
+  private static BlockLocation[] getBlockLocationsOutput(int fileSize,
+      int blockSize, long start, long len) throws Exception {
+    return getBlockLocationsOutput(fileSize, blockSize, start, len, null);
+  }
+
+  /**
+   * Creates a mock-backed file system with the given azure block size
+   * (and optional block location host), writes a file of the given size,
+   * and returns the block locations reported for the [start, start+len)
+   * range of that file.
+   */
+  private static BlockLocation[] getBlockLocationsOutput(int fileSize,
+      int blockSize, long start, long len, String blockLocationHost)
+      throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(NativeAzureFileSystem.AZURE_BLOCK_SIZE_PROPERTY_NAME,
+        Integer.toString(blockSize));
+    if (blockLocationHost != null) {
+      conf.set(NativeAzureFileSystem.AZURE_BLOCK_LOCATION_HOST_PROPERTY_NAME,
+          blockLocationHost);
+    }
+    AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount
+        .createMock(conf);
+    try {
+      FileSystem fs = testAccount.getFileSystem();
+      Path testFile = createTestFile(fs, fileSize);
+      FileStatus stat = fs.getFileStatus(testFile);
+      return fs.getFileBlockLocations(stat, start, len);
+    } finally {
+      // Clean up even if any of the operations above throw.
+      testAccount.cleanup();
+    }
+  }
+
+  /**
+   * Creates /testFile filled with the given number of zero bytes.
+   */
+  private static Path createTestFile(FileSystem fs, int size) throws Exception {
+    Path testFile = new Path("/testFile");
+    OutputStream outputStream = fs.create(testFile);
+    try {
+      outputStream.write(new byte[size]);
+    } finally {
+      outputStream.close();
+    }
+    return testFile;
+  }
+}



Mime
View raw message