hadoop-common-commits mailing list archives

From: viraj...@apache.org
Subject: [48/50] [abbrv] hadoop git commit: HDFS-12091. [READ] Check that the replicas served from a ProvidedVolumeImpl belong to the correct external storage
Date: Tue, 24 Oct 2017 18:32:22 GMT
HDFS-12091. [READ] Check that the replicas served from a ProvidedVolumeImpl belong to the correct external storage


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d921dee5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d921dee5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d921dee5

Branch: refs/heads/HDFS-9806
Commit: d921dee5a9a639e1aa7e41584e41a07388780819
Parents: 12d8837
Author: Virajith Jalaparti <virajith@apache.org>
Authored: Mon Aug 7 11:35:49 2017 -0700
Committer: Virajith Jalaparti <virajith@apache.org>
Committed: Tue Oct 24 11:10:23 2017 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/StorageLocation.java   |  26 +++--
 .../fsdataset/impl/ProvidedVolumeImpl.java      |  67 ++++++++++--
 .../fsdataset/impl/TestProvidedImpl.java        | 105 ++++++++++++++++++-
 3 files changed, 173 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
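
The heart of this change is a URI prefix test: a FileRegion is served from a ProvidedVolumeImpl only if its block URI lies under the volume's baseURI. A minimal, self-contained sketch of that mechanism (the class name and main() are illustrative only; the actual patch below additionally normalizes scheme-less paths via StorageLocation.normalizeFileURI before comparing):

    import java.net.URI;

    public class ContainsBlockSketch {
      // URI.relativize returns its argument unchanged when the base is not
      // a prefix of the target, so an unchanged result means "not contained".
      static boolean containsBlock(URI volumeURI, URI blockURI) {
        if (volumeURI == null && blockURI == null) {
          return true;
        }
        if (volumeURI == null || blockURI == null) {
          return false;
        }
        return !volumeURI.relativize(blockURI).equals(blockURI);
      }

      public static void main(String[] args) throws Exception {
        System.out.println(containsBlock(
            new URI("file:/a/b/c/"), new URI("file:/a/b/c/d/e.file"))); // true
        System.out.println(containsBlock(
            new URI("file:/a/b/e/"), new URI("file:/a/b/c/d/e.file"))); // false
      }
    }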


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d921dee5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
index fb7acfd..d72448d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
@@ -64,21 +64,25 @@ public class StorageLocation
     this.storageType = storageType;
     if (uri.getScheme() == null || uri.getScheme().equals("file")) {
       // make sure all URIs that point to a file have the same scheme
-      try {
-        File uriFile = new File(uri.getPath());
-        String uriStr = uriFile.toURI().normalize().toString();
-        if (uriStr.endsWith("/")) {
-          uriStr = uriStr.substring(0, uriStr.length() - 1);
-        }
-        uri = new URI(uriStr);
-      } catch (URISyntaxException e) {
-        throw new IllegalArgumentException(
-            "URI: " + uri + " is not in the expected format");
-      }
+      uri = normalizeFileURI(uri);
     }
     baseURI = uri;
   }
 
+  public static URI normalizeFileURI(URI uri) {
+    try {
+      File uriFile = new File(uri.getPath());
+      String uriStr = uriFile.toURI().normalize().toString();
+      if (uriStr.endsWith("/")) {
+        uriStr = uriStr.substring(0, uriStr.length() - 1);
+      }
+      return new URI(uriStr);
+    } catch (URISyntaxException e) {
+      throw new IllegalArgumentException(
+              "URI: " + uri + " is not in the expected format");
+    }
+  }
+
   public StorageType getStorageType() {
     return this.storageType;
   }
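
For reference, the extracted normalizeFileURI helper behaves exactly as the previously inlined code did: it round-trips the path through java.io.File to pin the file: scheme and strips any trailing slash. A quick usage sketch against the patched class (the path is made up; the printed value assumes a Unix filesystem):

    import java.net.URI;
    import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

    public class NormalizeFileURIDemo {
      public static void main(String[] args) throws Exception {
        // a scheme-less path with a trailing slash...
        URI raw = new URI("/data/dn1/provided/");
        // ...comes back pinned to the file: scheme, minus the slash
        System.out.println(StorageLocation.normalizeFileURI(raw));
        // prints: file:/data/dn1/provided
      }
    }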

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d921dee5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
index 421b9cc..5cd28c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
@@ -64,7 +65,7 @@ import org.apache.hadoop.util.Time;
 public class ProvidedVolumeImpl extends FsVolumeImpl {
 
   static class ProvidedBlockPoolSlice {
-    private FsVolumeImpl providedVolume;
+    private ProvidedVolumeImpl providedVolume;
 
     private FileRegionProvider provider;
     private Configuration conf;
@@ -89,13 +90,20 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
       return provider;
     }
 
+    @VisibleForTesting
+    void setFileRegionProvider(FileRegionProvider newProvider) {
+      this.provider = newProvider;
+    }
+
     public void getVolumeMap(ReplicaMap volumeMap,
         RamDiskReplicaTracker ramDiskReplicaMap) throws IOException {
       Iterator<FileRegion> iter = provider.iterator();
-      while(iter.hasNext()) {
+      while (iter.hasNext()) {
         FileRegion region = iter.next();
-        if (region.getBlockPoolId() != null &&
-            region.getBlockPoolId().equals(bpid)) {
+        if (region.getBlockPoolId() != null
+            && region.getBlockPoolId().equals(bpid)
+            && containsBlock(providedVolume.baseURI,
+                region.getPath().toUri())) {
           ReplicaInfo newReplica = new ReplicaBuilder(ReplicaState.FINALIZED)
               .setBlockId(region.getBlock().getBlockId())
               .setURI(region.getPath().toUri())
@@ -103,17 +111,16 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
               .setLength(region.getBlock().getNumBytes())
               .setGenerationStamp(region.getBlock().getGenerationStamp())
               .setFsVolume(providedVolume)
-              .setConf(conf).build();
-
-          ReplicaInfo oldReplica =
-              volumeMap.get(bpid, newReplica.getBlockId());
+              .setConf(conf)
+              .build();
+          // check if the replica already exists
+          ReplicaInfo oldReplica = volumeMap.get(bpid, newReplica.getBlockId());
           if (oldReplica == null) {
             volumeMap.add(bpid, newReplica);
             bpVolumeMap.add(bpid, newReplica);
           } else {
-            throw new IOException(
-                "A block with id " + newReplica.getBlockId() +
-                " already exists in the volumeMap");
+            throw new IOException("A block with id " + newReplica.getBlockId()
+                + " already exists in the volumeMap");
           }
         }
       }
@@ -527,4 +534,42 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
     throw new UnsupportedOperationException(
         "ProvidedVolume does not yet support writes");
   }
+
+  private static URI getAbsoluteURI(URI uri) {
+    if (!uri.isAbsolute()) {
+      // a URI that is not absolute implies a local file,
+      // so normalize it to the file: scheme
+      return StorageLocation.normalizeFileURI(uri);
+    } else {
+      return uri;
+    }
+  }
+  /**
+   * @param volumeURI URI of the volume
+   * @param blockURI URI of the block
+   * @return true if the {@code blockURI} can belong to the volume or both URIs
+   * are null.
+   */
+  @VisibleForTesting
+  public static boolean containsBlock(URI volumeURI, URI blockURI) {
+    if (volumeURI == null && blockURI == null) {
+      return true;
+    }
+    if (volumeURI == null || blockURI == null) {
+      return false;
+    }
+    volumeURI = getAbsoluteURI(volumeURI);
+    blockURI = getAbsoluteURI(blockURI);
+    return !volumeURI.relativize(blockURI).equals(blockURI);
+  }
+
+  @VisibleForTesting
+  void setFileRegionProvider(String bpid, FileRegionProvider provider)
+      throws IOException {
+    ProvidedBlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp == null) {
+      throw new IOException("block pool " + bpid + " is not found");
+    }
+    bp.setFileRegionProvider(provider);
+  }
 }
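
The net effect on getVolumeMap is that FileRegions whose paths fall outside the volume's baseURI are now skipped instead of being materialized as replicas. A short illustration of the boundary cases for a remote store, compiled against the patched tree (the s3a URIs are invented but mirror the new tests below):

    import java.net.URI;
    import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ProvidedVolumeImpl;

    public class RemoteContainsBlockDemo {
      public static void main(String[] args) throws Exception {
        URI volume = new URI("s3a:/bucket1/dir1/");
        System.out.println(ProvidedVolumeImpl.containsBlock(
            volume, new URI("s3a:/bucket1/dir1/temp.txt")));  // true: under the volume
        System.out.println(ProvidedVolumeImpl.containsBlock(
            volume, new URI("s3a:/bucket1/temp.txt")));       // false: outside dir1
        System.out.println(ProvidedVolumeImpl.containsBlock(
            volume, new URI("s3a:/bucket2/dir1/temp.txt")));  // false: different bucket
      }
    }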

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d921dee5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
index 4753235..8782e71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
@@ -31,6 +31,8 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.nio.ByteBuffer;
 import java.nio.channels.Channels;
 import java.nio.channels.ReadableByteChannel;
@@ -174,15 +176,26 @@ public class TestProvidedImpl {
     private Configuration conf;
     private int minId;
     private int numBlocks;
+    private Iterator<FileRegion> suppliedIterator;
 
     TestFileRegionProvider() {
-      minId = MIN_BLK_ID;
-      numBlocks = NUM_PROVIDED_BLKS;
+      this(null, MIN_BLK_ID, NUM_PROVIDED_BLKS);
+    }
+
+    TestFileRegionProvider(Iterator<FileRegion> iterator, int minId,
+        int numBlocks) {
+      this.suppliedIterator = iterator;
+      this.minId = minId;
+      this.numBlocks = numBlocks;
     }
 
     @Override
     public Iterator<FileRegion> iterator() {
-      return new TestFileRegionIterator(providedBasePath, minId, numBlocks);
+      if (suppliedIterator == null) {
+        return new TestFileRegionIterator(providedBasePath, minId, numBlocks);
+      } else {
+        return suppliedIterator;
+      }
     }
 
     @Override
@@ -503,4 +516,90 @@ public class TestProvidedImpl {
       }
     }
   }
+
+  private int getBlocksInProvidedVolumes(String basePath, int numBlocks,
+      int minBlockId) throws IOException {
+    TestFileRegionIterator fileRegionIterator =
+        new TestFileRegionIterator(basePath, minBlockId, numBlocks);
+    int totalBlocks = 0;
+    for (int i = 0; i < providedVolumes.size(); i++) {
+      ProvidedVolumeImpl vol = (ProvidedVolumeImpl) providedVolumes.get(i);
+      vol.setFileRegionProvider(BLOCK_POOL_IDS[CHOSEN_BP_ID],
+          new TestFileRegionProvider(fileRegionIterator, minBlockId,
+              numBlocks));
+      ReplicaMap volumeMap = new ReplicaMap(new AutoCloseableLock());
+      vol.getVolumeMap(BLOCK_POOL_IDS[CHOSEN_BP_ID], volumeMap, null);
+      totalBlocks += volumeMap.size(BLOCK_POOL_IDS[CHOSEN_BP_ID]);
+    }
+    return totalBlocks;
+  }
+
+  /**
+   * Tests whether the FileRegions provided by the FileRegionProvider
+   * can belong to the ProvidedVolume.
+   * @throws IOException
+   */
+  @Test
+  public void testProvidedVolumeContents() throws IOException {
+    int expectedBlocks = 5;
+    int minId = 0;
+    // use a path which has the same prefix as providedBasePath;
+    // all these blocks can belong to the provided volume
+    int blocksFound = getBlocksInProvidedVolumes(providedBasePath + "/test1/",
+        expectedBlocks, minId);
+    assertEquals(
+        "Number of blocks in provided volumes should be " + expectedBlocks,
+        expectedBlocks, blocksFound);
+    blocksFound = getBlocksInProvidedVolumes(
+        "file:/" + providedBasePath + "/test1/", expectedBlocks, minId);
+    assertEquals(
+        "Number of blocks in provided volumes should be " + expectedBlocks,
+        expectedBlocks, blocksFound);
+    // use a path that is entirely different from the providedBasePath;
+    // none of these blocks can belong to the volume
+    blocksFound =
+        getBlocksInProvidedVolumes("randomtest1/", expectedBlocks, minId);
+    assertEquals("Number of blocks in provided volumes should be 0", 0,
+        blocksFound);
+  }
+
+  @Test
+  public void testProvidedVolumeContainsBlock() throws URISyntaxException {
+    assertEquals(true, ProvidedVolumeImpl.containsBlock(null, null));
+    assertEquals(false,
+        ProvidedVolumeImpl.containsBlock(new URI("file:/a"), null));
+    assertEquals(true,
+        ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/c/"),
+            new URI("file:/a/b/c/d/e.file")));
+    assertEquals(true,
+        ProvidedVolumeImpl.containsBlock(new URI("/a/b/c/"),
+            new URI("file:/a/b/c/d/e.file")));
+    assertEquals(true,
+        ProvidedVolumeImpl.containsBlock(new URI("/a/b/c"),
+            new URI("file:/a/b/c/d/e.file")));
+    assertEquals(true,
+        ProvidedVolumeImpl.containsBlock(new URI("/a/b/c/"),
+            new URI("/a/b/c/d/e.file")));
+    assertEquals(true,
+        ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/c/"),
+            new URI("/a/b/c/d/e.file")));
+    assertEquals(false,
+        ProvidedVolumeImpl.containsBlock(new URI("/a/b/e"),
+            new URI("file:/a/b/c/d/e.file")));
+    assertEquals(false,
+        ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/e"),
+            new URI("file:/a/b/c/d/e.file")));
+    assertEquals(true,
+        ProvidedVolumeImpl.containsBlock(new URI("s3a:/bucket1/dir1/"),
+            new URI("s3a:/bucket1/dir1/temp.txt")));
+    assertEquals(false,
+        ProvidedVolumeImpl.containsBlock(new URI("s3a:/bucket2/dir1/"),
+            new URI("s3a:/bucket1/dir1/temp.txt")));
+    assertEquals(false,
+        ProvidedVolumeImpl.containsBlock(new URI("s3a:/bucket1/dir1/"),
+            new URI("s3a:/bucket1/temp.txt")));
+    assertEquals(false,
+        ProvidedVolumeImpl.containsBlock(new URI("/bucket1/dir1/"),
+            new URI("s3a:/bucket1/dir1/temp.txt")));
+  }
 }


