hadoop-common-commits mailing list archives

From a..@apache.org
Subject hadoop git commit: HDFS-7240. DataNode should filter the set of NameSpaceInfos passed to Datasets. (Contributed by Arpit Agarwal)
Date Wed, 01 Jul 2015 21:00:26 GMT
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 652e21a35 -> 71dfa130c


HDFS-7240. DataNode should filter the set of NameSpaceInfos passed to Datasets. (Contributed by Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71dfa130
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71dfa130
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71dfa130

Branch: refs/heads/HDFS-7240
Commit: 71dfa130cc5cb543d340c86d57501a52800f0f93
Parents: 652e21a
Author: Arpit Agarwal <arp@apache.org>
Authored: Wed Jul 1 13:59:47 2015 -0700
Committer: Arpit Agarwal <arp@apache.org>
Committed: Wed Jul 1 13:59:47 2015 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/CHANGES-HDFS-7240.txt           |   3 +
 .../hdfs/server/datanode/BlockPoolManager.java  |   9 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   | 107 +++++++++++++++----
 .../hdfs/server/datanode/DataStorage.java       |   6 +-
 .../hdfs/server/datanode/DataXceiver.java       |  11 +-
 .../server/datanode/fsdataset/DatasetSpi.java   |  16 ++-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  29 +++--
 .../server/datanode/SimulatedFSDataset.java     |   7 ++
 .../datanode/TestDataNodeHotSwapVolumes.java    |   2 +
 .../extdataset/ExternalDatasetImpl.java         |   9 +-
 .../fsdataset/impl/TestFsDatasetImpl.java       |   6 +-
 11 files changed, 154 insertions(+), 51 deletions(-)
----------------------------------------------------------------------
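
In outline, the patch does two things: volume preparation moves out of
FsDatasetImpl and into the DataNode, and each dataset now receives only
the NamespaceInfos whose NodeType it serves. A condensed sketch of the
filtering step, using identifiers from the DataNode diff below (assumes
"datasets" maps NodeType to DatasetSpi, as on this branch):

    // Sketch only, not part of the patch: matching namespaces to
    // datasets by NodeType before calling addVolume.
    List<NamespaceInfo> nsInfos = blockPoolManager.getAllNamespaceInfos();
    for (final DatasetSpi<?> dataset : datasets.values()) {
      final List<NamespaceInfo> filteredNsInfos = new ArrayList<>();
      for (final NamespaceInfo nsInfo : nsInfos) {
        // A namespace is handed to this dataset only if the dataset
        // is the one registered for that namespace's NodeType.
        if (datasets.get(nsInfo.getNodeType()) == dataset) {
          filteredNsInfos.add(nsInfo);
        }
      }
      // Each dataset then sees only the namespaces it serves.
    }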


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71dfa130/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-7240.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-7240.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-7240.txt
index 82e4718..123fbc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-7240.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-7240.txt
@@ -23,3 +23,6 @@
     HDFS-8644. OzoneHandler : Add volume handler. (Anu Engineer via
     Arpit Agarwal)
 
+    HDFS-8661. DataNode should filter the set of NameSpaceInfos passed to
+    Datasets. (Arpit Agarwal)
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71dfa130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
index 28a6cc7..c4d9c32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import com.google.common.base.Joiner;
@@ -80,6 +81,14 @@ class BlockPoolManager {
   synchronized List<BPOfferService> getAllNamenodeThreads() {
     return Collections.unmodifiableList(offerServices);
   }
+
+  synchronized List<NamespaceInfo> getAllNamespaceInfos() {
+    List<NamespaceInfo> nsInfos = new ArrayList<>();
+    for (final BPOfferService bpos : getAllNamenodeThreads()) {
+      nsInfos.add(bpos.getNamespaceInfo());
+    }
+    return nsInfos;
+  }
       
   synchronized BPOfferService get(String bpid) {
     return bpByBlockPoolId.get(bpid);
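
The new helper replaces an inline loop in DataNode#refreshVolumes (see
the DataNode diff below); callers obtain every known namespace in one
call. Typical usage:

    List<NamespaceInfo> nsInfos = blockPoolManager.getAllNamespaceInfos();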

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71dfa130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 921892d..5581d71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -77,6 +77,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
@@ -150,7 +151,9 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.datanode.DataStorage.VolumeBuilder;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -590,6 +593,46 @@ public class DataNode extends ReconfigurableBase
   }
 
   /**
+   * Prepare multiple volumes for addition before notifying the datasets.
+   * Notify each dataset of each failed volume.
+   *
+   * @param locations StorageLocations corresponding to each new volume.
+   * @param nsInfos List of all NamespaceInfos that the
+   *                BlockPoolManager knows of.
+   * @param errorMessageBuilder StringBuilder to accumulate error messages.
+   * @return Map of successfully prepared volumes, keyed by StorageLocation.
+   */
+  private Map<StorageLocation, StorageDirectory> prepareVolumesForAddition(
+      List<StorageLocation> locations,
+      List<NamespaceInfo> nsInfos,
+      StringBuilder errorMessageBuilder) {
+
+    Map<StorageLocation, StorageDirectory> filteredLocations = new HashMap<>();
+    for (final StorageLocation location : locations) {
+      try {
+        // Prepare the volume in DataStorage
+        VolumeBuilder builder =
+            storage.prepareVolume(this, location.getFile(), nsInfos);
+        StorageDirectory sd = builder.getStorageDirectory();
+        builder.build();
+        filteredLocations.put(location, sd);
+      } catch (IOException ioe) {
+        LOG.warn("Failed to add volume " + location);
+
+        errorMessageBuilder.append(
+            String.format("FAILED TO ADD: %s: %s%n",
+                location, ioe.getMessage()));
+
+        for (DatasetSpi<?> dataset : datasets.values()) {
+          dataset.recordFailedVolume(location);
+        }
+      }
+    }
+
+    return filteredLocations;
+  }
+
+  /**
    * Attempts to reload data volumes with new configuration.
    * @param newVolumes a comma separated string that specifies the data volumes.
    * @throws IOException on error. If an IOException is thrown, some new volumes
@@ -602,7 +645,7 @@ public class DataNode extends ReconfigurableBase
     int numOldDataDirs = dataDirs.size();
     ChangedVolumes changedVolumes = parseChangedVolumes(newVolumes);
     StringBuilder errorMessageBuilder = new StringBuilder();
-    List<String> effectiveVolumes = Lists.newArrayList();
+    Set<String> effectiveVolumes = new TreeSet<>();
     for (StorageLocation sl : changedVolumes.unchangedLocations) {
       effectiveVolumes.add(sl.toString());
     }
@@ -617,32 +660,52 @@ public class DataNode extends ReconfigurableBase
             Joiner.on(",").join(changedVolumes.newLocations));
 
         // Add volumes for each Namespace
-        final List<NamespaceInfo> nsInfos = Lists.newArrayList();
-        for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
-          nsInfos.add(bpos.getNamespaceInfo());
-        }
         ExecutorService service = Executors.newFixedThreadPool(
             changedVolumes.newLocations.size());
-        List<Future<IOException>> exceptions = Lists.newArrayList();
-        for (final StorageLocation location : changedVolumes.newLocations) {
-          exceptions.add(service.submit(new Callable<IOException>() {
-            @Override
-            public IOException call() {
-              try {
-                for (DatasetSpi<?> dataset : datasets.values()) {
-                  dataset.addVolume(location, nsInfos);
-                }
-              } catch (IOException e) {
-                return e;
-              }
-              return null;
+
+        List<NamespaceInfo> nsInfos = blockPoolManager.getAllNamespaceInfos();
+        Map<StorageLocation, StorageDirectory> filteredLocations =
+            prepareVolumesForAddition(
+                changedVolumes.newLocations, nsInfos, errorMessageBuilder);
+
+        Map<StorageLocation, Future<IOException>> exceptionsMap =
+            new HashMap<>();
+        for (final DatasetSpi<?> dataset : datasets.values()) {
+          // Find the storage services matching the NodeType for this dataset.
+          final List<NamespaceInfo> filteredNsInfos = new ArrayList<>();
+          for (final NamespaceInfo nsInfo : nsInfos) {
+            if (datasets.get(nsInfo.getNodeType()) == dataset) {
+              filteredNsInfos.add(nsInfo);
             }
-          }));
+          }
+
+          // Add each volume to the dataset.
+          for (final Map.Entry<StorageLocation, StorageDirectory> entry :
+              filteredLocations.entrySet()) {
+            exceptionsMap.put(
+                entry.getKey(),
+                service.submit(new Callable<IOException>() {
+                  @Override
+                  public IOException call() {
+                    try {
+                      dataset.addVolume(
+                          entry.getKey(), entry.getValue(), filteredNsInfos);
+                    } catch (IOException e) {
+                      return e;
+                    }
+                    return null;
+                  }
+                }));
+          }
         }
 
-        for (int i = 0; i < changedVolumes.newLocations.size(); i++) {
-          StorageLocation volume = changedVolumes.newLocations.get(i);
-          Future<IOException> ioExceptionFuture = exceptions.get(i);
+        // TODO: Unlock volumes which could not be added to any dataset.
+
+        for (Map.Entry<StorageLocation, Future<IOException>> exceptionEntry :
+            exceptionsMap.entrySet()) {
+          StorageLocation volume = exceptionEntry.getKey();
+          Future<IOException> ioExceptionFuture = exceptionEntry.getValue();
+
           try {
             IOException ioe = ioExceptionFuture.get();
             if (ioe != null) {
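
Taken together, the refreshVolumes rewrite splits hot-swap volume
addition into two phases: prepare the volume once in DataStorage, then
attach the prepared directory in every matching dataset. A minimal
sketch of the sequence for one new volume (threading and error handling
from the diff above omitted):

    // Phase 1: prepare and commit the on-disk layout exactly once,
    // in DataNode#prepareVolumesForAddition.
    VolumeBuilder builder =
        storage.prepareVolume(this, location.getFile(), nsInfos);
    StorageDirectory sd = builder.getStorageDirectory();
    builder.build();

    // Phase 2: hand the prepared directory to each dataset, along
    // with only the namespaces that dataset serves.
    dataset.addVolume(location, sd, filteredNsInfos);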

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71dfa130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 76789f9..5c26805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -307,7 +307,7 @@ public class DataStorage extends Storage {
    * Note that if there is IOException, the state of DataStorage is not modified.
    */
   public VolumeBuilder prepareVolume(DataNode datanode, File volume,
-      List<NamespaceInfo> nsInfos) throws IOException {
+      Collection<NamespaceInfo> nsInfos) throws IOException {
     if (containsStorageDir(volume)) {
       final String errorMessage = "Storage directory is in use";
       LOG.warn(errorMessage + ".");
@@ -315,10 +315,10 @@ public class DataStorage extends Storage {
     }
 
     StorageDirectory sd = loadStorageDirectory(
-        datanode, nsInfos.get(0), volume, StartupOption.HOTSWAP);
+        datanode, nsInfos.iterator().next(), volume, StartupOption.HOTSWAP);
     VolumeBuilder builder =
         new VolumeBuilder(this, sd);
-    for (NamespaceInfo nsInfo : nsInfos) {
+    for (final NamespaceInfo nsInfo : nsInfos) {
       List<File> bpDataDirs = Lists.newArrayList();
       bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(
           nsInfo.getBlockPoolID(), new File(volume, STORAGE_DIR_CURRENT)));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71dfa130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 9702691..75b0d26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -313,9 +313,14 @@ class DataXceiver extends Receiver implements Runnable {
               "anything but a UNIX domain socket.");
         }
         if (slotId != null) {
-          final String bpid = blk.getBlockPoolId();
-          FsDatasetSpi<?> dataset = (FsDatasetSpi<?>) datanode.getDataset(bpid);
-          boolean isCached = dataset.isCached(bpid, blk.getBlockId());
+          final FsDatasetSpi<?> dataset =
+              (FsDatasetSpi<?>) datanode.getDataset(blk.getBlockPoolId());
+          if (dataset == null) {
+            throw new IOException(
+                "Unknown or uninitialized blockpool " + blk.getBlockPoolId());
+          }
+          boolean isCached = dataset.isCached(
+              blk.getBlockPoolId(), blk.getBlockId());
           datanode.shortCircuitRegistry.registerSlot(
               ExtendedBlockId.fromExtendedBlock(blk), slotId, isCached);
           registeredSlotId = slotId;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71dfa130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DatasetSpi.java
index 5cd52db..f9c61cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DatasetSpi.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -88,9 +89,9 @@ public interface DatasetSpi<V extends VolumeSpi> {
 
 
   /**
-   * Add a new volume to the FsDataset.<p/>
+   * Add a new volume to the Dataset.<p/>
    *
-   * If the FSDataset supports block scanning, this function registers
+   * If the Dataset supports block scanning, this function registers
    * the new volume with the block scanner.
    *
    * @param location      The storage location for the new volume.
@@ -98,9 +99,18 @@ public interface DatasetSpi<V extends VolumeSpi> {
    */
   void addVolume(
       final StorageLocation location,
+      final Storage.StorageDirectory sd,
       final List<NamespaceInfo> nsInfos) throws IOException;
 
   /**
+   * Record a failure to bring up a volume. Primarily for reporting
+   * purposes.
+   *
+   * @param location StorageLocation corresponding to the failed volume.
+   */
+  void recordFailedVolume(final StorageLocation location);
+
+  /**
    * Removes a collection of volumes from FsDataset.
    *
    * If the FSDataset supports block scanning, this function removes
@@ -148,7 +158,7 @@ public interface DatasetSpi<V extends VolumeSpi> {
   Set<File> checkDataDir();
 
   /**
-   * Shutdown the FSDataset
+   * Shutdown the Dataset
    */
   void shutdown();
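
For DatasetSpi implementers, the contract change is twofold: addVolume
now receives a StorageDirectory that the DataNode has already prepared,
and recordFailedVolume must be provided, possibly as a no-op. A
skeletal conforming implementation (hypothetical class, shown only to
illustrate the new signatures):

    public class SketchDataset implements DatasetSpi<SketchVolume> {
      @Override
      public void addVolume(final StorageLocation location,
                            final Storage.StorageDirectory sd,
                            final List<NamespaceInfo> nsInfos)
          throws IOException {
        // 'sd' was prepared by DataStorage#prepareVolume; attach it.
      }

      @Override
      public void recordFailedVolume(final StorageLocation location) {
        // A no-op is acceptable for datasets that do not track
        // volume failures.
      }

      // ... remaining DatasetSpi methods elided ...
    }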
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71dfa130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 2b8abce..7cb6eb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -47,6 +47,7 @@ import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -411,23 +412,19 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override
+  public void recordFailedVolume(final StorageLocation location) {
+    volumes.addVolumeFailureInfo(new VolumeFailureInfo(
+        location.getFile().getAbsolutePath(), Time.now()));
+  }
+
+  @Override
   public void addVolume(final StorageLocation location,
-      final List<NamespaceInfo> nsInfos)
+                        final Storage.StorageDirectory sd,
+                        final List<NamespaceInfo> nsInfos)
       throws IOException {
-    final File dir = location.getFile();
-
-    // Prepare volume in DataStorage
-    final DataStorage.VolumeBuilder builder;
-    try {
-      builder = dataStorage.prepareVolume(datanode, location.getFile(), nsInfos);
-    } catch (IOException e) {
-      volumes.addVolumeFailureInfo(new VolumeFailureInfo(
-          location.getFile().getAbsolutePath(), Time.now()));
-      throw e;
-    }
-
-    final Storage.StorageDirectory sd = builder.getStorageDirectory();
 
+    LOG.info("FsDatasetImpl: Adding volume " + location.getFile() +
+             " with namespaces " + Joiner.on("; ").join(nsInfos));
     StorageType storageType = location.getStorageType();
     final FsVolumeImpl fsVolume =
         createFsVolume(sd.getStorageUuid(), sd.getCurrentDir(), storageType);
@@ -457,7 +454,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     final FsVolumeReference ref = fsVolume.obtainReference();
     setupAsyncLazyPersistThread(fsVolume);
 
-    builder.build();
     synchronized (this) {
       volumeMap.addAll(tempVolumeMap);
       storageMap.put(sd.getStorageUuid(),
@@ -467,7 +463,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       asyncDiskService.addVolume(sd.getCurrentDir());
       volumes.addVolume(ref);
     }
-    LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
+    LOG.info("Added volume - " + location.getFile() +
+                 ", StorageType: " + storageType);
   }
 
   /**
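
With preparation factored out, FsDatasetImpl#addVolume is attach-only,
and failure reporting moves behind the new recordFailedVolume hook so
the DataNode can notify every dataset about a volume that never became
usable, not just the dataset that happened to be adding it. The
caller-side shape of that contract (mirroring the catch block in
DataNode#prepareVolumesForAddition above):

    } catch (IOException ioe) {
      // Notify all datasets so volume-failure accounting stays
      // consistent across NodeTypes.
      for (DatasetSpi<?> dataset : datasets.values()) {
        dataset.recordFailedVolume(location);
      }
    }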

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71dfa130/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 9404d74..9def302 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.*;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
@@ -1294,11 +1295,17 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   @Override
   public void addVolume(
       final StorageLocation location,
+      final Storage.StorageDirectory sd,
       final List<NamespaceInfo> nsInfos) throws IOException {
     throw new UnsupportedOperationException();
   }
 
   @Override
+  public void recordFailedVolume(StorageLocation location) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public DatanodeStorage getStorage(final String storageUuid) {
     return storageUuid.equals(storage.getStorageUuid()) ?
         storage.dnStorage :

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71dfa130/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index a396b0e..a9429c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -287,6 +287,8 @@ public class TestDataNodeHotSwapVolumes {
     // Verify the configuration value is appropriately set.
     String[] effectiveDataDirs = conf.get(DFS_DATANODE_DATA_DIR_KEY).split(",");
     String[] expectDataDirs = newDataDir.split(",");
+    Arrays.sort(effectiveDataDirs);
+    Arrays.sort(expectDataDirs);
     assertEquals(expectDataDirs.length, effectiveDataDirs.length);
     for (int i = 0; i < expectDataDirs.length; i++) {
       StorageLocation expectLocation = StorageLocation.parse(expectDataDirs[i]);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71dfa130/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index 0c2c610..29bb04b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.*;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -56,11 +57,17 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   }
 
   @Override
-  public void addVolume(StorageLocation location, List<NamespaceInfo> nsInfos) throws IOException {
+  public void addVolume(final StorageLocation location,
+                        final Storage.StorageDirectory sd,
+                        final List<NamespaceInfo> nsInfos) throws IOException {
 
   }
 
   @Override
+  public void recordFailedVolume(StorageLocation location) {
+  }
+
+  @Override
   public void removeVolumes(Set<File> volumes, boolean clearFailure) {
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71dfa130/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 59c7ade..c23e8df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -180,7 +180,7 @@ public class TestFsDatasetImpl {
           anyListOf(NamespaceInfo.class)))
           .thenReturn(builder);
 
-      dataset.addVolume(loc, nsInfos);
+      dataset.addVolume(loc, builder.getStorageDirectory(), nsInfos);
     }
 
     assertEquals(totalVolumes, getNumVolumes());
@@ -259,7 +259,7 @@ public class TestFsDatasetImpl {
         anyListOf(NamespaceInfo.class)))
         .thenReturn(builder);
 
-    dataset.addVolume(loc, nsInfos);
+    dataset.addVolume(loc, sd, nsInfos);
     assertEquals(numExistingVolumes + 1, getNumVolumes());
 
     when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
@@ -354,7 +354,7 @@ public class TestFsDatasetImpl {
     }
 
     try {
-      spyDataset.addVolume(location, nsInfos);
+      spyDataset.addVolume(location, sd, nsInfos);
       fail("Expect to throw MultipleIOException");
     } catch (MultipleIOException e) {
     }

