hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1535158 - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/datanode/ src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/
Date: Wed, 23 Oct 2013 20:28:54 GMT
Author: arp
Date: Wed Oct 23 20:28:54 2013
New Revision: 1535158

URL: http://svn.apache.org/r1535158
Log:
HDFS-5401. Fix NPE in Directory Scanner.
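
For readers skimming the diff below: the patch re-keys FsDatasetImpl's per-volume
replica map from FsVolumeImpl objects to storage ID strings, and threads the
storage ID and storage type through the reportBadBlocks() path. A minimal,
self-contained sketch of the re-keying follows; the Volume and ReplicaMap classes
here are hypothetical stand-ins for FsVolumeImpl and the real ReplicaMap, and the
failure mode shown is one plausible way a volume-object key can miss:

    import java.util.HashMap;
    import java.util.Map;

    class ReKeyingSketch {
      // Stand-in for FsVolumeImpl: no equals()/hashCode() override, so
      // HashMap lookups use object identity.
      static class Volume {
        private final String storageId;
        Volume(String storageId) { this.storageId = storageId; }
        String getStorageID() { return storageId; }
      }

      // Stand-in for the real ReplicaMap; replica bookkeeping elided.
      static class ReplicaMap { }

      public static void main(String[] args) {
        // Before: keyed by volume object. A second Volume instance that
        // describes the same storage is a different key, so get() returns
        // null and any chained .add()/.remove() call throws an NPE.
        Map<Volume, ReplicaMap> byVolume = new HashMap<Volume, ReplicaMap>();
        byVolume.put(new Volume("DS-1"), new ReplicaMap());
        System.out.println(byVolume.get(new Volume("DS-1"))); // null

        // After: keyed by the stable storage ID string, as in this commit.
        // String keys compare by value, so the lookup cannot miss for a
        // storage that was registered.
        Map<String, ReplicaMap> byStorageId = new HashMap<String, ReplicaMap>();
        Volume v = new Volume("DS-1");
        byStorageId.put(v.getStorageID(), new ReplicaMap());
        System.out.println(byStorageId.get("DS-1")); // the registered map
      }
    }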

Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt?rev=1535158&r1=1535157&r2=1535158&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt Wed Oct 23 20:28:54 2013
@@ -45,3 +45,5 @@ IMPROVEMENTS:
 
     HDFS-5390. Send one incremental block report per storage directory.
     (Arpit Agarwal)
+
+    HDFS-5401. Fix NPE in Directory Scanner. (Arpit Agarwal)

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java?rev=1535158&r1=1535157&r2=1535158&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java Wed Oct 23 20:28:54 2013
@@ -27,6 +27,7 @@ import java.util.concurrent.CopyOnWriteA
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -180,10 +181,11 @@ class BPOfferService {
     }
   }
   
-  void reportBadBlocks(ExtendedBlock block) {
+  void reportBadBlocks(ExtendedBlock block,
+                       String storageUuid, StorageType storageType) {
     checkBlock(block);
     for (BPServiceActor actor : bpServices) {
-      actor.reportBadBlocks(block);
+      actor.reportBadBlocks(block, storageUuid, storageType);
     }
   }
   

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java?rev=1535158&r1=1535157&r2=1535158&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java Wed Oct 23 20:28:54 2013
@@ -28,6 +28,7 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -237,12 +238,18 @@ class BPServiceActor implements Runnable
     resetBlockReportTime = true; // reset future BRs for randomness
   }
 
-  void reportBadBlocks(ExtendedBlock block) {
+  void reportBadBlocks(ExtendedBlock block,
+      String storageUuid, StorageType storageType) {
     if (bpRegistration == null) {
       return;
     }
     DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
-    LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) }; 
+    String[] uuids = { storageUuid };
+    StorageType[] types = { storageType };
+    // TODO: Corrupt flag is set to false for compatibility. We can probably
+    // set it to true here.
+    LocatedBlock[] blocks = {
+        new LocatedBlock(block, dnArr, uuids, types, -1, false) };
     
     try {
       bpNamenode.reportBadBlocks(blocks);  
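
The hunk above packs the reporting datanode, its storage UUID, and its storage
type into parallel, index-aligned arrays before handing them to the LocatedBlock
sent to the NameNode. A small stand-alone illustration of that shape, where
plain Strings stand in for DatanodeInfo and StorageType, and LocatedBlockSketch
is hypothetical rather than Hadoop's LocatedBlock:

    class LocatedBlockSketch {
      final String block;          // stands in for ExtendedBlock
      final String[] locs;         // stands in for DatanodeInfo[]
      final String[] storageIDs;   // locs[i] holds the replica on storageIDs[i]
      final String[] storageTypes; // ...which is of type storageTypes[i]
      final long offset;           // -1 here: offset within the file is unknown
      final boolean corrupt;       // kept false for compatibility, per the TODO

      LocatedBlockSketch(String block, String[] locs, String[] storageIDs,
                         String[] storageTypes, long offset, boolean corrupt) {
        this.block = block;
        this.locs = locs;
        this.storageIDs = storageIDs;
        this.storageTypes = storageTypes;
        this.offset = offset;
        this.corrupt = corrupt;
      }

      public static void main(String[] args) {
        // One bad replica: a single datanode/storage triple mirroring the
        // one-element arrays built in reportBadBlocks() above.
        LocatedBlockSketch b = new LocatedBlockSketch(
            "blk_1073741825", new String[] { "dn1:50010" },
            new String[] { "DS-1" }, new String[] { "DISK" }, -1L, false);
        System.out.println(b.block + " on " + b.locs[0]
            + " storage " + b.storageIDs[0] + " (" + b.storageTypes[0] + ")");
      }
    }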

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1535158&r1=1535157&r2=1535158&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Oct 23 20:28:54 2013
@@ -559,7 +559,9 @@ public class DataNode extends Configured
    */
   public void reportBadBlocks(ExtendedBlock block) throws IOException{
     BPOfferService bpos = getBPOSForBlock(block);
-    bpos.reportBadBlocks(block);
+    FsVolumeSpi volume = getFSDataset().getVolume(block);
+    bpos.reportBadBlocks(
+        block, volume.getStorageID(), volume.getStorageType());
   }
 
   /**
@@ -1265,8 +1267,10 @@ public class DataNode extends Configured
     // Check if NN recorded length matches on-disk length 
     long onDiskLength = data.getLength(block);
     if (block.getNumBytes() > onDiskLength) {
+      FsVolumeSpi volume = getFSDataset().getVolume(block);
       // Shorter on-disk len indicates corruption so report NN the corrupt block
-      bpos.reportBadBlocks(block);
+      bpos.reportBadBlocks(
+          block, volume.getStorageID(), volume.getStorageType());
       LOG.warn("Can't replicate block " + block
           + " because on-disk length " + onDiskLength 
           + " is shorter than NameNode recorded length " + block.getNumBytes());

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java?rev=1535158&r1=1535157&r2=1535158&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java Wed Oct 23 20:28:54 2013
@@ -198,7 +198,9 @@ class FsDatasetImpl implements FsDataset
   //                 two maps. This might require some refactoring
   //                 rewrite of FsDatasetImpl.
   final ReplicaMap volumeMap;
-  final Map<FsVolumeImpl, ReplicaMap> perVolumeReplicaMap;
+
+  // Map from StorageID to ReplicaMap.
+  final Map<String, ReplicaMap> perVolumeReplicaMap;
 
 
   // Used for synchronizing access to usage stats
@@ -249,7 +251,7 @@ class FsDatasetImpl implements FsDataset
       LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
     }
     volumeMap = new ReplicaMap(this);
-    perVolumeReplicaMap = new HashMap<FsVolumeImpl, ReplicaMap>();
+    perVolumeReplicaMap = new HashMap<String, ReplicaMap>();
 
     @SuppressWarnings("unchecked")
     final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
@@ -628,7 +630,7 @@ class FsDatasetImpl implements FsDataset
     
     // Replace finalized replica by a RBW replica in replicas map
     volumeMap.add(bpid, newReplicaInfo);
-    perVolumeReplicaMap.get(v).add(bpid, newReplicaInfo);
+    perVolumeReplicaMap.get(v.getStorageID()).add(bpid, newReplicaInfo);
     
     return newReplicaInfo;
   }
@@ -759,7 +761,7 @@ class FsDatasetImpl implements FsDataset
     ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(), 
         b.getGenerationStamp(), v, f.getParentFile());
     volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
-    perVolumeReplicaMap.get(v).add(b.getBlockPoolId(), newReplicaInfo);
+    perVolumeReplicaMap.get(v.getStorageID()).add(b.getBlockPoolId(), newReplicaInfo);
     return newReplicaInfo;
   }
   
@@ -878,7 +880,7 @@ class FsDatasetImpl implements FsDataset
     rbw.setBytesAcked(visible);
     // overwrite the RBW in the volume map
     volumeMap.add(b.getBlockPoolId(), rbw);
-    perVolumeReplicaMap.get(v).add(b.getBlockPoolId(), rbw);
+    perVolumeReplicaMap.get(v.getStorageID()).add(b.getBlockPoolId(), rbw);
     return rbw;
   }
 
@@ -898,7 +900,7 @@ class FsDatasetImpl implements FsDataset
     ReplicaInPipeline newReplicaInfo = new ReplicaInPipeline(b.getBlockId(), 
         b.getGenerationStamp(), v, f.getParentFile());
     volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
-    perVolumeReplicaMap.get(v).add(b.getBlockPoolId(), newReplicaInfo);
+    perVolumeReplicaMap.get(v.getStorageID()).add(b.getBlockPoolId(), newReplicaInfo);
     
     return newReplicaInfo;
   }
@@ -967,7 +969,8 @@ class FsDatasetImpl implements FsDataset
       newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile());
     }
     volumeMap.add(bpid, newReplicaInfo);
-    perVolumeReplicaMap.get(newReplicaInfo.getVolume()).add(bpid, newReplicaInfo);
+    perVolumeReplicaMap.get(newReplicaInfo.getVolume().getStorageID())
+        .add(bpid, newReplicaInfo);
     return newReplicaInfo;
   }
 
@@ -981,7 +984,7 @@ class FsDatasetImpl implements FsDataset
    if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
       // remove from volumeMap
       volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
-      perVolumeReplicaMap.get((FsVolumeImpl) replicaInfo.getVolume())
+      perVolumeReplicaMap.get(replicaInfo.getVolume().getStorageID())
           .remove(b.getBlockPoolId(), b.getLocalBlock());
       
       // delete the on-disk temp file
@@ -1064,7 +1067,7 @@ class FsDatasetImpl implements FsDataset
         new HashMap<String, BlockListAsLongs>();
 
     for (FsVolumeImpl v : getVolumes()) {
-      ReplicaMap rMap = perVolumeReplicaMap.get(v);
+      ReplicaMap rMap = perVolumeReplicaMap.get(v.getStorageID());
       BlockListAsLongs blockList = getBlockReportWithReplicaMap(bpid, rMap);
       blockReportMap.put(v.getStorageID(), blockList);
     }
@@ -1212,7 +1215,7 @@ class FsDatasetImpl implements FsDataset
           v.clearPath(bpid, parent);
         }
         volumeMap.remove(bpid, invalidBlks[i]);
-        perVolumeReplicaMap.get(v).remove(bpid, invalidBlks[i]);
+        perVolumeReplicaMap.get(v.getStorageID()).remove(bpid, invalidBlks[i]);
       }
 
       // Delete the block asynchronously to make sure we can do it fast enough
@@ -1274,7 +1277,8 @@ class FsDatasetImpl implements FsDataset
               LOG.warn("Removing replica " + bpid + ":" + b.getBlockId()
                   + " on failed volume " + fv.getCurrentDir().getAbsolutePath());
               ib.remove();
-              perVolumeReplicaMap.get(fv).remove(bpid, b.getBlockId());
+              perVolumeReplicaMap.get(fv.getStorageID())
+                  .remove(bpid, b.getBlockId());
               removedBlocks++;
             }
           }
@@ -1391,8 +1395,7 @@ class FsDatasetImpl implements FsDataset
           // Block is in memory and not on the disk
           // Remove the block from volumeMap
           volumeMap.remove(bpid, blockId);
-          perVolumeReplicaMap.get((FsVolumeImpl) memBlockInfo.getVolume())
-              .remove(bpid, blockId);
+          perVolumeReplicaMap.get(vol.getStorageID()).remove(bpid, blockId);
           final DataBlockScanner blockScanner = datanode.getBlockScanner();
           if (blockScanner != null) {
             blockScanner.deleteBlock(bpid, new Block(blockId));
@@ -1416,8 +1419,8 @@ class FsDatasetImpl implements FsDataset
         ReplicaInfo diskBlockInfo = new FinalizedReplica(blockId, 
             diskFile.length(), diskGS, vol, diskFile.getParentFile());
         volumeMap.add(bpid, diskBlockInfo);
-        perVolumeReplicaMap.get((FsVolumeImpl) memBlockInfo.getVolume()).
-            remove(bpid, diskBlockInfo);
+        perVolumeReplicaMap.get(vol.getStorageID())
+            .remove(bpid, diskBlockInfo);
         final DataBlockScanner blockScanner = datanode.getBlockScanner();
         if (blockScanner != null) {
           blockScanner.addBlock(new ExtendedBlock(bpid, diskBlockInfo));
@@ -1695,7 +1698,7 @@ class FsDatasetImpl implements FsDataset
 
     // TODO: Avoid the double scan.
     for (FsVolumeImpl v : getVolumes()) {
-      ReplicaMap rMap = perVolumeReplicaMap.get(v);
+      ReplicaMap rMap = perVolumeReplicaMap.get(v.getStorageID());
       rMap.initBlockPool(bpid);
       volumes.getVolumeMap(bpid, v, rMap);
     }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java?rev=1535158&r1=1535157&r2=1535158&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java Wed Oct 23 20:28:54 2013
@@ -90,13 +90,13 @@ class FsVolumeList {
     return remaining;
   }
     
-  void initializeReplicaMaps(Map<FsVolumeImpl, ReplicaMap> perVolumeReplicaMap,
+  void initializeReplicaMaps(Map<String, ReplicaMap> perVolumeReplicaMap,
                              ReplicaMap globalReplicaMap,
                              Object mutex) throws IOException {
     for (FsVolumeImpl v : volumes) {
       ReplicaMap rMap = new ReplicaMap(mutex);
       v.getVolumeMap(rMap);
-      perVolumeReplicaMap.put(v, rMap);
+      perVolumeReplicaMap.put(v.getStorageID(), rMap);
       globalReplicaMap.addAll(rMap);
     }
   }


