hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jiten...@apache.org
Subject svn commit: r1074261 - in /hadoop/hdfs/branches/HDFS-1052: ./ src/java/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/
Date Thu, 24 Feb 2011 19:21:36 GMT
Author: jitendra
Date: Thu Feb 24 19:21:35 2011
New Revision: 1074261

URL: http://svn.apache.org/viewvc?rev=1074261&view=rev
Log:
Federation: FSDataset in Datanode should be created after initial handshake with namenode

Modified:
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=1074261&r1=1074260&r2=1074261&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Thu Feb 24 19:21:35 2011
@@ -25,6 +25,9 @@ Trunk (unreleased changes)
     HDFS-1634. Federation: Convert single threaded DataNode into 
     per BlockPool thread model.(boryas)
 
+    HDFS-1637. Federation: FSDataset in Datanode should be created after 
+    initial handshake with namenode. (boryas and jitendra)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1074261&r1=1074260&r2=1074261&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
(original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
Thu Feb 24 19:21:35 2011
@@ -554,31 +554,31 @@ public class DataNode extends Configured
       setNamespaceInfo(nsInfo);
       setClusterId(nsInfo.clusterID);
       
-      // setup storage..
       StartupOption startOpt = getStartupOption(conf);
       assert startOpt != null : "Startup option must be set.";
 
       boolean simulatedFSDataset = 
         conf.getBoolean("dfs.datanode.simulateddatastorage", false);
+      
       if (simulatedFSDataset) {
-        bpRegistration.setStorageID(dnRegistration.getStorageID()); // same as mother DN
+        bpRegistration.setStorageID(dnRegistration.getStorageID()); //same as DN
         bpRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
         bpRegistration.storageInfo.namespaceID = bpNSInfo.namespaceID;
         bpRegistration.storageInfo.clusterID = bpNSInfo.clusterID;
-      //????        bpRegistration.storageInfo.blockpoolID = bpNSInfo.blockpoolID; // TODO:FEDERATION
+        // TODO: FEDERATION 
+        // bpRegistration.storageInfo.blockpoolID = bpNSInfo.blockpoolID;
       } else {
         // read storage info, lock data dirs and transition fs state if necessary       
  
         storage.recoverTransitionRead(blockPoolId, bpNSInfo, dataDirs, startOpt);
-        LOG.info("in setUp setting up storage: nsid=" + storage.namespaceID +
-            ";bpid=" + blockPoolId + 
-            ";lv=" + storage.layoutVersion +
-            ";nsInfo=" + bpNSInfo);
+        LOG.info("setting up storage: nsid=" + storage.namespaceID + ";bpid="
+            + blockPoolId + ";lv=" + storage.layoutVersion + ";nsInfo="
+            + bpNSInfo);
 
-        // use BlockPoolStorage as storageInfo in registration.
         bpRegistration.setStorageID(storage.getStorageID());
         bpRegistration.setStorageInfo(storage.getBPStorage(blockPoolId));
-        //data.addStorage(blockPoolId, storage);
-      }      
+      }
+      initFsDataSet(conf, dataDirs);
+      //data.addStorage(blockPoolId, storage);
     }
 
     /**
@@ -1066,7 +1066,6 @@ public class DataNode extends Configured
     // global DN settings
     initConfig(conf);
     registerMXBean();
-    initFsDataSet(conf, dataDirs); // TODO:FEDERATION should this be moved to after at least
one storage is created..
     initDataXceiver(conf);
     startInfoServer(conf);
     initIpcServer(conf); // TODO:FEDERATION redirect the call appropriately 
@@ -1077,41 +1076,42 @@ public class DataNode extends Configured
     nameNodeThreads = getAllNamenodes(conf);
   }
   
-  private void initFsDataSet(Configuration conf, AbstractList<File> dataDirs)
-  throws IOException {
+  /**
+   * Initializes the {@link #data}. The initialization is done only once, when
+   * handshake with the first namenode is completed.
+   */
+  private synchronized void initFsDataSet(Configuration conf,
+      AbstractList<File> dataDirs) throws IOException {
+    if (data != null) { // Already initialized
+      return;
+    }
+
     // get version and id info from the name-node
     boolean simulatedFSDataset = 
       conf.getBoolean("dfs.datanode.simulateddatastorage", false);
 
     if (simulatedFSDataset) {
-      
-      if(data == null) { // create FSDataset
-        setNewStorageID(dnRegistration);
-        conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY,
-            dnRegistration.getStorageID());
-        
-        // it would have been better to pass storage as a parameter to
-        // constructor below - need to augment ReflectionUtils used below.
+      setNewStorageID(dnRegistration);
+      conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY,
+          dnRegistration.getStorageID());
 
-        try {
-          //TODO:FEDERATION Equivalent of following (can't do because Simulated is in test
dir)
-          if(data==null) {
-            data = (FSDatasetInterface) ReflectionUtils.newInstance(
-              Class.forName(
-                  "org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"),
-                  conf);
-          }
-        } catch (ClassNotFoundException e) {
-          throw new IOException(StringUtils.stringifyException(e));
-        }
+      // it would have been better to pass storage as a parameter to
+      // constructor below - need to augment ReflectionUtils used below.
 
+      try {
+        // TODO:FEDERATION Equivalent of following (can't do because Simulated
+        // is in test dir)
+        data = (FSDatasetInterface) ReflectionUtils.newInstance(
+            Class.forName(
+            "org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"),
+            conf);
+      } catch (ClassNotFoundException e) {
+        throw new IOException(StringUtils.stringifyException(e));
       }
       // TODO:FEDERATION do we need set it to the general dnRegistration?????
       // TODO:FEDERATION do we need LV,NSid, cid,bpid for datanode version file?
-      
     } else {
-      if(data == null)
-        data = new FSDataset(storage, conf);
+      data = new FSDataset(storage, conf);
     }
   }
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1074261&r1=1074260&r2=1074261&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
(original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
Thu Feb 24 19:21:35 2011
@@ -877,12 +877,14 @@ public class FSDataset implements FSCons
     }
     FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
-      volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(), conf);
+      volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(),
+          conf);
+      DataNode.LOG.info("FSDataset added volume - "
+          + storage.getStorageDir(idx).getCurrentDir());
     }
     volumes = new FSVolumeSet(volArray);
     volumes.getVolumeMap(volumeMap);
 
-    // TODO:FEDERATION this needs to be moved to addStorage()
     File[] roots = new File[storage.getNumStorageDirs()];
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
       roots[idx] = storage.getStorageDir(idx).getCurrentDir();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java?rev=1074261&r1=1074260&r2=1074261&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
(original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
Thu Feb 24 19:21:35 2011
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.ser
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.net.DNS;
@@ -177,6 +179,18 @@ public class TestDataNodeMultipleRegistr
     Assert.assertNotNull("failed to create DataNode", dn);
     waitDataNodeUp(dn);
 
+    
+ // check number of volumes in fsdataset
+    Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
+    Assert.assertNotNull("No volumes in the fsdataset", volInfos);
+    int i=0;
+    for(VolumeInfo vi : volInfos) {
+      LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
+    }
+    // number of volumes should be 2 - [data1, data2]
+    Assert.assertEquals("number of volumes is wrong",2, volInfos.size());
+    
+    
     for (BPOfferService bpos : dn.nameNodeThreads) {
       LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration.name
           + "; sid=" + bpos.bpRegistration.storageID + "; nna=" + bpos.nn_addr);
@@ -235,12 +249,23 @@ public class TestDataNodeMultipleRegistr
     Assert.assertNotNull("failed to create DataNode", dn);
 
     waitDataNodeUp(dn);
-    // try block report
+    // check number of volumes in fsdataset
+    Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
+    Assert.assertNotNull("No volumes in the fsdataset", volInfos);
+    int i=0;
+    for(VolumeInfo vi : volInfos) {
+      LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
+    }
+    // number of volumes should be 2 - [data1, data2]
+    Assert.assertEquals("number of volumes is wrong",2, volInfos.size());
+    
 
     for (BPOfferService bpos : dn.nameNodeThreads) {
       LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration.name
           + "; sid=" + bpos.bpRegistration.storageID + "; nna=" + bpos.nn_addr);
     }
+    
+    // try block report
     BPOfferService bpos1 = dn.nameNodeThreads[0];
     bpos1.lastBlockReport = 0;
     DatanodeCommand cmd = bpos1.blockReport();



Mime
View raw message