hadoop-hdfs-commits mailing list archives

From: cnaur...@apache.org
Subject: svn commit: r1557294 - in /hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/h...
Date: Sat, 11 Jan 2014 00:37:26 GMT
Author: cnauroth
Date: Sat Jan 11 00:37:23 2014
New Revision: 1557294

URL: http://svn.apache.org/r1557294
Log:
Merge trunk to HDFS-4685.

Modified:
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1556664-1557293

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1557294&r1=1557293&r2=1557294&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sat Jan 11 00:37:23 2014
@@ -245,6 +245,9 @@ Trunk (Unreleased)
     HDFS-5715. Use Snapshot ID to indicate the corresponding Snapshot for a
     FileDiff/DirectoryDiff. (jing9)
 
+    HDFS-5721. sharedEditsImage in Namenode#initializeSharedEdits() should be 
+    closed before method returns. (Ted Yu via junping_du)
+
   OPTIMIZATIONS
 
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
@@ -736,6 +739,13 @@ Release 2.4.0 - UNRELEASED
     HDFS-5690. DataNode fails to start in secure mode when dfs.http.policy equals to 
     HTTP_ONLY. (Haohui Mai via jing9)
 
+    HDFS-5449. WebHdfs compatibility broken between 2.2 and 1.x / 23.x (kihwal)
+
+    HDFS-5756. hadoopRzOptionsSetByteBufferPool does not accept NULL argument,
+    contrary to docs. (cmccabe via wang)
+
+    HDFS-5747. Fix NPEs in BlockManager. (Arpit Agarwal)
+
   BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
 
     HDFS-4985. Add storage type to the protocol and expose it in block report

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1556664-1557293

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java?rev=1557294&r1=1557293&r2=1557294&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java Sat Jan 11 00:37:23 2014
@@ -324,12 +324,14 @@ public class BlockInfoUnderConstruction 
     Iterator<ReplicaUnderConstruction> it = replicas.iterator();
     while (it.hasNext()) {
       ReplicaUnderConstruction r = it.next();
-      if(r.getExpectedStorageLocation() == storage) {
+      DatanodeStorageInfo expectedLocation = r.getExpectedStorageLocation();
+      if(expectedLocation == storage) {
         // Record the gen stamp from the report
         r.setGenerationStamp(block.getGenerationStamp());
         return;
-      } else if (r.getExpectedStorageLocation().getDatanodeDescriptor() ==
-          storage.getDatanodeDescriptor()) {
+      } else if (expectedLocation != null &&
+                 expectedLocation.getDatanodeDescriptor() ==
+                     storage.getDatanodeDescriptor()) {
 
         // The Datanode reported that the block is on a different storage
         // than the one chosen by BlockPlacementPolicy. This can occur as

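The hunk above is the HDFS-5747 NPE guard: getExpectedStorageLocation() is read once into a local, compared by reference, and only dereferenced after an explicit null check. A minimal sketch of the same pattern, using simplified stand-in types rather than the real Hadoop classes:

    // Simplified, hypothetical types illustrating the null-guard pattern from
    // the hunk above: fetch a possibly-null value once, compare by reference,
    // and dereference it only after checking for null.
    class ReplicaScan {
      interface Storage { Object getDatanode(); }
      interface Replica {
        Storage getExpectedStorage();          // may return null
        void setGenerationStamp(long gs);
      }

      static void record(Replica r, Storage reported, long genStamp) {
        Storage expected = r.getExpectedStorage();
        if (expected == reported) {
          r.setGenerationStamp(genStamp);      // exact storage match
        } else if (expected != null
            && expected.getDatanode() == reported.getDatanode()) {
          // Same datanode, different storage; safe because of the null check.
        }
      }
    }
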
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1557294&r1=1557293&r2=1557294&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat Jan 11 00:37:23 2014
@@ -621,8 +621,14 @@ public class FSNamesystem implements Nam
 
     long loadStart = now();
     String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
-    namesystem.loadFSImage(startOpt, fsImage,
-      HAUtil.isHAEnabled(conf, nameserviceId));
+    try {
+      namesystem.loadFSImage(startOpt, fsImage,
+        HAUtil.isHAEnabled(conf, nameserviceId));
+    } catch (IOException ioe) {
+      LOG.warn("Encountered exception loading fsimage", ioe);
+      fsImage.close();
+      throw ioe;
+    }
     long timeTakenToLoadFSImage = now() - loadStart;
     LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
     NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();

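The FSNamesystem change applies a close-on-failure idiom: if loadFSImage() throws, the FSImage is closed so its resources (such as storage locks) are released before the exception propagates; on success it stays open for the running namesystem. A generic sketch of the idiom, using plain java.io.Closeable rather than any Hadoop type:

    import java.io.Closeable;
    import java.io.IOException;

    // Close-on-failure: the resource survives success but never leaks on error.
    final class InitOrClose {
      interface Initializer<T> { void run(T resource) throws IOException; }

      static <T extends Closeable> T initOrClose(T resource, Initializer<T> init)
          throws IOException {
        try {
          init.run(resource);
          return resource;    // success: caller owns the still-open resource
        } catch (IOException ioe) {
          resource.close();   // failure: release locks/handles, then rethrow
          throw ioe;
        }
      }
    }
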
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1557294&r1=1557293&r2=1557294&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Sat Jan 11 00:37:23 2014
@@ -547,8 +547,8 @@ public class NameNode implements NameNod
   }
   
   private void stopCommonServices() {
-    if(namesystem != null) namesystem.close();
     if(rpcServer != null) rpcServer.stop();
+    if(namesystem != null) namesystem.close();
     if (pauseMonitor != null) pauseMonitor.stop();
     if (plugins != null) {
       for (ServicePlugin p : plugins) {
@@ -816,14 +816,20 @@ public class NameNode implements NameNod
     System.out.println("Formatting using clusterid: " + clusterId);
     
     FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
-    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
-    fsImage.getEditLog().initJournalsForWrite();
-    
-    if (!fsImage.confirmFormat(force, isInteractive)) {
-      return true; // aborted
+    try {
+      FSNamesystem fsn = new FSNamesystem(conf, fsImage);
+      fsImage.getEditLog().initJournalsForWrite();
+
+      if (!fsImage.confirmFormat(force, isInteractive)) {
+        return true; // aborted
+      }
+
+      fsImage.format(fsn, clusterId);
+    } catch (IOException ioe) {
+      LOG.warn("Encountered exception during format: ", ioe);
+      fsImage.close();
+      throw ioe;
     }
-    
-    fsImage.format(fsn, clusterId);
     return false;
   }
 
@@ -897,6 +903,7 @@ public class NameNode implements NameNod
     }
 
     NNStorage existingStorage = null;
+    FSImage sharedEditsImage = null;
     try {
       FSNamesystem fsns =
           FSNamesystem.loadFromDisk(getConfigurationWithoutSharedEdits(conf));
@@ -906,7 +913,7 @@ public class NameNode implements NameNod
       
       List<URI> sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf);
       
-      FSImage sharedEditsImage = new FSImage(conf,
+      sharedEditsImage = new FSImage(conf,
           Lists.<URI>newArrayList(),
           sharedEditsDirs);
       sharedEditsImage.getEditLog().initJournalsForWrite();
@@ -934,6 +941,13 @@ public class NameNode implements NameNod
       LOG.error("Could not initialize shared edits dir", ioe);
       return true; // aborted
     } finally {
+      if (sharedEditsImage != null) {
+        try {
+          sharedEditsImage.close();
+        }  catch (IOException ioe) {
+          LOG.warn("Could not close sharedEditsImage", ioe);
+        }
+      }
       // Have to unlock storage explicitly for the case when we're running in a
       // unit test, which runs in the same JVM as NNs.
       if (existingStorage != null) {

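Three separate cleanups land in NameNode.java: stopCommonServices() now stops the RPC server before closing the namesystem, format() closes the FSImage if any step throws, and initializeSharedEdits() (HDFS-5721) closes sharedEditsImage in a finally block, logging rather than rethrowing a failed close so it cannot mask the method's real outcome. A sketch of that last, log-and-swallow close, with an illustrative java.util.logging logger rather than Hadoop's:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.logging.Logger;

    // Finally-block cleanup: a failed close() is logged, never rethrown,
    // so it cannot replace the exception (or result) the caller cares about.
    final class QuietClose {
      private static final Logger LOG =
          Logger.getLogger(QuietClose.class.getName());

      static void closeQuietly(Closeable c, String what) {
        if (c == null) {
          return;             // the resource was never constructed
        }
        try {
          c.close();
        } catch (IOException ioe) {
          LOG.warning("Could not close " + what + ": " + ioe);
        }
      }
    }
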
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1557294&r1=1557293&r2=1557294&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Sat Jan 11 00:37:23 2014
@@ -190,24 +190,29 @@ public class BootstrapStandby implements
     // Load the newly formatted image, using all of the directories (including shared
     // edits)
     FSImage image = new FSImage(conf);
-    image.getStorage().setStorageInfo(storage);
-    image.initEditLog();
-    assert image.getEditLog().isOpenForRead() :
+    try {
+      image.getStorage().setStorageInfo(storage);
+      image.initEditLog();
+      assert image.getEditLog().isOpenForRead() :
         "Expected edit log to be open for read";
-    
-    // Ensure that we have enough edits already in the shared directory to
-    // start up from the last checkpoint on the active.
-    if (!checkLogsAvailableForRead(image, imageTxId, curTxId)) {
-      return ERR_CODE_LOGS_UNAVAILABLE;
-    }
-    
-    image.getStorage().writeTransactionIdFileToStorage(curTxId);
 
-    // Download that checkpoint into our storage directories.
-    MD5Hash hash = TransferFsImage.downloadImageToStorage(
+      // Ensure that we have enough edits already in the shared directory to
+      // start up from the last checkpoint on the active.
+      if (!checkLogsAvailableForRead(image, imageTxId, curTxId)) {
+        return ERR_CODE_LOGS_UNAVAILABLE;
+      }
+
+      image.getStorage().writeTransactionIdFileToStorage(curTxId);
+
+      // Download that checkpoint into our storage directories.
+      MD5Hash hash = TransferFsImage.downloadImageToStorage(
         otherHttpAddr, imageTxId,
         storage, true);
-    image.saveDigestAndRenameCheckpointImage(imageTxId, hash);
+      image.saveDigestAndRenameCheckpointImage(imageTxId, hash);
+    } catch (IOException ioe) {
+      image.close();
+      throw ioe;
+    }
     return 0;
   }
 

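BootstrapStandby gets the same close-on-failure treatment sketched after the FSNamesystem hunk: image.close() runs only on the exception path, because on success the FSImage must remain usable by the node being bootstrapped. That asymmetry is why a Java 7 try-with-resources, which closes on every path, would not be a drop-in replacement here.
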
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1557294&r1=1557293&r2=1557294&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Sat Jan 11 00:37:23 2014
@@ -271,7 +271,7 @@ public class JsonUtil {
   }
   
   /** Convert a DatanodeInfo to a Json map. */
-  private static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
+  static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
     if (datanodeinfo == null) {
       return null;
     }
@@ -279,6 +279,9 @@ public class JsonUtil {
     // TODO: Fix storageID
     final Map<String, Object> m = new TreeMap<String, Object>();
     m.put("ipAddr", datanodeinfo.getIpAddr());
+    // 'name' is equivalent to ipAddr:xferPort. Older clients (1.x, 0.23.x)
+    // expect this instead of the two fields.
+    m.put("name", datanodeinfo.getXferAddr());
     m.put("hostName", datanodeinfo.getHostName());
     m.put("storageID", datanodeinfo.getDatanodeUuid());
     m.put("xferPort", datanodeinfo.getXferPort());
@@ -325,17 +328,49 @@ public class JsonUtil {
   }
 
   /** Convert a Json map to an DatanodeInfo object. */
-  static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
+  static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) 
+    throws IOException {
     if (m == null) {
       return null;
     }
 
+    // ipAddr and xferPort are the critical fields for accessing data.
+    // If any one of the two is missing, an exception needs to be thrown.
+
+    // Handle the case of old servers (1.x, 0.23.x) sending 'name' instead
+    //  of ipAddr and xferPort.
+    String ipAddr = getString(m, "ipAddr", null);
+    int xferPort = getInt(m, "xferPort", -1);
+    if (ipAddr == null) {
+      String name = getString(m, "name", null);
+      if (name != null) {
+        int colonIdx = name.indexOf(':');
+        if (colonIdx > 0) {
+          ipAddr = name.substring(0, colonIdx);
+          xferPort = Integer.parseInt(name.substring(colonIdx +1));
+        } else {
+          throw new IOException(
+              "Invalid value in server response: name=[" + name + "]");
+        }
+      } else {
+        throw new IOException(
+            "Missing both 'ipAddr' and 'name' in server response.");
+      }
+      // ipAddr is non-null & non-empty string at this point.
+    }
+
+    // Check the validity of xferPort.
+    if (xferPort == -1) {
+      throw new IOException(
+          "Invalid or missing 'xferPort' in server response.");
+    }
+
     // TODO: Fix storageID
     return new DatanodeInfo(
-        (String)m.get("ipAddr"),
+        ipAddr,
         (String)m.get("hostName"),
         (String)m.get("storageID"),
-        (int)(long)(Long)m.get("xferPort"),
+        xferPort,
         (int)(long)(Long)m.get("infoPort"),
         getInt(m, "infoSecurePort", 0),
         (int)(long)(Long)m.get("ipcPort"),
@@ -368,7 +403,8 @@ public class JsonUtil {
   }
 
   /** Convert an Object[] to a DatanodeInfo[]. */
-  private static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) {
+  private static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) 
+      throws IOException {
     if (objects == null) {
       return null;
     } else if (objects.length == 0) {

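The toDatanodeInfo() changes (HDFS-5449) reconstruct ipAddr and xferPort from the legacy 'name' field that 1.x and 0.23.x servers send. Below is a standalone sketch of that parse-and-validate step; the helper name is hypothetical, and unlike the committed code (where a non-numeric port escapes as an unchecked NumberFormatException) this version wraps the parse failure in an IOException:

    import java.io.IOException;

    // Hypothetical helper mirroring the legacy "name" handling above:
    // "name" must look like "ip:port"; anything else is a protocol error.
    final class LegacyDatanodeName {
      final String ipAddr;
      final int xferPort;

      LegacyDatanodeName(String name) throws IOException {
        int colonIdx = (name == null) ? -1 : name.indexOf(':');
        if (colonIdx <= 0 || colonIdx == name.length() - 1) {
          throw new IOException(
              "Invalid value in server response: name=[" + name + "]");
        }
        ipAddr = name.substring(0, colonIdx);
        try {
          xferPort = Integer.parseInt(name.substring(colonIdx + 1));
        } catch (NumberFormatException nfe) {
          throw new IOException("Invalid xferPort in name=[" + name + "]", nfe);
        }
      }
    }
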
Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1556664-1557293

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c?rev=1557294&r1=1557293&r2=1557294&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c Sat Jan 11 00:37:23 2014
@@ -2174,16 +2174,18 @@ int hadoopRzOptionsSetByteBufferPool(
         return -1;
     }
 
-    // Note: we don't have to call hadoopRzOptionsClearCached in this
-    // function, since the ByteBufferPool is passed separately from the
-    // EnumSet of ReadOptions.
-
-    jthr = constructNewObjectOfClass(env, &byteBufferPool, className, "()V");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "hadoopRzOptionsSetByteBufferPool(className=%s): ", className);
-        errno = EINVAL;
-        return -1;
+    if (className) {
+      // Note: we don't have to call hadoopRzOptionsClearCached in this
+      // function, since the ByteBufferPool is passed separately from the
+      // EnumSet of ReadOptions.
+
+      jthr = constructNewObjectOfClass(env, &byteBufferPool, className, "()V");
+      if (jthr) {
+          printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+              "hadoopRzOptionsSetByteBufferPool(className=%s): ", className);
+          errno = EINVAL;
+          return -1;
+      }
     }
     if (opts->byteBufferPool) {
         // Delete any previous ByteBufferPool we had.

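HDFS-5756 brings hadoopRzOptionsSetByteBufferPool() in line with its documented contract: a NULL className is now accepted and clears any previously configured pool instead of failing with EINVAL. A minimal caller sketch in C, assuming the standard libhdfs hdfs.h declarations and the ElasticByteBufferPool class name used by the test below:

    #include <stddef.h>
    #include "hdfs.h"   /* libhdfs public API */

    /* Set an explicit ByteBufferPool, then clear it again by passing NULL,
     * which is legal after HDFS-5756. Returns 0 on success, -1 on error. */
    static int toggle_pool(struct hadoopRzOptions *opts)
    {
        if (hadoopRzOptionsSetByteBufferPool(opts,
                "org/apache/hadoop/io/ElasticByteBufferPool") < 0) {
            return -1;
        }
        /* NULL now means "no pool": the cached pool reference is released. */
        return hadoopRzOptionsSetByteBufferPool(opts, NULL);
    }
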
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c?rev=1557294&r1=1557293&r2=1557294&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c Sat Jan 11 00:37:23 2014
@@ -140,6 +140,12 @@ static int doTestZeroCopyReads(hdfsFS fs
     EXPECT_NULL(hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE));
     EXPECT_INT_EQ(EPROTONOSUPPORT, errno);
 
+    /* Verify that setting a NULL ByteBufferPool class works. */
+    EXPECT_ZERO(hadoopRzOptionsSetByteBufferPool(opts, NULL));
+    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 0));
+    EXPECT_NULL(hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE));
+    EXPECT_INT_EQ(EPROTONOSUPPORT, errno);
+
     /* Now set a ByteBufferPool and try again.  It should succeed this time. */
     EXPECT_ZERO(hadoopRzOptionsSetByteBufferPool(opts,
           ELASTIC_BYTE_BUFFER_POOL_CLASS));

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1556664-1557293

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1556664-1557293

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1556664-1557293

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1556664-1557293

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java?rev=1557294&r1=1557293&r2=1557294&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java Sat Jan 11 00:37:23 2014
@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.util.Time;
@@ -61,7 +62,7 @@ public class TestJsonUtil {
   }
   
   @Test
-  public void testToDatanodeInfoWithoutSecurePort() {
+  public void testToDatanodeInfoWithoutSecurePort() throws Exception {
     Map<String, Object> response = new HashMap<String, Object>();
     
     response.put("ipAddr", "127.0.0.1");
@@ -84,4 +85,63 @@ public class TestJsonUtil {
     
     JsonUtil.toDatanodeInfo(response);
   }
+
+  @Test
+  public void testToDatanodeInfoWithName() throws Exception {
+    Map<String, Object> response = new HashMap<String, Object>();
+
+    // Older servers (1.x, 0.23, etc.) send 'name' instead of ipAddr
+    // and xferPort.
+    String name = "127.0.0.1:1004";
+    response.put("name", name);
+    response.put("hostName", "localhost");
+    response.put("storageID", "fake-id");
+    response.put("infoPort", 1338l);
+    response.put("ipcPort", 1339l);
+    response.put("capacity", 1024l);
+    response.put("dfsUsed", 512l);
+    response.put("remaining", 512l);
+    response.put("blockPoolUsed", 512l);
+    response.put("lastUpdate", 0l);
+    response.put("xceiverCount", 4096l);
+    response.put("networkLocation", "foo.bar.baz");
+    response.put("adminState", "NORMAL");
+    response.put("cacheCapacity", 123l);
+    response.put("cacheUsed", 321l);
+
+    DatanodeInfo di = JsonUtil.toDatanodeInfo(response);
+    Assert.assertEquals(name, di.getXferAddr());
+
+    // The encoded result should contain name, ipAddr and xferPort.
+    Map<String, Object> r = JsonUtil.toJsonMap(di);
+    Assert.assertEquals(name, (String)r.get("name"));
+    Assert.assertEquals("127.0.0.1", (String)r.get("ipAddr"));
+    // In this test, it is Integer instead of Long since json was not actually
+    // involved in constructing the map.
+    Assert.assertEquals(1004, (int)(Integer)r.get("xferPort"));
+
+    // Invalid names
+    String[] badNames = {"127.0.0.1", "127.0.0.1:", ":", "127.0.0.1:sweet", ":123"};
+    for (String badName : badNames) {
+      response.put("name", badName);
+      checkDecodeFailure(response);
+    }
+
+    // Missing both name and ipAddr
+    response.remove("name");
+    checkDecodeFailure(response);
+
+    // Only missing xferPort
+    response.put("ipAddr", "127.0.0.1");
+    checkDecodeFailure(response);
+  }
+
+  private void checkDecodeFailure(Map<String, Object> map) {
+    try {
+      JsonUtil.toDatanodeInfo(map);
+      Assert.fail("Exception not thrown against bad input.");
+    } catch (Exception e) {
+      // expected
+    }
+  }
 }


