hadoop-hdfs-commits mailing list archives

From: ji...@apache.org
Subject: svn commit: r1562964 [2/2] - in /hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/qjournal/serv...
Date: Thu, 30 Jan 2014 21:11:26 GMT
Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm Thu Jan 30 21:11:21 2014
@@ -18,8 +18,6 @@
 
 HDFS Permissions Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Overview
@@ -55,8 +53,10 @@ HDFS Permissions Guide
 
      * If the user name matches the owner of foo, then the owner
        permissions are tested;
+
      * Else if the group of foo matches any of member of the groups list,
        then the group permissions are tested;
+
      * Otherwise the other permissions of foo are tested.
 
    If a permissions check fails, the client operation fails.
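[The check order in this hunk is mechanical enough to mirror in a few lines of Java. A hedged sketch, not the NameNode's actual code: FsPermission and FsAction are the real Hadoop permission classes, while the isAllowed helper and its arguments are purely illustrative.

    import java.util.List;

    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermissionCheckSketch {
      // Owner first, then group membership, then "other" -- exactly one
      // class of permission bits is consulted for a given caller.
      static boolean isAllowed(String user, List<String> groups,
          String owner, String group, FsPermission perm, FsAction requested) {
        if (user.equals(owner)) {
          return perm.getUserAction().implies(requested);
        } else if (groups.contains(group)) {
          return perm.getGroupAction().implies(requested);
        }
        return perm.getOtherAction().implies(requested);
      }
    }

Note the consequence of the order: for mode 0640, a caller who is the owner is tested only against the owner bits, and a failed owner check is not rescued by more permissive group or other bits.]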

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm Thu Jan 30 21:11:21 2014
@@ -18,8 +18,6 @@
 
 HDFS Quotas Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Overview

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm Thu Jan 30 21:11:21 2014
@@ -108,9 +108,11 @@ HDFS Users Guide
    The following documents describe how to install and set up a Hadoop
    cluster:
 
-     * {{Single Node Setup}} for first-time users.
+     * {{{../hadoop-common/SingleCluster.html}Single Node Setup}}
+       for first-time users.
 
-     * {{Cluster Setup}} for large, distributed clusters.
+     * {{{../hadoop-common/ClusterSetup.html}Cluster Setup}}
+       for large, distributed clusters.
 
   The rest of this document assumes the user is able to set up and run an
   HDFS with at least one DataNode. For the purpose of this document, both
@@ -136,7 +138,8 @@ HDFS Users Guide
   for a command. These commands support most of the normal file system
    operations like copying files, changing file permissions, etc. It also
    supports a few HDFS specific operations like changing replication of
-   files. For more information see {{{File System Shell Guide}}}.
+   files. For more information see {{{../hadoop-common/FileSystemShell.html}
+   File System Shell Guide}}.
 
 **  DFSAdmin Command
 
@@ -169,7 +172,7 @@ HDFS Users Guide
       of racks and datanodes attached to the racks as viewed by the
        NameNode.
 
-   For command usage, see {{{dfsadmin}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#dfsadmin}dfsadmin}}.
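[The -report output described here is also available programmatically. A minimal sketch, assuming a reachable cluster with default configuration: DistributedFileSystem#getDataNodeStats is a real client API, but the NameNode URI is a placeholder.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    public class DfsReportSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical NameNode address; substitute your own.
        FileSystem fs = FileSystem.get(
            URI.create("hdfs://namenode:8020"), new Configuration());
        // Roughly the data behind "bin/hdfs dfsadmin -report".
        for (DatanodeInfo dn : ((DistributedFileSystem) fs).getDataNodeStats()) {
          System.out.println(dn.getHostName() + ": " + dn.getRemaining()
              + " of " + dn.getCapacity() + " bytes free");
        }
      }
    }
]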
 
 * Secondary NameNode
 
@@ -203,7 +206,8 @@ HDFS Users Guide
   So that the checkpointed image is always ready to be read by the
    primary NameNode if necessary.
 
-   For command usage, see {{{secondarynamenode}}}.
+   For command usage,
+   see {{{../hadoop-common/CommandsManual.html#secondarynamenode}secondarynamenode}}.
 
 * Checkpoint Node
 
@@ -245,7 +249,7 @@ HDFS Users Guide
    Multiple checkpoint nodes may be specified in the cluster configuration
    file.
 
-   For command usage, see {{{namenode}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
 
 * Backup Node
 
@@ -287,7 +291,7 @@ HDFS Users Guide
 
    For a complete discussion of the motivation behind the creation of the
    Backup node and Checkpoint node, see {{{https://issues.apache.org/jira/browse/HADOOP-4539}HADOOP-4539}}.
-   For command usage, see {{{namenode}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
 
 * Import Checkpoint
 
@@ -310,7 +314,7 @@ HDFS Users Guide
    verifies that the image in <<<dfs.namenode.checkpoint.dir>>> is consistent,
    but does not modify it in any way.
 
-   For command usage, see {{{namenode}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
 
 * Rebalancer
 
@@ -337,7 +341,7 @@ HDFS Users Guide
    A brief administrator's guide for rebalancer as a PDF is attached to
    {{{https://issues.apache.org/jira/browse/HADOOP-1652}HADOOP-1652}}.
 
-   For command usage, see {{{balancer}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.
 
 * Rack Awareness
 
@@ -379,8 +383,9 @@ HDFS Users Guide
    most of the recoverable failures. By default fsck ignores open files
    but provides an option to select all files during reporting. The HDFS
    fsck command is not a Hadoop shell command. It can be run as
-   <<<bin/hadoop fsck>>>. For command usage, see {{{fsck}}}. fsck can be run on the
-   whole file system or on a subset of files.
+   <<<bin/hadoop fsck>>>. For command usage, see 
+   {{{../hadoop-common/CommandsManual.html#fsck}fsck}}. fsck can be run on
+   the whole file system or on a subset of files.
 
 * fetchdt
 
@@ -393,7 +398,8 @@ HDFS Users Guide
   command. It can be run as <<<bin/hadoop fetchdt DTfile>>>. After you got
   the token you can run an HDFS command without having Kerberos tickets,
   by pointing the <<<HADOOP_TOKEN_FILE_LOCATION>>> environment variable to the
-   delegation token file. For command usage, see {{{fetchdt}}} command.
+   delegation token file. For command usage, see
+   {{{../hadoop-common/CommandsManual.html#fetchdt}fetchdt}} command.
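[For readers who want to inspect the token file fetchdt writes, a small sketch under stated assumptions: Credentials.readTokenStorageFile is a real Hadoop API, and "DTfile" stands in for whatever name was passed to fetchdt.

    import java.io.File;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.token.Token;

    public class InspectTokenFile {
      public static void main(String[] args) throws Exception {
        // "DTfile" is the file produced by: bin/hadoop fetchdt DTfile
        Credentials creds = Credentials.readTokenStorageFile(
            new File("DTfile"), new Configuration());
        for (Token<?> token : creds.getAllTokens()) {
          System.out.println(token.getKind() + " for " + token.getService());
        }
      }
    }

As the guide says, pointing HADOOP_TOKEN_FILE_LOCATION at the same file lets subsequent HDFS commands authenticate with the delegation token instead of a Kerberos ticket.]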
 
 * Recovery Mode
 
@@ -427,10 +433,11 @@ HDFS Users Guide
    let alone to restart HDFS from scratch. HDFS allows administrators to
    go back to earlier version of Hadoop and rollback the cluster to the
    state it was in before the upgrade. HDFS upgrade is described in more
-   detail in {{{Hadoop Upgrade}}} Wiki page. HDFS can have one such backup at a
-   time. Before upgrading, administrators need to remove existing backup
-   using bin/hadoop dfsadmin <<<-finalizeUpgrade>>> command. The following
-   briefly describes the typical upgrade procedure:
+   detail in {{{http://wiki.apache.org/hadoop/Hadoop_Upgrade}Hadoop Upgrade}}
+   Wiki page. HDFS can have one such backup at a time. Before upgrading,
+   administrators need to remove the existing backup using the bin/hadoop dfsadmin
+   <<<-finalizeUpgrade>>> command. The following briefly describes the
+   typical upgrade procedure:
 
     * Before upgrading Hadoop software, finalize if there is an existing
        backup. <<<dfsadmin -upgradeProgress>>> status can tell if the cluster
@@ -450,7 +457,7 @@ HDFS Users Guide
 
           * stop the cluster and distribute earlier version of Hadoop.
 
-          * start the cluster with rollback option. (<<<bin/start-dfs.h -rollback>>>).
+          * start the cluster with rollback option. (<<<bin/start-dfs.sh -rollback>>>).
 
 * File Permissions and Security
 
@@ -465,14 +472,15 @@ HDFS Users Guide
 * Scalability
 
    Hadoop currently runs on clusters with thousands of nodes. The
-   {{{PoweredBy}}} Wiki page lists some of the organizations that deploy Hadoop
-   on large clusters. HDFS has one NameNode for each cluster. Currently
-   the total memory available on NameNode is the primary scalability
-   limitation. On very large clusters, increasing average size of files
-   stored in HDFS helps with increasing cluster size without increasing
-   memory requirements on NameNode. The default configuration may not
-   suite very large clustes. The {{{FAQ}}} Wiki page lists suggested
-   configuration improvements for large Hadoop clusters.
+   {{{http://wiki.apache.org/hadoop/PoweredBy}PoweredBy}} Wiki page lists
+   some of the organizations that deploy Hadoop on large clusters.
+   HDFS has one NameNode for each cluster. Currently the total memory
+   available on NameNode is the primary scalability limitation.
+   On very large clusters, increasing average size of files stored in
+   HDFS helps with increasing cluster size without increasing memory
+   requirements on NameNode. The default configuration may not suit
+   very large clusters. The {{{http://wiki.apache.org/hadoop/FAQ}FAQ}}
+   Wiki page lists suggested configuration improvements for large Hadoop clusters.
 
 * Related Documentation
 
@@ -481,19 +489,22 @@ HDFS Users Guide
    documentation about Hadoop and HDFS. The following list is a starting
    point for further exploration:
 
-     * {{{Hadoop Site}}}: The home page for the Apache Hadoop site.
+     * {{{http://hadoop.apache.org}Hadoop Site}}: The home page for
+       the Apache Hadoop site.
 
-     * {{{Hadoop Wiki}}}: The home page (FrontPage) for the Hadoop Wiki. Unlike
+     * {{{http://wiki.apache.org/hadoop/FrontPage}Hadoop Wiki}}:
+       The home page (FrontPage) for the Hadoop Wiki. Unlike
        the released documentation, which is part of Hadoop source tree,
        Hadoop Wiki is regularly edited by Hadoop Community.
 
-     * {{{FAQ}}}: The FAQ Wiki page.
+     * {{{http://wiki.apache.org/hadoop/FAQ}FAQ}}: The FAQ Wiki page.
 
-     * {{{Hadoop JavaDoc API}}}.
+     * {{{../../api/index.html}Hadoop JavaDoc API}}.
 
-     * {{{Hadoop User Mailing List}}}: core-user[at]hadoop.apache.org.
+     * Hadoop User Mailing List: user[at]hadoop.apache.org.
 
-     * Explore {{{src/hdfs/hdfs-default.xml}}}. It includes brief description of
-       most of the configuration variables available.
+     * Explore {{{./hdfs-default.xml}hdfs-default.xml}}. It includes
+       a brief description of most of the configuration variables available.
 
-     * {{{Hadoop Commands Guide}}}: Hadoop commands usage.
+     * {{{../hadoop-common/CommandsManual.html}Hadoop Commands Guide}}:
+       Hadoop commands usage.

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Hftp.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Hftp.apt.vm?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Hftp.apt.vm (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Hftp.apt.vm Thu Jan 30 21:11:21 2014
@@ -18,8 +18,6 @@
 
 HFTP Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Introduction

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ShortCircuitLocalReads.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ShortCircuitLocalReads.apt.vm?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ShortCircuitLocalReads.apt.vm (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ShortCircuitLocalReads.apt.vm Thu Jan 30 21:11:21 2014
@@ -19,8 +19,6 @@
 
 HDFS Short-Circuit Local Reads
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * {Background}

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm Thu Jan 30 21:11:21 2014
@@ -18,8 +18,6 @@
 
 WebHDFS REST API
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * {Document Conventions}
@@ -54,7 +52,7 @@ WebHDFS REST API
     * {{{Status of a File/Directory}<<<GETFILESTATUS>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getFileStatus)
 
-    * {{<<<LISTSTATUS>>>}}
+    * {{{List a Directory}<<<LISTSTATUS>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listStatus)
 
     * {{{Get Content Summary of a Directory}<<<GETCONTENTSUMMARY>>>}}
@@ -109,7 +107,7 @@ WebHDFS REST API
     * {{{Append to a File}<<<APPEND>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append)
 
-    * {{{Concatenate Files}<<<CONCAT>>>}}
+    * {{{Concat File(s)}<<<CONCAT>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat)
 
   * HTTP DELETE
@@ -871,7 +869,7 @@ Content-Length: 0
 * {Error Responses}
 
   When an operation fails, the server may throw an exception.
-  The JSON schema of error responses is defined in {{<<<RemoteException>>> JSON schema}}.
+  The JSON schema of error responses is defined in {{RemoteException JSON Schema}}.
   The table below shows the mapping from exceptions to HTTP response codes.
 
 ** {HTTP Response Codes}
@@ -1119,7 +1117,7 @@ Transfer-Encoding: chunked
   See also:
   {{{FileStatus Properties}<<<FileStatus>>> Properties}},
   {{{Status of a File/Directory}<<<GETFILESTATUS>>>}},
-  {{{../../api/org/apache/hadoop/fs/FileStatus}FileStatus}}
+  {{{../../api/org/apache/hadoop/fs/FileStatus.html}FileStatus}}
 
 
 *** {FileStatus Properties}
@@ -1232,7 +1230,7 @@ var fileStatusProperties =
   See also:
   {{{FileStatus Properties}<<<FileStatus>>> Properties}},
   {{{List a Directory}<<<LISTSTATUS>>>}},
-  {{{../../api/org/apache/hadoop/fs/FileStatus}FileStatus}}
+  {{{../../api/org/apache/hadoop/fs/FileStatus.html}FileStatus}}
 
 
 ** {Long JSON Schema}
@@ -1275,7 +1273,7 @@ var fileStatusProperties =
 
   See also:
   {{{Get Home Directory}<<<GETHOMEDIRECTORY>>>}},
-  {{{../../api/org/apache/hadoop/fs/Path}Path}}
+  {{{../../api/org/apache/hadoop/fs/Path.html}Path}}
 
 
 ** {RemoteException JSON Schema}
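[One editorial aside on the operations indexed in this file: they correspond one-to-one with FileSystem methods, so a client can exercise the REST API without hand-building HTTP requests. A hedged sketch; host, port, and path are placeholders (50070 is the customary NameNode HTTP port for this release line, but check your cluster).

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsListSketch {
      public static void main(String[] args) throws Exception {
        // The webhdfs:// scheme routes calls through the REST API;
        // listStatus maps to the LISTSTATUS operation documented above.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode:50070"), new Configuration());
        for (FileStatus st : fs.listStatus(new Path("/"))) {
          System.out.println(st.getPath() + (st.isDirectory() ? "/" : ""));
        }
      }
    }

On failure the server replies with the RemoteException JSON this section defines, and the client surfaces it as the corresponding Java exception.]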

Propchange: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1561802-1562961

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Thu Jan 30 21:11:21 2014
@@ -118,6 +118,20 @@ public class TestDFSUtil {
     assertEquals(0, bs.length);
   }
 
+  /**
+   * Test constructing LocatedBlock with null cachedLocs
+   */
+  @Test
+  public void testLocatedBlockConstructorWithNullCachedLocs() {
+    DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo[] ds = new DatanodeInfo[1];
+    ds[0] = d;
+    
+    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
+    LocatedBlock l1 = new LocatedBlock(b1, ds, null, null, 0, false, null);
+    final DatanodeInfo[] cachedLocs = l1.getCachedLocations();
+    assertTrue(cachedLocs.length == 0);
+  }
 
   private Configuration setupAddress(String key) {
     HdfsConfiguration conf = new HdfsConfiguration();

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java Thu Jan 30 21:11:21 2014
@@ -71,7 +71,7 @@ public class TestFSOutputSummer {
     cleanupFile(name);
   }
   
-  /* create a file, write data with vairable amount of data */
+  /* create a file, write data with a variable amount of data */
   private void writeFile3(Path name) throws Exception {
     FSDataOutputStream stm = fileSys.create(name, true, 
         fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
@@ -103,6 +103,8 @@ public class TestFSOutputSummer {
     stm.readFully(0, actual);
     checkAndEraseData(actual, 0, expected, "Read Sanity Test");
     stm.close();
+    // do a sanity check. Get the file checksum
+    fileSys.getFileChecksum(name);
   }
 
   private void cleanupFile(Path name) throws IOException {
@@ -112,13 +114,20 @@ public class TestFSOutputSummer {
   }
   
   /**
-   * Test write opeation for output stream in DFS.
+   * Test write operation for output stream in DFS.
    */
   @Test
   public void testFSOutputSummer() throws Exception {
+    doTestFSOutputSummer("CRC32");
+    doTestFSOutputSummer("CRC32C");
+    doTestFSOutputSummer("NULL");
+  }
+  
+  private void doTestFSOutputSummer(String checksumType) throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
+    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, checksumType);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(NUM_OF_DATANODES)
                                                .build();
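[The checksum type this test now iterates over is an ordinary client setting. A minimal sketch of a writer pinning it, assuming a reachable cluster: DFS_CHECKSUM_TYPE_KEY is the real key ("dfs.checksum.type"); the URI and path are illustrative.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class ChecksumTypeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same knob the test exercises: CRC32, CRC32C, or NULL.
        conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
        FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
        Path p = new Path("/tmp/checksum-demo");
        FSDataOutputStream out = fs.create(p);
        try {
          out.writeBytes("checksummed with CRC32C\n");
        } finally {
          out.close();
        }
        // Mirrors the test's added sanity check on the file checksum.
        System.out.println(fs.getFileChecksum(p));
      }
    }
]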

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java Thu Jan 30 21:11:21 2014
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -528,77 +529,111 @@ public class TestCacheDirectives {
 
   @Test(timeout=60000)
   public void testCacheManagerRestart() throws Exception {
-    // Create and validate a pool
-    final String pool = "poolparty";
-    String groupName = "partygroup";
-    FsPermission mode = new FsPermission((short)0777);
-    long limit = 747;
-    dfs.addCachePool(new CachePoolInfo(pool)
-        .setGroupName(groupName)
-        .setMode(mode)
-        .setLimit(limit));
-    RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
-    assertTrue("No cache pools found", pit.hasNext());
-    CachePoolInfo info = pit.next().getInfo();
-    assertEquals(pool, info.getPoolName());
-    assertEquals(groupName, info.getGroupName());
-    assertEquals(mode, info.getMode());
-    assertEquals(limit, (long)info.getLimit());
-    assertFalse("Unexpected # of cache pools found", pit.hasNext());
-  
-    // Create some cache entries
-    int numEntries = 10;
-    String entryPrefix = "/party-";
-    long prevId = -1;
-    final Date expiry = new Date();
-    for (int i=0; i<numEntries; i++) {
-      prevId = dfs.addCacheDirective(
-          new CacheDirectiveInfo.Builder().
-            setPath(new Path(entryPrefix + i)).setPool(pool).
-            setExpiration(
-                CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).
-            build());
-    }
-    RemoteIterator<CacheDirectiveEntry> dit
-        = dfs.listCacheDirectives(null);
-    for (int i=0; i<numEntries; i++) {
-      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
-      CacheDirectiveInfo cd = dit.next().getInfo();
-      assertEquals(i+1, cd.getId().longValue());
-      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
-      assertEquals(pool, cd.getPool());
-    }
-    assertFalse("Unexpected # of cache directives found", dit.hasNext());
-  
-    // Restart namenode
-    cluster.restartNameNode();
+    SecondaryNameNode secondary = null;
+    try {
+      // Start a secondary namenode
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+          "0.0.0.0:0");
+      secondary = new SecondaryNameNode(conf);
   
-    // Check that state came back up
-    pit = dfs.listCachePools();
-    assertTrue("No cache pools found", pit.hasNext());
-    info = pit.next().getInfo();
-    assertEquals(pool, info.getPoolName());
-    assertEquals(pool, info.getPoolName());
-    assertEquals(groupName, info.getGroupName());
-    assertEquals(mode, info.getMode());
-    assertEquals(limit, (long)info.getLimit());
-    assertFalse("Unexpected # of cache pools found", pit.hasNext());
+      // Create and validate a pool
+      final String pool = "poolparty";
+      String groupName = "partygroup";
+      FsPermission mode = new FsPermission((short)0777);
+      long limit = 747;
+      dfs.addCachePool(new CachePoolInfo(pool)
+          .setGroupName(groupName)
+          .setMode(mode)
+          .setLimit(limit));
+      RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
+      assertTrue("No cache pools found", pit.hasNext());
+      CachePoolInfo info = pit.next().getInfo();
+      assertEquals(pool, info.getPoolName());
+      assertEquals(groupName, info.getGroupName());
+      assertEquals(mode, info.getMode());
+      assertEquals(limit, (long)info.getLimit());
+      assertFalse("Unexpected # of cache pools found", pit.hasNext());
+    
+      // Create some cache entries
+      int numEntries = 10;
+      String entryPrefix = "/party-";
+      long prevId = -1;
+      final Date expiry = new Date();
+      for (int i=0; i<numEntries; i++) {
+        prevId = dfs.addCacheDirective(
+            new CacheDirectiveInfo.Builder().
+              setPath(new Path(entryPrefix + i)).setPool(pool).
+              setExpiration(
+                  CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).
+              build());
+      }
+      RemoteIterator<CacheDirectiveEntry> dit
+          = dfs.listCacheDirectives(null);
+      for (int i=0; i<numEntries; i++) {
+        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+        CacheDirectiveInfo cd = dit.next().getInfo();
+        assertEquals(i+1, cd.getId().longValue());
+        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
+        assertEquals(pool, cd.getPool());
+      }
+      assertFalse("Unexpected # of cache directives found", dit.hasNext());
+      
+      // Checkpoint once to set some cache pools and directives on 2NN side
+      secondary.doCheckpoint();
+      
+      // Add some more CacheManager state
+      final String imagePool = "imagePool";
+      dfs.addCachePool(new CachePoolInfo(imagePool));
+      prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
+        .setPath(new Path("/image")).setPool(imagePool).build());
+
+      // Save a new image to force a fresh fsimage download
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.saveNamespace();
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+      // Checkpoint again forcing a reload of FSN state
+      boolean fetchImage = secondary.doCheckpoint();
+      assertTrue("Secondary should have fetched a new fsimage from NameNode",
+          fetchImage);
+
+      // Remove temp pool and directive
+      dfs.removeCachePool(imagePool);
+
+      // Restart namenode
+      cluster.restartNameNode();
+    
+      // Check that state came back up
+      pit = dfs.listCachePools();
+      assertTrue("No cache pools found", pit.hasNext());
+      info = pit.next().getInfo();
+      assertEquals(pool, info.getPoolName());
+      assertEquals(pool, info.getPoolName());
+      assertEquals(groupName, info.getGroupName());
+      assertEquals(mode, info.getMode());
+      assertEquals(limit, (long)info.getLimit());
+      assertFalse("Unexpected # of cache pools found", pit.hasNext());
+    
+      dit = dfs.listCacheDirectives(null);
+      for (int i=0; i<numEntries; i++) {
+        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+        CacheDirectiveInfo cd = dit.next().getInfo();
+        assertEquals(i+1, cd.getId().longValue());
+        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
+        assertEquals(pool, cd.getPool());
+        assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
+      }
+      assertFalse("Unexpected # of cache directives found", dit.hasNext());
   
-    dit = dfs.listCacheDirectives(null);
-    for (int i=0; i<numEntries; i++) {
-      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
-      CacheDirectiveInfo cd = dit.next().getInfo();
-      assertEquals(i+1, cd.getId().longValue());
-      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
-      assertEquals(pool, cd.getPool());
-      assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
+      long nextId = dfs.addCacheDirective(
+            new CacheDirectiveInfo.Builder().
+              setPath(new Path("/foobar")).setPool(pool).build());
+      assertEquals(prevId + 1, nextId);
+    } finally {
+      if (secondary != null) {
+        secondary.shutdown();
+      }
     }
-    assertFalse("Unexpected # of cache directives found", dit.hasNext());
-
-    long nextId = dfs.addCacheDirective(
-          new CacheDirectiveInfo.Builder().
-            setPath(new Path("/foobar")).setPool(pool).build());
-    assertEquals(prevId + 1, nextId);
   }
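[The heart of this rewrite is the sequence that forces the secondary to download a fresh fsimage rather than replay edits. Condensed below into a standalone helper for clarity; this is a sketch extracted from the test body, assuming access to these classes, with doCheckpoint's boolean return as introduced by this commit.

    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
    import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;

    public class CheckpointSketch {
      // Save a new fsimage on the NameNode, then checkpoint; the 2NN
      // should fetch that image instead of replaying edits.
      static boolean forceImageDownload(DistributedFileSystem dfs,
          SecondaryNameNode secondary) throws Exception {
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); // quiesce namespace
        dfs.saveNamespace();                            // write new fsimage
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        return secondary.doCheckpoint();                // true if fetched
      }
    }
]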
 
   /**

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Thu Jan 30 21:11:21 2014
@@ -1634,7 +1634,7 @@ public class TestCheckpoint {
    * Test that the secondary namenode correctly deletes temporary edits
    * on startup.
    */
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testDeleteTemporaryEditsOnStartup() throws IOException {
     Configuration conf = new HdfsConfiguration();
     SecondaryNameNode secondary = null;

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java Thu Jan 30 21:11:21 2014
@@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -66,7 +66,7 @@ public class TestGetImageServlet {
     AccessControlList acls = Mockito.mock(AccessControlList.class);
     Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
     ServletContext context = Mockito.mock(ServletContext.class);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
     
     // Make sure that NN2 is considered a valid fsimage/edits requestor.
     assertTrue(GetImageServlet.isValidRequestor(context,

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java Thu Jan 30 21:11:21 2014
@@ -37,7 +37,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -119,7 +119,7 @@ public class TestTransferFsImage {
    */
   @Test(timeout = 5000)
   public void testImageTransferTimeout() throws Exception {
-    HttpServer testServer = HttpServerFunctionalTest.createServer("hdfs");
+    HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
     try {
       testServer.addServlet("GetImage", "/getimage", TestGetImageServlet.class);
       testServer.start();

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java?rev=1562964&r1=1562963&r2=1562964&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java (original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java Thu Jan 30 21:11:21 2014
@@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.ipc.ProtobufRpcEngine.Server;
 import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -89,7 +89,7 @@ public class SnapshotTestHelper {
     setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));
     
     setLevel2OFF(DataBlockScanner.LOG);
-    setLevel2OFF(HttpServer.LOG);
+    setLevel2OFF(HttpServer2.LOG);
     setLevel2OFF(DataNode.LOG);
     setLevel2OFF(BlockPoolSliceStorage.LOG);
     setLevel2OFF(LeaseManager.LOG);


