hadoop-hdfs-commits mailing list archives

From: w...@apache.org
Subject: svn commit: r1590766 [4/4] - in /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/ hadoop-hdfs/src/main/bin/...
Date: Mon, 28 Apr 2014 19:40:15 GMT
Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java Mon Apr 28 19:40:06 2014
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
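
The import swap above is this commit's recurring cleanup: junit.framework.Assert is the old JUnit 3 assertion class, and JUnit 4 tests should use org.junit.Assert instead, either directly or via static import (as the later TestHdfsServerConstants and TestIncrementalBlockReports hunks do). A minimal sketch of the JUnit 4 style; the test class and values here are illustrative, not from the commit:

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class ExampleReadTest {
      @Test
      public void testBytesRead() {
        byte[] buf = new byte[] {1, 2, 3};
        // Same assertEquals name, but resolved against org.junit.Assert.
        assertEquals(3, buf.length);
      }
    }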

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java Mon Apr 28 19:40:06 2014
@@ -25,7 +25,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.List;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -208,7 +208,7 @@ public class TestQuorumJournalManagerUni
         anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
     
    // And the third log does not respond
-    SettableFuture<Void> slowLog = SettableFuture.<Void>create();
+    SettableFuture<Void> slowLog = SettableFuture.create();
     Mockito.doReturn(slowLog).when(spyLoggers.get(2)).sendEdits(
         anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
     stm.flush();
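
Several hunks below make the same change as the SettableFuture line here: the explicit type witness in SettableFuture.<Void>create() is redundant because the compiler infers the type parameter from the assignment target. A standalone Guava sketch, with an illustrative class name:

    import com.google.common.util.concurrent.SettableFuture;

    public class InferenceExample {
      public static void main(String[] args) {
        // The target type fixes T = Void; no witness needed.
        SettableFuture<Void> slowLog = SettableFuture.create();
        // The explicit form stays legal and means exactly the same thing.
        SettableFuture<Void> explicit = SettableFuture.<Void>create();
        slowLog.set(null);
        explicit.set(null);
        System.out.println(slowLog.isDone() && explicit.isDone()); // true
      }
    }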

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java Mon Apr 28 19:40:06 2014
@@ -101,7 +101,6 @@ public class BlockManagerTestUtil {
   }
 
   /**
-   * @param blockManager
    * @return replication monitor thread instance from block manager.
    */
   public static Daemon getReplicationThread(final BlockManager blockManager)
@@ -111,7 +110,6 @@ public class BlockManagerTestUtil {
   
   /**
    * Stop the replication monitor thread
-   * @param blockManager
    */
   public static void stopReplicationThread(final BlockManager blockManager) 
       throws IOException {
@@ -126,7 +124,6 @@ public class BlockManagerTestUtil {
   }
 
   /**
-   * @param blockManager
    * @return corruptReplicas from block manager
    */
   public static  CorruptReplicasMap getCorruptReplicas(final BlockManager blockManager){
@@ -135,7 +132,6 @@ public class BlockManagerTestUtil {
   }
 
   /**
-   * @param blockManager
    * @return computed block replication and block invalidation work that can be
    *         scheduled on data-nodes.
    * @throws IOException
@@ -158,7 +154,7 @@ public class BlockManagerTestUtil {
    * regardless of invalidation/replication limit configurations.
    * 
    * NB: you may want to set
-   * {@link DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY} to
+   * {@link DFSConfigKeys#DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY} to
    * a high value to ensure that all work is calculated.
    */
   public static int computeAllPendingWork(BlockManager bm) {
@@ -200,7 +196,7 @@ public class BlockManagerTestUtil {
   /**
    * Change whether the block placement policy will prefer the writer's
    * local Datanode or not.
-   * @param prefer
+   * @param prefer if true, prefer local node
    */
   public static void setWritingPrefersLocalNode(
       BlockManager bm, boolean prefer) {
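
Besides dropping the empty @param tags, the one substantive Javadoc fix above is the {@link} separator: a member reference uses '#', while '.' makes the tool treat the whole string as a class name and emit a broken link. In short:

    /**
     * Correct: '#' separates the class from the member.
     * {@link DFSConfigKeys#DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY}
     *
     * Wrong: javadoc tries to resolve a nested class named
     * DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY inside DFSConfigKeys.
     * {@link DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY}
     */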

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java Mon Apr 28 19:40:06 2014
@@ -171,9 +171,6 @@ public abstract class BlockReportTestBas
    * Utility routine to send block reports to the NN, either in a single call
    * or reporting one storage per call.
    *
-   * @param dnR
-   * @param poolId
-   * @param reports
    * @throws IOException
    */
   protected abstract void sendBlockReports(DatanodeRegistration dnR, String poolId,

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Mon Apr 28 19:40:06 2014
@@ -833,8 +833,8 @@ public class SimulatedFSDataset implemen
     
     /**
      * An input stream of size l with repeated bytes
-     * @param l
-     * @param iRepeatedData
+     * @param l size of the stream
+     * @param iRepeatedData byte that is repeated in the stream
      */
     SimulatedInputStream(long l, byte iRepeatedData) {
       length = l;
@@ -843,17 +843,14 @@ public class SimulatedFSDataset implemen
     
     /**
     * An input stream of the supplied data
-     * 
-     * @param iData
+     * @param iData data to construct the stream
      */
     SimulatedInputStream(byte[] iData) {
       data = iData;
       length = data.length;
-      
     }
     
     /**
-     * 
     * @return the length of the input stream
      */
     long getLength() {

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java Mon Apr 28 19:40:06 2014
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.da
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.*;
 import org.junit.Test;
 
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.fail;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertThat;
 

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java Mon Apr 28 19:40:06 2014
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static junit.framework.Assert.assertFalse;
+import static org.junit.Assert.assertFalse;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.atLeastOnce;

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Mon Apr 28 19:40:06 2014
@@ -131,14 +131,10 @@ public class CreateEditsLog {
     printUsageExit();
   }
   /**
-   * @param args
+   * @param args arguments
    * @throws IOException 
    */
-  public static void main(String[] args) 
-      throws IOException {
-
-
-
+  public static void main(String[] args)  throws IOException {
     long startingBlockId = 1;
     int numFiles = 0;
     short replication = 1;

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Mon Apr 28 19:40:06 2014
@@ -202,7 +202,7 @@ public class NNThroughputBenchmark imple
      * {@link #executeOp(int, int, String)}, which can have different meanings
      * depending on the operation performed.
      * 
-     * @param daemonId
+     * @param daemonId id of the daemon calling this method
      * @return the argument
      */
     abstract String getExecutionArgument(int daemonId);
@@ -322,11 +322,10 @@ public class NNThroughputBenchmark imple
     /**
      * Parse first 2 arguments, corresponding to the "-op" option.
      * 
-     * @param args
+     * @param args argument list
      * @return true if operation is all, which means that options not related
      * to this operation should be ignored, or false otherwise, meaning
      * that usage should be printed when an unrelated option is encountered.
-     * @throws IOException
      */
     protected boolean verifyOpArgument(List<String> args) {
       if(args.size() < 2 || ! args.get(0).startsWith("-op"))

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java Mon Apr 28 19:40:06 2014
@@ -304,7 +304,7 @@ public class TestAclTransformation {
       .add(aclEntry(DEFAULT, MASK, ALL))
       .add(aclEntry(DEFAULT, OTHER, READ))
       .build();
-    List<AclEntry> aclSpec = Lists.<AclEntry>newArrayList();
+    List<AclEntry> aclSpec = Lists.newArrayList();
     assertEquals(existing, filterAclEntriesByAclSpec(existing, aclSpec));
   }
 
@@ -705,7 +705,7 @@ public class TestAclTransformation {
       .add(aclEntry(DEFAULT, MASK, ALL))
       .add(aclEntry(DEFAULT, OTHER, READ))
       .build();
-    List<AclEntry> aclSpec = Lists.<AclEntry>newArrayList();
+    List<AclEntry> aclSpec = Lists.newArrayList();
     assertEquals(existing, mergeAclEntries(existing, aclSpec));
   }
 

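Same inference cleanup as in TestQuorumJournalManagerUnit: Lists.newArrayList() picks up <AclEntry> from the declared variable type. A standalone sketch with an illustrative class; the witness only matters where no target type is available (for example, some argument positions under pre-Java-8 inference rules):

    import java.util.List;

    import com.google.common.collect.Lists;

    public class WitnessExample {
      public static void main(String[] args) {
        // Assignment context supplies the element type.
        List<String> inferred = Lists.newArrayList();
        // Explicit witness: equivalent here, occasionally still required.
        List<String> witnessed = Lists.<String>newArrayList();
        inferred.add("x");
        witnessed.addAll(inferred);
        System.out.println(witnessed); // prints [x]
      }
    }
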
Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java Mon Apr 28 19:40:06 2014
@@ -24,7 +24,10 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.net.HttpURLConnection;
 import java.net.InetAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -33,9 +36,11 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
@@ -45,6 +50,16 @@ public class TestAuditLogger {
 
   private static final short TEST_PERMISSION = (short) 0654;
 
+  @Before
+  public void setup() {
+    DummyAuditLogger.initialized = false;
+    DummyAuditLogger.logCount = 0;
+    DummyAuditLogger.remoteAddr = null;
+
+    Configuration conf = new HdfsConfiguration();
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);    
+  }
+
   /**
    * Tests that AuditLogger works as expected.
    */
@@ -69,6 +84,57 @@ public class TestAuditLogger {
     }
   }
 
+  @Test
+  public void testWebHdfsAuditLogger() throws IOException, URISyntaxException {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
+        DummyAuditLogger.class.getName());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    
+    GetOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
+    try {
+      cluster.waitClusterUp();
+      assertTrue(DummyAuditLogger.initialized);      
+      URI uri = new URI(
+          "http",
+          NetUtils.getHostPortString(cluster.getNameNode().getHttpAddress()),
+          "/webhdfs/v1/", op.toQueryString(), null);
+      
+      // non-proxy request
+      HttpURLConnection conn = (HttpURLConnection) uri.toURL().openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.connect();
+      assertEquals(200, conn.getResponseCode());
+      conn.disconnect();
+      assertEquals(1, DummyAuditLogger.logCount);
+      assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
+      
+      // non-trusted proxied request
+      conn = (HttpURLConnection) uri.toURL().openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
+      conn.connect();
+      assertEquals(200, conn.getResponseCode());
+      conn.disconnect();
+      assertEquals(2, DummyAuditLogger.logCount);
+      assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
+      
+      // trusted proxied request
+      conf.set(ProxyUsers.CONF_HADOOP_PROXYSERVERS, "127.0.0.1");
+      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+      conn = (HttpURLConnection) uri.toURL().openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
+      conn.connect();
+      assertEquals(200, conn.getResponseCode());
+      conn.disconnect();
+      assertEquals(3, DummyAuditLogger.logCount);
+      assertEquals("1.1.1.1", DummyAuditLogger.remoteAddr);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Minor test related to HADOOP-9155. Verify that during a
    * FileSystem.setPermission() operation, the stat passed in during the
@@ -128,7 +194,8 @@ public class TestAuditLogger {
     static boolean initialized;
     static int logCount;
     static short foundPermission;
-
+    static String remoteAddr;
+    
     public void initialize(Configuration conf) {
       initialized = true;
     }
@@ -140,6 +207,7 @@ public class TestAuditLogger {
     public void logAuditEvent(boolean succeeded, String userName,
         InetAddress addr, String cmd, String src, String dst,
         FileStatus stat) {
+      remoteAddr = addr.getHostAddress();
       logCount++;
       if (stat != null) {
         foundPermission = stat.getPermission().toShort();
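
The new testWebHdfsAuditLogger pins down how the audit logger attributes a remote address behind a proxy: the X-Forwarded-For header is honored only after the connecting proxy's address is registered as trusted (ProxyUsers.CONF_HADOOP_PROXYSERVERS above); otherwise the TCP peer address is logged. A hedged client-side sketch; the host, port, and class name are illustrative, not from the commit:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class ForwardedForProbe {
      public static void main(String[] args) throws Exception {
        // Assumes a webhdfs endpoint is listening at this address.
        URL url = new URL("http://127.0.0.1:50070/webhdfs/v1/?op=GETFILESTATUS");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        // Claim the request originated at 1.1.1.1. Per the test above, the
        // audit log records 1.1.1.1 only when 127.0.0.1 is a trusted proxy.
        conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
        System.out.println(conn.getResponseCode());
        conn.disconnect();
      }
    }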

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Mon Apr 28 19:40:06 2014
@@ -622,11 +622,11 @@ public class TestCheckpoint {
   }
 
   private File filePathContaining(final String substring) {
-    return Mockito.<File>argThat(
+    return Mockito.argThat(
         new ArgumentMatcher<File>() {
           @Override
           public boolean matches(Object argument) {
-            String path = ((File)argument).getAbsolutePath();
+            String path = ((File) argument).getAbsolutePath();
             return path.contains(substring);
           }
         });
@@ -2441,8 +2441,8 @@ public class TestCheckpoint {
   
   private static List<File> getCheckpointCurrentDirs(SecondaryNameNode secondary) {
     List<File> ret = Lists.newArrayList();
-    for (URI u : secondary.getCheckpointDirs()) {
-      File checkpointDir = new File(u.getPath());
+    for (String u : secondary.getCheckpointDirectories()) {
+      File checkpointDir = new File(URI.create(u).getPath());
       ret.add(new File(checkpointDir, "current"));
     }
     return ret;

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java Mon Apr 28 19:40:06 2014
@@ -24,7 +24,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.EnumSet;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java Mon Apr 28 19:40:06 2014
@@ -35,6 +35,87 @@ import org.mortbay.util.ajax.JSON;
  */
 public class TestFSNamesystemMBean {
 
+  /**
+   * MBeanClient tries to access FSNamesystem/FSNamesystemState/NameNodeInfo
+   * JMX properties. If it can access all the properties, the test is
+   * considered successful.
+   */
+  private static class MBeanClient extends Thread {
+    private boolean succeeded = false;
+    @Override
+    public void run() {
+      try {
+        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+
+        // Metrics that belong to "FSNamesystem", these are metrics that
+        // come from hadoop metrics framework for the class FSNamesystem.
+        ObjectName mxbeanNamefsn = new ObjectName(
+            "Hadoop:service=NameNode,name=FSNamesystem");
+        Integer blockCapacity = (Integer) (mbs.getAttribute(mxbeanNamefsn,
+            "BlockCapacity"));
+
+        // Metrics that belong to "FSNamesystemState".
+        // These are metrics that FSNamesystem registers directly with MBeanServer.
+        ObjectName mxbeanNameFsns = new ObjectName(
+            "Hadoop:service=NameNode,name=FSNamesystemState");
+        String FSState = (String) (mbs.getAttribute(mxbeanNameFsns,
+            "FSState"));
+        Long blocksTotal = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "BlocksTotal"));
+        Long capacityTotal = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "CapacityTotal"));
+        Long capacityRemaining = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "CapacityRemaining"));
+        Long capacityUsed = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "CapacityUsed"));
+        Long filesTotal = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "FilesTotal"));
+        Long pendingReplicationBlocks = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "PendingReplicationBlocks"));
+        Long underReplicatedBlocks = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "UnderReplicatedBlocks"));
+        Long scheduledReplicationBlocks = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "ScheduledReplicationBlocks"));
+        Integer totalLoad = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "TotalLoad"));
+        Integer numLiveDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "NumLiveDataNodes"));
+        Integer numDeadDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+           "NumDeadDataNodes"));
+        Integer numStaleDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "NumStaleDataNodes"));
+        Integer numDecomLiveDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "NumDecomLiveDataNodes"));
+        Integer numDecomDeadDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "NumDecomDeadDataNodes"));
+        Integer numDecommissioningDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "NumDecommissioningDataNodes"));
+        String snapshotStats = (String) (mbs.getAttribute(mxbeanNameFsns,
+            "SnapshotStats"));
+        Long MaxObjects = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "MaxObjects"));
+
+        // Metrics that belong to "NameNodeInfo".
+        // These are metrics that FSNamesystem registers directly with MBeanServer.
+        ObjectName mxbeanNameNni = new ObjectName(
+            "Hadoop:service=NameNode,name=NameNodeInfo");
+        String safemode = (String) (mbs.getAttribute(mxbeanNameNni,
+            "Safemode"));
+        String liveNodes = (String) (mbs.getAttribute(mxbeanNameNni,
+            "LiveNodes"));
+        String deadNodes = (String) (mbs.getAttribute(mxbeanNameNni,
+            "DeadNodes"));
+        String decomNodes = (String) (mbs.getAttribute(mxbeanNameNni,
+            "DecomNodes"));
+        String corruptFiles = (String) (mbs.getAttribute(mxbeanNameNni,
+            "CorruptFiles"));
+
+        succeeded = true;
+      } catch (Exception e) {
+      }
+    }
+  }
+
   @Test
   public void test() throws Exception {
     Configuration conf = new Configuration();
@@ -73,4 +154,35 @@ public class TestFSNamesystemMBean {
       }
     }
   }
-}
+
+  // The test makes sure a JMX request can be processed even if the
+  // namesystem's writeLock is owned by another thread.
+  @Test
+  public void testWithFSNamesystemWriteLock() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    FSNamesystem fsn = null;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+
+      fsn = cluster.getNameNode().namesystem;
+      fsn.writeLock();
+
+      MBeanClient client = new MBeanClient();
+      client.start();
+      client.join(20000);
+      assertTrue("JMX calls are blocked when FSNamesystem's writeLock " +
+          "is owned by another thread", client.succeeded);
+      client.interrupt();
+    } finally {
+      if (fsn != null && fsn.hasWriteLock()) {
+        fsn.writeUnlock();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}
\ No newline at end of file
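
The MBeanClient added above is plain JMX: obtain the platform MBeanServer, build an ObjectName, and read attributes. The second test verifies that serving these attributes does not require the namesystem writeLock, so the reader thread finishes while another thread holds it. The same pattern against a bean every JVM provides, so this sketch runs standalone:

    import java.lang.management.ManagementFactory;

    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class JmxProbe {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // The Hadoop beans above ("Hadoop:service=NameNode,name=...")
        // need a running NameNode; java.lang:type=Memory always exists.
        ObjectName name = new ObjectName("java.lang:type=Memory");
        Object heap = mbs.getAttribute(name, "HeapMemoryUsage");
        System.out.println(heap);
      }
    }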

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Mon Apr 28 19:40:06 2014
@@ -27,10 +27,9 @@ import static org.junit.Assert.fail;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -789,14 +788,6 @@ public class TestINodeFile {
     return dir; // Last Inode in the chain
   }
   
-  private static void checkEquals(byte[][] expected, byte[][] actual) {
-    assertEquals(expected.length, actual.length);
-    int i = 0;
-    for (byte[] e : expected) {
-      assertTrue(Arrays.equals(e, actual[i++]));
-    }
-  }
-  
   /**
    * Test for {@link FSDirectory#getPathComponents(INode)}
    */
@@ -806,7 +797,7 @@ public class TestINodeFile {
     INode inode = createTreeOfInodes(path);
     byte[][] expected = INode.getPathComponents(path);
     byte[][] actual = FSDirectory.getPathComponents(inode);
-    checkEquals(expected, actual);
+    DFSTestUtil.checkComponentsEquals(expected, actual);
   }
   
   /**

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java Mon Apr 28 19:40:06 2014
@@ -17,20 +17,22 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import javax.management.*;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.net.URL;
+
 public class TestSecondaryWebUi {
   
   private static MiniDFSCluster cluster;
@@ -41,6 +43,7 @@ public class TestSecondaryWebUi {
   public static void setUpCluster() throws IOException {
     conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
         "0.0.0.0:0");
+    conf.setLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 500);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
         .build();
     cluster.waitActive();
@@ -59,18 +62,34 @@ public class TestSecondaryWebUi {
   }
 
   @Test
-  public void testSecondaryWebUi() throws IOException {
-    String pageContents = DFSTestUtil.urlGet(new URL("http://localhost:" +
-        SecondaryNameNode.getHttpAddress(conf).getPort() + "/status.jsp"));
-    assertTrue("Didn't find \"Last Checkpoint\"",
-        pageContents.contains("Last Checkpoint"));
+  public void testSecondaryWebUi()
+          throws IOException, MalformedObjectNameException,
+                 AttributeNotFoundException, MBeanException,
+                 ReflectionException, InstanceNotFoundException {
+    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    ObjectName mxbeanName = new ObjectName(
+            "Hadoop:service=SecondaryNameNode,name=SecondaryNameNodeInfo");
+
+    String[] checkpointDir = (String[]) mbs.getAttribute(mxbeanName,
+            "CheckpointDirectories");
+    Assert.assertArrayEquals(checkpointDir, snn.getCheckpointDirectories());
+    String[] checkpointEditlogDir = (String[]) mbs.getAttribute(mxbeanName,
+            "CheckpointEditlogDirectories");
+    Assert.assertArrayEquals(checkpointEditlogDir,
+            snn.getCheckpointEditlogDirectories());
   }
-  
+
   @Test
-  public void testSecondaryWebJmx() throws MalformedURLException, IOException {
+  public void testSecondaryWebUiJsp()
+          throws IOException, MalformedObjectNameException,
+                 AttributeNotFoundException, MBeanException,
+                 ReflectionException, InstanceNotFoundException {
     String pageContents = DFSTestUtil.urlGet(new URL("http://localhost:" +
-        SecondaryNameNode.getHttpAddress(conf).getPort() + "/jmx"));
-    assertTrue(pageContents.contains(
-        "Hadoop:service=SecondaryNameNode,name=JvmMetrics"));
+        SecondaryNameNode.getHttpAddress(conf).getPort() + "/status.jsp"));
+    Assert.assertTrue("Didn't find \"Last Checkpoint\"",
+        pageContents.contains("Last Checkpoint"));
+    Assert.assertTrue("Didn't find Checkpoint Transactions: 500",
+        pageContents.contains("Checkpoint Transactions: 500"));
+
   }
 }

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java Mon Apr 28 19:40:06 2014
@@ -65,7 +65,7 @@ public class TestTransferFsImage {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(0).build();
     NNStorage mockStorage = Mockito.mock(NNStorage.class);
-    List<File> localPath = Collections.<File>singletonList(
+    List<File> localPath = Collections.singletonList(
         new File("/xxxxx-does-not-exist/blah"));
        
     try {

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Mon Apr 28 19:40:06 2014
@@ -1829,7 +1829,7 @@ public class TestRenameWithSnapshots {
   }
   
   /**
-   * move a directory to its prior descedant
+   * move a directory to its prior descendant
    */
   @Test
   public void testRename2PreDescendant_2() throws Exception {

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java Mon Apr 28 19:40:06 2014
@@ -414,6 +414,6 @@ public class TestDFSHAAdmin {
   }
   
   private StateChangeRequestInfo anyReqInfo() {
-    return Mockito.<StateChangeRequestInfo>any();
+    return Mockito.any();
   }
 }

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java Mon Apr 28 19:40:06 2014
@@ -56,8 +56,7 @@ public class TestOfflineEditsViewer {
 
   @SuppressWarnings("deprecation")
   private static ImmutableSet<FSEditLogOpCodes> skippedOps() {
-    ImmutableSet.Builder<FSEditLogOpCodes> b = ImmutableSet
-        .<FSEditLogOpCodes> builder();
+    ImmutableSet.Builder<FSEditLogOpCodes> b = ImmutableSet.builder();
 
     // Deprecated opcodes
     b.add(FSEditLogOpCodes.OP_DATANODE_ADD)


