hadoop-hdfs-commits mailing list archives

From s..@apache.org
Subject svn commit: r993542 - in /hadoop/hdfs/trunk: CHANGES.txt src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
Date Tue, 07 Sep 2010 21:53:03 GMT
Author: shv
Date: Tue Sep  7 21:53:03 2010
New Revision: 993542

URL: http://svn.apache.org/viewvc?rev=993542&view=rev
Log:
HDFS-1361. Add -fileStatus operation to NNThroughputBenchmark. Contributed by Konstantin Shvachko.

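For context: with this patch applied, the new operation is selected like the existing ones on the benchmark's command line, e.g. "hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark -op fileStatus -keepResults" (illustrative only; the authoritative option names are the usage strings in the diff below).
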
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=993542&r1=993541&r2=993542&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue Sep  7 21:53:03 2010
@@ -27,12 +27,14 @@ Trunk (unreleased changes)
     HDFS-1150. Verify datanodes' identities to clients in secure clusters.
     (jghoman)
 
-    aHDFS-1330. Make RPCs to DataNodes timeout. (hairong)
+    HDFS-1330. Make RPCs to DataNodes timeout. (hairong)
 
     HDFS-202.  HDFS support of listLocatedStatus introduced in HADOOP-6870.
     HDFS piggyback block locations to each file status when listing a
     directory.  (hairong)
 
+    HDFS-1361. Add -fileStatus operation to NNThroughputBenchmark. (shv)
+
   IMPROVEMENTS
 
     HDFS-1096. fix for prev. commit. (boryas)

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=993542&r1=993541&r2=993542&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Tue Sep  7 21:53:03 2010
@@ -76,6 +76,8 @@ import org.apache.log4j.LogManager;
  * {@link NameNode#refreshUserToGroupsMappings(Configuration)} after
  * every G operations, which purges the name-node's user group cache.
  * By default the refresh is never called.</li>
+ * <li>-keepResults do not clean up the name-space after execution.</li>
+ * <li>-useExisting do not recreate the name-space, use existing data.</li>
  * </ol>
  * 
  * The benchmark first generates inputs for each thread so that the
@@ -91,7 +93,7 @@ public class NNThroughputBenchmark {
   private static final Log LOG = LogFactory.getLog(NNThroughputBenchmark.class);
   private static final int BLOCK_SIZE = 16;
   private static final String GENERAL_OPTIONS_USAGE = 
-    "    [-logLevel L] [-UGCacheRefreshCount G]";
+    "     [-keepResults] | [-logLevel L] | [-UGCacheRefreshCount G]";
 
   static Configuration config;
   static NameNode nameNode;
@@ -138,8 +140,7 @@ public class NNThroughputBenchmark {
   abstract class OperationStatsBase {
     protected static final String BASE_DIR_NAME = "/nnThroughputBenchmark";
     protected static final String OP_ALL_NAME = "all";
-    protected static final String OP_ALL_USAGE = "-op all " +
-                                  "<other ops options> [-keepResults]";
+    protected static final String OP_ALL_USAGE = "-op all <other ops options>";
 
     protected String baseDir;
     protected short replication;
@@ -671,6 +672,34 @@ public class NNThroughputBenchmark {
   }
 
   /**
+   * List file status statistics.
+   * 
+   * Measure how many get-file-status calls the name-node can handle per second.
+   */
+  class FileStatusStats extends OpenFileStats {
+    // Operation types
+    static final String OP_FILE_STATUS_NAME = "fileStatus";
+    static final String OP_FILE_STATUS_USAGE = 
+      "-op " + OP_FILE_STATUS_NAME + OP_USAGE_ARGS;
+
+    FileStatusStats(List<String> args) {
+      super(args);
+    }
+
+    String getOpName() {
+      return OP_FILE_STATUS_NAME;
+    }
+
+    long executeOp(int daemonId, int inputIdx, String ignore) 
+    throws IOException {
+      long start = System.currentTimeMillis();
+      nameNode.getFileInfo(fileNames[daemonId][inputIdx]);
+      long end = System.currentTimeMillis();
+      return end-start;
+    }
+  }
+
+  /**
    * Rename file statistics.
    * 
    * Measure how many rename calls the name-node can handle per second.
@@ -1160,6 +1189,7 @@ public class NNThroughputBenchmark {
         + " | \n\t" + CreateFileStats.OP_CREATE_USAGE
         + " | \n\t" + OpenFileStats.OP_OPEN_USAGE
         + " | \n\t" + DeleteFileStats.OP_DELETE_USAGE
+        + " | \n\t" + FileStatusStats.OP_FILE_STATUS_USAGE
         + " | \n\t" + RenameFileStats.OP_RENAME_USAGE
         + " | \n\t" + BlockReportStats.OP_BLOCK_REPORT_USAGE
         + " | \n\t" + ReplicationStats.OP_REPLICATION_USAGE
@@ -1197,6 +1227,10 @@ public class NNThroughputBenchmark {
         opStat = bench.new DeleteFileStats(args);
         ops.add(opStat);
       }
+      if(runAll || FileStatusStats.OP_FILE_STATUS_NAME.equals(type)) {
+        opStat = bench.new FileStatusStats(args);
+        ops.add(opStat);
+      }
       if(runAll || RenameFileStats.OP_RENAME_NAME.equals(type)) {
         opStat = bench.new RenameFileStats(args);
         ops.add(opStat);

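For orientation, the call being timed above is the server-side counterpart of FileSystem.getFileStatus(): the benchmark bypasses the RPC layer and invokes NameNode.getFileInfo() directly from its daemon threads. A minimal, hypothetical client-side sketch of the same call (class name, path, and timing harness are illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class GetFileStatusTimingSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Connect to the default file system named in the configuration.
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical file; the benchmark generates its own names under
        // the /nnThroughputBenchmark base directory.
        Path path = new Path("/nnThroughputBenchmark/fileStatus/example");
        long start = System.currentTimeMillis();
        FileStatus status = fs.getFileStatus(path); // one get-file-status call
        long elapsed = System.currentTimeMillis() - start;
        System.out.println(status.getPath() + " fetched in " + elapsed + " ms");
      }
    }
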
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java?rev=993542&r1=993541&r2=993542&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java Tue Sep  7 21:53:03 2010
@@ -19,18 +19,18 @@ package org.apache.hadoop.hdfs.server.na
 
 import java.util.Arrays;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.junit.Test;
 
-public class TestNNThroughputBenchmark extends TestCase {
+public class TestNNThroughputBenchmark {
 
   /**
    * This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
    */
+  @Test
   public void testNNThroughput() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);

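Following the same pattern, a JUnit 4 test limited to the new operation could look like the sketch below. The runBenchmark(Configuration, List<String>) entry point and the HTTP-address setting are assumed from the existing test's imports and body; the class and method names are hypothetical.

    package org.apache.hadoop.hdfs.server.namenode;

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.junit.Test;

    public class TestFileStatusThroughputSketch {

      /**
       * Run only the new -fileStatus benchmark against an in-process
       * name-node, mirroring TestNNThroughputBenchmark#testNNThroughput().
       */
      @Test
      public void testFileStatusThroughput() throws Exception {
        Configuration conf = new HdfsConfiguration();
        FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
        conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
        NNThroughputBenchmark.runBenchmark(conf,
            Arrays.asList(new String[] {"-op", "fileStatus"}));
      }
    }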

