hadoop-common-commits mailing list archives

From omal...@apache.org
Subject svn commit: r785005 [3/3] - in /hadoop/core/branches/HADOOP-4687/hdfs: lib/ src/contrib/hdfsproxy/ src/contrib/hdfsproxy/bin/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/co...
Date Mon, 15 Jun 2009 22:13:09 GMT
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java Mon Jun 15 22:13:06 2009
@@ -17,22 +17,21 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
+
 import java.io.File;
+
 import junit.framework.TestCase;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
-
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
-
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 
 /**
  * This test ensures the appropriate response (successful or failure) from 
@@ -42,8 +41,6 @@
   
   private static final Log LOG = LogFactory.getLog(
                                                    "org.apache.hadoop.hdfs.TestDFSStartupVersions");
-  private static Path TEST_ROOT_DIR = new Path(
-                                               System.getProperty("test.build.data","/tmp").toString().replace(' ', '+'));
   private MiniDFSCluster cluster = null;
   
   /**

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Mon Jun 15 22:13:06 2009
@@ -22,15 +22,18 @@
 import java.io.FileReader;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.EnumSet;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
@@ -617,6 +620,100 @@
       cluster.shutdown();
     }
   }
+  
+  /**
+   * Test file creation with all supported flags.
+   */
+  public void testFileCreationWithFlags() throws IOException {
+    Configuration conf = new Configuration();
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    Path path = new Path("/" + System.currentTimeMillis()
+        + "-testFileCreationWithFlags");
+    FSDataOutputStream out = null;
+
+    // append to a non-existent file; it should throw an IOException
+    try {
+      IOException expectedException = null;
+      EnumSet<CreateFlag> appendNoFile = EnumSet.of(CreateFlag.APPEND);
+      // this should throw an IOException, because the file does not exist
+      try {
+        out = createFileWithFlag(fs, path, 1, appendNoFile);
+      } catch (IOException e) {
+        expectedException = e;
+      } finally {
+        if (out != null)
+          out.close();
+      }
+      assertTrue(
+          "Appending to a non-existent file without the CREATE flag should throw an IOException",
+          expectedException != null);
+
+      // the file now exists; recreating it with CreateFlag.APPEND and
+      // CreateFlag.CREATE should not throw any exception.
+      EnumSet<CreateFlag> appendAndCreate = EnumSet.of(CreateFlag.APPEND,
+          CreateFlag.CREATE);
+      out = createFileWithFlag(fs, path, 1, appendAndCreate);
+      out.close();
+
+      // the file already exists; recreating it with only the
+      // CreateFlag.CREATE flag should throw an IOException
+      expectedException = null;
+      EnumSet<CreateFlag> createExistsFile = EnumSet.of(CreateFlag.CREATE);
+      // this should throw an IOException, because the file already exists
+      try {
+        createFileWithFlag(fs, path, 1, createExistsFile);
+      } catch (IOException e) {
+        expectedException = e;
+      }
+      assertTrue(
+          "Creating a file that already exists should throw an IOException",
+          expectedException != null);
+
+      // the file exists; recreate it with the CreateFlag.OVERWRITE flag.
+      EnumSet<CreateFlag> overwriteFile = EnumSet.of(CreateFlag.OVERWRITE);
+      out = createFileWithFlag(fs, path, 1, overwriteFile);
+      out.close();
+
+      // the file exists; recreate it with CreateFlag.OVERWRITE together with
+      // CreateFlag.CREATE. This has the same effect as specifying only
+      // CreateFlag.OVERWRITE.
+      EnumSet<CreateFlag> overwriteWithCreateFile = EnumSet.of(
+          CreateFlag.OVERWRITE, CreateFlag.CREATE);
+      out = createFileWithFlag(fs, path, 1, overwriteWithCreateFile);
+      out.close();
+
+      // the file exists; recreate it with CreateFlag.OVERWRITE together with
+      // CreateFlag.APPEND. This has the same effect as specifying only
+      // CreateFlag.OVERWRITE.
+      EnumSet<CreateFlag> overwriteWithAppendFile = EnumSet.of(
+          CreateFlag.OVERWRITE, CreateFlag.APPEND);
+      out = createFileWithFlag(fs, path, 1, overwriteWithAppendFile);
+      out.close();
+
+      fs.delete(path, true);
+
+      EnumSet<CreateFlag> createNonExistsFile = EnumSet.of(CreateFlag.CREATE,
+          CreateFlag.OVERWRITE);
+      out = createFileWithFlag(fs, path, 1, createNonExistsFile);
+      out.close();
+      fs.delete(path, true);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  // creates a file with the flag API
+  static FSDataOutputStream createFileWithFlag(FileSystem fileSys, Path name,
+      int repl, EnumSet<CreateFlag> flag) throws IOException {
+    System.out.println("createFile: Created " + name + " with " + repl
+        + " replica(s).");
+    FSDataOutputStream stm = fileSys.create(name, FsPermission.getDefault(),
+        flag, fileSys.getConf().getInt("io.file.buffer.size", 4096),
+        (short) repl, (long) blockSize, null);
+    return stm;
+  }
 
 /**
  * Test that file data becomes available before file is closed.

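For context, the CreateFlag semantics exercised by testFileCreationWithFlags above reduce to three rules: CREATE alone fails if the file exists, APPEND alone fails if it does not, and OVERWRITE truncates regardless of which other flags accompany it. A minimal sketch of the same API, assuming a FileSystem obtained from a running MiniDFSCluster as in the test (class and variable names here are illustrative, not part of the commit):

    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class CreateFlagSketch {
      // Same call shape as the createFileWithFlag helper above.
      static FSDataOutputStream open(FileSystem fs, Path path,
          EnumSet<CreateFlag> flags) throws IOException {
        return fs.create(path, FsPermission.getDefault(), flags,
            fs.getConf().getInt("io.file.buffer.size", 4096),
            (short) 1, 64L * 1024 * 1024, null);
      }

      static void demo(FileSystem fs) throws IOException {
        Path p = new Path("/createflag-demo");
        // CREATE succeeds only while the file does not yet exist.
        open(fs, p, EnumSet.of(CreateFlag.CREATE)).close();
        // OVERWRITE truncates the now-existing file.
        open(fs, p, EnumSet.of(CreateFlag.OVERWRITE)).close();
        // APPEND|CREATE works whether or not the file exists.
        open(fs, p, EnumSet.of(CreateFlag.APPEND, CreateFlag.CREATE)).close();
        fs.delete(p, true);
      }
    }
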
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java Mon Jun 15 22:13:06 2009
@@ -35,10 +35,6 @@
   static final int blockSize = 8192;
   static final int fileSize = 16384;
 
-  private static String TEST_ROOT_DIR =
-    new Path(System.getProperty("test.build.data","/tmp"))
-    .toString().replace(' ', '+');
-  
   private void writeFile(FileSystem fileSys, Path name, int repl,
                          int fileSize, int blockSize)
     throws IOException {
@@ -99,7 +95,6 @@
 
       // create an empty directory
       //
-      Path parentDir = new Path("/test");
       Path dir = new Path("/test/mkdirs");
       assertTrue(fs.mkdirs(dir));
       assertTrue(fs.exists(dir));

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Mon Jun 15 22:13:06 2009
@@ -19,19 +19,24 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.net.UnknownHostException;
 
 import junit.framework.TestCase;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.BackupNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
+import org.apache.hadoop.net.DNS;
 
 /**
  * This test checks correctness of port usage by hdfs components:
- * NameNode, DataNode, and SecondaryNamenode.
+ * NameNode, DataNode, SecondaryNameNode, and BackupNode.
  * 
  * The correct behavior is:<br> 
  * - when a specific port is provided the server must either start on that port 
@@ -40,17 +45,47 @@
  * a free port and start on it.
  */
 public class TestHDFSServerPorts extends TestCase {
+  public static final Log LOG = LogFactory.getLog(TestHDFSServerPorts.class);
+  
   public static final String NAME_NODE_HOST = "localhost:";
-  public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
+  public static final String NAME_NODE_HTTP_HOST = getFullHostName() + ":";
 
   Configuration config;
   File hdfsDir;
 
   /**
-   * Start the name-node.
+   * Attempt to determine the fully qualified domain name of this host
+   * to compare against during testing.
+   * 
+   * This is necessary because the BackupNode test only works correctly if
+   * the namenode starts its http server on the fully qualified address,
+   * since that is the address the backupnode will attempt to bind to as well.
+   * 
+   * @return the fully qualified hostname, or 127.0.0.1 if it cannot be
+   * determined
+   */
+  private static String getFullHostName() {
+    try {
+      return DNS.getDefaultHost("default");
+    } catch (UnknownHostException e) {
+      LOG.warn("Unable to determine hostname. This may interfere with " +
+          "obtaining valid test results.");
+      return "127.0.0.1";
+    }
+  }
+  
+  /**
+   * Get the base directory these tests should run in.
+   */
+  private String getTestingDir() {
+    return System.getProperty("test.build.data", "build/test/data");
+  }
+  
+  /**
+   * Start the namenode.
    */
   public NameNode startNameNode() throws IOException {
-    String dataDir = System.getProperty("test.build.data");
+    String dataDir = getTestingDir();
     hdfsDir = new File(dataDir, "dfs");
     if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
       throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
@@ -67,11 +102,39 @@
   }
 
   /**
-   * Start the data-node.
+   * Start the BackupNode
+   */
+  public BackupNode startBackupNode(Configuration conf) throws IOException {
+    String dataDir = getTestingDir();
+    // Set up testing environment directories
+    hdfsDir = new File(dataDir, "backupNode");
+    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+      throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
+    }
+    File currDir = new File(hdfsDir, "name2");
+    File currDir2 = new File(currDir, "current");
+    File currDir3 = new File(currDir, "image");
+    
+    assertTrue(currDir.mkdirs());
+    assertTrue(currDir2.mkdirs());
+    assertTrue(currDir3.mkdirs());
+    
+    conf.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
+    conf.set("dfs.name.edits.dir", "${dfs.name.dir}");
+    
+    // Start BackupNode
+    String[] args = new String [] { StartupOption.BACKUP.getName() };
+    BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);
+
+    return bu;
+  }
+  
+  /**
+   * Start the datanode.
    */
   public DataNode startDataNode(int index, Configuration config) 
   throws IOException {
-    String dataDir = System.getProperty("test.build.data");
+    String dataDir = getTestingDir();
     File dataNodeDir = new File(dataDir, "data-" + index);
     config.set("dfs.data.dir", dataNodeDir.getPath());
 
@@ -100,7 +163,7 @@
   }
 
   /**
-   * Check whether the name-node can be started.
+   * Check whether the namenode can be started.
    */
   private boolean canStartNameNode(Configuration conf) throws IOException {
     NameNode nn2 = null;
@@ -110,13 +173,14 @@
       if (e instanceof java.net.BindException)
         return false;
       throw e;
+    } finally {
+      stopNameNode(nn2);
     }
-    stopNameNode(nn2);
     return true;
   }
 
   /**
-   * Check whether the data-node can be started.
+   * Check whether the datanode can be started.
    */
   private boolean canStartDataNode(Configuration conf) throws IOException {
     DataNode dn = null;
@@ -126,29 +190,53 @@
       if (e instanceof java.net.BindException)
         return false;
       throw e;
+    } finally {
+      if(dn != null) dn.shutdown();
     }
-    dn.shutdown();
     return true;
   }
 
   /**
    * Check whether the secondary name-node can be started.
    */
+  @SuppressWarnings("deprecation")
   private boolean canStartSecondaryNode(Configuration conf) throws IOException {
-    SecondaryNameNode sn = null;
+    // Using the fully qualified name allows us to avoid adding a
+    // deprecation tag to the entire source file.
+    org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode sn = null;
+    try {
+      sn = new org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode(conf);
+    } catch(IOException e) {
+      if (e instanceof java.net.BindException)
+        return false;
+      throw e;
+    } finally {
+      if(sn != null) sn.shutdown();
+    }
+    return true;
+  }
+  
+  /**
+   * Check whether the BackupNode can be started.
+   */
+  private boolean canStartBackupNode(Configuration conf) throws IOException {
+    BackupNode bn = null;
+
     try {
-      sn = new SecondaryNameNode(conf);
+      bn = startBackupNode(conf);
     } catch(IOException e) {
       if (e instanceof java.net.BindException)
         return false;
       throw e;
+    } finally {
+      if(bn != null) bn.stop();
     }
-    sn.shutdown();
+
     return true;
   }
 
   /**
-   * Verify name-node port usage.
+   * Verify namenode port usage.
    */
   public void testNameNodePorts() throws Exception {
     NameNode nn = null;
@@ -179,7 +267,7 @@
   }
 
   /**
-   * Verify data-node port usage.
+   * Verify datanode port usage.
    */
   public void testDataNodePorts() throws Exception {
     NameNode nn = null;
@@ -214,7 +302,7 @@
   }
 
   /**
-   * Verify secondary name-node port usage.
+   * Verify secondary namenode port usage.
    */
   public void testSecondaryNodePorts() throws Exception {
     NameNode nn = null;
@@ -225,14 +313,14 @@
       Configuration conf2 = new Configuration(config);
       conf2.set("dfs.secondary.http.address", 
                 config.get("dfs.http.address"));
-      SecondaryNameNode.LOG.info("= Starting 1 on: " + 
+      LOG.info("= Starting 1 on: " + 
                                  conf2.get("dfs.secondary.http.address"));
       boolean started = canStartSecondaryNode(conf2);
       assertFalse(started); // should fail
 
       // bind http server to a different port
       conf2.set("dfs.secondary.http.address", NAME_NODE_HTTP_HOST + "0");
-      SecondaryNameNode.LOG.info("= Starting 2 on: " + 
+      LOG.info("= Starting 2 on: " + 
                                  conf2.get("dfs.secondary.http.address"));
       started = canStartSecondaryNode(conf2);
       assertTrue(started); // should start now
@@ -240,4 +328,34 @@
       stopNameNode(nn);
     }
   }
+
+  /**
+   * Verify BackupNode port usage.
+   */
+  public void testBackupNodePorts() throws Exception {
+    NameNode nn = null;
+    try {
+      nn = startNameNode();
+
+      // bind http server to the same port as name-node
+      Configuration backup_config = new Configuration(config);
+      backup_config.set("dfs.backup.http.address", 
+                        backup_config.get("dfs.http.address"));
+
+      LOG.info("= Starting 1 on: " + 
+               backup_config.get("dfs.backup.http.address"));
+
+      assertFalse("Backup started on same port as Namenode", 
+                  canStartBackupNode(backup_config)); // should fail
+
+      // bind http server to a different port
+      backup_config.set("dfs.backup.http.address", NAME_NODE_HTTP_HOST + "0");
+      LOG.info("= Starting 2 on: " + 
+               backup_config.get("dfs.backup.http.address"));
+
+      assertTrue(canStartBackupNode(backup_config)); // should start now
+    } finally {
+      stopNameNode(nn);
+    }
+  }
 }

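All four canStart* helpers in this file follow the same probe pattern, and the commit tightens it by moving cleanup into a finally block so a half-started server is always shut down. A condensed sketch of that pattern, with illustrative names that are not part of the commit:

    import java.io.IOException;
    import java.net.BindException;

    interface Server { void shutdown(); }
    interface ServerFactory { Server start() throws IOException; }

    final class PortProbe {
      // Returns false only for a port clash; rethrows any other failure,
      // and always shuts down whatever did manage to start.
      static boolean canStart(ServerFactory factory) throws IOException {
        Server server = null;
        try {
          server = factory.start();
        } catch (IOException e) {
          if (e instanceof BindException)
            return false;   // port already in use: the expected failure mode
          throw e;          // anything else is a real error
        } finally {
          if (server != null) server.shutdown();
        }
        return true;
      }
    }
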
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java Mon Jun 15 22:13:06 2009
@@ -31,7 +31,6 @@
 
 /**
  * This class tests the decommissioning of nodes.
- * @author Dhruba Borthakur
  */
 public class TestModTime extends TestCase {
   static final long seed = 0xDEADBEEFL;

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java Mon Jun 15 22:13:06 2009
@@ -49,7 +49,7 @@
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      this.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
                         expected[from+idx]+" actual "+actual[idx],
                         actual[idx], expected[from+idx]);
       actual[idx] = 0;

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java Mon Jun 15 22:13:06 2009
@@ -52,7 +52,7 @@
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      this.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
                         expected[from+idx]+" actual "+actual[idx],
                         actual[idx], expected[from+idx]);
       actual[idx] = 0;

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Mon Jun 15 22:13:06 2009
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.List;
 
 import javax.security.auth.login.LoginException;
@@ -30,6 +31,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -43,6 +45,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -512,7 +515,8 @@
       long start = System.currentTimeMillis();
       // dummyActionNoSynch(fileIdx);
       nameNode.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
-                      clientName, true, replication, BLOCK_SIZE);
+                      clientName, new EnumSetWritable<CreateFlag>(EnumSet
+              .of(CreateFlag.OVERWRITE)), replication, BLOCK_SIZE);
       long end = System.currentTimeMillis();
       for(boolean written = !closeUponCreate; !written; 
         written = nameNode.complete(fileNames[daemonId][inputIdx], clientName));
@@ -882,8 +886,9 @@
       nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
       for(int idx=0; idx < nrFiles; idx++) {
         String fileName = nameGenerator.getNextFileName("ThroughputBench");
-        nameNode.create(fileName, FsPermission.getDefault(),
-                        clientName, true, replication, BLOCK_SIZE);
+        nameNode.create(fileName, FsPermission.getDefault(), clientName,
+            new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.OVERWRITE)), replication,
+            BLOCK_SIZE);
         addBlocks(fileName, clientName);
         nameNode.complete(fileName, clientName);
       }

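Both hunks above track the same interface change: the boolean overwrite argument of the namenode's create call is replaced by a set of CreateFlags wrapped in an EnumSetWritable so it can travel over RPC. A sketch of the two call shapes side by side, with placeholder variables:

    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;
    import org.apache.hadoop.io.EnumSetWritable;

    class CreateCallShape {
      void sketch(NameNode nameNode, String src, String clientName,
          short replication, long blockSize) throws IOException {
        // Old shape (before this commit): a bare boolean meaning "overwrite".
        //   nameNode.create(src, FsPermission.getDefault(), clientName,
        //       true, replication, blockSize);

        // New shape: the flags travel over RPC as an EnumSetWritable.
        nameNode.create(src, FsPermission.getDefault(), clientName,
            new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.OVERWRITE)),
            replication, blockSize);
      }
    }
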
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Mon Jun 15 22:13:06 2009
@@ -134,6 +134,7 @@
   /*
    * Simulate namenode crashing after rolling edit log.
    */
+  @SuppressWarnings("deprecation")
   private void testSecondaryNamenodeError1(Configuration conf)
     throws IOException {
     System.out.println("Starting testSecondaryNamenodeError 1");
@@ -210,6 +211,7 @@
   /*
    * Simulate a namenode crash after uploading new image
    */
+  @SuppressWarnings("deprecation")
   private void testSecondaryNamenodeError2(Configuration conf)
     throws IOException {
     System.out.println("Starting testSecondaryNamenodeError 21");
@@ -268,6 +270,7 @@
   /*
    * Simulate a secondary namenode crash after rolling the edit log.
    */
+  @SuppressWarnings("deprecation")
   private void testSecondaryNamenodeError3(Configuration conf)
     throws IOException {
     System.out.println("Starting testSecondaryNamenodeError 31");
@@ -336,6 +339,7 @@
    * back to the name-node.
    * Used to truncate primary fsimage file.
    */
+  @SuppressWarnings("deprecation")
   void testSecondaryFailsToReturnImage(Configuration conf)
     throws IOException {
     System.out.println("Starting testSecondaryFailsToReturnImage");
@@ -399,6 +403,7 @@
    * <li> Complete failed checkpoint for secondary node.
    * </ol>
    */
+  @SuppressWarnings("deprecation")
   void testStartup(Configuration conf) throws IOException {
     System.out.println("Startup of the name-node in the checkpoint directory.");
     String primaryDirs = conf.get("dfs.name.dir");
@@ -553,6 +558,9 @@
     return nn;
   }
 
+  // This deprecation suppression does not work due to a known Java bug:
+  // http://bugs.sun.com/view_bug.do?bug_id=6460147
+  @SuppressWarnings("deprecation")
   SecondaryNameNode startSecondaryNameNode(Configuration conf
                                           ) throws IOException {
     conf.set("dfs.secondary.http.address", "0.0.0.0:0");
@@ -562,6 +570,7 @@
   /**
    * Tests checkpoint in HDFS.
    */
+  @SuppressWarnings("deprecation")
   public void testCheckpoint() throws IOException {
     Path file1 = new Path("checkpoint.dat");
     Path file2 = new Path("checkpoint2.dat");

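The @SuppressWarnings("deprecation") annotations sprinkled through this file work around the deprecation of SecondaryNameNode, and the comment on startSecondaryNameNode records the catch: per the cited Sun bug, the annotation may not silence the warning when the deprecated type appears in the method's own signature. A stand-alone illustration with a hypothetical deprecated type:

    class DeprecationSketch {
      @Deprecated
      static class DeprecatedThing { }

      // Effective: the deprecated type occurs only inside the body.
      @SuppressWarnings("deprecation")
      void use() { new DeprecatedThing(); }

      // Per the cited bug, suppression can fail here because the
      // deprecated type is part of the signature itself.
      @SuppressWarnings("deprecation")
      DeprecatedThing make() { return new DeprecatedThing(); }
    }
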
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java Mon Jun 15 22:13:06 2009
@@ -88,6 +88,9 @@
     assertTrue(!fileSys.exists(name));
   }
 
+  // This deprecation suppression does not work due to a known Java bug:
+  // http://bugs.sun.com/view_bug.do?bug_id=6460147
+  @SuppressWarnings("deprecation")
   SecondaryNameNode startSecondaryNameNode(Configuration conf
                                           ) throws IOException {
     conf.set("dfs.secondary.http.address", "0.0.0.0:0");
@@ -106,6 +109,7 @@
    * All along the test, we create and delete files at reach restart to make
    * sure we are reading proper edits and image.
    */
+  @SuppressWarnings("deprecation")
   public void testNameEditsConfigs() throws IOException {
     Path file1 = new Path("TestNameEditsConfigs1");
     Path file2 = new Path("TestNameEditsConfigs2");

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Mon Jun 15 22:13:06 2009
@@ -83,6 +83,7 @@
    * start MiniDFScluster, create a file (to create edits) and do a checkpoint  
    * @throws IOException
    */
+  @SuppressWarnings("deprecation")
   public void createCheckPoint() throws IOException {
     LOG.info("--starting mini cluster");
     // manage dirs parameter set to false 
@@ -248,6 +249,7 @@
    * secondary node copies fsimage and edits into correct separate directories.
    * @throws IOException
    */
+  @SuppressWarnings("deprecation")
   public void testSNNStartup() throws IOException{
     //setUpConfig();
     LOG.info("--starting SecondNN startup test");

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Mon Jun 15 22:13:06 2009
@@ -282,6 +282,7 @@
    * 7. run doCheckpoint
    * 8. verify that all the image and edits files are the same.
    */
+  @SuppressWarnings("deprecation")
   public void testStorageRestore() throws Exception {
     int numDatanodes = 2;
     cluster = new MiniDFSCluster(0, config, numDatanodes, true, false, true,  null, null, null, null);

Propchange: hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Mon Jun 15 22:13:06 2009
@@ -0,0 +1,2 @@
+/hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
+/hadoop/core/trunk/src/webapps/datanode:776175-784663

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/browseBlock.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/browseBlock.jsp?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/browseBlock.jsp (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/browseBlock.jsp Mon Jun 15 22:13:06 2009
@@ -19,382 +19,22 @@
 %>
 <%@ page
   contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="java.net.*"
-
-  import="org.apache.hadoop.hdfs.*"
-  import="org.apache.hadoop.hdfs.server.namenode.*"
-  import="org.apache.hadoop.hdfs.protocol.*"
-  import="org.apache.hadoop.security.AccessToken"
-  import="org.apache.hadoop.security.AccessTokenHandler"
-  import="org.apache.hadoop.util.*"
+  import="org.apache.hadoop.hdfs.server.common.JspHelper"
+  import="org.apache.hadoop.util.ServletUtil"
 %>
-
 <%!
-  static final DataNode datanode = DataNode.getDataNode();
-
-  public void generateFileDetails(JspWriter out, HttpServletRequest req) 
-    throws IOException {
-
-    long startOffset = 0;
-    int datanodePort;
-
-    final Long blockId = JspHelper.validateLong(req.getParameter("blockId"));
-    if (blockId == null) {
-      out.print("Invalid input (blockId absent)");
-      return;
-    }
-
-    String datanodePortStr = req.getParameter("datanodePort");
-    if (datanodePortStr == null) {
-      out.print("Invalid input (datanodePort absent)");
-      return;
-    }
-    datanodePort = Integer.parseInt(datanodePortStr);
-
-    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
-    int namenodeInfoPort = -1;
-    if (namenodeInfoPortStr != null)
-      namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
-
-    final int chunkSizeToView = JspHelper.string2ChunkSizeToView(req.getParameter("chunkSizeToView"));
-
-    String startOffsetStr = req.getParameter("startOffset");
-    if (startOffsetStr == null || Long.parseLong(startOffsetStr) < 0)
-      startOffset = 0;
-    else startOffset = Long.parseLong(startOffsetStr);
-    
-    final String filename = JspHelper.validatePath(
-        req.getParameter("filename"));
-    if (filename == null) {
-      out.print("Invalid input");
-      return;
-    }
-
-    String blockSizeStr = req.getParameter("blockSize"); 
-    long blockSize = 0;
-    if (blockSizeStr == null || blockSizeStr.length() == 0) {
-      out.print("Invalid input");
-      return;
-    } 
-    blockSize = Long.parseLong(blockSizeStr);
-
-    final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(), JspHelper.conf);
-    List<LocatedBlock> blocks = 
-      dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
-    //Add the various links for looking at the file contents
-    //URL for downloading the full file
-    String downloadUrl = "http://" + req.getServerName() + ":" +
-                         + req.getServerPort() + "/streamFile?" + "filename=" +
-                         URLEncoder.encode(filename, "UTF-8");
-    out.print("<a name=\"viewOptions\"></a>");
-    out.print("<a href=\"" + downloadUrl + "\">Download this file</a><br>");
-    
-    DatanodeInfo chosenNode;
-    //URL for TAIL 
-    LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
-    try {
-      chosenNode = JspHelper.bestNode(lastBlk);
-    } catch (IOException e) {
-      out.print(e.toString());
-      dfs.close();
-      return;
-    }
-    String fqdn = 
-           InetAddress.getByName(chosenNode.getHost()).getCanonicalHostName();
-    String tailUrl = "http://" + fqdn + ":" +
-                     chosenNode.getInfoPort() + 
-                 "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8") +
-                 "&namenodeInfoPort=" + namenodeInfoPort +
-                 "&chunkSizeToView=" + chunkSizeToView +
-                 "&referrer=" + 
-          URLEncoder.encode(req.getRequestURL() + "?" + req.getQueryString(),
-                            "UTF-8");
-    out.print("<a href=\"" + tailUrl + "\">Tail this file</a><br>");
-
-    out.print("<form action=\"/browseBlock.jsp\" method=GET>");
-    out.print("<b>Chunk size to view (in bytes, up to file's DFS block size): </b>");
-    out.print("<input type=\"hidden\" name=\"blockId\" value=\"" + blockId +
-              "\">");
-    out.print("<input type=\"hidden\" name=\"blockSize\" value=\"" + 
-              blockSize + "\">");
-    out.print("<input type=\"hidden\" name=\"startOffset\" value=\"" + 
-              startOffset + "\">");
-    out.print("<input type=\"hidden\" name=\"filename\" value=\"" + filename +
-              "\">");
-    out.print("<input type=\"hidden\" name=\"datanodePort\" value=\"" + 
-              datanodePort+ "\">");
-    out.print("<input type=\"hidden\" name=\"namenodeInfoPort\" value=\"" +
-              namenodeInfoPort + "\">");
-    out.print("<input type=\"text\" name=\"chunkSizeToView\" value=" +
-              chunkSizeToView + " size=10 maxlength=10>");
-    out.print("&nbsp;&nbsp;<input type=\"submit\" name=\"submit\" value=\"Refresh\">");
-    out.print("</form>");
-    out.print("<hr>"); 
-    out.print("<a name=\"blockDetails\"></a>");
-    out.print("<B>Total number of blocks: "+blocks.size()+"</B><br>");
-    //generate a table and dump the info
-    out.println("\n<table>");
-    for (LocatedBlock cur : blocks) {
-      out.print("<tr>");
-      final String blockidstring = Long.toString(cur.getBlock().getBlockId());
-      blockSize = cur.getBlock().getNumBytes();
-      out.print("<td>"+blockidstring+":</td>");
-      DatanodeInfo[] locs = cur.getLocations();
-      for(int j=0; j<locs.length; j++) {
-        String datanodeAddr = locs[j].getName();
-        datanodePort = Integer.parseInt(datanodeAddr.substring(
-                                        datanodeAddr.indexOf(':') + 1, 
-                                    datanodeAddr.length())); 
-        fqdn = InetAddress.getByName(locs[j].getHost()).getCanonicalHostName();
-        String blockUrl = "http://"+ fqdn + ":" +
-                        locs[j].getInfoPort() +
-                        "/browseBlock.jsp?blockId=" + blockidstring +
-                        "&blockSize=" + blockSize +
-               "&filename=" + URLEncoder.encode(filename, "UTF-8")+ 
-                        "&datanodePort=" + datanodePort + 
-                        "&genstamp=" + cur.getBlock().getGenerationStamp() + 
-                        "&namenodeInfoPort=" + namenodeInfoPort +
-                        "&chunkSizeToView=" + chunkSizeToView;
-        out.print("<td>&nbsp</td>" 
-          + "<td><a href=\"" + blockUrl + "\">" + datanodeAddr + "</a></td>");
-      }
-      out.println("</tr>");
-    }
-    out.println("</table>");
-    out.print("<hr>");
-    String namenodeHost = datanode.getNameNodeAddr().getHostName();
-    out.print("<br><a href=\"http://" + 
-              InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":" +
-              namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
-    dfs.close();
-  }
-
-  public void generateFileChunks(JspWriter out, HttpServletRequest req) 
-    throws IOException {
-    long startOffset = 0;
-    int datanodePort = 0; 
-
-    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
-    int namenodeInfoPort = -1;
-    if (namenodeInfoPortStr != null)
-      namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
-
-    final String filename = JspHelper.validatePath(
-        req.getParameter("filename"));
-    if (filename == null) {
-      out.print("Invalid input (filename absent)");
-      return;
-    }
-    
-    final Long blockId = JspHelper.validateLong(req.getParameter("blockId"));
-    if (blockId == null) {
-      out.print("Invalid input (blockId absent)");
-      return;
-    }
-
-    final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(), JspHelper.conf);
-    
-    AccessToken accessToken = AccessToken.DUMMY_TOKEN;
-    if (JspHelper.conf
-        .getBoolean(AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, false)) {
-      List<LocatedBlock> blks = dfs.namenode.getBlockLocations(filename, 0,
-          Long.MAX_VALUE).getLocatedBlocks();
-      if (blks == null || blks.size() == 0) {
-        out.print("Can't locate file blocks");
-        dfs.close();
-        return;
-      }
-      for (int i = 0; i < blks.size(); i++) {
-        if (blks.get(i).getBlock().getBlockId() == blockId) {
-          accessToken = blks.get(i).getAccessToken();
-          break;
-        }
-      }
-    }
-    
-    final Long genStamp = JspHelper.validateLong(req.getParameter("genstamp"));
-    if (genStamp == null) {
-      out.print("Invalid input (genstamp absent)");
-      return;
-    }
-
-    String blockSizeStr;
-    long blockSize = 0;
-    blockSizeStr = req.getParameter("blockSize"); 
-    if (blockSizeStr == null) {
-      out.print("Invalid input (blockSize absent)");
-      return;
-    }
-    blockSize = Long.parseLong(blockSizeStr);
-    
-    final int chunkSizeToView = JspHelper.string2ChunkSizeToView(req.getParameter("chunkSizeToView"));
-
-    String startOffsetStr = req.getParameter("startOffset");
-    if (startOffsetStr == null || Long.parseLong(startOffsetStr) < 0)
-      startOffset = 0;
-    else startOffset = Long.parseLong(startOffsetStr);
-
-    String datanodePortStr = req.getParameter("datanodePort");
-    if (datanodePortStr == null) {
-      out.print("Invalid input (datanodePort absent)");
-      return;
-    }
-    datanodePort = Integer.parseInt(datanodePortStr);
-    out.print("<h3>File: ");
-    JspHelper.printPathWithLinks(filename, out, namenodeInfoPort);
-    out.print("</h3><hr>");
-    String parent = new File(filename).getParent();
-    JspHelper.printGotoForm(out, namenodeInfoPort, parent);
-    out.print("<hr>");
-    out.print("<a href=\"http://" + req.getServerName() + ":" + 
-              req.getServerPort() + 
-              "/browseDirectory.jsp?dir=" + 
-              URLEncoder.encode(parent, "UTF-8") +
-              "&namenodeInfoPort=" + namenodeInfoPort + 
-              "\"><i>Go back to dir listing</i></a><br>");
-    out.print("<a href=\"#viewOptions\">Advanced view/download options</a><br>");
-    out.print("<hr>");
-
-    //Determine the prev & next blocks
-    long nextStartOffset = 0;
-    long nextBlockSize = 0;
-    String nextBlockIdStr = null;
-    String nextGenStamp = null;
-    String nextHost = req.getServerName();
-    int nextPort = req.getServerPort();
-    int nextDatanodePort = datanodePort;
-    //determine data for the next link
-    if (startOffset + chunkSizeToView >= blockSize) {
-      //we have to go to the next block from this point onwards
-      List<LocatedBlock> blocks = 
-        dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
-      for (int i = 0; i < blocks.size(); i++) {
-        if (blocks.get(i).getBlock().getBlockId() == blockId) {
-          if (i != blocks.size() - 1) {
-            LocatedBlock nextBlock = blocks.get(i+1);
-            nextBlockIdStr = Long.toString(nextBlock.getBlock().getBlockId());
-            nextGenStamp = Long.toString(nextBlock.getBlock().getGenerationStamp());
-            nextStartOffset = 0;
-            nextBlockSize = nextBlock.getBlock().getNumBytes();
-            DatanodeInfo d = JspHelper.bestNode(nextBlock);
-            String datanodeAddr = d.getName();
-            nextDatanodePort = Integer.parseInt(
-                                      datanodeAddr.substring(
-                                           datanodeAddr.indexOf(':') + 1, 
-                                      datanodeAddr.length())); 
-            nextHost = InetAddress.getByName(d.getHost()).getCanonicalHostName();
-            nextPort = d.getInfoPort(); 
-          }
-        }
-      }
-    } 
-    else {
-      //we are in the same block
-      nextBlockIdStr = blockId.toString();
-      nextStartOffset = startOffset + chunkSizeToView;
-      nextBlockSize = blockSize;
-      nextGenStamp = genStamp.toString();
-    }
-    String nextUrl = null;
-    if (nextBlockIdStr != null) {
-      nextUrl = "http://" + nextHost + ":" + 
-                nextPort + 
-                "/browseBlock.jsp?blockId=" + nextBlockIdStr +
-                "&blockSize=" + nextBlockSize + "&startOffset=" + 
-                nextStartOffset + 
-                "&genstamp=" + nextGenStamp +
-                "&filename=" + URLEncoder.encode(filename, "UTF-8") +
-                "&chunkSizeToView=" + chunkSizeToView + 
-                "&datanodePort=" + nextDatanodePort +
-                "&namenodeInfoPort=" + namenodeInfoPort;
-      out.print("<a href=\"" + nextUrl + "\">View Next chunk</a>&nbsp;&nbsp;");        
-    }
-    //determine data for the prev link
-    String prevBlockIdStr = null;
-    String prevGenStamp = null;
-    long prevStartOffset = 0;
-    long prevBlockSize = 0;
-    String prevHost = req.getServerName();
-    int prevPort = req.getServerPort();
-    int prevDatanodePort = datanodePort;
-    if (startOffset == 0) {
-      List<LocatedBlock> blocks = 
-        dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
-      for (int i = 0; i < blocks.size(); i++) {
-        if (blocks.get(i).getBlock().getBlockId() == blockId) {
-          if (i != 0) {
-            LocatedBlock prevBlock = blocks.get(i-1);
-            prevBlockIdStr = Long.toString(prevBlock.getBlock().getBlockId());
-            prevGenStamp = Long.toString(prevBlock.getBlock().getGenerationStamp());
-            prevStartOffset = prevBlock.getBlock().getNumBytes() - chunkSizeToView;
-            if (prevStartOffset < 0)
-              prevStartOffset = 0;
-            prevBlockSize = prevBlock.getBlock().getNumBytes();
-            DatanodeInfo d = JspHelper.bestNode(prevBlock);
-            String datanodeAddr = d.getName();
-            prevDatanodePort = Integer.parseInt(
-                                      datanodeAddr.substring(
-                                          datanodeAddr.indexOf(':') + 1, 
-                                      datanodeAddr.length())); 
-            prevHost = InetAddress.getByName(d.getHost()).getCanonicalHostName();
-            prevPort = d.getInfoPort();
-          }
-        }
-      }
-    }
-    else {
-      //we are in the same block
-      prevBlockIdStr = blockId.toString();
-      prevStartOffset = startOffset - chunkSizeToView;
-      if (prevStartOffset < 0) prevStartOffset = 0;
-      prevBlockSize = blockSize;
-      prevGenStamp = genStamp.toString();
-    }
-
-    String prevUrl = null;
-    if (prevBlockIdStr != null) {
-      prevUrl = "http://" + prevHost + ":" + 
-                prevPort + 
-                "/browseBlock.jsp?blockId=" + prevBlockIdStr + 
-                "&blockSize=" + prevBlockSize + "&startOffset=" + 
-                prevStartOffset + 
-                "&filename=" + URLEncoder.encode(filename, "UTF-8") + 
-                "&chunkSizeToView=" + chunkSizeToView +
-                "&genstamp=" + prevGenStamp +
-                "&datanodePort=" + prevDatanodePort +
-                "&namenodeInfoPort=" + namenodeInfoPort;
-      out.print("<a href=\"" + prevUrl + "\">View Prev chunk</a>&nbsp;&nbsp;");
-    }
-    out.print("<hr>");
-    out.print("<textarea cols=\"100\" rows=\"25\" wrap=\"virtual\" style=\"width:100%\" READONLY>");
-    try {
-    JspHelper.streamBlockInAscii(
-            new InetSocketAddress(req.getServerName(), datanodePort), blockId, 
-            accessToken, genStamp, blockSize, startOffset, chunkSizeToView, out);
-    } catch (Exception e){
-        out.print(e);
-    }
-    out.print("</textarea>");
-    dfs.close();
-  }
-
+  //for java.io.Serializable
+  private static final long serialVersionUID = 1L;
 %>
+
 <html>
 <head>
 <%JspHelper.createTitle(out, request, request.getParameter("filename")); %>
 </head>
 <body onload="document.goto.dir.focus()">
-<% 
-   generateFileChunks(out,request);
-%>
+<% DatanodeJspHelper.generateFileChunks(out,request); %>
 <hr>
-<% 
-   generateFileDetails(out,request);
-%>
+<% DatanodeJspHelper.generateFileDetails(out,request); %>
 
 <h2>Local logs</h2>
 <a href="/logs/">Log</a> directory

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/browseDirectory.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/browseDirectory.jsp?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/browseDirectory.jsp (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/browseDirectory.jsp Mon Jun 15 22:13:06 2009
@@ -19,140 +19,14 @@
 %>
 <%@ page
   contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="java.net.*"
+  import="java.io.IOException"
 
-  import="org.apache.hadoop.fs.*"
-  import="org.apache.hadoop.hdfs.*"
-  import="org.apache.hadoop.hdfs.server.namenode.*"
-  import="org.apache.hadoop.hdfs.protocol.*"
-  import="org.apache.hadoop.util.*"
+  import="org.apache.hadoop.hdfs.server.common.JspHelper"
+  import="org.apache.hadoop.util.ServletUtil"
 %>
 <%!
-  static final DataNode datanode = DataNode.getDataNode();
-  
-  public void generateDirectoryStructure( JspWriter out, 
-                                          HttpServletRequest req,
-                                          HttpServletResponse resp) 
-    throws IOException {
-    final String dir = JspHelper.validatePath(req.getParameter("dir"));
-    if (dir == null) {
-      out.print("Invalid input");
-      return;
-    }
-    
-    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
-    int namenodeInfoPort = -1;
-    if (namenodeInfoPortStr != null)
-      namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
-    
-    final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(), JspHelper.conf);
-    String target = dir;
-    final FileStatus targetStatus = dfs.getFileInfo(target);
-    if (targetStatus == null) { // not exists
-      out.print("<h3>File or directory : " + target + " does not exist</h3>");
-      JspHelper.printGotoForm(out, namenodeInfoPort, target);
-    }
-    else {
-      if( !targetStatus.isDir() ) { // a file
-        List<LocatedBlock> blocks = 
-          dfs.namenode.getBlockLocations(dir, 0, 1).getLocatedBlocks();
-	      
-        LocatedBlock firstBlock = null;
-        DatanodeInfo [] locations = null;
-        if (blocks.size() > 0) {
-          firstBlock = blocks.get(0);
-          locations = firstBlock.getLocations();
-        }
-        if (locations == null || locations.length == 0) {
-          out.print("Empty file");
-        } else {
-          DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock);
-          String fqdn = InetAddress.getByName(chosenNode.getHost()).
-            getCanonicalHostName();
-          String datanodeAddr = chosenNode.getName();
-          int datanodePort = Integer.parseInt(
-                                              datanodeAddr.substring(
-                                                                     datanodeAddr.indexOf(':') + 1, 
-                                                                     datanodeAddr.length())); 
-          String redirectLocation = "http://"+fqdn+":" +
-            chosenNode.getInfoPort() + 
-            "/browseBlock.jsp?blockId=" +
-            firstBlock.getBlock().getBlockId() +
-            "&blockSize=" + firstBlock.getBlock().getNumBytes() +
-            "&genstamp=" + firstBlock.getBlock().getGenerationStamp() +
-            "&filename=" + URLEncoder.encode(dir, "UTF-8") + 
-            "&datanodePort=" + datanodePort + 
-            "&namenodeInfoPort=" + namenodeInfoPort;
-          resp.sendRedirect(redirectLocation);
-        }
-        return;
-      }
-      // directory
-      FileStatus[] files = dfs.listPaths(target);
-      //generate a table and dump the info
-      String [] headings = { "Name", "Type", "Size", "Replication", 
-                              "Block Size", "Modification Time",
-                              "Permission", "Owner", "Group" };
-      out.print("<h3>Contents of directory ");
-      JspHelper.printPathWithLinks(dir, out, namenodeInfoPort);
-      out.print("</h3><hr>");
-      JspHelper.printGotoForm(out, namenodeInfoPort, dir);
-      out.print("<hr>");
-	
-      File f = new File(dir);
-      String parent;
-      if ((parent = f.getParent()) != null)
-        out.print("<a href=\"" + req.getRequestURL() + "?dir=" + parent +
-                  "&namenodeInfoPort=" + namenodeInfoPort +
-                  "\">Go to parent directory</a><br>");
-	
-      if (files == null || files.length == 0) {
-        out.print("Empty directory");
-      }
-      else {
-        JspHelper.addTableHeader(out);
-        int row=0;
-        JspHelper.addTableRow(out, headings, row++);
-        String cols [] = new String[headings.length];
-        for (int i = 0; i < files.length; i++) {
-          //Get the location of the first block of the file
-          if (files[i].getPath().toString().endsWith(".crc")) continue;
-          if (!files[i].isDir()) {
-            cols[1] = "file";
-            cols[2] = StringUtils.byteDesc(files[i].getLen());
-            cols[3] = Short.toString(files[i].getReplication());
-            cols[4] = StringUtils.byteDesc(files[i].getBlockSize());
-          }
-          else {
-            cols[1] = "dir";
-            cols[2] = "";
-            cols[3] = "";
-            cols[4] = "";
-          }
-          String datanodeUrl = req.getRequestURL()+"?dir="+
-              URLEncoder.encode(files[i].getPath().toString(), "UTF-8") + 
-              "&namenodeInfoPort=" + namenodeInfoPort;
-          cols[0] = "<a href=\""+datanodeUrl+"\">"+files[i].getPath().getName()+"</a>";
-          cols[5] = FsShell.dateForm.format(new Date((files[i].getModificationTime())));
-          cols[6] = files[i].getPermission().toString();
-          cols[7] = files[i].getOwner();
-          cols[8] = files[i].getGroup();
-          JspHelper.addTableRow(out, cols, row++);
-        }
-        JspHelper.addTableFooter(out);
-      }
-    } 
-    String namenodeHost = datanode.getNameNodeAddr().getHostName();
-    out.print("<br><a href=\"http://" + 
-              InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":" +
-              namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
-    dfs.close();
-  }
-
+  //for java.io.Serializable
+  private static final long serialVersionUID = 1L;
 %>
 
 <html>
@@ -171,7 +45,7 @@
 <body onload="document.goto.dir.focus()">
 <% 
   try {
-    generateDirectoryStructure(out,request,response);
+    DatanodeJspHelper.generateDirectoryStructure(out,request,response);
   }
   catch(IOException ioe) {
     String msg = ioe.getLocalizedMessage();

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/tail.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/tail.jsp?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/tail.jsp (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/datanode/tail.jsp Mon Jun 15 22:13:06 2009
@@ -19,111 +19,20 @@
 %>
 <%@ page
   contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="java.net.*"
-
-  import="org.apache.hadoop.hdfs.*"
-  import="org.apache.hadoop.hdfs.server.namenode.*"
-  import="org.apache.hadoop.hdfs.protocol.*"
-  import="org.apache.hadoop.security.AccessToken"
-  import="org.apache.hadoop.util.*"
-  import="org.apache.hadoop.net.NetUtils"
+  import="org.apache.hadoop.hdfs.server.common.JspHelper"
+  import="org.apache.hadoop.util.ServletUtil"
 %>
-
 <%!
-  static final DataNode datanode = DataNode.getDataNode();
-
-  public void generateFileChunks(JspWriter out, HttpServletRequest req) 
-    throws IOException {
-    final String referrer = JspHelper.validateURL(req.getParameter("referrer"));
-    boolean noLink = false;
-    if (referrer == null) {
-      noLink = true;
-    }
-
-    final String filename = JspHelper.validatePath(
-        req.getParameter("filename"));
-    if (filename == null) {
-      out.print("Invalid input (file name absent)");
-      return;
-    }
-
-    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
-    int namenodeInfoPort = -1;
-    if (namenodeInfoPortStr != null)
-      namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
-    
-    final int chunkSizeToView = JspHelper.string2ChunkSizeToView(req.getParameter("chunkSizeToView"));
-
-    if (!noLink) {
-      out.print("<h3>Tail of File: ");
-      JspHelper.printPathWithLinks(filename, out, namenodeInfoPort);
-	    out.print("</h3><hr>");
-      out.print("<a href=\"" + referrer + "\">Go Back to File View</a><hr>");
-    }
-    else {
-      out.print("<h3>" + filename + "</h3>");
-    }
-    out.print("<b>Chunk size to view (in bytes, up to file's DFS block size): </b>");
-    out.print("<input type=\"text\" name=\"chunkSizeToView\" value=" +
-              chunkSizeToView + " size=10 maxlength=10>");
-    out.print("&nbsp;&nbsp;<input type=\"submit\" name=\"submit\" value=\"Refresh\"><hr>");
-    out.print("<input type=\"hidden\" name=\"filename\" value=\"" + filename +
-              "\">");
-    out.print("<input type=\"hidden\" name=\"namenodeInfoPort\" value=\"" + namenodeInfoPort +
-    "\">");
-    if (!noLink)
-      out.print("<input type=\"hidden\" name=\"referrer\" value=\"" + 
-                referrer+ "\">");
-
-    //fetch the block from the datanode that has the last block for this file
-    final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(), JspHelper.conf);
-    List<LocatedBlock> blocks = 
-      dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
-    if (blocks == null || blocks.size() == 0) {
-      out.print("No datanodes contain blocks of file "+filename);
-      dfs.close();
-      return;
-    }
-    LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
-    long blockSize = lastBlk.getBlock().getNumBytes();
-    long blockId = lastBlk.getBlock().getBlockId();
-    AccessToken accessToken = lastBlk.getAccessToken();
-    long genStamp = lastBlk.getBlock().getGenerationStamp();
-    DatanodeInfo chosenNode;
-    try {
-      chosenNode = JspHelper.bestNode(lastBlk);
-    } catch (IOException e) {
-      out.print(e.toString());
-      dfs.close();
-      return;
-    }      
-    InetSocketAddress addr = NetUtils.createSocketAddr(chosenNode.getName());
-    //view the last chunkSizeToView bytes while Tailing
-    final long startOffset = blockSize >= chunkSizeToView? blockSize - chunkSizeToView: 0;
-
-    out.print("<textarea cols=\"100\" rows=\"25\" wrap=\"virtual\" style=\"width:100%\" READONLY>");
-    JspHelper.streamBlockInAscii(addr, blockId, accessToken, genStamp, blockSize, startOffset, chunkSizeToView, out);
-    out.print("</textarea>");
-    dfs.close();
-  }
-
+  //for java.io.Serializable
+  private static final long serialVersionUID = 1L;
 %>
-
-
-
 <html>
 <head>
 <%JspHelper.createTitle(out, request, request.getParameter("filename")); %>
 </head>
 <body>
 <form action="/tail.jsp" method="GET">
-<% 
-   generateFileChunks(out,request);
-%>
+<% DatanodeJspHelper.generateFileChunksForTail(out,request); %>
 </form>
 <hr>
 

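The tail view deleted above shows only the last chunkSizeToView bytes of the file's final block. Its offset arithmetic, pulled out as a self-contained sketch (class and method names here are illustrative):

  public class TailOffsetExample {
    // Returns the offset at which to start reading so that at most
    // chunkSizeToView bytes from the end of a block are displayed,
    // mirroring the expression in the removed generateFileChunks().
    static long tailStartOffset(long blockSize, long chunkSizeToView) {
      return blockSize >= chunkSizeToView ? blockSize - chunkSizeToView : 0;
    }

    public static void main(String[] args) {
      System.out.println(tailStartOffset(67108864, 32768)); // 67076096
      System.out.println(tailStartOffset(1024, 32768));     // 0: block smaller than the view
    }
  }
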
Propchange: hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Mon Jun 15 22:13:06 2009
@@ -0,0 +1,2 @@
+/hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
+/hadoop/core/trunk/src/webapps/hdfs:776175-784663

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/dfshealth.jsp?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/dfshealth.jsp (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/dfshealth.jsp Mon Jun 15 22:13:06 2009
@@ -19,231 +19,14 @@
 %>
 <%@ page
   contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.fs.*"
-  import="org.apache.hadoop.hdfs.*"
-  import="org.apache.hadoop.hdfs.server.namenode.*"
-  import="org.apache.hadoop.hdfs.server.datanode.*"
-  import="org.apache.hadoop.hdfs.server.common.Storage"
-  import="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"
-  import="org.apache.hadoop.hdfs.protocol.*"
-  import="org.apache.hadoop.util.*"
-  import="java.text.DateFormat"
-  import="java.lang.Math"
-  import="java.net.URLEncoder"
+  import="org.apache.hadoop.util.ServletUtil"
 %>
 <%!
-  int rowNum = 0;
-  int colNum = 0;
-
-  String rowTxt() { colNum = 0;
-      return "<tr class=\"" + (((rowNum++)%2 == 0)? "rowNormal" : "rowAlt")
-          + "\"> "; }
-  String colTxt() { return "<td id=\"col" + ++colNum + "\"> "; }
-  void counterReset () { colNum = 0; rowNum = 0 ; }
-
-  long diskBytes = 1024 * 1024 * 1024;
-  String diskByteStr = "GB";
-
-  String sorterField = null;
-  String sorterOrder = null;
-
-  String NodeHeaderStr(String name) {
-      String ret = "class=header";
-      String order = "ASC";
-      if ( name.equals( sorterField ) ) {
-          ret += sorterOrder;
-          if ( sorterOrder.equals("ASC") )
-              order = "DSC";
-      }
-      ret += " onClick=\"window.document.location=" +
-          "'/dfshealth.jsp?sorter/field=" + name + "&sorter/order=" +
-          order + "'\" title=\"sort on this column\"";
-      
-      return ret;
-  }
-      
-  public void generateNodeData( JspWriter out, DatanodeDescriptor d,
-                                    String suffix, boolean alive,
-                                    int nnHttpPort )
-    throws IOException {
-      
-    /* Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5
-       we use:
-       1) d.getHostName():d.getPort() to display.
-           Domain and port are stripped if they are common across the nodes.
-           i.e. "dn1"
-       2) d.getHost():d.Port() for "title".
-          i.e. "192.168.0.5:50010"
-       3) d.getHostName():d.getInfoPort() for url.
-          i.e. "http://dn1.hadoop.apache.org:50075/..."
-          Note that "d.getHost():d.getPort()" is what DFS clients use
-          to interact with datanodes.
-    */
-    // from nn_browsedfscontent.jsp:
-    String url = "http://" + d.getHostName() + ":" + d.getInfoPort() +
-                 "/browseDirectory.jsp?namenodeInfoPort=" +
-                 nnHttpPort + "&dir=" +
-                 URLEncoder.encode("/", "UTF-8");
-     
-    String name = d.getHostName() + ":" + d.getPort();
-    if ( !name.matches( "\\d+\\.\\d+.\\d+\\.\\d+.*" ) ) 
-        name = name.replaceAll( "\\.[^.:]*", "" );    
-    int idx = (suffix != null && name.endsWith( suffix )) ?
-        name.indexOf( suffix ) : -1;
-    
-    out.print( rowTxt() + "<td class=\"name\"><a title=\""
-               + d.getHost() + ":" + d.getPort() +
-               "\" href=\"" + url + "\">" +
-               (( idx > 0 ) ? name.substring(0, idx) : name) + "</a>" +
-               (( alive ) ? "" : "\n") );
-    if ( !alive )
-        return;
-    
-    long c = d.getCapacity();
-    long u = d.getDfsUsed();
-    long nu = d.getNonDfsUsed();
-    long r = d.getRemaining();
-    String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent());    
-    String percentRemaining = StringUtils.limitDecimalTo2(d.getRemainingPercent());    
-    
-    String adminState = (d.isDecommissioned() ? "Decommissioned" :
-                         (d.isDecommissionInProgress() ? "Decommission In Progress":
-                          "In Service"));
-    
-    long timestamp = d.getLastUpdate();
-    long currentTime = System.currentTimeMillis();
-    out.print("<td class=\"lastcontact\"> " +
-              ((currentTime - timestamp)/1000) +
-              "<td class=\"adminstate\">" +
-              adminState +
-              "<td align=\"right\" class=\"capacity\">" +
-              StringUtils.limitDecimalTo2(c*1.0/diskBytes) +
-              "<td align=\"right\" class=\"used\">" +
-              StringUtils.limitDecimalTo2(u*1.0/diskBytes) +      
-              "<td align=\"right\" class=\"nondfsused\">" +
-              StringUtils.limitDecimalTo2(nu*1.0/diskBytes) +      
-              "<td align=\"right\" class=\"remaining\">" +
-              StringUtils.limitDecimalTo2(r*1.0/diskBytes) +      
-              "<td align=\"right\" class=\"pcused\">" + percentUsed +
-              "<td class=\"pcused\">" +
-              ServletUtil.percentageGraph( (int)Double.parseDouble(percentUsed) , 100) +
-              "<td align=\"right\" class=\"pcremaining`\">" + percentRemaining +
-              "<td title=" + "\"blocks scheduled : " + d.getBlocksScheduled() + 
-              "\" class=\"blocks\">" + d.numBlocks() + "\n");
-  }
-  
-  
-  public void generateConfReport( JspWriter out,
-		  NameNode nn,
-		  HttpServletRequest request)
-  throws IOException {
-	  FSNamesystem fsn = nn.getNamesystem();
-	  long underReplicatedBlocks = fsn.getUnderReplicatedBlocks();
-	  FSImage fsImage = fsn.getFSImage();
-	  List<Storage.StorageDirectory> removedStorageDirs = fsImage.getRemovedStorageDirs();
-	  String storageDirsSizeStr="", removedStorageDirsSizeStr="", storageDirsStr="", removedStorageDirsStr="", storageDirsDiv="", removedStorageDirsDiv="";
-
-	  //FS Image storage configuration
-	  out.print("<h3> " + nn.getRole() + " Storage: </h3>");
-	  out.print("<div id=\"dfstable\"> <table border=1 cellpadding=10 cellspacing=0 title=\"NameNode Storage\">\n"+
-	  "<thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>");
-	  
-	  StorageDirectory st =null;
-	  for (Iterator<StorageDirectory> it = fsImage.dirIterator(); it.hasNext();) {
-	      st = it.next();
-	      String dir = "" +  st.getRoot();
-		  String type = "" + st.getStorageDirType();
-		  out.print("<tr><td>"+dir+"</td><td>"+type+"</td><td>Active</td></tr>");
-	  }
-	  
-	  long storageDirsSize = removedStorageDirs.size();
-	  for(int i=0; i< storageDirsSize; i++){
-		  st = removedStorageDirs.get(i);
-		  String dir = "" +  st.getRoot();
-		  String type = "" + st.getStorageDirType();
-		  out.print("<tr><td>"+dir+"</td><td>"+type+"</td><td><font color=red>Failed</font></td></tr>");
-	  }
-	  
-	  out.print("</table></div><br>\n");
-  }
-
-
-  public void generateDFSHealthReport(JspWriter out,
-                                      NameNode nn,
-                                      HttpServletRequest request)
-                                      throws IOException {
-    FSNamesystem fsn = nn.getNamesystem();
-    ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-    fsn.DFSNodesStatus(live, dead);
-
-    sorterField = request.getParameter("sorter/field");
-    sorterOrder = request.getParameter("sorter/order");
-    if ( sorterField == null )
-        sorterField = "name";
-    if ( sorterOrder == null )
-        sorterOrder = "ASC";
-
-    // Find out common suffix. Should this be before or after the sort?
-    String port_suffix = null;
-    if ( live.size() > 0 ) {
-        String name = live.get(0).getName();
-        int idx = name.indexOf(':');
-        if ( idx > 0 ) {
-            port_suffix = name.substring( idx );
-        }
-        
-        for ( int i=1; port_suffix != null && i < live.size(); i++ ) {
-            if ( live.get(i).getName().endsWith( port_suffix ) == false ) {
-                port_suffix = null;
-                break;
-            }
-        }
-    }
-        
-    counterReset();
-    long[] fsnStats = fsn.getStats(); 
-    long total = fsnStats[0];
-    long remaining = fsnStats[2];
-    long used = fsnStats[1];
-    long nonDFS = total - remaining - used;
-	nonDFS = nonDFS < 0 ? 0 : nonDFS; 
-    float percentUsed = total <= 0 
-        ? 0f : ((float)used * 100.0f)/(float)total;
-    float percentRemaining = total <= 0 
-        ? 100f : ((float)remaining * 100.0f)/(float)total;
-
-    out.print( "<div id=\"dfstable\"> <table>\n" +
-	       rowTxt() + colTxt() + "Configured Capacity" + colTxt() + ":" + colTxt() +
-	       StringUtils.byteDesc( total ) +
-	       rowTxt() + colTxt() + "DFS Used" + colTxt() + ":" + colTxt() +
-	       StringUtils.byteDesc( used ) +
-	       rowTxt() + colTxt() + "Non DFS Used" + colTxt() + ":" + colTxt() +
-	       StringUtils.byteDesc( nonDFS ) +
-	       rowTxt() + colTxt() + "DFS Remaining" + colTxt() + ":" + colTxt() +
-	       StringUtils.byteDesc( remaining ) +
-	       rowTxt() + colTxt() + "DFS Used%" + colTxt() + ":" + colTxt() +
-	       StringUtils.limitDecimalTo2(percentUsed) + " %" +
-	       rowTxt() + colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt() +
-	       StringUtils.limitDecimalTo2(percentRemaining) + " %" +
-	       rowTxt() + colTxt() +
-	       		"<a href=\"dfsnodelist.jsp?whatNodes=LIVE\">Live Nodes</a> " +
-	       		colTxt() + ":" + colTxt() + live.size() +
-	       rowTxt() + colTxt() +
-	       		"<a href=\"dfsnodelist.jsp?whatNodes=DEAD\">Dead Nodes</a> " +
-	       		colTxt() + ":" + colTxt() + dead.size() +
-               "</table></div><br>\n" );
-    
-    if (live.isEmpty() && dead.isEmpty()) {
-        out.print("There are no datanodes in the cluster");
-    }
-  }%>
-
+  //for java.io.Serializable
+  private static final long serialVersionUID = 1L;
+%>
 <%
+  final NamenodeJspHelper.HealthJsp healthjsp  = new NamenodeJspHelper.HealthJsp();
   NameNode nn = (NameNode)application.getAttribute("name.node");
   FSNamesystem fsn = nn.getNamesystem();
   String namenodeRole = nn.getRole().toString();
@@ -257,24 +40,20 @@
     
 <body>
 <h1><%=namenodeRole%> '<%=namenodeLabel%>'</h1>
-<%= JspHelper.getVersionTable(fsn) %>
+<%= NamenodeJspHelper.getVersionTable(fsn) %>
 <br />
 <b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b><br>
 <b><a href="/logs/"><%=namenodeRole%> Logs</a></b>
 
 <hr>
 <h3>Cluster Summary</h3>
-<b> <%= JspHelper.getSafeModeText(fsn)%> </b>
-<b> <%= JspHelper.getInodeLimitText(fsn)%> </b>
-<a class="warning"> <%= JspHelper.getWarningText(fsn)%></a>
+<b> <%= NamenodeJspHelper.getSafeModeText(fsn)%> </b>
+<b> <%= NamenodeJspHelper.getInodeLimitText(fsn)%> </b>
+<a class="warning"><%= NamenodeJspHelper.getWarningText(fsn)%></a>
 
-<%
-    generateDFSHealthReport(out, nn, request); 
-%>
+<% healthjsp.generateHealthReport(out, nn, request); %>
 <hr>
-<%
-	generateConfReport(out, nn, request);
-%>
+<% healthjsp.generateConfReport(out, nn, request); %>
 <%
 out.println(ServletUtil.htmlFooter());
 %>

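The cluster-summary numbers now produced by NamenodeJspHelper.HealthJsp come from the same arithmetic the deleted generateDFSHealthReport() applied to FSNamesystem.getStats(). That arithmetic as a standalone sketch (class and method names are illustrative):

  public class CapacityMathExample {
    // Non-DFS usage is whatever capacity is neither DFS-used nor remaining,
    // clamped at zero in case the stats are momentarily inconsistent.
    static long nonDfsUsed(long total, long used, long remaining) {
      long nonDFS = total - remaining - used;
      return nonDFS < 0 ? 0 : nonDFS;
    }

    // Guard against a zero or negative total so an empty cluster reports 0%.
    static float percentUsed(long used, long total) {
      return total <= 0 ? 0f : (float) used * 100.0f / (float) total;
    }

    public static void main(String[] args) {
      long total = 100L << 30, used = 40L << 30, remaining = 50L << 30;
      System.out.println(nonDfsUsed(total, used, remaining)); // 10 GB, in bytes
      System.out.println(percentUsed(used, total));           // 40.0
    }
  }
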
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/dfsnodelist.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/dfsnodelist.jsp?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/dfsnodelist.jsp (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/dfsnodelist.jsp Mon Jun 15 22:13:06 2009
@@ -18,236 +18,15 @@
  */
 %>
 <%@ page
-contentType="text/html; charset=UTF-8"
-	import="javax.servlet.*"
-	import="javax.servlet.http.*"
-	import="java.io.*"
-	import="java.util.*"
-	import="org.apache.hadoop.fs.*"
-	import="org.apache.hadoop.hdfs.*"
-	import="org.apache.hadoop.hdfs.server.common.*"
-	import="org.apache.hadoop.hdfs.server.namenode.*"
-	import="org.apache.hadoop.hdfs.server.datanode.*"
-	import="org.apache.hadoop.hdfs.protocol.*"
-	import="org.apache.hadoop.util.*"
-	import="java.text.DateFormat"
-	import="java.lang.Math"
-	import="java.net.URLEncoder"
+  contentType="text/html; charset=UTF-8"
+  import="org.apache.hadoop.util.ServletUtil"
 %>
 <%!
-	int rowNum = 0;
-	int colNum = 0;
-
-	String rowTxt() { colNum = 0;
-	return "<tr class=\"" + (((rowNum++)%2 == 0)? "rowNormal" : "rowAlt")
-	+ "\"> "; }
-	String colTxt() { return "<td id=\"col" + ++colNum + "\"> "; }
-	void counterReset () { colNum = 0; rowNum = 0 ; }
-
-	long diskBytes = 1024 * 1024 * 1024;
-	String diskByteStr = "GB";
-
-	String sorterField = null;
-	String sorterOrder = null;
-	String whatNodes = "LIVE";
-
-String NodeHeaderStr(String name) {
-	String ret = "class=header";
-	String order = "ASC";
-	if ( name.equals( sorterField ) ) {
-		ret += sorterOrder;
-		if ( sorterOrder.equals("ASC") )
-			order = "DSC";
-	}
-	ret += " onClick=\"window.document.location=" +
-	"'/dfsnodelist.jsp?whatNodes="+whatNodes+"&sorter/field=" + name + "&sorter/order=" +
-	order + "'\" title=\"sort on this column\"";
-
-	return ret;
-}
-
-public void generateNodeData( JspWriter out, DatanodeDescriptor d,
-		String suffix, boolean alive,
-		int nnHttpPort )
-throws IOException {
-
-	/* Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5
-we use:
-1) d.getHostName():d.getPort() to display.
-Domain and port are stripped if they are common across the nodes.
-i.e. "dn1"
-2) d.getHost():d.Port() for "title".
-i.e. "192.168.0.5:50010"
-3) d.getHostName():d.getInfoPort() for url.
-i.e. "http://dn1.hadoop.apache.org:50075/..."
-Note that "d.getHost():d.getPort()" is what DFS clients use
-to interact with datanodes.
-	 */
-	// from nn_browsedfscontent.jsp:
-	String url = "http://" + d.getHostName() + ":" + d.getInfoPort() +
-	"/browseDirectory.jsp?namenodeInfoPort=" +
-	nnHttpPort + "&dir=" +
-	URLEncoder.encode("/", "UTF-8");
-
-	String name = d.getHostName() + ":" + d.getPort();
-	if ( !name.matches( "\\d+\\.\\d+.\\d+\\.\\d+.*" ) ) 
-		name = name.replaceAll( "\\.[^.:]*", "" );    
-	int idx = (suffix != null && name.endsWith( suffix )) ?
-			name.indexOf( suffix ) : -1;
-
-			out.print( rowTxt() + "<td class=\"name\"><a title=\""
-					+ d.getHost() + ":" + d.getPort() +
-					"\" href=\"" + url + "\">" +
-					(( idx > 0 ) ? name.substring(0, idx) : name) + "</a>" +
-					(( alive ) ? "" : "\n") );
-			if ( !alive )
-				return;
-
-			long c = d.getCapacity();
-			long u = d.getDfsUsed();
-			long nu = d.getNonDfsUsed();
-			long r = d.getRemaining();
-			String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent());    
-			String percentRemaining = StringUtils.limitDecimalTo2(d.getRemainingPercent());    
-
-			String adminState = (d.isDecommissioned() ? "Decommissioned" :
-				(d.isDecommissionInProgress() ? "Decommission In Progress":
-				"In Service"));
-
-			long timestamp = d.getLastUpdate();
-			long currentTime = System.currentTimeMillis();
-			out.print("<td class=\"lastcontact\"> " +
-					((currentTime - timestamp)/1000) +
-					"<td class=\"adminstate\">" +
-					adminState +
-					"<td align=\"right\" class=\"capacity\">" +
-					StringUtils.limitDecimalTo2(c*1.0/diskBytes) +
-					"<td align=\"right\" class=\"used\">" +
-					StringUtils.limitDecimalTo2(u*1.0/diskBytes) +      
-					"<td align=\"right\" class=\"nondfsused\">" +
-					StringUtils.limitDecimalTo2(nu*1.0/diskBytes) +      
-					"<td align=\"right\" class=\"remaining\">" +
-					StringUtils.limitDecimalTo2(r*1.0/diskBytes) +      
-					"<td align=\"right\" class=\"pcused\">" + percentUsed +
-					"<td class=\"pcused\">" +
-					ServletUtil.percentageGraph( (int)Double.parseDouble(percentUsed) , 100) +
-					"<td align=\"right\" class=\"pcremaining`\">" + percentRemaining +
-					"<td title=" + "\"blocks scheduled : " + d.getBlocksScheduled() + 
-					"\" class=\"blocks\">" + d.numBlocks() + "\n");
-}
-
-
-
-public void generateDFSNodesList(JspWriter out, 
-		NameNode nn,
-		HttpServletRequest request)
-throws IOException {
-	ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();    
-	ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-	nn.getNamesystem().DFSNodesStatus(live, dead);
-
-	whatNodes = request.getParameter("whatNodes"); // show only live or only dead nodes
-	sorterField = request.getParameter("sorter/field");
-	sorterOrder = request.getParameter("sorter/order");
-	if ( sorterField == null )
-		sorterField = "name";
-	if ( sorterOrder == null )
-		sorterOrder = "ASC";
-
-	JspHelper.sortNodeList(live, sorterField, sorterOrder);
-	JspHelper.sortNodeList(dead, "name", "ASC");
-
-	// Find out common suffix. Should this be before or after the sort?
-	String port_suffix = null;
-	if ( live.size() > 0 ) {
-		String name = live.get(0).getName();
-		int idx = name.indexOf(':');
-		if ( idx > 0 ) {
-			port_suffix = name.substring( idx );
-		}
-
-		for ( int i=1; port_suffix != null && i < live.size(); i++ ) {
-			if ( live.get(i).getName().endsWith( port_suffix ) == false ) {
-				port_suffix = null;
-				break;
-			}
-		}
-	}
-
-	counterReset();
-
-	try {
-		Thread.sleep(1000);
-	} catch (InterruptedException e) {}
-
-	if (live.isEmpty() && dead.isEmpty()) {
-		out.print("There are no datanodes in the cluster");
-	}
-	else {
-
-		int nnHttpPort = nn.getHttpAddress().getPort();
-		out.print( "<div id=\"dfsnodetable\"> ");
-		if(whatNodes.equals("LIVE")) {
-
-			out.print( 
-					"<a name=\"LiveNodes\" id=\"title\">" +
-					"Live Datanodes : " + live.size() + "</a>" +
-			"<br><br>\n<table border=1 cellspacing=0>\n" );
-
-			counterReset();
-
-			if ( live.size() > 0 ) {
-
-				if ( live.get(0).getCapacity() > 1024 * diskBytes ) {
-					diskBytes *= 1024;
-					diskByteStr = "TB";
-				}
-
-				out.print( "<tr class=\"headerRow\"> <th " +
-						NodeHeaderStr("name") + "> Node <th " +
-						NodeHeaderStr("lastcontact") + "> Last <br>Contact <th " +
-						NodeHeaderStr("adminstate") + "> Admin State <th " +
-						NodeHeaderStr("capacity") + "> Configured <br>Capacity (" + 
-						diskByteStr + ") <th " + 
-						NodeHeaderStr("used") + "> Used <br>(" + 
-						diskByteStr + ") <th " + 
-						NodeHeaderStr("nondfsused") + "> Non DFS <br>Used (" + 
-						diskByteStr + ") <th " + 
-						NodeHeaderStr("remaining") + "> Remaining <br>(" + 
-						diskByteStr + ") <th " + 
-						NodeHeaderStr("pcused") + "> Used <br>(%) <th " + 
-						NodeHeaderStr("pcused") + "> Used <br>(%) <th " +
-						NodeHeaderStr("pcremaining") + "> Remaining <br>(%) <th " +
-						NodeHeaderStr("blocks") + "> Blocks\n" );
-
-				JspHelper.sortNodeList(live, sorterField, sorterOrder);
-				for ( int i=0; i < live.size(); i++ ) {
-					generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort);
-				}
-			}
-			out.print("</table>\n");
-		} else {
-
-			out.print("<br> <a name=\"DeadNodes\" id=\"title\"> " +
-					" Dead Datanodes : " +dead.size() + "</a><br><br>\n");
-
-			if ( dead.size() > 0 ) {
-				out.print( "<table border=1 cellspacing=0> <tr id=\"row1\"> " +
-				"<td> Node \n" );
-
-				JspHelper.sortNodeList(dead, "name", "ASC");
-				for ( int i=0; i < dead.size() ; i++ ) {
-					generateNodeData(out, dead.get(i), port_suffix, false, nnHttpPort);
-				}
-
-				out.print("</table>\n");
-			}
-		}
-		out.print("</div>");
-	}
-}%>
-
+  //for java.io.Serializable
+  private static final long serialVersionUID = 1L;
+%>
 <%
+final NamenodeJspHelper.NodeListJsp nodelistjsp = new NamenodeJspHelper.NodeListJsp();
 NameNode nn = (NameNode)application.getAttribute("name.node");
 String namenodeRole = nn.getRole().toString();
 FSNamesystem fsn = nn.getNamesystem();
@@ -261,15 +40,13 @@
   
 <body>
 <h1><%=namenodeRole%> '<%=namenodeLabel%>'</h1>
-<%= JspHelper.getVersionTable(fsn) %>
+<%= NamenodeJspHelper.getVersionTable(fsn) %>
 <br />
 <b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b><br>
 <b><a href="/logs/"><%=namenodeRole%> Logs</a></b><br>
 <b><a href=/dfshealth.jsp> Go back to DFS home</a></b>
 <hr>
-<%
-	generateDFSNodesList(out, nn, request); 
-%>
+<% nodelistjsp.generateNodesList(out, nn, request); %>
 
 <%
 out.println(ServletUtil.htmlFooter());

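Both the health page and the node list strip the port from displayed datanode names only when every live node shares the same ":port" suffix. That check from the deleted generateDFSNodesList(), as a standalone sketch (class and method names are illustrative):

  import java.util.Arrays;
  import java.util.List;

  public class PortSuffixExample {
    // Returns the ":port" suffix common to every name, or null if the
    // first name has no port or any node listens on a different port.
    static String commonPortSuffix(List<String> names) {
      if (names.isEmpty()) {
        return null;
      }
      String first = names.get(0);
      int idx = first.indexOf(':');
      if (idx <= 0) {
        return null;
      }
      String suffix = first.substring(idx);
      for (int i = 1; i < names.size(); i++) {
        if (!names.get(i).endsWith(suffix)) {
          return null;
        }
      }
      return suffix;
    }

    public static void main(String[] args) {
      System.out.println(commonPortSuffix(
          Arrays.asList("dn1:50010", "dn2:50010"))); // :50010
      System.out.println(commonPortSuffix(
          Arrays.asList("dn1:50010", "dn2:50020"))); // null
    }
  }
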
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/nn_browsedfscontent.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/nn_browsedfscontent.jsp?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/nn_browsedfscontent.jsp (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/hdfs/nn_browsedfscontent.jsp Mon Jun 15 22:13:06 2009
@@ -19,45 +19,12 @@
 %>
 <%@ page
   contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.hdfs.*"
-  import="org.apache.hadoop.hdfs.server.namenode.*"
-  import="org.apache.hadoop.hdfs.server.datanode.*"
-  import="org.apache.hadoop.hdfs.protocol.*"
-  import="org.apache.hadoop.util.*"
-  import="java.text.DateFormat"
-  import="java.net.InetAddress"
-  import="java.net.URLEncoder"
+  import="org.apache.hadoop.util.ServletUtil"
 %>
 <%!
-  public void redirectToRandomDataNode(
-                            NameNode nn, 
-                            HttpServletResponse resp) throws IOException {
-    FSNamesystem fsn = nn.getNamesystem();
-    String datanode = fsn.randomDataNode();
-    String redirectLocation;
-    String nodeToRedirect;
-    int redirectPort;
-    if (datanode != null) {
-      redirectPort = Integer.parseInt(datanode.substring(datanode.indexOf(':') + 1));
-      nodeToRedirect = datanode.substring(0, datanode.indexOf(':'));
-    }
-    else {
-      nodeToRedirect = nn.getHttpAddress().getHostName();
-      redirectPort = nn.getHttpAddress().getPort();
-    }
-    String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
-    redirectLocation = "http://" + fqdn + ":" + redirectPort + 
-                       "/browseDirectory.jsp?namenodeInfoPort=" + 
-                       nn.getHttpAddress().getPort() +
-                       "&dir=" + URLEncoder.encode("/", "UTF-8");
-    resp.sendRedirect(redirectLocation);
-  }
+  //for java.io.Serializable
+  private static final long serialVersionUID = 1L;
 %>
-
 <html>
 
 <title></title>
@@ -65,7 +32,7 @@
 <body>
 <% 
   NameNode nn = (NameNode)application.getAttribute("name.node");
-  redirectToRandomDataNode(nn, response); 
+  NamenodeJspHelper.redirectToRandomDataNode(nn, response); 
 %>
 <hr>
 

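The redirect logic that moved into NamenodeJspHelper builds the same browseDirectory.jsp URL the node tables link to: the datanode's FQDN and info port serve the page, while namenodeInfoPort tells that page which namenode web UI to link back to. The URL construction on its own, as a sketch (class and method names are illustrative):

  import java.io.UnsupportedEncodingException;
  import java.net.URLEncoder;

  public class BrowseUrlExample {
    // Builds the datanode browse URL used for the redirect, matching the
    // string assembled in the removed redirectToRandomDataNode().
    static String browseDirectoryUrl(String fqdn, int dnInfoPort,
        int nnInfoPort) throws UnsupportedEncodingException {
      return "http://" + fqdn + ":" + dnInfoPort
          + "/browseDirectory.jsp?namenodeInfoPort=" + nnInfoPort
          + "&dir=" + URLEncoder.encode("/", "UTF-8");
    }

    public static void main(String[] args) throws UnsupportedEncodingException {
      System.out.println(
          browseDirectoryUrl("dn1.hadoop.apache.org", 50075, 50070));
      // http://dn1.hadoop.apache.org:50075/browseDirectory.jsp?namenodeInfoPort=50070&dir=%2F
    }
  }
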
Propchange: hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Mon Jun 15 22:13:06 2009
@@ -0,0 +1,2 @@
+/hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
+/hadoop/core/trunk/src/webapps/secondary:776175-784663

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/secondary/status.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/secondary/status.jsp?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/secondary/status.jsp (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/webapps/secondary/status.jsp Mon Jun 15 22:13:06 2009
@@ -19,7 +19,12 @@
 %>
 <%@ page
   contentType="text/html; charset=UTF-8"
-  import="org.apache.hadoop.util.*"
+  import="org.apache.hadoop.hdfs.server.common.JspHelper"
+  import="org.apache.hadoop.util.ServletUtil"
+%>
+<%!
+  //for java.io.Serializable
+  private static final long serialVersionUID = 1L;
 %>
 
 <html>


