From: s..@apache.org
Subject: svn commit: r753481 [3/3] - in /hadoop/core/trunk: ./ src/hdfs/ src/hdfs/org/apache/hadoop/hdfs/protocol/ src/hdfs/org/apache/hadoop/hdfs/server/common/ src/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ s...
Date: Sat, 14 Mar 2009 01:20:37 GMT
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=753481&r1=753480&r2=753481&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Sat Mar 14 01:20:36 2009
@@ -30,12 +30,36 @@
  *****************************************************************************/
 public interface NamenodeProtocol extends VersionedProtocol {
   /**
-   * 2: Added getEditLogSize(), rollEditLog(), rollFSImage().
+   * Compared to the previous version, the following changes have been introduced.
+   * (Only the latest change is reflected here; the log of historical changes
+   * can be retrieved from svn.)
+   * 
+   * 3: Backup node support: versionRequest(), errorReport(), register(),
+   *      startCheckpoint(), endCheckpoint(), journalSize(), journal().
+   *    SecondaryNameNode methods deprecated:
+   *      getEditLogSize(), rollEditLog(), rollFSImage().
    */
-  public static final long versionID = 2L;
+  public static final long versionID = 3L;
 
-  /** Get a list of blocks belonged to <code>datanode</code>
-    * whose total size is equal to <code>size</code>
+  // Error codes passed by errorReport().
+  final static int NOTIFY = 0;
+  final static int FATAL = 1;
+
+  // Journal action codes. See journal().
+  public static byte JA_IS_ALIVE        = 100; // check whether the journal is alive
+  public static byte JA_JOURNAL         = 101; // just journal
+  public static byte JA_JSPOOL_START    = 102; // = FSEditLog.OP_JSPOOL_START
+  public static byte JA_CHECKPOINT_TIME = 103; // = FSEditLog.OP_CHECKPOINT_TIME
+
+  public final static int ACT_UNKNOWN    = 0;  // unknown action
+  public final static int ACT_SHUTDOWN   = 50; // shutdown node
+  public final static int ACT_CHECKPOINT = 51; // do checkpoint
+
+  /**
+   * Get a list of blocks belonging to <code>datanode</code>
+   * whose total size equals <code>size</code>.
+   * 
+   * @see org.apache.hadoop.hdfs.server.balancer.Balancer
    * @param datanode  a data node
    * @param size      requested size
    * @return          a list of blocks & their locations
@@ -49,7 +73,10 @@
    * Get the size of the current edit log (in bytes).
    * @return The number of bytes in the current edit log.
    * @throws IOException
+   * @deprecated 
+   *    See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}
    */
+  @Deprecated
   public long getEditLogSize() throws IOException;
 
   /**
@@ -57,7 +84,10 @@
    * call fails if the file system is in SafeMode.
    * @throws IOException
    * @return a unique token to identify this transaction.
+   * @deprecated 
+   *    See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}
    */
+  @Deprecated
   public CheckpointSignature rollEditLog() throws IOException;
 
   /**
@@ -65,6 +95,96 @@
    * new image to fsImage, removes the old edits and renames edits.new 
    * to edits. The call fails if any of the four files are missing.
    * @throws IOException
+   * @deprecated 
+   *    See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}
    */
+  @Deprecated
   public void rollFsImage() throws IOException;
+
+  /**
+   * Request name-node version and storage information.
+   * 
+   * @return {@link NamespaceInfo} identifying versions and storage information 
+   *          of the name-node
+   * @throws IOException
+   */
+  public NamespaceInfo versionRequest() throws IOException;
+
+  /**
+   * Report to the active name-node an error that occurred on a subordinate node.
+   * Depending on the error code, the active node may decide to unregister the
+   * reporting node.
+   * 
+   * @param registration the requesting node
+   * @param errorCode a code indicating the error
+   * @param msg a free-text description of the error
+   * @throws IOException
+   */
+  public void errorReport(NamenodeRegistration registration,
+                          int errorCode, 
+                          String msg) throws IOException;
+
+  /** 
+   * Register a subordinate name-node, such as a backup node.
+   *
+   * @return  the {@link NamenodeRegistration} of the node
+   *          that this node has just registered with.
+   */
+  public NamenodeRegistration register(NamenodeRegistration registration)
+  throws IOException;
+
+  /**
+   * A request to the active name-node to start a checkpoint.
+   * The name-node decides whether to admit or reject the request.
+   * It also decides what should be done with the backup node's
+   * image before and after the checkpoint.
+   * 
+   * @see CheckpointCommand
+   * @see NamenodeCommand
+   * @see #ACT_SHUTDOWN
+   * 
+   * @param registration the requesting node
+   * @return {@link CheckpointCommand} if checkpoint is allowed.
+   * @throws IOException
+   */
+  public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
+  throws IOException;
+
+  /**
+   * A request to the active name-node to finalize
+   * a previously started checkpoint.
+   * 
+   * @param registration the requesting node
+   * @param sig the {@code CheckpointSignature} that identifies the checkpoint
+   * @throws IOException
+   */
+  public void endCheckpoint(NamenodeRegistration registration,
+                            CheckpointSignature sig) throws IOException;
+
+  /**
+   * Get the size of the active name-node journal (edit log) in bytes.
+   * 
+   * @param registration the requesting node
+   * @return The number of bytes in the journal.
+   * @throws IOException
+   */
+  public long journalSize(NamenodeRegistration registration) throws IOException;
+
+  /**
+   * Journal edit records.
+   * This message is sent by the active name-node to the backup node
+   * via {@code EditLogBackupOutputStream} in order to synchronize meta-data
+   * changes with the backup namespace image.
+   * 
+   * @param registration active node registration
+   * @param jAction journal action
+   * @param length the length of the byte array
+   * @param records byte array containing serialized journal records
+   * @throws IOException
+   */
+  public void journal(NamenodeRegistration registration,
+                      int jAction,
+                      int length,
+                      byte[] records) throws IOException;
 }
+
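
For context, a plausible call sequence against the version-3 methods above might
look like the sketch below. This is illustrative only, not code from this commit:
the "namenode" proxy, the "backupReg" registration, and the doCheckpoint()/
shutdown() helpers are assumptions.

    // Hedged sketch: a subordinate node driving the new checkpoint handshake.
    NamespaceInfo nsInfo = namenode.versionRequest();   // compare versions first
    NamenodeRegistration reg = namenode.register(backupReg);
    NamenodeCommand cmd = namenode.startCheckpoint(reg);
    if (cmd.getAction() == NamenodeProtocol.ACT_SHUTDOWN) {
      shutdown();                        // the active node rejected the request
    } else if (cmd instanceof CheckpointCommand) {
      // build the new image, then report completion with its signature
      CheckpointSignature sig = doCheckpoint((CheckpointCommand)cmd);
      namenode.endCheckpoint(reg, sig);
    }

Presumably, a subordinate node reports problems back through errorReport(),
passing NOTIFY or FATAL as the error code.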

Added: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java?rev=753481&view=auto
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java (added)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java Sat Mar 14 01:20:36 2009
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+
+/**
+ * Information sent by a subordinate name-node to the active name-node
+ * during the registration process. 
+ */
+public class NamenodeRegistration extends StorageInfo
+implements NodeRegistration {
+  String rpcAddress;          // RPC address of the node
+  String httpAddress;         // HTTP address of the node
+  NamenodeRole role;          // node role
+  long checkpointTime = -1L;  // the age of the image
+
+  public NamenodeRegistration() {
+    super();
+  }
+
+  public NamenodeRegistration(String address,
+                              String httpAddress,
+                              StorageInfo storageInfo,
+                              NamenodeRole role,
+                              long checkpointTime) {
+    super();
+    this.rpcAddress = address;
+    this.httpAddress = httpAddress;
+    this.setStorageInfo(storageInfo);
+    this.role = role;
+    this.checkpointTime = checkpointTime;
+  }
+
+  @Override // NodeRegistration
+  public String getAddress() {
+    return rpcAddress;
+  }
+  
+  @Override // NodeRegistration
+  public String getRegistrationID() {
+    return Storage.getRegistrationID(this);
+  }
+
+  @Override // NodeRegistration
+  public int getVersion() {
+    return super.getLayoutVersion();
+  }
+
+  @Override // NodeRegistration
+  public String toString() {
+    return getClass().getSimpleName()
+    + "(" + rpcAddress
+    + ", role=" + getRole()
+    + ")";
+  }
+
+  /**
+   * Get name-node role.
+   */
+  public NamenodeRole getRole() {
+    return role;
+  }
+
+  public boolean isRole(NamenodeRole that) {
+    return role.equals(that);
+  }
+
+  /**
+   * Get the age of the image.
+   */
+  public long getCheckpointTime() {
+    return checkpointTime;
+  }
+
+  /////////////////////////////////////////////////
+  // Writable
+  /////////////////////////////////////////////////
+  static {
+    WritableFactories.setFactory
+      (NamenodeRegistration.class,
+       new WritableFactory() {
+         public Writable newInstance() { return new NamenodeRegistration(); }
+       });
+  }
+
+  @Override // Writable
+  public void write(DataOutput out) throws IOException {
+    Text.writeString(out, rpcAddress);
+    Text.writeString(out, httpAddress);
+    Text.writeString(out, role.name());
+    super.write(out);
+    out.writeLong(checkpointTime);
+  }
+
+  @Override // Writable
+  public void readFields(DataInput in) throws IOException {
+    rpcAddress = Text.readString(in);
+    httpAddress = Text.readString(in);
+    role = NamenodeRole.valueOf(Text.readString(in));
+    super.readFields(in);
+    checkpointTime = in.readLong();
+  }
+}
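
As a quick illustration of the Writable contract above, a round trip through
Hadoop's in-memory buffers could look like this sketch (not from the commit;
the storageInfo instance and the NamenodeRole.BACKUP value are assumptions):

    // Hedged sketch: serialize and deserialize a NamenodeRegistration
    // with org.apache.hadoop.io.DataOutputBuffer / DataInputBuffer.
    NamenodeRegistration reg = new NamenodeRegistration(
        "host1:8020", "host1:50070", storageInfo,        // storageInfo assumed
        NamenodeRole.BACKUP, System.currentTimeMillis());
    DataOutputBuffer out = new DataOutputBuffer();
    reg.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    NamenodeRegistration copy = new NamenodeRegistration();
    copy.readFields(in);   // fields come back in the exact order written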

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=753481&r1=753480&r2=753481&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Sat Mar 14 01:20:36 2009
@@ -35,7 +35,7 @@
  * to a data-node handshake.
  * 
  */
-public class NamespaceInfo extends StorageInfo implements Writable {
+public class NamespaceInfo extends StorageInfo {
   String  buildVersion;
   int distributedUpgradeVersion;
 
@@ -71,17 +71,13 @@
 
   public void write(DataOutput out) throws IOException {
     UTF8.writeString(out, getBuildVersion());
-    out.writeInt(getLayoutVersion());
-    out.writeInt(getNamespaceID());
-    out.writeLong(getCTime());
+    super.write(out);
     out.writeInt(getDistributedUpgradeVersion());
   }
 
   public void readFields(DataInput in) throws IOException {
     buildVersion = UTF8.readString(in);
-    layoutVersion = in.readInt();
-    namespaceID = in.readInt();
-    cTime = in.readLong();
+    super.readFields(in);
     distributedUpgradeVersion = in.readInt();
   }
 }
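
The super.write()/super.readFields() delegation above implies that StorageInfo
now carries the common serialization itself. Reconstructed from the removed
lines (the actual change lives elsewhere in this commit), it is presumably
shaped like this:

    // Presumed StorageInfo methods, preserving the wire order of the
    // three fields removed from NamespaceInfo above.
    public void write(DataOutput out) throws IOException {
      out.writeInt(getLayoutVersion());
      out.writeInt(getNamespaceID());
      out.writeLong(getCTime());
    }

    public void readFields(DataInput in) throws IOException {
      layoutVersion = in.readInt();
      namespaceID = in.readInt();
      cTime = in.readLong();
    }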

Added: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java?rev=753481&view=auto
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java (added)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java Sat Mar 14 01:20:36 2009
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.protocol;
+
+/**
+ * Generic interface specifying the information that needs to be sent
+ * to the name-node during the registration process. 
+ */
+public interface NodeRegistration {
+  /**
+   * Get address of the server node.
+   * @return hostname:portNumber
+   */
+  public String getAddress();
+
+  /**
+   * Get registration ID of the server node.
+   */
+  public String getRegistrationID();
+
+  /**
+   * Get layout version of the server node.
+   */
+  public int getVersion();
+
+  public String toString();
+}

Added: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java?rev=753481&view=auto
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java (added)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java Sat Mar 14 01:20:36 2009
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.io.*;
+
+import org.apache.hadoop.io.Writable;
+
+/**
+ * Base class for a server command.
+ * Issued by the name-node to notify other servers what should be done.
+ * Commands are identified by the action codes defined in the respective protocols.
+ * 
+ * @see DatanodeProtocol
+ * @see NamenodeProtocol
+ */
+public abstract class ServerCommand implements Writable {
+  private int action;
+
+  /**
+   * Unknown server command constructor.
+   * Creates a command with action 0.
+   * 
+   * @see NamenodeProtocol#ACT_UNKNOWN
+   * @see DatanodeProtocol#DNA_UNKNOWN
+   */
+  public ServerCommand() {
+    this(0);
+  }
+
+  /**
+   * Create a command for the specified action.
+   * Actions are protocol specific.
+   * 
+   * @see DatanodeProtocol
+   * @see NamenodeProtocol
+   * @param action the action code
+   */
+  public ServerCommand(int action) {
+    this.action = action;
+  }
+
+  /**
+   * Get server command action.
+   * @return action code.
+   */
+  public int getAction() {
+    return this.action;
+  }
+
+  ///////////////////////////////////////////
+  // Writable
+  ///////////////////////////////////////////
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(this.action);
+  }
+
+  public void readFields(DataInput in) throws IOException {
+    this.action = in.readInt();
+  }
+}
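
To show how the base class is meant to be extended, here is a hypothetical
subclass (illustration only, not part of this commit). The action code travels
in ServerCommand; any subclass payload is appended after it:

    // Hypothetical command sketch; "checkpointTime" is an invented payload.
    public class CheckpointAck extends ServerCommand {
      private long checkpointTime;

      public CheckpointAck() { super(); }  // needed for Writable reflection
      public CheckpointAck(int action, long checkpointTime) {
        super(action);
        this.checkpointTime = checkpointTime;
      }
      public void write(DataOutput out) throws IOException {
        super.write(out);                  // action code is written first
        out.writeLong(checkpointTime);
      }
      public void readFields(DataInput in) throws IOException {
        super.readFields(in);
        checkpointTime = in.readLong();
      }
    }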

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=753481&r1=753480&r2=753481&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java Sat Mar 14 01:20:36 2009
@@ -239,7 +239,7 @@
       ioe.initCause(e);
       throw ioe;
     }
-    base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
+    base_dir = new File(getBaseDirectory());
     data_dir = new File(base_dir, "data");
     
     // Setup the NameNode configuration
@@ -585,7 +585,7 @@
   boolean corruptBlockOnDataNode(int i, String blockName) throws Exception {
     Random random = new Random();
     boolean corrupted = false;
-    File dataDir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/data");
+    File dataDir = new File(getBaseDirectory() + "data");
     if (i < 0 || i >= dataNodes.size())
       return false;
     for (int dn = i*2; dn < i*2+2; dn++) {
@@ -739,7 +739,7 @@
   }
   
   public void formatDataNodeDirs() throws IOException {
-    base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
+    base_dir = new File(getBaseDirectory());
     data_dir = new File(base_dir, "data");
     if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
       throw new IOException("Cannot remove data directory: " + data_dir);
@@ -841,4 +841,8 @@
   public String getDataDirectory() {
     return data_dir.getAbsolutePath();
   }
+
+  public static String getBaseDirectory() {
+    return System.getProperty("test.build.data", "build/test/data") + "/dfs/";
+  }
 }
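
The new getBaseDirectory() helper centralizes the test-directory lookup that
the three call sites above previously duplicated. Illustrative use:

    // With test.build.data unset, this resolves to "build/test/data/dfs/data".
    File dataDir = new File(MiniDFSCluster.getBaseDirectory() + "data");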

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=753481&r1=753480&r2=753481&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java Sat Mar 14 01:20:36 2009
@@ -40,7 +40,6 @@
 import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
 
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -117,7 +116,6 @@
       
       // save image
       namenode.getFSImage().saveFSImage();
-      namenode.getFSImage().getEditLog().open();
       
       // write more files
       writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);

Added: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=753481&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Sat Mar 14 01:20:36 2009
@@ -0,0 +1,250 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates;
+
+import junit.framework.TestCase;
+
+public class TestBackupNode extends TestCase {
+  public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
+
+  static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
+
+  protected void setUp() throws Exception {
+    super.setUp();
+    File baseDir = new File(BASE_DIR);
+    if(baseDir.exists())
+      if(!(FileUtil.fullyDelete(baseDir)))
+        throw new IOException("Cannot remove directory: " + baseDir);
+    File dirC = new File(getBackupNodeDir(StartupOption.CHECKPOINT, 1));
+    dirC.mkdirs();
+    File dirB = new File(getBackupNodeDir(StartupOption.BACKUP, 1));
+    dirB.mkdirs();
+    dirB = new File(getBackupNodeDir(StartupOption.BACKUP, 2));
+    dirB.mkdirs();
+  }
+
+  protected void tearDown() throws Exception {
+    super.tearDown();
+    File baseDir = new File(BASE_DIR);
+    if(!(FileUtil.fullyDelete(baseDir)))
+      throw new IOException("Cannot remove directory: " + baseDir);
+  }
+
+  static void writeFile(FileSystem fileSys, Path name, int repl)
+  throws IOException {
+    TestCheckpoint.writeFile(fileSys, name, repl);
+  }
+
+
+  static void checkFile(FileSystem fileSys, Path name, int repl)
+  throws IOException {
+    TestCheckpoint.checkFile(fileSys, name, repl);
+  }
+
+  void cleanupFile(FileSystem fileSys, Path name)
+  throws IOException {
+    TestCheckpoint.cleanupFile(fileSys, name);
+  }
+
+  static String getBackupNodeDir(StartupOption t, int i) {
+    return BASE_DIR + "name" + t.getName() + i;
+  }
+
+  BackupNode startBackupNode(Configuration conf,
+                             StartupOption t, int i) throws IOException {
+    Configuration c = new Configuration(conf);
+    String dirs = getBackupNodeDir(t, i);
+    c.set("dfs.name.dir", dirs);
+    c.set("dfs.name.edits.dir", "${dfs.name.dir}");
+    return (BackupNode)NameNode.createNameNode(new String[]{t.getName()}, c);
+  }
+
+  void waitCheckpointDone(BackupNode backup) {
+    do {
+      try {
+        LOG.info("Waiting for checkpoint to complete...");
+        Thread.sleep(1000);
+      } catch (Exception e) {}
+    } while(backup.getCheckpointState() != CheckpointStates.START);
+  }
+
+  public void testCheckpoint() throws IOException {
+    testCheckpoint(StartupOption.CHECKPOINT);
+    testCheckpoint(StartupOption.BACKUP);
+  }
+
+  void testCheckpoint(StartupOption op) throws IOException {
+    Path file1 = new Path("checkpoint.dat");
+    Path file2 = new Path("checkpoint2.dat");
+
+    Configuration conf = new Configuration();
+    short replication = (short)conf.getInt("dfs.replication", 3);
+    conf.set("dfs.blockreport.initialDelay", "0");
+    conf.setInt("dfs.datanode.scan.period.hours", -1); // disable block scanner
+    int numDatanodes = Math.max(3, replication);
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    BackupNode backup = null;
+
+    try {
+      cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+      fileSys = cluster.getFileSystem();
+      //
+      // verify that 'format' really blew away all pre-existing files
+      //
+      assertTrue(!fileSys.exists(file1));
+      assertTrue(!fileSys.exists(file2));
+
+      //
+      // Create file1
+      //
+      writeFile(fileSys, file1, replication);
+      checkFile(fileSys, file1, replication);
+
+      //
+      // Take a checkpoint
+      //
+      backup = startBackupNode(conf, op, 1);
+      waitCheckpointDone(backup);
+    } catch(IOException e) {
+      LOG.error("Error in TestBackupNode:", e);
+      assertTrue(e.getLocalizedMessage(), false);
+    } finally {
+      if(backup != null) backup.stop();
+      if(fileSys != null) fileSys.close();
+      if(cluster != null) cluster.shutdown();
+    }
+    File imageFileNN = new File(BASE_DIR, "name1/current/fsimage");
+    File imageFileBN = new File(getBackupNodeDir(op, 1), "/current/fsimage");
+    LOG.info("NameNode fsimage length = " + imageFileNN.length());
+    LOG.info("Backup Node fsimage length = " + imageFileBN.length());
+    assertTrue(imageFileNN.length() == imageFileBN.length());
+
+    try {
+      //
+      // Restart cluster and verify that file1 still exists.
+      //
+      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      fileSys = cluster.getFileSystem();
+      // check that file1 still exists
+      checkFile(fileSys, file1, replication);
+      cleanupFile(fileSys, file1);
+
+      // create new file file2
+      writeFile(fileSys, file2, replication);
+      checkFile(fileSys, file2, replication);
+
+      //
+      // Take a checkpoint
+      //
+      backup = startBackupNode(conf, op, 1);
+      waitCheckpointDone(backup);
+    } catch(IOException e) {
+      LOG.error("Error in TestBackupNode:", e);
+      assertTrue(e.getLocalizedMessage(), false);
+    } finally {
+      if(backup != null) backup.stop();
+      if(fileSys != null) fileSys.close();
+      if(cluster != null) cluster.shutdown();
+    }
+    LOG.info("NameNode fsimage length = " + imageFileNN.length());
+    LOG.info("Backup Node fsimage length = " + imageFileBN.length());
+    assertTrue(imageFileNN.length() == imageFileBN.length());
+
+    try {
+      //
+      // Restart cluster and verify that file2 exists and
+      // file1 does not exist.
+      //
+      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      fileSys = cluster.getFileSystem();
+
+      assertTrue(!fileSys.exists(file1));
+
+      // verify that file2 exists
+      checkFile(fileSys, file2, replication);
+    } catch(IOException e) {
+      LOG.error("Error in TestBackupNode:", e);
+      assertTrue(e.getLocalizedMessage(), false);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test that only one backup node can register.
+   * @throws IOException
+   */
+  public void testBackupRegistration() throws IOException {
+    Configuration conf1 = new Configuration();
+    Configuration conf2 = null;
+    MiniDFSCluster cluster = null;
+    BackupNode backup1 = null;
+    BackupNode backup2 = null;
+    try {
+      // start name-node and backup node 1
+      cluster = new MiniDFSCluster(conf1, 0, true, null);
+      conf1.set("dfs.backup.address", "0.0.0.0:7770");
+      conf1.set("dfs.backup.http.address", "0.0.0.0:7775");
+      backup1 = startBackupNode(conf1, StartupOption.BACKUP, 1);
+      // try to start backup node 2
+      conf2 = new Configuration(conf1);
+      conf2.set("dfs.backup.address", "0.0.0.0:7771");
+      conf2.set("dfs.backup.http.address", "0.0.0.0:7776");
+      try {
+        backup2 = startBackupNode(conf2, StartupOption.BACKUP, 2);
+        backup2.stop();
+        backup2 = null;
+        assertTrue("Only one backup node should be able to start", false);
+      } catch(IOException e) {
+        assertTrue(
+            e.getLocalizedMessage().contains("Registration is not allowed"));
+        // should fail - doing good
+      }
+      // stop backup node 1; backup node 2 should be able to start
+      backup1.stop();
+      backup1 = null;
+      try {
+        backup2 = startBackupNode(conf2, StartupOption.BACKUP, 2);
+      } catch(IOException e) {
+        assertTrue("Backup node 2 should be able to start", false);
+      }
+    } catch(IOException e) {
+      LOG.error("Error in TestBackupNode:", e);
+      assertTrue(e.getLocalizedMessage(), false);
+    } finally {
+      if(backup1 != null) backup1.stop();
+      if(backup2 != null) backup2.stop();
+      if(cluster != null) cluster.shutdown();
+    }
+  }
+}

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=753481&r1=753480&r2=753481&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Sat Mar 14 01:20:36 2009
@@ -50,20 +50,20 @@
   static final int numDatanodes = 3;
   short replication = 3;
 
-  private void writeFile(FileSystem fileSys, Path name, int repl)
+  static void writeFile(FileSystem fileSys, Path name, int repl)
     throws IOException {
     FSDataOutputStream stm = fileSys.create(name, true,
                                             fileSys.getConf().getInt("io.file.buffer.size", 4096),
                                             (short)repl, (long)blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
+    byte[] buffer = new byte[TestCheckpoint.fileSize];
+    Random rand = new Random(TestCheckpoint.seed);
     rand.nextBytes(buffer);
     stm.write(buffer);
     stm.close();
   }
   
   
-  private void checkFile(FileSystem fileSys, Path name, int repl)
+  static void checkFile(FileSystem fileSys, Path name, int repl)
     throws IOException {
     assertTrue(fileSys.exists(name));
     int replication = fileSys.getFileStatus(name).getReplication();
@@ -71,7 +71,7 @@
     //We should probably test for more of the file properties.    
   }
   
-  private void cleanupFile(FileSystem fileSys, Path name)
+  static void cleanupFile(FileSystem fileSys, Path name)
     throws IOException {
     assertTrue(fileSys.exists(name));
     fileSys.delete(name, true);

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=753481&r1=753480&r2=753481&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Sat Mar 14 01:20:36 2009
@@ -73,7 +73,7 @@
  
   protected void setUp() throws Exception {
     config = new Configuration();
-    String baseDir = System.getProperty("test.build.data", "/tmp");
+    String baseDir = System.getProperty("test.build.data",  "build/test/data");
     
     hdfsDir = new File(baseDir, "dfs");
     if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {

Modified: hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp?rev=753481&r1=753480&r2=753481&view=diff
==============================================================================
--- hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp (original)
+++ hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp Sat Mar 14 01:20:36 2009
@@ -138,16 +138,17 @@
   
   
   public void generateConfReport( JspWriter out,
-		  FSNamesystem fsn,
+		  NameNode nn,
 		  HttpServletRequest request)
   throws IOException {
+	  FSNamesystem fsn = nn.getNamesystem();
 	  long underReplicatedBlocks = fsn.getUnderReplicatedBlocks();
 	  FSImage fsImage = fsn.getFSImage();
 	  List<Storage.StorageDirectory> removedStorageDirs = fsImage.getRemovedStorageDirs();
 	  String storageDirsSizeStr="", removedStorageDirsSizeStr="", storageDirsStr="", removedStorageDirsStr="", storageDirsDiv="", removedStorageDirsDiv="";
 
 	  //FS Image storage configuration
-	  out.print("<h3> NameNode Storage: </h3>");
+	  out.print("<h3> " + nn.getRole() + " Storage: </h3>");
 	  out.print("<div id=\"dfstable\"> <table border=1 cellpadding=10 cellspacing=0 title=\"NameNode Storage\">\n"+
 	  "<thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>");
 	  
@@ -245,20 +246,21 @@
 <%
   NameNode nn = (NameNode)application.getAttribute("name.node");
   FSNamesystem fsn = nn.getNamesystem();
+  String namenodeRole = nn.getRole().toString();
   String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
 %>
 
 <html>
 
 <link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-<title>Hadoop NameNode <%=namenodeLabel%></title>
+<title>Hadoop <%=namenodeRole%> <%=namenodeLabel%></title>
     
 <body>
-<h1>NameNode '<%=namenodeLabel%>'</h1>
+<h1><%=namenodeRole%> '<%=namenodeLabel%>'</h1>
 <%= JspHelper.getVersionTable(fsn) %>
 <br />
 <b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b><br>
-<b><a href="/logs/">Namenode Logs</a></b>
+<b><a href="/logs/"><%=namenodeRole%> Logs</a></b>
 
 <hr>
 <h3>Cluster Summary</h3>
@@ -271,7 +273,7 @@
 %>
 <hr>
 <%
-	generateConfReport(out, fsn, request);
+	generateConfReport(out, nn, request);
 %>
 <%
 out.println(ServletUtil.htmlFooter());

Modified: hadoop/core/trunk/src/webapps/hdfs/dfsnodelist.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/webapps/hdfs/dfsnodelist.jsp?rev=753481&r1=753480&r2=753481&view=diff
==============================================================================
--- hadoop/core/trunk/src/webapps/hdfs/dfsnodelist.jsp (original)
+++ hadoop/core/trunk/src/webapps/hdfs/dfsnodelist.jsp Sat Mar 14 01:20:36 2009
@@ -249,6 +249,7 @@
 
 <%
 NameNode nn = (NameNode)application.getAttribute("name.node");
+String namenodeRole = nn.getRole().toString();
 FSNamesystem fsn = nn.getNamesystem();
 String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
 %>
@@ -256,14 +257,14 @@
 <html>
 
 <link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-<title>Hadoop NameNode <%=namenodeLabel%></title>
+<title>Hadoop <%=namenodeRole%> <%=namenodeLabel%></title>
   
 <body>
-<h1>NameNode '<%=namenodeLabel%>'</h1>
+<h1><%=namenodeRole%> '<%=namenodeLabel%>'</h1>
 <%= JspHelper.getVersionTable(fsn) %>
 <br />
 <b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b><br>
-<b><a href="/logs/">Namenode Logs</a></b><br>
+<b><a href="/logs/"><%=namenodeRole%> Logs</a></b><br>
 <b><a href=/dfshealth.jsp> Go back to DFS home</a></b>
 <hr>
 <%


