hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From j...@apache.org
Subject svn commit: r636361 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HLog.java src/java/org/apache/hadoop/hbase/HRegion.java
Date Wed, 12 Mar 2008 15:21:55 GMT
Author: jimk
Date: Wed Mar 12 08:21:52 2008
New Revision: 636361

URL: http://svn.apache.org/viewvc?rev=636361&view=rev
Log:
HBASE-433 HBASE-251 Region server should delete restore log after successful restore, Stuck
replaying the edits of crashed machine.

HLog

- don't overwrite oldlogfile in splitLog if it already exists. Rename it and copy it into
the new oldlogfile. Then delete it once it has been copied.
- use FileUtil.fullyDelete to delete region server log directory.

HRegion

- delete oldlogfile once it has been successfully processed

Modified:
    hadoop/hbase/branches/0.1/CHANGES.txt
    hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java
    hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=636361&r1=636360&r2=636361&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Wed Mar 12 08:21:52 2008
@@ -36,6 +36,8 @@
    HBASE-79    When HBase needs to be migrated, it should display a message on
                stdout, not just in the logs
    HBASE-495   No server address listed in .META.
+   HBASE-433 HBASE-251 Region server should delete restore log after successful
+               restore, Stuck replaying the edits of crashed machine.
 
   IMPROVEMENTS
    HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java?rev=636361&r1=636360&r2=636361&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java (original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java Wed Mar 12 08:21:52 2008
@@ -19,7 +19,6 @@
  */
 package org.apache.hadoop.hbase;
 
-import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Collections;
@@ -35,6 +34,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -494,7 +494,15 @@
    */
   static void splitLog(Path rootDir, Path srcDir, FileSystem fs,
     Configuration conf) throws IOException {
-    Path logfiles[] = fs.listPaths(new Path[] { srcDir });
+    if (!fs.exists(srcDir)) {
+      // Nothing to do
+      return;
+    }
+    FileStatus logfiles[] = fs.listStatus(srcDir);
+    if (logfiles == null || logfiles.length == 0) {
+      // Nothing to do
+      return;
+    }
     LOG.info("splitting " + logfiles.length + " log(s) in " +
       srcDir.toString());
     Map<Text, SequenceFile.Writer> logWriters =
@@ -503,17 +511,18 @@
       for (int i = 0; i < logfiles.length; i++) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Splitting " + i + " of " + logfiles.length + ": " +
-            logfiles[i]);
+            logfiles[i].getPath());
         }
         // Check for empty file.
-        if (fs.getFileStatus(logfiles[i]).getLen() <= 0) {
+        if (logfiles[i].getLen() <= 0) {
           LOG.info("Skipping " + logfiles[i].toString() +
-            " because zero length");
+              " because zero length");
           continue;
         }
         HLogKey key = new HLogKey();
         HLogEdit val = new HLogEdit();
-        SequenceFile.Reader in = new SequenceFile.Reader(fs, logfiles[i], conf);
+        SequenceFile.Reader in =
+          new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
         try {
           int count = 0;
           for (; in.next(key, val); count++) {
@@ -528,6 +537,17 @@
                   ),
                   HREGION_OLDLOGFILE_NAME
               );
+              
+              Path oldlogfile = null;
+              SequenceFile.Reader old = null;
+              if (fs.exists(logfile)) {
+                LOG.warn("Old log file " + logfile +
+                    " already exists. Copying existing file to new file");
+                oldlogfile = new Path(logfile.toString() + ".old");
+                fs.rename(logfile, oldlogfile);
+                old = new SequenceFile.Reader(fs, oldlogfile, conf);
+              }
+              
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Creating new log file writer for path " + logfile +
                   "; map content " + logWriters.toString());
@@ -537,8 +557,22 @@
               // Use copy of regionName; regionName object is reused inside in
               // HStoreKey.getRegionName so its content changes as we iterate.
               logWriters.put(new Text(regionName), w);
+              
+              if (old != null) {
+                // Copy from existing log file
+                HLogKey oldkey = new HLogKey();
+                HLogEdit oldval = new HLogEdit();
+                for (; old.next(oldkey, oldval); count++) {
+                  if (LOG.isDebugEnabled() && count > 0 && count % 10000 == 0) {
+                    LOG.debug("Copied " + count + " edits");
+                  }
+                  w.append(oldkey, oldval);
+                }
+                old.close();
+                fs.delete(oldlogfile);
+              }
             }
-            if (count % 10000 == 0 && count > 0 && LOG.isDebugEnabled()) {
+            if (LOG.isDebugEnabled() && count > 0 && count % 10000 == 0) {
               LOG.debug("Applied " + count + " edits");
             }
             w.append(key, val);
@@ -556,13 +590,13 @@
       }
     }
 
-    if (fs.exists(srcDir)) {
-      if (!fs.delete(srcDir)) {
-        LOG.error("Cannot delete: " + srcDir);
-        if (!FileUtil.fullyDelete(new File(srcDir.toString()))) {
-          throw new IOException("Cannot delete: " + srcDir);
-        }
-      }
+    try {
+      FileUtil.fullyDelete(fs, srcDir);
+    } catch (IOException e) {
+      e = RemoteExceptionHandler.checkIOException(e);
+      IOException io = new IOException("Cannot delete: " + srcDir);
+      io.initCause(e);
+      throw io;
     }
     LOG.info("log file splitting completed for " + srcDir.toString());
   }

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java?rev=636361&r1=636360&r2=636361&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java Wed Mar 12 08:21:52 2008
@@ -296,6 +296,13 @@
         maxSeqId = storeSeqId;
       }
     }
+    if (fs.exists(oldLogFile)) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Deleting old log file: " + oldLogFile);
+      }
+      fs.delete(oldLogFile);
+    }
+    
     this.minSequenceId = maxSeqId;
     if (LOG.isDebugEnabled()) {
       LOG.debug("Next sequence id for region " + regionInfo.getRegionName() +



Mime
View raw message