hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r1453712 - in /hbase/trunk/hbase-server/src: main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/hadoop/hbase/regionserver/wal/ main/java/org/apache/hadoop/hbase/util/ test/java/org/apache/hadoop/hbase/util/
Date Thu, 07 Mar 2013 07:01:54 GMT
Author: stack
Date: Thu Mar  7 07:01:54 2013
New Revision: 1453712

URL: http://svn.apache.org/r1453712
Log:
HBASE-7982 TestReplicationQueueFailover* runs for a minute, spews 3/4million lines complaining
'Filesystem closed', has an NPE, and still passes?

Modified:
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1453712&r1=1453711&r2=1453712&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Thu Mar  7 07:01:54 2013
@@ -1159,23 +1159,27 @@ public class HRegionServer implements Cl
   }
 
   private void closeWAL(final boolean delete) {
-    try {
-      if (this.hlogForMeta != null) {
-        //All hlogs (meta and non-meta) are in the same directory. Don't call 
-        //closeAndDelete here since that would delete all hlogs not just the 
-        //meta ones. We will just 'close' the hlog for meta here, and leave
-        //the directory cleanup to the follow-on closeAndDelete call.
+    if (this.hlogForMeta != null) {
+      // All hlogs (meta and non-meta) are in the same directory. Don't call
+      // closeAndDelete here since that would delete all hlogs not just the
+      // meta ones. We will just 'close' the hlog for meta here, and leave
+      // the directory cleanup to the follow-on closeAndDelete call.
+      try {
         this.hlogForMeta.close();
+      } catch (Throwable e) {
+        LOG.error("Metalog close and delete failed", RemoteExceptionHandler.checkThrowable(e));
       }
-      if (this.hlog != null) {
+    }
+    if (this.hlog != null) {
+      try {
         if (delete) {
           hlog.closeAndDelete();
         } else {
           hlog.close();
         }
+      } catch (Throwable e) {
+        LOG.error("Close and delete failed", RemoteExceptionHandler.checkThrowable(e));
       }
-    } catch (Throwable e) {
-      LOG.error("Close and delete failed", RemoteExceptionHandler.checkThrowable(e));
     }
   }
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java?rev=1453712&r1=1453711&r2=1453712&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java Thu Mar  7 07:01:54 2013
@@ -193,9 +193,9 @@ public class HLogSplitter {
 
     status = TaskMonitor.get().createStatus(
         "Splitting logs in " + srcDir);
-    
+
     long startTime = EnvironmentEdgeManager.currentTimeMillis();
-    
+
     status.setStatus("Determining files to split...");
     List<Path> splits = null;
     if (!fs.exists(srcDir)) {
@@ -219,7 +219,7 @@ public class HLogSplitter {
     LOG.info(msg);
     return splits;
   }
-  
+
   private void logAndReport(String msg) {
     status.setStatus(msg);
     LOG.info(msg);
@@ -321,7 +321,7 @@ public class HLogSplitter {
         }
       }
       status.setStatus("Log splits complete. Checking for orphaned logs.");
-      
+
       if (fs.listStatus(srcDir).length > processedLogs.size()
           + corruptedLogs.size()) {
         throw new OrphanHLogAfterSplitException(
@@ -511,7 +511,12 @@ public class HLogSplitter {
     List<Path> corruptedLogs = new ArrayList<Path>();
     FileSystem fs;
     fs = rootdir.getFileSystem(conf);
-    Path logPath = new Path(logfile);
+    Path logPath = null;
+    if (FSUtils.isStartingWithPath(rootdir, logfile)) {
+      logPath = new Path(logfile);
+    } else {
+      logPath = new Path(rootdir, logfile);
+    }
     if (ZKSplitLog.isCorrupted(rootdir, logPath.getName(), fs)) {
       corruptedLogs.add(logPath);
     } else {
@@ -842,7 +847,7 @@ public class HLogSplitter {
           buffer = new RegionEntryBuffer(key.getTablename(), key.getEncodedRegionName());
           buffers.put(key.getEncodedRegionName(), buffer);
         }
-        incrHeap= buffer.appendEntry(entry);        
+        incrHeap= buffer.appendEntry(entry);
       }
 
       // If we crossed the chunk threshold, wait for more space to be available
@@ -1092,7 +1097,7 @@ public class HLogSplitter {
 
   /**
    * A class used in distributed log splitting
-   * 
+   *
    */
   class DistributedLogSplittingHelper {
     // Report progress, only used in distributed log splitting
@@ -1143,7 +1148,7 @@ public class HLogSplitter {
         new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR));
 
     private boolean closeAndCleanCompleted = false;
-    
+
     private boolean logWritersClosed  = false;
 
     private final int numThreads;
@@ -1171,7 +1176,7 @@ public class HLogSplitter {
     }
 
     /**
-     * 
+     *
      * @return null if failed to report progress
      * @throws IOException
      */
@@ -1303,7 +1308,7 @@ public class HLogSplitter {
       }
       return paths;
     }
-    
+
     private List<IOException> closeLogWriters(List<IOException> thrown)
         throws IOException {
       if (!logWritersClosed) {

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=1453712&r1=1453711&r2=1453712&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java Thu Mar  7 07:01:54 2013
@@ -86,6 +86,61 @@ public abstract class FSUtils {
     super();
   }
 
+  /**
+   * Compare of path component. Does not consider schema; i.e. if schemas different but
+   * <code>path</code> starts with <code>rootPath</code>, then the function returns true
+   * @param rootPath
+   * @param path 
+   * @return True if <code>path</code> starts with <code>rootPath</code>
+   */
+  public static boolean isStartingWithPath(final Path rootPath, final String path) {
+    String uriRootPath = rootPath.toUri().getPath();
+    String tailUriPath = (new Path(path)).toUri().getPath();
+    return tailUriPath.startsWith(uriRootPath);
+  }
+
+  /**
+   * Compare path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare the
+   * '/a/b/c' part. Does not consider schema; i.e. if schemas different but path or subpath matches,
+   * the two will equate.
+   * @param pathToSearch Path we will be trying to match.
+   * @param pathTail
+   * @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
+   */
+  public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
+    return isMatchingTail(pathToSearch, new Path(pathTail));
+  }
+
+  /**
+   * Compare path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare the
+   * '/a/b/c' part. If you passed in 'hdfs://a/b/c' and 'b/c', it would return false, since the
+   * two paths differ in depth and this method requires equal depth. Does not consider
+   * schema; i.e. if schemas different but path or subpath matches, the two will equate.
+   * @param pathToSearch Path we will be trying to match.
+   * @param pathTail
+   * @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
+   */
+  public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
+    if (pathToSearch.depth() != pathTail.depth()) return false;
+    Path tailPath = pathTail;
+    String tailName;
+    Path toSearch = pathToSearch;
+    String toSearchName;
+    boolean result = false;
+    do {
+      tailName = tailPath.getName();
+      if (tailName == null || tailName.length() <= 0) {
+        result = true;
+        break;
+      }
+      toSearchName = toSearch.getName();
+      if (toSearchName == null || toSearchName.length() <= 0) break;
+      // Move up a parent on each path for next go around.  Path doesn't let us go off the end.
+      tailPath = tailPath.getParent();
+      toSearch = toSearch.getParent();
+    } while(tailName.equals(toSearchName));
+    return result;
+  }
+
   public static FSUtils getInstance(FileSystem fs, Configuration conf) {
     String scheme = fs.getUri().getScheme();
     if (scheme == null) {

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java?rev=1453712&r1=1453711&r2=1453712&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java Thu Mar  7 07:01:54 2013
@@ -34,12 +34,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -49,6 +49,33 @@ import org.junit.experimental.categories
  */
 @Category(MediumTests.class)
 public class TestFSUtils {
+  /**
+   * Test path compare and prefix checking.
+   * @throws IOException 
+   */
+  @Test
+  public void testMatchingTail() throws IOException {
+    HBaseTestingUtility htu = new HBaseTestingUtility();
+    final FileSystem fs = htu.getTestFileSystem();
+    Path rootdir = htu.getDataTestDir();
+    assertTrue(rootdir.depth() > 1);
+    Path partPath = new Path("a", "b");
+    Path fullPath = new Path(rootdir, partPath);
+    Path fullyQualifiedPath = fs.makeQualified(fullPath);
+    assertFalse(FSUtils.isMatchingTail(fullPath, partPath));
+    assertFalse(FSUtils.isMatchingTail(fullPath, partPath.toString()));
+    assertTrue(FSUtils.isStartingWithPath(rootdir, fullPath.toString()));
+    assertTrue(FSUtils.isStartingWithPath(fullyQualifiedPath, fullPath.toString()));
+    assertFalse(FSUtils.isStartingWithPath(rootdir, partPath.toString()));
+    assertFalse(FSUtils.isMatchingTail(fullyQualifiedPath, partPath));
+    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath));
+    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath.toString()));
+    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fs.makeQualified(fullPath)));
+    assertTrue(FSUtils.isStartingWithPath(rootdir, fullyQualifiedPath.toString()));
+    assertFalse(FSUtils.isMatchingTail(fullPath, new Path("x")));
+    assertFalse(FSUtils.isMatchingTail(new Path("x"), fullPath));
+  }
+
   @Test
   public void testVersion() throws DeserializationException, IOException {
     HBaseTestingUtility htu = new HBaseTestingUtility();



Mime
View raw message