From: taklwu@apache.org
To: commits@hbase.apache.org
Date: Wed, 02 Jun 2021 17:33:17 +0000
Subject: [hbase-operator-tools] branch master updated: HBASE-25921 Fix Wrong FileSystem when running `filesystem` on non-HDFS storage (#88)

This is an automated email from the ASF dual-hosted git repository.
taklwu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
     new 2245870  HBASE-25921 Fix Wrong FileSystem when running `filesystem` on non-HDFS storage (#88)
2245870 is described below

commit 22458706ffc8973d6bd78ed325c138321cd19927
Author:     Tak Lon (Stephen) Wu
AuthorDate: Wed Jun 2 10:33:08 2021 -0700

    HBASE-25921 Fix Wrong FileSystem when running `filesystem` on non-HDFS storage (#88)

    Signed-off-by: Anoop Sam John
    Signed-off-by: Wellington Ramos Chevreuil
---
 .../main/java/org/apache/hbase/HBCKFsUtils.java    |   9 ++
 .../java/org/apache/hbase/hbck1/HBaseFsck.java     |  32 +++---
 .../apache/hbase/hbck1/HFileCorruptionChecker.java |   3 +-
 .../TestHBCKFsTableDescriptorForceCreation.java    |   6 +-
 .../apache/hbase/TestHBCKFsTableDescriptors.java   |   6 +-
 .../java/org/apache/hbase/hbck1/TestHBaseFsck.java | 124 +++++++++++++++++++++
 .../hbase/hbck1/TestHFileCorruptionChecker.java    | 119 ++++++++++++++++++++
 7 files changed, 279 insertions(+), 20 deletions(-)

diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java
index f206609..e7e534a 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java
@@ -108,6 +108,15 @@ public final class HBCKFsUtils {
   }
 
   /**
+   * @param conf must not be null
+   * @return Returns the filesystem of the hbase rootdir.
+   * @throws IOException from underlying FileSystem
+   */
+  public static FileSystem getRootDirFileSystem(Configuration conf) throws IOException {
+    return getRootDir(conf).getFileSystem(conf);
+  }
+
+  /**
    * Copy all files/subdirectories from source path to destination path.
    *
    * COPIED from FSUtils.copyFilesParallel
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java
index c04bd02..256ff51 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java
@@ -149,6 +149,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Tool;
+import org.apache.hbase.HBCKFsUtils;
 import org.apache.hbase.HBCKMetaTableAccessor;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
@@ -274,6 +275,8 @@ public class HBaseFsck extends Configured implements Closeable {
   private int retcode = 0;
   private Path hbckLockPath;
   private FSDataOutputStream hbckOutFd;
+  // single root file system instance to be used within HBaseFsck
+  private FileSystem rootFs;
 
   // This lock is to prevent cleanup of balancer resources twice between
   // ShutdownHook and the main code. We cleanup only if the connect() is
   // successful
@@ -402,6 +405,7 @@
     lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());
     createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());
     zkw = createZooKeeperWatcher();
+    rootFs = HBCKFsUtils.getRootDirFileSystem(conf);
   }
 
   /**
@@ -433,13 +437,13 @@
    */
   @VisibleForTesting
   public static Path getTmpDir(Configuration conf) throws IOException {
-    return new Path(CommonFSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);
+    return new Path(HBCKFsUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);
   }
 
   /**
    * Creates an hbck lock file.
    */
-  private static class FileLockCallable implements Callable<FSDataOutputStream> {
+  static class FileLockCallable implements Callable<FSDataOutputStream> {
     RetryCounter retryCounter;
     private final Configuration conf;
     private Path hbckLockPath = null;
@@ -465,10 +469,11 @@
     @Override
     public FSDataOutputStream call() throws IOException {
       try {
-        FileSystem fs = CommonFSUtils.getCurrentFileSystem(this.conf);
+        // tmpDir is created based on hbase.rootdir
+        Path tmpDir = getTmpDir(conf);
+        FileSystem fs = tmpDir.getFileSystem(conf);
         FsPermission defaultPerms = CommonFSUtils.getFilePermissions(fs, this.conf,
             HConstants.DATA_FILE_UMASK_KEY);
-        Path tmpDir = getTmpDir(conf);
         this.hbckLockPath = new Path(tmpDir, this.lockFileName);
         fs.mkdirs(tmpDir);
         final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);
@@ -559,7 +564,7 @@
     do {
       try {
         IOUtils.closeQuietly(hbckOutFd);
-        CommonFSUtils.delete(CommonFSUtils.getCurrentFileSystem(getConf()), hbckLockPath, true);
+        HBCKFsUtils.delete(rootFs, hbckLockPath, true);
         return;
       } catch (IOException ioe) {
         LOG.info("Failed to delete " + hbckLockPath + ", try="
@@ -1535,8 +1540,7 @@
       return;
     }
 
-    FileSystem fs = FileSystem.get(getConf());
-    RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
+    RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(rootFs, regionDir);
     LOG.debug("RegionInfo read: " + hri.toString());
     hbi.hdfsEntry.hri = hri;
   }
@@ -1927,7 +1931,7 @@
       HBaseTestingUtility.closeRegionAndWAL(meta);
       // Clean out the WAL we created and used here.
       LOG.info("Deleting {}, result={}", waldir,
-          CommonFSUtils.delete(FileSystem.get(getConf()), waldir, true));
+          HBCKFsUtils.delete(waldir.getFileSystem(getConf()), waldir, true));
     }
     LOG.info("Success! hbase:meta table rebuilt. Old hbase:meta moved into " + backupDir);
     return true;
@@ -3500,7 +3504,6 @@
       return;
     }
 
-    FileSystem fs = FileSystem.get(conf);
     LOG.info("Found parent: " + parent.getRegionNameAsString());
     LOG.info("Found potential daughter a: " + daughterA.getRegionNameAsString());
     LOG.info("Found potential daughter b: " + daughterB.getRegionNameAsString());
@@ -3528,7 +3531,7 @@
       return;
     }
 
-    sidelineRegionDir(fs, parent);
+    sidelineRegionDir(rootFs, parent);
     LOG.info("[" + thread + "] Sidelined parent region dir "+ parent.getHdfsRegionDir() + " into "
         + getSidelineDir());
     debugLsr(parent.getHdfsRegionDir());
@@ -3624,7 +3627,6 @@
     }
     List<HbckInfo> regionsToSideline =
         RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline);
-    FileSystem fs = FileSystem.get(conf);
     for (HbckInfo regionToSideline: regionsToSideline) {
       try {
         LOG.info("Closing region: " + regionToSideline);
@@ -3643,7 +3645,7 @@
       }
       LOG.info("Before sideline big overlapped region: "
          + regionToSideline.toString());
-      Path sidelineRegionDir = sidelineRegionDir(fs, TO_BE_LOADED, regionToSideline);
+      Path sidelineRegionDir = sidelineRegionDir(rootFs, TO_BE_LOADED, regionToSideline);
       if (sidelineRegionDir != null) {
         sidelinedRegions.put(sidelineRegionDir, regionToSideline);
         LOG.info("After sidelined big overlapped region: "
@@ -5441,7 +5443,7 @@
         tableDirs.add(CommonFSUtils.getTableDir(rootdir, t));
       }
     } else {
-      tableDirs = FSUtils.getTableDirs(CommonFSUtils.getCurrentFileSystem(getConf()), rootdir);
+      tableDirs = FSUtils.getTableDirs(rootFs, rootdir);
     }
     hfcc.checkTables(tableDirs);
     hfcc.report(errors);
@@ -5616,4 +5618,8 @@
       }
     }
   }
+
+  FileSystem getRootFs() {
+    return rootFs;
+  }
 }
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HFileCorruptionChecker.java b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HFileCorruptionChecker.java
index e93bae7..2d569bf 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HFileCorruptionChecker.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HFileCorruptionChecker.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.FSUtils.FamilyDirFilter;
 import org.apache.hadoop.hbase.util.FSUtils.HFileFilter;
 import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
+import org.apache.hbase.HBCKFsUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -84,7 +85,7 @@
   public HFileCorruptionChecker(Configuration conf, ExecutorService executor,
       boolean quarantine) throws IOException {
     this.conf = conf;
-    this.fs = FileSystem.get(conf);
+    this.fs = HBCKFsUtils.getRootDirFileSystem(conf);
     this.cacheConf = CacheConfig.DISABLED;
     this.executor = executor;
     this.inQuarantineMode = quarantine;
diff --git a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptorForceCreation.java b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptorForceCreation.java
index 2db717a..5b5b853 100644
--- a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptorForceCreation.java
+++ b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptorForceCreation.java
@@ -55,7 +55,7 @@ public class TestHBCKFsTableDescriptorForceCreation {
   public void testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse()
       throws IOException {
     final String name = this.name.getMethodName();
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    FileSystem fs = HBCKFsUtils.getRootDirFileSystem(UTIL.getConfiguration());
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     HBCKFsTableDescriptors fstd = new HBCKFsTableDescriptors(fs, rootdir);
 
@@ -68,7 +68,7 @@ public class TestHBCKFsTableDescriptorForceCreation {
   public void testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse()
       throws IOException {
     final String name = this.name.getMethodName();
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    FileSystem fs = HBCKFsUtils.getRootDirFileSystem(UTIL.getConfiguration());
     // Cleanup old tests if any detritus laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     HBCKFsTableDescriptors fstd = new HBCKFsTableDescriptors(fs, rootdir);
@@ -83,7 +83,7 @@ public class TestHBCKFsTableDescriptorForceCreation {
   public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
       throws Exception {
     final String name = this.name.getMethodName();
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    FileSystem fs = HBCKFsUtils.getRootDirFileSystem(UTIL.getConfiguration());
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     HBCKFsTableDescriptors fstd = new HBCKFsTableDescriptors(fs, rootdir);
     TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
diff --git a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptors.java b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptors.java
index 20ccdfc..e4282e6 100644
--- a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptors.java
+++ b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptors.java
@@ -107,7 +107,7 @@ public class TestHBCKFsTableDescriptors {
   @Test
   public void testReadingHTDFromFS() throws IOException {
     final String name = this.name.getMethodName();
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    FileSystem fs = HBCKFsUtils.getRootDirFileSystem(UTIL.getConfiguration());
     TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
     Path rootdir = UTIL.getDataTestDir(name);
     HBCKFsTableDescriptors fstd = new HBCKFsTableDescriptors(fs, rootdir);
@@ -120,7 +120,7 @@ public class TestHBCKFsTableDescriptors {
   @Test(expected = TableInfoMissingException.class)
   public void testNoSuchTable() throws IOException {
     final String name = "testNoSuchTable";
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    FileSystem fs = HBCKFsUtils.getRootDirFileSystem(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     HBCKFsTableDescriptors htds = new HBCKFsTableDescriptors(fs, rootdir);
@@ -161,7 +161,7 @@
     Path testdir = UTIL.getDataTestDir(name.getMethodName());
     final TableName name = TableName.valueOf(this.name.getMethodName());
     TableDescriptor htd = TableDescriptorBuilder.newBuilder(name).build();
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    FileSystem fs = HBCKFsUtils.getRootDirFileSystem(UTIL.getConfiguration());
     HBCKFsTableDescriptors fstd = new HBCKFsTableDescriptors(fs, testdir);
     assertTrue(fstd.createTableDescriptor(htd, false));
     assertFalse(fstd.createTableDescriptor(htd, false));
diff --git a/hbase-hbck2/src/test/java/org/apache/hbase/hbck1/TestHBaseFsck.java b/hbase-hbck2/src/test/java/org/apache/hbase/hbck1/TestHBaseFsck.java
new file mode 100644
index 0000000..10dd667
--- /dev/null
+++ b/hbase-hbck2/src/test/java/org/apache/hbase/hbck1/TestHBaseFsck.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase.hbck1;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hbase.HBCKFsUtils;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+@Category({MiscTests.class, SmallTests.class})
+public class TestHBaseFsck {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestHBaseFsck.class);
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private String defaultRootDir;
+  private String nonDefaultRootDir;
+  private FileSystem testFileSystem;
+  private LocalFileSystem localFileSystem;
+  private Configuration conf;
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(3);
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setup() throws IOException {
+    conf = TEST_UTIL.getConfiguration();
+    // the default is a hdfs directory
+    defaultRootDir = TEST_UTIL.getDataTestDirOnTestFS().toString();
+    localFileSystem = new LocalFileSystem();
+    testFileSystem = TEST_UTIL.getTestFileSystem();
+    nonDefaultRootDir =
+        TEST_UTIL.getRandomDir().makeQualified(localFileSystem.getUri(),
+            localFileSystem.getWorkingDirectory()).toString();
+  }
+
+  @Test
+  public void testHBaseRootDirWithSameFileSystemScheme() throws IOException,
+      ClassNotFoundException {
+    checkFileSystemScheme(defaultRootDir, testFileSystem.getUri().getScheme());
+  }
+
+  @Test
+  public void testHBaseRootDirWithDifferentFileSystemScheme() throws IOException,
+      ClassNotFoundException {
+    checkFileSystemScheme(nonDefaultRootDir, localFileSystem.getUri().getScheme());
+  }
+
+  private void checkFileSystemScheme(String hbaseRootDir, String expectedFsScheme)
+      throws IOException, ClassNotFoundException {
+    conf.set(HConstants.HBASE_DIR, hbaseRootDir);
+    HBaseFsck fsck = new HBaseFsck(conf);
+    String actualFsScheme = fsck.getRootFs().getScheme();
+    assertEquals(expectedFsScheme, actualFsScheme);
+  }
+
+  @Test
+  public void testFileLockCallableWithSetHBaseRootDir() throws IOException {
+    FileSystem fs = new Path(nonDefaultRootDir).getFileSystem(conf);
+    try {
+      assertNotEquals(TEST_UTIL.getTestFileSystem().getUri().getScheme(),
+          fs.getScheme());
+
+      conf.set(HConstants.HBASE_DIR, nonDefaultRootDir);
+      Path expectedLockFilePath = new Path(HBaseFsck.getTmpDir(conf), HBaseFsck.HBCK2_LOCK_FILE);
+      HBaseFsck.FileLockCallable fileLockCallable = new HBaseFsck.FileLockCallable(conf,
+          HBaseFsck.createLockRetryCounterFactory(conf).create());
+
+      assertTrue(!fs.exists(expectedLockFilePath));
+      // make a call and generate the hbck2 lock file to the non default file system
+      fileLockCallable.call();
+      assertTrue(fs.exists(expectedLockFilePath));
+    } finally {
+      HBCKFsUtils.delete(fs, new Path(nonDefaultRootDir), true);
+    }
+  }
+
+}
diff --git a/hbase-hbck2/src/test/java/org/apache/hbase/hbck1/TestHFileCorruptionChecker.java b/hbase-hbck2/src/test/java/org/apache/hbase/hbck1/TestHFileCorruptionChecker.java
new file mode 100644
index 0000000..f2278aa
--- /dev/null
+++ b/hbase-hbck2/src/test/java/org/apache/hbase/hbck1/TestHFileCorruptionChecker.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase.hbck1;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hbase.HBCKFsUtils;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.mockito.Mockito;
+
+@Category({MiscTests.class, SmallTests.class})
+public class TestHFileCorruptionChecker {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestHFileCorruptionChecker.class);
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private String defaultRootDir;
+  private String nonDefaultRootDir;
+  private String testFsScheme;
+  private String localFsScheme;
+  private Configuration conf;
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(3);
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setup() throws IOException {
+    conf = TEST_UTIL.getConfiguration();
+    // the default is a hdfs directory
+    defaultRootDir = TEST_UTIL.getDataTestDirOnTestFS().toString();
+
+    FileSystem localFileSystem = new LocalFileSystem();
+    testFsScheme = TEST_UTIL.getTestFileSystem().getUri().getScheme();
+    localFsScheme = localFileSystem.getScheme();
+    nonDefaultRootDir =
+        TEST_UTIL.getRandomDir().makeQualified(localFileSystem.getUri(),
+            localFileSystem.getWorkingDirectory()).toString();
+  }
+
+  @Test
+  public void testCheckTableDir() throws IOException {
+    checkFileSystemScheme(defaultRootDir, testFsScheme, testFsScheme);
+  }
+
+  @Test
+  public void testCheckTableDirWithNonDefaultRootDir() throws IOException {
+    checkFileSystemScheme(nonDefaultRootDir, testFsScheme, localFsScheme);
+  }
+
+  private void checkFileSystemScheme(String hbaseRootDir, String defaultFsScheme,
+      String hbaseRootFsScheme) throws IOException {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.set(HConstants.HBASE_DIR, hbaseRootDir);
+
+    // check default filesystem, should be always hdfs
+    assertEquals(defaultFsScheme, TEST_UTIL.getTestFileSystem().getUri().getScheme());
+
+    ExecutorService mockExecutor = Mockito.mock(ExecutorService.class);
+    HFileCorruptionChecker corruptionChecker =
+        new HFileCorruptionChecker(conf, mockExecutor, true);
+    // if `FSUtils.listStatusWithStatusFilter` pass, then we're using the configured HBASE_DIR
+    corruptionChecker.checkTableDir(HBCKFsUtils.getTableDir(new Path(hbaseRootDir),
+        TableName.META_TABLE_NAME));
+
+    assertEquals(hbaseRootFsScheme, corruptionChecker.fs.getScheme());
+    if (!defaultFsScheme.equalsIgnoreCase(hbaseRootFsScheme)) {
+      assertNotEquals(TEST_UTIL.getTestFileSystem().getUri().getScheme(),
+          corruptionChecker.fs.getScheme());
+    }
+  }
+}
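
[Editor's note] For readers skimming the diff, the core of HBASE-25921 is the distinction between the cluster's default filesystem (fs.defaultFS) and the filesystem that actually backs hbase.rootdir. The sketch below is illustrative only and is not part of the commit; the hdfs:// and s3a:// URIs are hypothetical stand-ins for a deployment where HBase data lives outside HDFS, and running it assumes the HDFS and S3A FileSystem implementations are on the classpath.

// Illustrative sketch (not from this commit); URIs are hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RootDirFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://namenode:8020");   // cluster default filesystem
    conf.set("hbase.rootdir", "s3a://bucket/hbase");    // HBase data on non-HDFS storage

    // What the old code did: resolve against fs.defaultFS, yielding HDFS.
    FileSystem defaultFs = FileSystem.get(conf);

    // What the patched code does (mirroring HBCKFsUtils.getRootDirFileSystem):
    // resolve the filesystem from the rootdir URI itself, yielding S3A.
    FileSystem rootFs = new Path(conf.get("hbase.rootdir")).getFileSystem(conf);

    System.out.println(defaultFs.getScheme()); // "hdfs" -- the wrong place to look for HBase files
    System.out.println(rootFs.getScheme());    // "s3a"  -- where hbase.rootdir actually lives
  }
}

This is also what the new tests assert: HBaseFsck.getRootFs() and HFileCorruptionChecker.fs must report the scheme of hbase.rootdir, not of the default filesystem.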