hbase-commits mailing list archives

From bus...@apache.org
Subject hbase git commit: HBASE-13498 Add more docs and a basic check for storage policy handling.
Date Mon, 20 Apr 2015 16:20:42 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4001492e7 -> b924c7564


HBASE-13498 Add more docs and a basic check for storage policy handling.

Conflicts:
	hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b924c756
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b924c756
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b924c756

Branch: refs/heads/branch-1
Commit: b924c7564f3dc61e853a1d01a8a6857468e4002f
Parents: 4001492
Author: Sean Busbey <busbey@apache.org>
Authored: Mon Apr 20 00:31:57 2015 -0500
Committer: Sean Busbey <busbey@apache.org>
Committed: Mon Apr 20 10:54:05 2015 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/HConstants.java     |  6 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |  2 +
 .../org/apache/hadoop/hbase/util/FSUtils.java   | 59 ++++++++++++++++----
 .../apache/hadoop/hbase/util/TestFSUtils.java   | 49 ++++++++++++++++
 4 files changed, 102 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b924c756/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index a607310..2ec2eff 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -939,9 +939,9 @@ public final class HConstants {
 
   /** Configuration name of WAL storage policy
    * Valid values are:
-   *  NONE: no preference in destination of replicas
-   *  ONE_SSD: place only one replica in SSD and the remaining in default storage
-   *  and ALL_SSD: place all replica on SSD
+   *  NONE: no preference in destination of block replicas
+   *  ONE_SSD: place only one block replica in SSD and the remaining in default storage
+   *  and ALL_SSD: place all block replicas on SSD
    *
    * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html*/
   public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
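
For illustration only (not part of this patch), a minimal sketch of selecting one of the
documented policies programmatically. The property name and values come from the javadoc
above; the rest is assumed setup, and in a real deployment the property would normally be
set in hbase-site.xml instead:

// Hypothetical example: choose a WAL storage policy before starting HBase.
import org.apache.hadoop.conf.Configuration;

public class WalPolicyConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // One block replica on SSD, the remainder on default storage.
    conf.set("hbase.wal.storage.policy", "ONE_SSD");
    // Alternatives documented above:
    // conf.set("hbase.wal.storage.policy", "ALL_SSD"); // all block replicas on SSD
    // conf.set("hbase.wal.storage.policy", "NONE");    // no placement preference
  }
}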

http://git-wip-us.apache.org/repos/asf/hbase/blob/b924c756/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 6eb62d1..37daa11 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -492,6 +492,8 @@ public class FSHLog implements WAL {
       throw new IllegalArgumentException("wal suffix must start with '" + WAL_FILE_NAME_DELIMITER +
           "' but instead was '" + suffix + "'");
     }
+    // Now that it exists, set the storage policy for the entire directory of wal files related to
+    // this FSHLog instance
     FSUtils.setStoragePolicy(fs, conf, this.fullPathLogDir, HConstants.WAL_STORAGE_POLICY,
       HConstants.DEFAULT_WAL_STORAGE_POLICY);
     this.logFileSuffix = (suffix == null) ? "" : URLEncoder.encode(suffix, "UTF8");
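
A minimal sketch of the same call pattern FSHLog uses above, assuming a hypothetical
directory path and the surrounding wiring; files created under the directory afterwards
pick up the configured placement preference:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.FSUtils;

public class WalDirPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(HConstants.WAL_STORAGE_POLICY, "ONE_SSD");
    FileSystem fs = FileSystem.get(conf);
    Path walDir = new Path("/hbase/WALs/example");  // hypothetical WAL directory
    fs.mkdirs(walDir);
    // Same call FSHLog makes above, once the directory exists.
    FSUtils.setStoragePolicy(fs, conf, walDir, HConstants.WAL_STORAGE_POLICY,
        HConstants.DEFAULT_WAL_STORAGE_POLICY);
  }
}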

http://git-wip-us.apache.org/repos/asf/hbase/blob/b924c756/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index aa06c20..85a78bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -45,6 +45,7 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -101,20 +102,34 @@ public abstract class FSUtils {
     super();
   }
 
-  /*
-   * Sets storage policy for given path according to config setting
-   * @param fs
-   * @param conf
+  /**
+   * Sets storage policy for given path according to config setting.
+   * If the passed path is a directory, we'll set the storage policy for all files
+   * created in the future in said directory. Note that this change in storage
+   * policy takes place at the HDFS level; it will persist beyond this RS's lifecycle.
+   * If we're running on a version of HDFS that doesn't support the given storage policy
+   * (or storage policies at all), then we'll issue a log message and continue.
+   *
+   * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html
+   *
+   * @param fs We only do anything if an instance of DistributedFileSystem
+   * @param conf used to look up storage policy with given key; not modified.
    * @param path the Path whose storage policy is to be set
-   * @param policyKey
-   * @param defaultPolicy
+   * @param policyKey e.g. HConstants.WAL_STORAGE_POLICY
+   * @param defaultPolicy usually should be the policy NONE to delegate to HDFS
    */
   public static void setStoragePolicy(final FileSystem fs, final Configuration conf,
       final Path path, final String policyKey, final String defaultPolicy) {
     String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase();
-    if (!storagePolicy.equals(defaultPolicy) &&
-        fs instanceof DistributedFileSystem) {
+    if (storagePolicy.equals(defaultPolicy)) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("default policy of " + defaultPolicy + " requested, exiting early.");
+      }
+      return;
+    }
+    if (fs instanceof DistributedFileSystem) {
       DistributedFileSystem dfs = (DistributedFileSystem)fs;
+      // Once our minimum supported Hadoop version is 2.6.0 we can remove reflection.
       Class<? extends DistributedFileSystem> dfsClass = dfs.getClass();
       Method m = null;
       try {
@@ -123,10 +138,10 @@ public abstract class FSUtils {
         m.setAccessible(true);
       } catch (NoSuchMethodException e) {
         LOG.info("FileSystem doesn't support"
-            + " setStoragePolicy; --HDFS-7228 not available");
+            + " setStoragePolicy; --HDFS-6584 not available");
       } catch (SecurityException e) {
         LOG.info("Doesn't have access to setStoragePolicy on "
-            + "FileSystems --HDFS-7228 not available", e);
+            + "FileSystems --HDFS-6584 not available", e);
         m = null; // could happen on setAccessible()
       }
       if (m != null) {
@@ -134,9 +149,31 @@ public abstract class FSUtils {
           m.invoke(dfs, path, storagePolicy);
           LOG.info("set " + storagePolicy + " for " + path);
         } catch (Exception e) {
-          LOG.warn("Unable to set " + storagePolicy + " for " + path, e);
+          // check for lack of HDFS-7228
+          boolean probablyBadPolicy = false;
+          if (e instanceof InvocationTargetException) {
+            final Throwable exception = e.getCause();
+            if (exception instanceof RemoteException &&
+                HadoopIllegalArgumentException.class.getName().equals(
+                    ((RemoteException)exception).getClassName())) {
+              LOG.warn("Given storage policy, '" + storagePolicy + "', was rejected and probably
" +
+                  "isn't a valid policy for the version of Hadoop you're running. I.e. if
you're " +
+                  "trying to use SSD related policies then you're likely missing HDFS-7228.
For " +
+                  "more information see the 'ArchivalStorage' docs for your Hadoop release.");
+              LOG.debug("More information about the invalid storage policy.", exception);
+              probablyBadPolicy = true;
+            }
+          }
+          if (!probablyBadPolicy) {
+            // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
+            // misuse than a runtime problem with HDFS.
+            LOG.warn("Unable to set " + storagePolicy + " for " + path, e);
+          }
         }
       }
+    } else {
+      LOG.info("FileSystem isn't an instance of DistributedFileSystem; presuming it doesn't
" +
+          "support setStoragePolicy.");
     }
   }
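
As the inline comment notes, the reflection can go once the minimum supported Hadoop
version is 2.6.0; at that point the call collapses to the direct API added by HDFS-6584.
A minimal sketch, assuming a 2.6.0+ client:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DirectSetStoragePolicySketch {
  // Direct equivalent of the reflective call above on Hadoop 2.6.0+ clients.
  static void setPolicy(FileSystem fs, Path path, String policy) throws IOException {
    if (fs instanceof DistributedFileSystem) {
      // Unknown policy names are rejected by the NameNode, surfacing roughly the
      // HadoopIllegalArgumentException case handled above.
      ((DistributedFileSystem) fs).setStoragePolicy(path, policy);
    }
  }
}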
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b924c756/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index 2a13b7d..c501477 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -337,4 +337,53 @@ public class TestFSUtils {
       EnvironmentEdgeManager.reset();
     }
   }
+
+  private void verifyFileInDirWithStoragePolicy(final String policy) throws Exception {
+    HBaseTestingUtility htu = new HBaseTestingUtility();
+    Configuration conf = htu.getConfiguration();
+    conf.set(HConstants.WAL_STORAGE_POLICY, policy);
+
+    MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
+    try {
+      assertTrue(FSUtils.isHDFS(conf));
+
+      FileSystem fs = FileSystem.get(conf);
+      Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
+      fs.mkdirs(testDir);
+
+      FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY,
+          HConstants.DEFAULT_WAL_STORAGE_POLICY);
+
+      String file = UUID.randomUUID().toString();
+      Path p = new Path(testDir, file);
+      WriteDataToHDFS(fs, p, 4096);
+      // will assert existence before deleting.
+      cleanupFile(fs, testDir);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testSetStoragePolicyDefault() throws Exception {
+    verifyFileInDirWithStoragePolicy(HConstants.DEFAULT_WAL_STORAGE_POLICY);
+  }
+
+  /* might log a warning, but still work. (always warning on Hadoop < 2.6.0) */
+  @Test
+  public void testSetStoragePolicyValidButMaybeNotPresent() throws Exception {
+    verifyFileInDirWithStoragePolicy("ALL_SSD");
+  }
+
+  /* should log a warning, but still work. (different warning on Hadoop < 2.6.0) */
+  @Test
+  public void testSetStoragePolicyInvalid() throws Exception {
+    verifyFileInDirWithStoragePolicy("1772");
+  }
+
+  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    assertTrue(fileSys.delete(name, true));
+    assertTrue(!fileSys.exists(name));
+  }
 }
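
The helper above only verifies that writes still succeed after the policy call; it never
reads the policy back, since the Hadoop versions branch-1 supports expose no portable API
for that. A hedged sketch of an optional read-back in the same reflective spirit as the
production code, assuming FileSystem#getStoragePolicy exists on the client (it only does
on newer Hadoop releases; when absent, the result is treated as unknown):

import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StoragePolicyReadBackSketch {
  // Returns the policy name for path, or null when the client can't tell us.
  static String policyNameOrNull(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("getStoragePolicy", Path.class);
      Object policy = m.invoke(fs, path);
      Method name = policy.getClass().getMethod("getName");
      name.setAccessible(true);
      return (String) name.invoke(policy);
    } catch (Exception e) {
      return null; // method missing or call rejected; treat the policy as unknown
    }
  }
}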

