hbase-commits mailing list archives

From: j...@apache.org
Subject: [hbase] branch branch-2.1 updated: HBASE-23686 Revert binary incompatible change in ByteRangeUtils and removed reflections in CommonFSUtils
Date: Fri, 24 Jan 2020 21:01:08 GMT
This is an automated email from the ASF dual-hosted git repository.

janh pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new 00555d2  HBASE-23686 Revert binary incompatible change in ByteRangeUtils and removed reflections in CommonFSUtils
00555d2 is described below

commit 00555d2fe4bdbdf21a121370aefc8ea48a2c74f0
Author: Jan Hentschel <janh@apache.org>
AuthorDate: Fri Jan 24 20:28:01 2020 +0100

    HBASE-23686 Revert binary incompatible change in ByteRangeUtils and removed reflections in CommonFSUtils
    
    Signed-off-by: Sean Busbey <busbey@apache.org>
---
 .../resources/hbase/checkstyle-suppressions.xml    |  4 ++
 .../java/org/apache/hadoop/hbase/net/Address.java  |  2 +-
 .../apache/hadoop/hbase/util/ByteRangeUtils.java   |  5 +-
 .../apache/hadoop/hbase/util/CommonFSUtils.java    | 67 ++++------------------
 4 files changed, 16 insertions(+), 62 deletions(-)
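
For context: ByteRangeUtils and Address are annotated @InterfaceAudience.Public, so downstream
code compiled against an earlier release may subclass them or rely on the implicit public
constructor. Making the constructor private or the class final breaks such binaries at link
time rather than at compile time, which is the incompatibility this commit reverts. A minimal
sketch of the failure mode, using a hypothetical downstream class (not part of the commit):

    // Hypothetical downstream code, compiled against an hbase-common jar in
    // which ByteRangeUtils still has an implicit public no-arg constructor.
    public class MyRangeUtils extends org.apache.hadoop.hbase.util.ByteRangeUtils {
      // The compiler generates a constructor that calls super(). If a newer
      // jar on the classpath makes the ByteRangeUtils constructor private,
      // loading this class fails with IllegalAccessError; if ByteRangeUtils
      // is made final, class verification fails with VerifyError (JLS 13.4.2).
      // Reverting both changes keeps existing binaries linking cleanly.
    }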

diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
index b83b468..de5385c 100644
--- a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
+++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
@@ -46,4 +46,8 @@
   <suppress checks="EmptyBlock" files="org.apache.hadoop.hbase.TestTimeout"/>
   <suppress checks="InnerAssignment" files="org.apache.hadoop.hbase.rest.PerformanceEvaluation"/>
   <suppress checks="EmptyBlock" files="org.apache.hadoop.hbase.rest.PerformanceEvaluation"/>
+  <!-- Will not have a private constructor, because it is InterfaceAudience.Public -->
+  <suppress checks="HideUtilityClassConstructor" files="org.apache.hadoop.hbase.util.ByteRangeUtils"/>
+  <!-- Will not be final, because it is InterfaceAudience.Public -->
+  <suppress checks="FinalClass" files="org.apache.hadoop.hbase.net.Address"/>
 </suppressions>
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
index d76ef9f..48fa522 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
@@ -31,7 +31,7 @@ import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort;
  * We cannot have Guava classes in our API hence this Type.
  */
 @InterfaceAudience.Public
-public final class Address implements Comparable<Address> {
+public class Address implements Comparable<Address> {
   private HostAndPort hostAndPort;
 
   private Address(HostAndPort hostAndPort) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
index fb0b336..9acfa26 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
@@ -30,10 +30,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
  * Utility methods for working with {@link ByteRange}.
  */
 @InterfaceAudience.Public
-public final class ByteRangeUtils {
-  private ByteRangeUtils() {
-  }
-
+public class ByteRangeUtils {
   public static int numEqualPrefixBytes(ByteRange left, ByteRange right, int rightInnerOffset) {
     int maxCompares = Math.min(left.getLength(), right.getLength() - rightInnerOffset);
     final byte[] lbytes = left.getBytes();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index 6a9f73d..2384d14 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -148,69 +148,22 @@ public abstract class CommonFSUtils {
   * Return the number of bytes that large input files should optimally
   * be split into to minimize i/o time.
    *
-   * use reflection to search for getDefaultBlockSize(Path f)
-   * if the method doesn't exist, fall back to using getDefaultBlockSize()
-   *
    * @param fs filesystem object
    * @return the default block size for the path's filesystem
-   * @throws IOException e
    */
-  public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
-    Method m = null;
-    Class<? extends FileSystem> cls = fs.getClass();
-    try {
-      m = cls.getMethod("getDefaultBlockSize", Path.class);
-    } catch (NoSuchMethodException e) {
-      LOG.info("FileSystem doesn't support getDefaultBlockSize");
-    } catch (SecurityException e) {
-      LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
-      m = null; // could happen on setAccessible()
-    }
-    if (m == null) {
-      return fs.getDefaultBlockSize(path);
-    } else {
-      try {
-        Object ret = m.invoke(fs, path);
-        return ((Long)ret).longValue();
-      } catch (Exception e) {
-        throw new IOException(e);
-      }
-    }
+  public static long getDefaultBlockSize(final FileSystem fs, final Path path) {
+    return fs.getDefaultBlockSize(path);
   }
 
   /*
    * Get the default replication.
    *
-   * use reflection to search for getDefaultReplication(Path f)
-   * if the method doesn't exist, fall back to using getDefaultReplication()
-   *
    * @param fs filesystem object
    * @param f path of file
    * @return default replication for the path's filesystem
-   * @throws IOException e
    */
-  public static short getDefaultReplication(final FileSystem fs, final Path path)
-      throws IOException {
-    Method m = null;
-    Class<? extends FileSystem> cls = fs.getClass();
-    try {
-      m = cls.getMethod("getDefaultReplication", Path.class);
-    } catch (NoSuchMethodException e) {
-      LOG.info("FileSystem doesn't support getDefaultReplication");
-    } catch (SecurityException e) {
-      LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
-      m = null; // could happen on setAccessible()
-    }
-    if (m == null) {
-      return fs.getDefaultReplication(path);
-    } else {
-      try {
-        Object ret = m.invoke(fs, path);
-        return ((Number)ret).shortValue();
-      } catch (Exception e) {
-        throw new IOException(e);
-      }
-    }
+  public static short getDefaultReplication(final FileSystem fs, final Path path) {
+    return fs.getDefaultReplication(path);
   }
 
   /**
@@ -623,15 +576,15 @@ public abstract class CommonFSUtils {
           final Throwable exception = e.getCause();
           if (exception instanceof RemoteException &&
               HadoopIllegalArgumentException.class.getName().equals(
-                ((RemoteException)exception).getClassName())) {
+                  ((RemoteException)exception).getClassName())) {
             if (LOG.isDebugEnabled()) {
               LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected and probably " +
-                "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
-                "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
-                "more information see the 'ArchivalStorage' docs for your Hadoop release.");
+                  "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
+                  "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
+                  "more information see the 'ArchivalStorage' docs for your Hadoop release.");
             }
-          // Hadoop 2.8+, 3.0-a1+ added FileSystem.setStoragePolicy with a default implementation
-          // that throws UnsupportedOperationException
+            // Hadoop 2.8+, 3.0-a1+ added FileSystem.setStoragePolicy with a default implementation
+            // that throws UnsupportedOperationException
           } else if (exception instanceof UnsupportedOperationException) {
             if (LOG.isDebugEnabled()) {
               LOG.debug("The underlying FileSystem implementation doesn't support " +

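The reflection removed above existed to probe older Hadoop releases for the Path-taking
overloads of getDefaultBlockSize and getDefaultReplication. Every Hadoop version supported by
branch-2.1 ships those overloads, so the direct calls are equivalent and the two helpers no
longer need to declare IOException. A minimal usage sketch of the new call path; CommonFSUtils
is an HBase-internal utility, so the standalone class and the path below are purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.CommonFSUtils;

    public class FsDefaultsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/example");      // illustrative path
        FileSystem fs = path.getFileSystem(conf);  // may throw IOException
        // Both helpers now delegate straight to the FileSystem overloads
        // that take a Path; no Method lookup, no reflective invoke.
        long blockSize = CommonFSUtils.getDefaultBlockSize(fs, path);
        short replication = CommonFSUtils.getDefaultReplication(fs, path);
        System.out.println("blockSize=" + blockSize + " replication=" + replication);
      }
    }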
