hbase-commits mailing list archives

From: st...@apache.org
Subject: git commit: HBASE-12145 Fix javadoc and findbugs so new folks aren't freaked when they see them
Date: Wed, 01 Oct 2014 23:57:33 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1 0fb931672 -> 9f33edab0


HBASE-12145 Fix javadoc and findbugs so new folks aren't freaked when they see them

Fix javadoc warnings.

Fix up findbugs warnings, mostly by adding annotations saying 'working as expected'.

In RpcRetryingCallerWithReadReplicas, made the following change, which findbugs spotted:

-        if (completed == null) tasks.wait();
+        while (completed == null) tasks.wait();
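
For reference, the 'while' form matters because Object.wait() can return
spuriously, so the condition must be re-checked before proceeding. A minimal,
self-contained sketch of the idiom (hypothetical GuardedWait class, not the
actual HBase code):

    // Producer/consumer handoff using the guarded-wait idiom.
    public class GuardedWait<T> {
      private final Object lock = new Object();
      private T completed; // set once by a producer thread

      public void complete(T value) {
        synchronized (lock) {
          completed = value;
          lock.notifyAll();
        }
      }

      public T take() throws InterruptedException {
        synchronized (lock) {
          // 'while', not 'if': wait() may wake without a notify, so the
          // condition is re-checked every time before returning.
          while (completed == null) {
            lock.wait();
          }
          return completed;
        }
      }
    }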

In RecoverableZooKeeper, made all zk accesses synchronized -- previously we
were only doing it halfway.
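
A minimal sketch of the pattern (hypothetical ZkHandle wrapper, not the real
class): every read and write of the mutable zk handle holds the same monitor,
so a reconnect performed by one thread is safely published to all others.

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.ZooKeeper.States;

    public class ZkHandle {
      private ZooKeeper zk; // replaced on reconnect

      public synchronized void set(ZooKeeper newZk) {
        this.zk = newZk;
      }

      public synchronized States getState() {
        return zk == null ? null : zk.getState();
      }

      public synchronized void close() throws InterruptedException {
        if (zk != null) zk.close();
      }
    }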

In RatioBasedCompactionPolicy, we were creating a new instance of Random on
each invocation of getNextMajorCompactTime; now a single shared instance is
reused.
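
A simplified before/after of that change (hypothetical JitterExample class;
the real method lives in RatioBasedCompactionPolicy):

    import java.util.Random;

    public class JitterExample {
      // One shared instance instead of 'new Random(seed)' per invocation.
      private final Random random = new Random();

      public double deterministicJitter(long seed) {
        // setSeed + nextDouble must be atomic so concurrent callers cannot
        // interleave and break the deterministic sequence.
        synchronized (this) {
          random.setSeed(seed);
          return random.nextDouble();
        }
      }
    }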

Conflicts:
	hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowTooBigException.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9f33edab
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9f33edab
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9f33edab

Branch: refs/heads/branch-1
Commit: 9f33edab0e5f48a8ba404b5a2be42be901d4cdfc
Parents: 0fb9316
Author: stack <stack@apache.org>
Authored: Wed Oct 1 16:37:13 2014 -0700
Committer: stack <stack@apache.org>
Committed: Wed Oct 1 16:57:13 2014 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/client/ConnectionManager.java  |  2 -
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  6 +-
 .../RpcRetryingCallerWithReadReplicas.java      |  8 ++-
 .../hadoop/hbase/protobuf/ProtobufUtil.java     | 63 ++++++++++----------
 .../replication/ReplicationPeersZKImpl.java     | 11 +++-
 .../hbase/zookeeper/RecoverableZooKeeper.java   | 14 +++--
 .../apache/hadoop/hbase/util/PrettyPrinter.java |  3 +-
 .../codec/prefixtree/PrefixTreeSeeker.java      |  1 -
 .../hadoop/hbase/io/hfile/HFileBlock.java       |  5 +-
 .../hbase/io/hfile/HFilePrettyPrinter.java      |  2 +-
 .../hbase/mapreduce/HFileOutputFormat2.java     |  2 +
 .../hbase/mapreduce/TableInputFormatBase.java   | 22 +++----
 .../hbase/mapreduce/TableRecordReader.java      |  2 +-
 .../hbase/master/balancer/BaseLoadBalancer.java |  4 ++
 .../hbase/regionserver/CompactSplitThread.java  |  2 +
 .../hbase/regionserver/RowTooBigException.java  |  1 -
 .../hadoop/hbase/regionserver/StoreUtils.java   |  1 -
 .../compactions/RatioBasedCompactionPolicy.java | 21 +++++--
 .../regionserver/ReplicationSource.java         |  2 +
 .../hadoop/hbase/rest/SchemaResource.java       |  2 +
 .../hadoop/hbase/util/RegionSplitter.java       |  2 +-
 21 files changed, 101 insertions(+), 75 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 29104c0..398d209 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -819,7 +819,6 @@ class ConnectionManager {
 
     /**
      * An identifier that will remain the same for a given connection.
-     * @return
      */
     @Override
     public String toString(){
@@ -2467,7 +2466,6 @@ class ConnectionManager {
     /**
      * Connects to the master to get the table descriptor.
      * @param tableName table name
-     * @return
      * @throws IOException if the connection to master fails or if the table
      *  is not found.
      */

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index fe82165..5d559d8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -186,7 +186,7 @@ public class HBaseAdmin implements Admin {
 
   /**
    * Constructor.
-   * See {@link #HBaseAdmin(HConnection connection)}
+   * See {@link #HBaseAdmin(Connection connection)}
    *
    * @param c Configuration object. Copied internally.
    */
@@ -205,10 +205,10 @@ public class HBaseAdmin implements Admin {
 
 
   /**
-   * Constructor for externally managed HConnections.
+   * Constructor for externally managed Connections.
    * The connection to master will be created when required by admin functions.
    *
-   * @param connection The HConnection instance to use
+   * @param connection The Connection instance to use
    * @throws MasterNotRunningException, ZooKeeperConnectionException are not
    *  thrown anymore but kept into the interface for backward api compatibility
    * @deprecated Do not use this internal ctor.

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index ca59556..4c5814e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -378,6 +378,8 @@ public class RpcRetryingCallerWithReadReplicas {
         }
       }
 
+      @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="RCN_REDUNDANT_NULLCHECK_OF_NULL_VALUE",
+          justification="Is this an issue?")
       @Override
       public Result get(long timeout, TimeUnit unit)
           throws InterruptedException, ExecutionException, TimeoutException {
@@ -390,7 +392,7 @@ public class RpcRetryingCallerWithReadReplicas {
           }
           unit.timedWait(tasks, timeout);
         }
-
+        // Findbugs says this null check is redundant.  Will result be set across the wait above?
         if (result != null) {
           return result;
         }
@@ -398,7 +400,7 @@ public class RpcRetryingCallerWithReadReplicas {
           throw exeEx;
         }
 
-        throw new TimeoutException();
+        throw new TimeoutException("timeout=" + timeout + ", " + unit);
       }
     }
 
@@ -416,7 +418,7 @@ public class RpcRetryingCallerWithReadReplicas {
 
     public QueueingFuture take() throws InterruptedException {
       synchronized (tasks) {
-        if (completed == null) tasks.wait();
+        while (completed == null) tasks.wait();
       }
       return completed;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 86445d9..2195c1e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -18,17 +18,25 @@
 package org.apache.hadoop.hbase.protobuf;
 
 
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ListMultimap;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.InvalidProtocolBufferException;
-import com.google.protobuf.Message;
-import com.google.protobuf.Parser;
-import com.google.protobuf.RpcChannel;
-import com.google.protobuf.Service;
-import com.google.protobuf.ServiceException;
-import com.google.protobuf.TextFormat;
+import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableSet;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -40,7 +48,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -80,7 +87,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest
 import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
 import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
@@ -110,6 +116,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescripto
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
@@ -131,27 +138,23 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.lang.reflect.ParameterizedType;
-import java.lang.reflect.Type;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.NavigableSet;
-
-import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ListMultimap;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.Message;
+import com.google.protobuf.Parser;
+import com.google.protobuf.RpcChannel;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
+import com.google.protobuf.TextFormat;
 
 /**
  * Protobufs utility.
  */
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED",
+  justification="None. Address sometime.")
 public final class ProtobufUtil {
 
   private ProtobufUtil() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index a18d8e8..dce0903 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.replication;
 
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -391,8 +390,14 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
     if (peer == null) {
       return false;
     }
-    ((ConcurrentMap<String, ReplicationPeerZKImpl>) peerClusters).putIfAbsent(peerId, peer);
-    LOG.info("Added new peer cluster " + peer.getPeerConfig().getClusterKey());
+    ReplicationPeerZKImpl previous =
+      ((ConcurrentMap<String, ReplicationPeerZKImpl>) peerClusters).putIfAbsent(peerId, peer);
+    if (previous == null) {
+      LOG.info("Added new peer cluster=" + peer.getPeerConfig().getClusterKey());
+    } else {
+      LOG.info("Peer already present, " + previous.getPeerConfig().getClusterKey() +
+        ", new cluster=" + peer.getPeerConfig().getClusterKey());
+    }
     return true;
   }
 

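The findbugs complaint above is that putIfAbsent's return value was being
ignored, so the old log line could claim a peer was added even when another
thread had already won the race. A stripped-down sketch of the check
(hypothetical types):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class PutIfAbsentExample {
      private final ConcurrentMap<String, String> peers =
        new ConcurrentHashMap<String, String>();

      public void addPeer(String id, String clusterKey) {
        // putIfAbsent returns the previous mapping, or null if this call
        // actually inserted one; only log "added" in that case.
        String previous = peers.putIfAbsent(id, clusterKey);
        if (previous == null) {
          System.out.println("Added new peer cluster=" + clusterKey);
        } else {
          System.out.println("Peer already present, " + previous);
        }
      }
    }
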
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
index fc854c6..a72f49c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
@@ -105,6 +105,8 @@ public class RecoverableZooKeeper {
         null);
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE",
+      justification="None. Its always been this way.")
   public RecoverableZooKeeper(String quorumServers, int sessionTimeout,
       Watcher watcher, int maxRetries, int retryIntervalMillis, String identifier)
   throws IOException {
@@ -690,23 +692,23 @@ public class RecoverableZooKeeper {
     return newData;
   }
 
-  public long getSessionId() {
-    return zk == null ? null : zk.getSessionId();
+  public synchronized long getSessionId() {
+    return zk == null ? -1 : zk.getSessionId();
   }
 
-  public void close() throws InterruptedException {
+  public synchronized void close() throws InterruptedException {
     if (zk != null) zk.close();
   }
 
-  public States getState() {
+  public synchronized States getState() {
     return zk == null ? null : zk.getState();
   }
 
-  public ZooKeeper getZooKeeper() {
+  public synchronized ZooKeeper getZooKeeper() {
     return zk;
   }
 
-  public byte[] getSessionPasswd() {
+  public synchronized byte[] getSessionPasswd() {
     return zk == null ? null : zk.getSessionPasswd();
   }
 

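One detail on the getSessionId change: the old 'return zk == null ? null :
zk.getSessionId();' compiled because the ternary boxes to Long, but returning
that from a method declared long auto-unboxes and throws NullPointerException
whenever zk was null; the -1 sentinel avoids the boxing entirely. A
stripped-down illustration (hypothetical class):

    public class UnboxingPitfall {
      private Object handle; // stands in for the zk field

      // Before: 'return handle == null ? null : 42L;' compiles -- the
      // ternary's type is Long -- but unboxing the null return throws NPE.

      // After: an explicit primitive sentinel, no boxing involved.
      public long sessionId() {
        return handle == null ? -1 : 42L;
      }
    }
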
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
index 48de281..cf11de1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
@@ -44,7 +44,8 @@ public class PrettyPrinter {
     return human.toString();
   }
 
-
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG",
+      justification="Will not overflow")
   private static String humanReadableTTL(final long interval){
     StringBuilder sb = new StringBuilder();
     int days, hours, minutes, seconds;

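The warning suppressed above flags arithmetic where int operands are
multiplied in 32 bits and only then widened to long, which can overflow
before the widening; the justification asserts the TTL values here cannot
overflow. The general fix is to widen one operand first, as this small
illustration (hypothetical values) shows:

    public class IntMultiplyOverflow {
      public static void main(String[] args) {
        int days = 30;
        int msPerDay = 86_400_000;               // milliseconds in a day
        long wrong = days * msPerDay;            // 32-bit multiply overflows first
        long right = days * (long) msPerDay;     // widened before multiplying
        System.out.println(wrong + " vs " + right); // -1702967296 vs 2592000000
      }
    }
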
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
index 006ab29..012b3e5 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
@@ -45,7 +45,6 @@ public class PrefixTreeSeeker implements EncodedSeeker {
   protected ByteBuffer block;
   protected boolean includeMvccVersion;
   protected PrefixTreeArraySearcher ptSearcher;
-  protected boolean movedToPrevious = false;
 
   public PrefixTreeSeeker(boolean includeMvccVersion) {
     this.includeMvccVersion = includeMvccVersion;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 246e947..6341f2d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -28,13 +28,12 @@ import java.nio.ByteBuffer;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -62,7 +61,7 @@ import com.google.common.base.Preconditions;
  * information from the block index are required to read a block.
  * <li>In version 2 a block is structured as follows:
  * <ul>
- * <li>header (see {@link Writer#finishBlock()})
+ * <li>header (see Writer#finishBlock())
  * <ul>
  * <li>Magic record identifying the block type (8 bytes)
  * <li>Compressed block size, excluding header, including checksum (4 bytes)

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index bde6282..0021cf4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -538,7 +538,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
     @Override
     public void processHistogram(MetricName name, Histogram histogram, PrintStream stream) {
       super.processHistogram(name, histogram, stream);
-      stream.printf(Locale.getDefault(), "             count = %d\n", histogram.count());
+      stream.printf(Locale.getDefault(), "             count = %d%n", histogram.count());
     }
   }
 

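Context for the %n change: in printf-style format strings, %n expands to the
platform line separator while a literal \n is always a bare LF, which is what
findbugs flags. A one-method illustration:

    public class NewlineExample {
      public static void main(String[] args) {
        System.out.printf("count = %d%n", 42); // platform separator (CRLF on Windows)
        System.out.printf("count = %d\n", 42); // always a bare LF
      }
    }
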
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index a46ed9f..f8f9b4d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -218,6 +218,8 @@ public class HFileOutputFormat2
        * @return A WriterLength, containing a new StoreFile.Writer.
        * @throws IOException
        */
+      @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED",
+          justification="Not important")
       private WriterLength getNewWriter(byte[] family, Configuration conf)
           throws IOException {
         WriterLength wl = new WriterLength();

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index 3988ae3..c196eed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -65,7 +65,7 @@ import org.apache.hadoop.util.StringUtils;
  *   class ExampleTIF extends TableInputFormatBase implements JobConfigurable {
  *
  *     public void configure(JobConf job) {
- *       Connection connection = 
+ *       Connection connection =
  *          ConnectionFactory.createConnection(HBaseConfiguration.create(job));
  *       TableName tableName = TableName.valueOf("exampleTable");
  *       // mandatory
@@ -91,7 +91,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
 
   final Log LOG = LogFactory.getLog(TableInputFormatBase.class);
 
-  /** Holds the details for the internal scanner. 
+  /** Holds the details for the internal scanner.
    *
    * @see Scan */
   private Scan scan = null;
@@ -106,10 +106,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
   /** The reverse DNS lookup cache mapping: IPAddress => HostName */
   private HashMap<InetAddress, String> reverseDNSCacheMap =
     new HashMap<InetAddress, String>();
-  
-  /** The NameServer address */
-  private String nameServer = null;
-  
+
   /**
    * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses
    * the default.
@@ -161,9 +158,6 @@ extends InputFormat<ImmutableBytesWritable, Result> {
     if (table == null) {
       throw new IOException("No table was provided.");
     }
-    // Get the name server address and the default value is null.
-    this.nameServer =
-      context.getConfiguration().get("hbase.nameserver.address", null);
 
     RegionSizeCalculator sizeCalculator = new RegionSizeCalculator((HTable) table);
 
@@ -229,7 +223,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
     }
     return splits;
   }
-  
+
   public String reverseDNS(InetAddress ipAddress) throws NamingException, UnknownHostException {
     String hostName = this.reverseDNSCacheMap.get(ipAddress);
     if (hostName == null) {
@@ -277,8 +271,8 @@ extends InputFormat<ImmutableBytesWritable, Result> {
 
   /**
    * Allows subclasses to get the {@link HTable}.
-   * 
-   * @deprecated Use {@link #getTable()} and {@link #getRegionLocator()} instead.
+   *
+   * @deprecated
    */
   @Deprecated
   protected HTable getHTable() {
@@ -287,8 +281,8 @@ extends InputFormat<ImmutableBytesWritable, Result> {
 
   /**
    * Allows subclasses to set the {@link HTable}.
-   * 
-   * @param table  The {@link HTable} to get the data from.
+   *
+   * @param table  The table to get the data from.
    * @deprecated Use {@link #initializeTable(Connection, TableName)} instead.
    */
   @Deprecated

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
index 1de7676..f46f1e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
@@ -55,7 +55,7 @@ extends RecordReader<ImmutableBytesWritable, Result> {
   /**
    * Sets the HBase table.
    *
-   * @param htable  The {@link HTable} to scan.
+   * @param htable  The table to scan.
    * @deprecated Use setTable() instead.
    */
   @Deprecated

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 6522e6e..acd3a05 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -538,6 +538,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
       }
     }
 
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NM_FIELD_NAMING_CONVENTION",
+        justification="Mistake. Too disruptive to change now")
     public static Action NullAction = new Action(Type.NULL);
 
     public void doAction(Action action) {
@@ -783,6 +785,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
       }
     };
 
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SBSC_USE_STRINGBUFFER_CONCATENATION",
+        justification="Not important but should be fixed")
     @Override
     public String toString() {
       String desc = "Cluster{" +

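The SBSC suppression above covers string concatenation with += in a loop,
which re-copies the accumulated string on every pass (quadratic overall); the
deferred fix is a StringBuilder. A minimal contrast (hypothetical methods):

    public class ConcatInLoop {
      static String slow(String[] parts) {
        String desc = "Cluster{";
        for (String p : parts) {
          desc += p + ", "; // allocates and copies the whole string each pass
        }
        return desc + "}";
      }

      static String fast(String[] parts) {
        StringBuilder desc = new StringBuilder("Cluster{");
        for (String p : parts) {
          desc.append(p).append(", "); // amortized constant-time append
        }
        return desc.append("}").toString();
      }
    }
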
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index bc30021..f9e7b32 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -405,6 +405,8 @@ public class CompactSplitThread implements CompactionRequestor {
     return this.regionSplitLimit;
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
+      justification="Contrived use of compareTo")
   private class CompactionRunner implements Runnable, Comparable<CompactionRunner> {
     private final Store store;
     private final HRegion region;

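The EQ_COMPARETO_USE_OBJECT_EQUALS suppression concerns compareTo being
inconsistent with equals: CompactionRunner orders runners for the priority
queue but keeps identity equals, so two distinct runners can compare as 0.
A bare sketch of that shape (hypothetical Runner class):

    // Ordering by priority only; equals is left as object identity, so a
    // sorted set such as TreeSet would wrongly conflate equal-priority items.
    class Runner implements Comparable<Runner> {
      final int priority;

      Runner(int priority) { this.priority = priority; }

      @Override
      public int compareTo(Runner other) {
        return Integer.compare(this.priority, other.priority);
      }
    }
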
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowTooBigException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowTooBigException.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowTooBigException.java
index 93caed3..7722baf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowTooBigException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowTooBigException.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.RegionException;
  */
 @InterfaceAudience.Public
 public class RowTooBigException extends RegionException {
-
   public RowTooBigException(String message) {
     super(message);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
index 31d347c..3d4e990 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.List;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
index 122eeaf..c58ff14 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
@@ -27,13 +27,12 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicate;
@@ -321,6 +320,15 @@ public class RatioBasedCompactionPolicy extends CompactionPolicy {
     return result;
   }
 
+  /**
+   * Used calculation jitter
+   */
+  private final Random random = new Random();
+
+  /**
+   * @param filesToCompact
+   * @return When to run next major compaction
+   */
   public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
     // default = 24hrs
     long ret = comConf.getMajorCompactionPeriod();
@@ -332,10 +340,15 @@ public class RatioBasedCompactionPolicy extends CompactionPolicy {
         // deterministic jitter avoids a major compaction storm on restart
         Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
         if (seed != null) {
-          double rnd = (new Random(seed)).nextDouble();
+          // Synchronized to ensure one user of random instance at a time.
+          double rnd = -1;
+          synchronized (this) {
+            this.random.setSeed(seed);
+            rnd = this.random.nextDouble();
+          }
           ret += jitter - Math.round(2L * jitter * rnd);
         } else {
-          ret = 0; // no storefiles == no major compaction
+          ret = 0; // If seed is null, then no storefiles == no major compaction
         }
       }
     }

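The jitter logic above spreads major compactions around the configured period
with an offset in (-jitter, +jitter], seeded deterministically from the store
files so the same region computes the same schedule across restarts and a
fleet-wide compaction storm is avoided. A simplified sketch of the arithmetic
(hypothetical values):

    import java.util.Random;

    public class CompactionJitter {
      public static void main(String[] args) {
        long period = 24L * 60 * 60 * 1000;         // default: 24h between major compactions
        long jitter = Math.round(period * 0.5);     // configurable fraction of the period
        long seed = 12345L;                         // HBase derives this from the store files

        double rnd = new Random(seed).nextDouble(); // deterministic for a given seed
        long next = period + jitter - Math.round(2L * jitter * rnd);
        System.out.println("next major compaction in " + next + " ms");
      }
    }
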
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index f599b05..99c6b3d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -734,6 +734,8 @@ public class ReplicationSource extends Thread
    * @return true if we're done with the current file, false if we should
    * continue trying to read from it
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE",
+      justification="Yeah, this is how it works")
   protected boolean processEndOfFile() {
     if (this.queue.size() != 0) {
       if (LOG.isTraceEnabled()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
index 0e59c8b..45dd9ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
@@ -220,6 +220,8 @@ public class SchemaResource extends ResourceBase {
     return update(model, false, uriInfo);
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE",
+      justification="Expected")
   @DELETE
   public Response delete(final @Context UriInfo uriInfo) {
     if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f33edab/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 483da8f..064f67c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -144,7 +144,7 @@ public class RegionSplitter {
    * {@link HexStringSplit} to partition their table and set it as default, but
    * provided this for your custom algorithm. To use, create a new derived class
    * from this interface and call {@link RegionSplitter#createPresplitTable} or
-   * {@link RegionSplitter#rollingSplit(String, SplitAlgorithm, Configuration)} with the
+   * RegionSplitter#rollingSplit(TableName, SplitAlgorithm, Configuration) with the
    * argument splitClassName giving the name of your class.
    */
   public interface SplitAlgorithm {

