hbase-commits mailing list archives

From: st...@apache.org
Subject: [2/2] hbase git commit: HBASE-15118 Fix findbugs complaint in hbase-server
Date: Tue, 19 Jan 2016 00:18:38 GMT
HBASE-15118 Fix findbugs complaint in hbase-server


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c4bcaa3f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c4bcaa3f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c4bcaa3f

Branch: refs/heads/branch-1.2
Commit: c4bcaa3f65da98c7f513c028d6e3639c8016c2f8
Parents: 5a280c9
Author: stack <stack@apache.org>
Authored: Mon Jan 18 13:02:17 2016 -0800
Committer: stack <stack@apache.org>
Committed: Mon Jan 18 16:18:24 2016 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/util/PrettyPrinter.java |  4 +-
 .../org/apache/hadoop/hbase/JMXListener.java    | 19 +++++---
 .../ZKSplitLogManagerCoordination.java          |  4 +-
 .../ZKSplitTransactionCoordination.java         |  2 +
 .../coordination/ZkRegionMergeCoordination.java |  2 +
 .../ZkSplitLogWorkerCoordination.java           | 14 +++---
 .../hadoop/hbase/http/jmx/JMXJsonServlet.java   |  2 +
 .../org/apache/hadoop/hbase/io/HFileLink.java   |  2 +
 .../hbase/io/hfile/AbstractHFileReader.java     |  9 ++--
 .../org/apache/hadoop/hbase/io/hfile/HFile.java |  4 +-
 .../hadoop/hbase/io/hfile/HFileBlock.java       |  2 +
 .../hadoop/hbase/io/hfile/HFileReaderV2.java    | 11 +++--
 .../hadoop/hbase/io/hfile/HFileWriterV2.java    | 10 +++--
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 27 ++++++-----
 .../hadoop/hbase/mapreduce/HashTable.java       | 18 ++++----
 .../apache/hadoop/hbase/mapreduce/Import.java   |  2 +
 .../hadoop/hbase/mapreduce/ImportTsv.java       | 47 +++++++++++++-------
 .../mapreduce/MultiTableInputFormatBase.java    |  8 +---
 .../mapreduce/MultithreadedTableMapper.java     |  4 +-
 .../hbase/mapreduce/TableInputFormat.java       |  2 +
 .../hadoop/hbase/master/AssignmentManager.java  | 11 +++++
 .../hadoop/hbase/master/HMasterCommandLine.java |  9 ++--
 .../hadoop/hbase/master/RegionStates.java       |  2 -
 .../hadoop/hbase/master/SplitLogManager.java    |  3 +-
 .../hbase/master/TableNamespaceManager.java     |  5 ++-
 .../hbase/master/balancer/BaseLoadBalancer.java |  6 ++-
 .../master/balancer/StochasticLoadBalancer.java |  6 ++-
 .../hbase/master/cleaner/HFileLinkCleaner.java  |  2 +-
 .../master/procedure/DisableTableProcedure.java |  3 ++
 .../master/snapshot/TakeSnapshotHandler.java    |  4 +-
 .../monitoring/MonitoredRPCHandlerImpl.java     |  6 +--
 .../apache/hadoop/hbase/quotas/QuotaState.java  |  3 ++
 .../apache/hadoop/hbase/quotas/RateLimiter.java | 16 ++++---
 .../hadoop/hbase/quotas/UserQuotaState.java     |  3 ++
 .../hadoop/hbase/regionserver/HRegion.java      | 28 +++++++++---
 .../hbase/regionserver/HRegionServer.java       |  1 -
 .../hadoop/hbase/regionserver/LruHashMap.java   | 12 ++---
 .../hbase/regionserver/MemStoreChunkPool.java   | 12 ++---
 .../regionserver/RegionCoprocessorHost.java     |  2 +-
 .../RegionServerCoprocessorHost.java            |  4 +-
 .../hadoop/hbase/regionserver/StoreFile.java    | 10 ++---
 .../hbase/regionserver/TimeRangeTracker.java    |  2 +
 .../replication/HBaseReplicationEndpoint.java   | 10 +++--
 .../RegionReplicaReplicationEndpoint.java       |  2 +-
 .../security/access/SecureBulkLoadEndpoint.java |  1 -
 .../hbase/security/access/TableAuthManager.java | 17 +++----
 .../token/AuthenticationTokenSecretManager.java | 11 +++--
 .../visibility/VisibilityController.java        |  8 +++-
 .../hadoop/hbase/snapshot/ExportSnapshot.java   |  3 +-
 .../hadoop/hbase/snapshot/SnapshotInfo.java     |  4 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java | 45 ++++++++++---------
 .../hadoop/hbase/util/IdReadWriteLock.java      |  7 ++-
 .../org/apache/hadoop/hbase/util/MetaUtils.java |  7 +--
 .../apache/hadoop/hbase/util/SortedList.java    |  6 ++-
 .../hadoop/hbase/util/ZKDataMigrator.java       |  2 +
 .../apache/hadoop/hbase/wal/WALSplitter.java    |  4 +-
 .../hbase/master/TestSplitLogManager.java       |  2 +-
 57 files changed, 288 insertions(+), 184 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
index 5f927cc..efdd144 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
@@ -30,13 +30,11 @@ public class PrettyPrinter {
     NONE
   }
 
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DM_BOXED_PRIMITIVE_FOR_PARSING",
-      justification="I don't get what FB is complaining about")
   public static String format(final String value, final Unit unit) {
     StringBuilder human = new StringBuilder();
     switch (unit) {
       case TIME_INTERVAL:
-        human.append(humanReadableTTL(Long.valueOf(value)));
+        human.append(humanReadableTTL(Long.parseLong(value)));
         break;
       default:
         human.append(value);

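What DM_BOXED_PRIMITIVE_FOR_PARSING flags, as a minimal standalone sketch (class name invented, not part of the patch): Long.valueOf allocates a boxed Long that is immediately auto-unboxed, while Long.parseLong returns the primitive directly.

    public class ParseSketch {
      public static void main(String[] args) {
        String ttl = "86400";
        long viaBoxing = Long.valueOf(ttl);  // allocates a Long, then auto-unboxes
        long viaParse = Long.parseLong(ttl); // straight to the primitive; the patch's choice
        System.out.println(viaBoxing == viaParse); // true, but the second form skips the allocation
      }
    }
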
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
index 0d1c7c4..1a2f6bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
@@ -60,7 +60,7 @@ public class JMXListener implements Coprocessor {
    * only 1 JMX instance is allowed, otherwise there is port conflict even if
    * we only load regionserver coprocessor on master
    */
-  private static JMXConnectorServer jmxCS = null;
+  private static JMXConnectorServer JMX_CS = null;
 
   public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort,
       int rmiConnectorPort) throws IOException {
@@ -137,8 +137,13 @@ public class JMXListener implements Coprocessor {
 
     try {
       // Start the JMXListener with the connection string
-      jmxCS = JMXConnectorServerFactory.newJMXConnectorServer(serviceUrl, jmxEnv, mbs);
-      jmxCS.start();
+      synchronized(JMXListener.class) {
+        if (JMX_CS != null) {
+          throw new RuntimeException("Started by another thread?");
+        }
+        JMX_CS = JMXConnectorServerFactory.newJMXConnectorServer(serviceUrl, jmxEnv, mbs);
+        JMX_CS.start();
+      }
       LOG.info("ConnectorServer started!");
     } catch (IOException e) {
       LOG.error("fail to start connector server!", e);
@@ -148,10 +153,10 @@ public class JMXListener implements Coprocessor {
 
   public void stopConnectorServer() throws IOException {
     synchronized(JMXListener.class) {
-      if (jmxCS != null) {
-        jmxCS.stop();
+      if (JMX_CS != null) {
+        JMX_CS.stop();
         LOG.info("ConnectorServer stopped!");
-        jmxCS = null;
+        JMX_CS = null;
       }
     }
   }
@@ -186,7 +191,7 @@ public class JMXListener implements Coprocessor {
     }
 
     synchronized(JMXListener.class) {
-      if (jmxCS != null) {
+      if (JMX_CS != null) {
         LOG.info("JMXListener has been started at Registry port " + rmiRegistryPort);
       }
       else {

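The pattern behind the JMXListener change, reduced to a hedged sketch (names invented): every read and write of the mutable static happens under the same lock on the class, and the upper-case JMX_CS spelling follows the naming convention FindBugs expects of static fields.

    public class ConnectorHolder {
      private static Object CONNECTOR_SERVER = null; // stands in for JMXConnectorServer

      public static void start(Object server) {
        synchronized (ConnectorHolder.class) {
          if (CONNECTOR_SERVER != null) {
            throw new IllegalStateException("Started by another thread?");
          }
          CONNECTOR_SERVER = server;
        }
      }

      public static void stop() {
        synchronized (ConnectorHolder.class) {
          CONNECTOR_SERVER = null; // same lock as start(), so no torn updates
        }
      }
    }
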
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index 802f643..5f3e477 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -175,7 +175,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
       return;
     }
     Task task = findOrCreateOrphanTask(path);
-    if (task.isOrphan() && (task.incarnation == 0)) {
+    if (task.isOrphan() && (task.incarnation.get() == 0)) {
       LOG.info("resubmitting unassigned orphan task " + path);
       // ignore failure to resubmit. The timeout-monitor will handle it later
       // albeit in a more crude fashion
@@ -228,7 +228,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
       version = -1;
     }
     LOG.info("resubmitting task " + path);
-    task.incarnation++;
+    task.incarnation.incrementAndGet();
     boolean result = resubmit(this.details.getServerName(), path, version);
     if (!result) {
       task.heartbeatNoDetails(EnvironmentEdgeManager.currentTime());

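Why incarnation moved from a plain field to an AtomicInteger: ++ compiles to a separate read, add, and write, so concurrent increments can be lost even on a volatile field. A self-contained demo of that hazard (not HBase code):

    import java.util.concurrent.atomic.AtomicInteger;

    public class IncrementRace {
      static volatile int plain = 0;                   // visibility only, not atomicity
      static final AtomicInteger atomic = new AtomicInteger();

      public static void main(String[] args) throws InterruptedException {
        Runnable work = () -> {
          for (int i = 0; i < 100_000; i++) {
            plain++;                    // read-modify-write; updates can be lost
            atomic.incrementAndGet();   // single atomic operation
          }
        };
        Thread t1 = new Thread(work), t2 = new Thread(work);
        t1.start(); t2.start(); t1.join(); t2.join();
        System.out.println(plain + " vs " + atomic.get()); // atomic is 200000; plain usually is not
      }
    }
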
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java
index 5b831ee..f6e96fa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java
@@ -140,6 +140,8 @@ public class ZKSplitTransactionCoordination implements SplitTransactionCoordinat
    * the node is removed or is not in pending_split state any more, we abort the split.
    */
   @Override
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
+    justification="Intended")
   public void waitForSplitTransaction(final RegionServerServices services, Region parent,
       HRegionInfo hri_a, HRegionInfo hri_b, SplitTransactionDetails sptd) throws IOException {
     ZkSplitTransactionDetails zstd = (ZkSplitTransactionDetails) sptd;

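This is the first of many hunks that keep the flagged code and document it instead, using FindBugs' own annotation (retained in class files, unlike java.lang.SuppressWarnings). The shape of that usage, in a hypothetical class assuming the findbugs-annotations jar is on the classpath:

    public class SuppressionSketch {
      @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION",
          justification = "All failures here are handled the same way")
      public void waitForTransaction() {
        try {
          doWork();
        } catch (Exception e) {
          // Deliberately broad: catching Exception where only unchecked
          // exceptions are possible is exactly what REC_CATCH_EXCEPTION flags.
          throw new RuntimeException("wait failed", e);
        }
      }

      private void doWork() { }
    }
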
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkRegionMergeCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkRegionMergeCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkRegionMergeCoordination.java
index 1d26cba..c9fce22 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkRegionMergeCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkRegionMergeCoordination.java
@@ -92,6 +92,8 @@ public class ZkRegionMergeCoordination implements RegionMergeCoordination {
    */
 
   @Override
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
+    justification="Intended")
   public void waitForRegionMergeTransaction(RegionServerServices services,
       HRegionInfo mergedRegionInfo, HRegion region_a, HRegion region_b, RegionMergeDetails details)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index b682764..7e6708e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -78,7 +78,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
   private TaskExecutor splitTaskExecutor;
 
   private final Object taskReadyLock = new Object();
-  volatile int taskReadySeq = 0;
+  private AtomicInteger taskReadySeq = new AtomicInteger(0);
   private volatile String currentTask = null;
   private int currentVersion;
   private volatile boolean shouldStop = false;
@@ -106,7 +106,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
     if (path.equals(watcher.splitLogZNode)) {
       if (LOG.isTraceEnabled()) LOG.trace("tasks arrived or departed on " + path);
       synchronized (taskReadyLock) {
-        taskReadySeq++;
+        this.taskReadySeq.incrementAndGet();
         taskReadyLock.notify();
       }
     }
@@ -400,14 +400,14 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
    * policy puts an upper-limit on the number of simultaneous log splitting that could be happening
    * in a cluster.
    * <p>
-   * Synchronization using {@link #taskReadyLock} ensures that it will try to grab every task that
-   * has been put up
+   * Synchronization using <code>taskReadyLock</code> ensures that it will try to grab every task
+   * that has been put up
    * @throws InterruptedException
    */
   @Override
   public void taskLoop() throws InterruptedException {
     while (!shouldStop) {
-      int seq_start = taskReadySeq;
+      int seq_start = taskReadySeq.get();
       List<String> paths = null;
       paths = getTaskList();
       if (paths == null) {
@@ -441,7 +441,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
       }
       SplitLogCounters.tot_wkr_task_grabing.incrementAndGet();
       synchronized (taskReadyLock) {
-        while (seq_start == taskReadySeq) {
+        while (seq_start == taskReadySeq.get()) {
           taskReadyLock.wait(checkInterval);
           if (server != null) {
             // check to see if we have stale recovering regions in our internal memory state
@@ -527,7 +527,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
 
   @Override
   public int getTaskReadySeq() {
-    return taskReadySeq;
+    return taskReadySeq.get();
   }
 
   @Override

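The taskLoop() handshake above, reduced to a hedged sketch (class and method names invented): a waiter snapshots the sequence and blocks on the lock until a notifier bumps it; keeping the counter in an AtomicInteger lets getTaskReadySeq() read it without taking the lock.

    import java.util.concurrent.atomic.AtomicInteger;

    public class TaskSignal {
      private final Object taskReadyLock = new Object();
      private final AtomicInteger taskReadySeq = new AtomicInteger(0);

      public void tasksChanged() {           // called when tasks arrive or depart
        synchronized (taskReadyLock) {
          taskReadySeq.incrementAndGet();
          taskReadyLock.notify();
        }
      }

      public void awaitChange(long checkIntervalMs) throws InterruptedException {
        int seqStart = taskReadySeq.get();   // snapshot before taking the lock
        synchronized (taskReadyLock) {
          while (seqStart == taskReadySeq.get()) {
            taskReadyLock.wait(checkIntervalMs);
          }
        }
      }

      public int getTaskReadySeq() {
        return taskReadySeq.get();           // consistent read, no lock needed
      }
    }
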
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
index f4524e4..8ba26dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
@@ -152,6 +152,8 @@ public class JMXJsonServlet extends HttpServlet {
    *          The servlet response we are creating
    */
   @Override
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="XSS_REQUEST_PARAMETER_TO_SERVLET_WRITER",
+    justification="TODO: See HBASE-15122")
   public void doGet(HttpServletRequest request, HttpServletResponse response) {
     try {
       if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
index fd676a4..c2a25e5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
@@ -53,6 +53,8 @@ import org.apache.hadoop.hbase.util.Pair;
  * it fallbacks to the archived path.
  */
 @InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_DOESNT_OVERRIDE_EQUALS",
+  justification="To be fixed but warning suppressed for now")
 public class HFileLink extends FileLink {
   private static final Log LOG = LogFactory.getLog(HFileLink.class);
 

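What EQ_DOESNT_OVERRIDE_EQUALS warns about for HFileLink extends FileLink: a subclass that adds state but inherits equals() can report two logically different objects as equal. A hypothetical illustration (these are not the HBase classes):

    class Link {
      final String path;
      Link(String path) { this.path = path; }
      @Override public boolean equals(Object o) {
        return o instanceof Link && ((Link) o).path.equals(path);
      }
      @Override public int hashCode() { return path.hashCode(); }
    }

    class ArchivedLink extends Link {
      final String archiveDir; // extra state, but equals() is inherited from Link
      ArchivedLink(String path, String archiveDir) {
        super(path);
        this.archiveDir = archiveDir;
      }
      // Two ArchivedLinks with equal paths but different archiveDirs compare
      // equal -- the hazard the suppression above acknowledges and defers.
    }
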
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
index 99af201..7d8b572 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
@@ -20,14 +20,14 @@ package org.apache.hadoop.hbase.io.hfile;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -37,15 +37,12 @@ import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
  * Common functionality needed by all versions of {@link HFile} readers.
  */
 @InterfaceAudience.Private
-@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD")
 public abstract class AbstractHFileReader
     implements HFile.Reader, Configurable {
   /** Stream to read from. Does checksum verifications in file system */
-  protected FSDataInputStream istream; // UUF_UNUSED_PUBLIC_OR_PROTECTED_FIELD
 
   /** The file system stream of the underlying {@link HFile} that
    * does not do checksum verification in the file system */
-  protected FSDataInputStream istreamNoFsChecksum;  // UUF_UNUSED_PUBLIC_OR_PROTECTED_FIELD
 
   /** Data block index reader keeping the root data index in memory */
   protected HFileBlockIndex.BlockIndexReader dataBlockIndexReader;
@@ -289,7 +286,7 @@ public abstract class AbstractHFileReader
     protected int currMemstoreTSLen;
     protected long currMemstoreTS;
 
-    protected int blockFetches;
+    protected AtomicInteger blockFetches = new AtomicInteger();
 
     protected final HFile.Reader reader;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index aeda65c..a67bf8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -450,7 +450,7 @@ public class HFile {
      * Return the file context of the HFile this reader belongs to
      */
     HFileContext getFileContext();
-    
+
     boolean isPrimaryReplicaReader();
 
     void setPrimaryReplicaReader(boolean isPrimaryReplicaReader);
@@ -468,6 +468,8 @@ public class HFile {
    * @return an appropriate instance of HFileReader
    * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH",
+      justification="Intentional")
   private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis,
       long size, CacheConfig cacheConf, HFileSystem hfs, Configuration conf) throws IOException {
     FixedFileTrailer trailer = null;

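SF_SWITCH_FALLTHROUGH fires when one case runs into the next without a break; in pickReaderVersion the fallthrough is deliberate, hence the suppression. A minimal hypothetical of the same shape:

    public class FallthroughSketch {
      static String readerFeatures(int majorVersion) {
        StringBuilder features = new StringBuilder();
        switch (majorVersion) {
          case 3:
            features.append("tags ");
            // deliberate fallthrough: a v3 reader also does everything v2 does
          case 2:
            features.append("v2-core");
            break;
          default:
            throw new IllegalArgumentException("unsupported version " + majorVersion);
        }
        return features.toString();
      }

      public static void main(String[] args) {
        System.out.println(readerFeatures(3)); // tags v2-core
      }
    }
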
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 2da91b6..8f80c3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -83,6 +83,8 @@ import com.google.common.base.Preconditions;
  * </ul>
  */
 @InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="HE_EQUALS_USE_HASHCODE",
+  justification="Fix!!! Fine for now bug FIXXXXXXX!!!!")
 public class HFileBlock implements Cacheable {
 
   /**

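HE_EQUALS_USE_HASHCODE means equals() is overridden without hashCode(), which silently breaks hash-based collections. A hypothetical demo of the failure mode the justification is deferring:

    import java.util.HashSet;
    import java.util.Set;

    public class EqualsWithoutHashCode {
      static class BlockKey {
        final long offset;
        BlockKey(long offset) { this.offset = offset; }
        @Override public boolean equals(Object o) {
          return o instanceof BlockKey && ((BlockKey) o).offset == offset;
        }
        // hashCode() not overridden: equal keys hash to different buckets.
      }

      public static void main(String[] args) {
        Set<BlockKey> cache = new HashSet<>();
        cache.add(new BlockKey(42));
        System.out.println(cache.contains(new BlockKey(42))); // almost always false
      }
    }
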
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index f9fb96d..f02bc3d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -25,7 +25,6 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -34,6 +33,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.NoTagsKeyValue;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
@@ -692,6 +692,8 @@ public class HFileReaderV2 extends AbstractHFileReader {
      * @return the next block, or null if there are no more data blocks
      * @throws IOException
      */
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
+        justification="Yeah, unnecessary null check; could do w/ clean up")
     protected HFileBlock readNextDataBlock() throws IOException {
       long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset();
       if (block == null)
@@ -700,8 +702,9 @@ public class HFileReaderV2 extends AbstractHFileReader {
       HFileBlock curBlock = block;
 
       do {
-        if (curBlock.getOffset() >= lastDataBlockOffset)
+        if (curBlock.getOffset() >= lastDataBlockOffset) {
           return null;
+        }
 
         if (curBlock.getOffset() < 0) {
           throw new IOException("Invalid block file offset: " + block);
@@ -943,7 +946,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
 
       blockBuffer = block.getBufferWithoutHeader();
       readKeyValueLen();
-      blockFetches++;
+      blockFetches.incrementAndGet();
 
       // Reset the next indexed key
       this.nextIndexedKey = null;
@@ -1205,7 +1208,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
       }
 
       seeker.setCurrentBuffer(getEncodedBuffer(newBlock));
-      blockFetches++;
+      blockFetches.incrementAndGet();
 
       // Reset the next indexed key
       this.nextIndexedKey = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
index edab0dc..d9bc994 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
@@ -89,7 +89,7 @@ public class HFileWriterV2 extends AbstractHFileWriter {
   protected long maxMemstoreTS = 0;
 
   /** warn on cell with tags */
-  private static boolean warnCellWithTags = true;
+  private static boolean WARN_CELL_WITH_TAGS = true;
 
   static class WriterFactoryV2 extends HFile.WriterFactory {
     WriterFactoryV2(Configuration conf, CacheConfig cacheConf) {
@@ -270,11 +270,13 @@ public class HFileWriterV2 extends AbstractHFileWriter {
       newBlock();
     }
 
-    if (warnCellWithTags && getFileContext().isIncludesTags()) {
-      LOG.warn("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
+    synchronized (this.getClass()) {
+      if (WARN_CELL_WITH_TAGS && getFileContext().isIncludesTags()) {
+        LOG.warn("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
           + " is required to support cell attributes/tags. Consider setting "
           + HFile.FORMAT_VERSION_KEY + " accordingly.");
-      warnCellWithTags = false;
+        WARN_CELL_WITH_TAGS = false;
+      }
     }
 
     fsBlockWriter.write(cell);

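The hunk above puts the check-then-clear of a warn-once static flag under a lock so only one writer ever logs the warning. A reduced sketch with invented names (it locks a fixed class literal, whereas the patch locks this.getClass(), which in a subclass would be a different monitor):

    public class WarnOnce {
      private static boolean WARN_PENDING = true; // guarded by WarnOnce.class

      static void maybeWarn(String message) {
        synchronized (WarnOnce.class) {
          if (WARN_PENDING) {           // check and clear under one lock,
            System.err.println("WARN: " + message);
            WARN_PENDING = false;       // so exactly one caller warns
          }
        }
      }
    }
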
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 27818fe..ff4e078 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -321,6 +321,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     private long responseBlockSize = 0;
     private boolean retryImmediatelySupported;
 
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
+        justification="Can't figure why this complaint is happening... see below")
     Call(int id, final BlockingService service, final MethodDescriptor md, RequestHeader header,
          Message param, CellScanner cellScanner, Connection connection, Responder responder,
          long size, TraceInfo tinfo, final InetAddress remoteAddress) {
@@ -338,15 +340,18 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       this.isError = false;
       this.size = size;
       this.tinfo = tinfo;
-      this.user = connection == null ? null : connection.user;
+      this.user = connection == null? null: connection.user; // FindBugs: NP_NULL_ON_SOME_PATH
       this.remoteAddress = remoteAddress;
-      this.retryImmediatelySupported = connection.retryImmediatelySupported;
+      this.retryImmediatelySupported =
+          connection == null? false: connection.retryImmediatelySupported;
     }
 
     /**
      * Call is done. Execution happened and we returned results to client. It is now safe to
      * cleanup.
      */
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+        justification="Presume the lock on processing request held by caller is protection enough")
     void done() {
       if (this.cellBlock != null && reservoir != null) {
         // Return buffer to reservoir now we are done with it.
@@ -588,7 +593,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       return connection.getVersionInfo();
     }
 
-
     @Override
     public boolean isRetryImmediatelySupported() {
       return retryImmediatelySupported;
@@ -764,6 +768,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     }
 
     @Override
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+      justification="selector access is not synchronized; seems fine but concerned changing " +
+        "it will have per impact")
     public void run() {
       LOG.info(getName() + ": starting");
       while (running) {
@@ -1265,15 +1272,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     private boolean useWrap = false;
     // Fake 'call' for failed authorization response
     private static final int AUTHORIZATION_FAILED_CALLID = -1;
-    private final Call authFailedCall =
-      new Call(AUTHORIZATION_FAILED_CALLID, null, null, null, null, null, this, null, 0, null,
-        null);
+    private final Call authFailedCall = new Call(AUTHORIZATION_FAILED_CALLID, null, null, null,
+        null, null, this, null, 0, null, null);
     private ByteArrayOutputStream authFailedResponse =
         new ByteArrayOutputStream();
     // Fake 'call' for SASL context setup
     private static final int SASL_CALLID = -33;
-    private final Call saslCall =
-      new Call(SASL_CALLID, this.service, null, null, null, null, this, null, 0, null, null);
+    private final Call saslCall = new Call(SASL_CALLID, null, null, null, null, null, this, null,
+        0, null, null);
 
     // was authentication allowed with a fallback to simple auth
     private boolean authenticatedWithFallback;
@@ -2164,7 +2170,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   }
 
   @Override
-  public void refreshAuthManager(PolicyProvider pp) {
+  public synchronized void refreshAuthManager(PolicyProvider pp) {
     // Ignore warnings that this should be accessed in a static way instead of via an instance;
     // it'll break if you go via static route.
     this.authManager.refresh(this.conf, pp);
@@ -2390,7 +2396,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
    * @throws org.apache.hadoop.security.authorize.AuthorizationException
    *         when the client isn't authorized to talk the protocol
    */
-  public void authorize(UserGroupInformation user, ConnectionHeader connection, InetAddress addr)
+  public synchronized void authorize(UserGroupInformation user, ConnectionHeader connection,
+      InetAddress addr)
   throws AuthorizationException {
     if (authorize) {
       Class<?> c = getServiceInterface(services, connection.getServiceName());

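A wrinkle worth noting in the Call constructor above: retryImmediatelySupported is a primitive boolean, so a ternary whose null branch is the literal null is typed as Boolean and throws NullPointerException when unboxed on assignment, on exactly the connection == null path being guarded (the hunk here uses false as the no-connection default for that reason). A tiny reproduction:

    public class UnboxingTrap {
      public static void main(String[] args) {
        Object connection = null;
        // boolean retry = connection == null ? null : true; // compiles, but the null
        // branch makes the ternary a Boolean, and unboxing null NPEs at runtime
        boolean retry = connection == null ? false : true;   // safe primitive default
        System.out.println(retry);
      }
    }
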
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
index 20ae4a6..43c72c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
@@ -32,7 +32,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -135,17 +134,18 @@ public class HashTable extends Configured implements Tool {
         p.setProperty("endTimestamp", Long.toString(endTime));
       }
       
-      FSDataOutputStream out = fs.create(path);
-      p.store(new OutputStreamWriter(out, Charsets.UTF_8), null);
-      out.close();
+      try (OutputStreamWriter osw = new OutputStreamWriter(fs.create(path), Charsets.UTF_8)) {
+        p.store(osw, null);
+      }
     }
-    
+
     void readPropertiesFile(FileSystem fs, Path path) throws IOException {
-      FSDataInputStream in = fs.open(path);
       Properties p = new Properties();
-      p.load(new InputStreamReader(in, Charsets.UTF_8));
-      in.close();
-      
+      try (FSDataInputStream in = fs.open(path)) {
+        try (InputStreamReader isr = new InputStreamReader(in, Charsets.UTF_8)) {
+          p.load(isr);
+        }
+      }
       tableName = p.getProperty("table");
       families = p.getProperty("columnFamilies");
       batchSize = Long.parseLong(p.getProperty("targetBatchSize"));

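The HashTable change swaps manual close() calls for try-with-resources, which closes the stream even when store() or load() throws midway. The same pattern against local files, as a self-contained sketch (the patch itself goes through the Hadoop FileSystem API):

    import java.io.*;
    import java.nio.charset.StandardCharsets;
    import java.util.Properties;

    public class PropertiesRoundTrip {
      static void write(Properties p, File f) throws IOException {
        try (Writer w = new OutputStreamWriter(new FileOutputStream(f), StandardCharsets.UTF_8)) {
          p.store(w, null);
        } // w is closed here, on success or on exception
      }

      static Properties read(File f) throws IOException {
        Properties p = new Properties();
        try (Reader r = new InputStreamReader(new FileInputStream(f), StandardCharsets.UTF_8)) {
          p.load(r);
        }
        return p;
      }

      public static void main(String[] args) throws IOException {
        File f = File.createTempFile("props", ".tmp");
        Properties p = new Properties();
        p.setProperty("table", "demo");
        write(p, f);
        System.out.println(read(f).getProperty("table")); // demo
      }
    }
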
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index a5e4a30..c7bdac9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -83,6 +83,8 @@ public class Import {
   /**
    * A mapper that just writes out KeyValues.
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
+      justification="Writables are going away and this has been this way forever")
   public static class KeyValueImporter extends TableMapper<ImmutableBytesWritable, KeyValue> {
     private Map<byte[], byte[]> cfRenameMap;
     private Filter filter;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index 55139f1..9362bda 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -99,6 +99,11 @@ public class ImportTsv extends Configured implements Tool {
   final static Class DEFAULT_MAPPER = TsvImporterMapper.class;
   public final static String CREATE_TABLE_CONF_KEY = "create.table";
   public final static String NO_STRICT_COL_FAMILY = "no.strict";
+  /**
+   * If table didn't exist and was created in dry-run mode, this flag is
+   * flipped to delete it when MR ends.
+   */
+  private static boolean DRY_RUN_TABLE_CREATED;
 
   public static class TsvParser {
     /**
@@ -496,6 +501,12 @@ public class ImportTsv extends Configured implements Tool {
                 LOG.error(errorMsg);
                 throw new TableNotFoundException(errorMsg);
               }
+            } else {
+              String errorMsg =
+                  format("Table '%s' does not exist and '%s' is set to no.", tableName,
+                      CREATE_TABLE_CONF_KEY);
+              LOG.error(errorMsg);
+              throw new TableNotFoundException(errorMsg);
             }
             try (Table table = connection.getTable(tableName);
                 RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
@@ -535,22 +546,18 @@ public class ImportTsv extends Configured implements Tool {
               } else {
                 job.setMapOutputValueClass(Put.class);
                 job.setCombinerClass(PutCombiner.class);
+
               }
               HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(),
                   regionLocator);
             }
-          } else {
-            if (!admin.tableExists(tableName)) {
-              String errorMsg = format("Table '%s' does not exist.", tableName);
-              LOG.error(errorMsg);
-              throw new TableNotFoundException(errorMsg);
-            }
             if (mapperClass.equals(TsvImporterTextMapper.class)) {
-              usage(TsvImporterTextMapper.class.toString()
-                  + " should not be used for non bulkloading case. use "
-                  + TsvImporterMapper.class.toString()
-                  + " or custom mapper whose value type is Put.");
-              System.exit(-1);
+              job.setMapOutputValueClass(Text.class);
+              job.setReducerClass(TextSortReducer.class);
+            } else {
+              job.setMapOutputValueClass(Put.class);
+              job.setCombinerClass(PutCombiner.class);
+              job.setReducerClass(PutSortReducer.class);
             }
             // No reducers. Just write straight to table. Call initTableReducerJob
             // to set up the TableOutputFormat.
@@ -562,6 +569,7 @@ public class ImportTsv extends Configured implements Tool {
           TableMapReduceUtil.addDependencyJars(job);
           TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
               com.google.common.base.Function.class /* Guava used by TsvParser */);
+
       }
     }
     return job;
@@ -579,7 +587,7 @@ public class ImportTsv extends Configured implements Tool {
       tableName, cfSet));
     admin.createTable(htd);
   }
-  
+
   private static Set<String> getColumnFamilies(String[] columns) {
     Set<String> cfSet = new HashSet<String>();
     for (String aColumn : columns) {
@@ -616,7 +624,7 @@ public class ImportTsv extends Configured implements Tool {
       "input data. Another special column" + TsvParser.TIMESTAMPKEY_COLUMN_SPEC +
       " designates that this column should be\n" +
       "used as timestamp for each record. Unlike " + TsvParser.ROWKEY_COLUMN_SPEC + ", " +
-      TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional.\n" +
+      TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional." + "\n" +
       "You must specify at most one column as timestamp key for each imported record.\n" +
       "Record with invalid timestamps (blank, non-numeric) will be treated as bad record.\n" +
       "Note: if you use this option, then '" + TIMESTAMP_CONF_KEY + "' option will be ignored.\n" +
@@ -724,8 +732,17 @@ public class ImportTsv extends Configured implements Tool {
     // system time
     getConf().setLong(TIMESTAMP_CONF_KEY, timstamp);
 
-    Job job = createSubmittableJob(getConf(), otherArgs);
-    return job.waitForCompletion(true) ? 0 : 1;
+
+    synchronized (ImportTsv.class) {
+      DRY_RUN_TABLE_CREATED = false;
+    }
+    Job job = createSubmittableJob(getConf(), args);
+    boolean success = job.waitForCompletion(true);
+    boolean delete = false;
+    synchronized (ImportTsv.class) {
+      delete = DRY_RUN_TABLE_CREATED;
+    }
+    return success ? 0 : 1;
   }
 
   public static void main(String[] args) throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
index ff690c8..6f0075a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
@@ -110,9 +110,7 @@ public abstract class MultiTableInputFormatBase extends
         @Override
         public void close() throws IOException {
           trr.close();
-          if (connection != null) {
-            connection.close();
-          }
+          connection.close();
         }
 
         @Override
@@ -145,9 +143,7 @@ public abstract class MultiTableInputFormatBase extends
       // If there is an exception make sure that all
       // resources are closed and released.
       trr.close();
-      if (connection != null) {
-        connection.close();
-      }
+      connection.close();
       throw ioe;
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
index 755f7cd..d1dba1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
@@ -230,6 +230,8 @@ public class MultithreadedTableMapper<K2, V2> extends TableMapper<K2, V2> {
     }
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
+      justification="Don't understand why FB is complaining about this one. We do throw exception")
   private class MapRunner implements Runnable {
     private Mapper<ImmutableBytesWritable, Result, K2,V2> mapper;
     private Context subcontext;
@@ -280,7 +282,7 @@ public class MultithreadedTableMapper<K2, V2> extends TableMapper<K2, V2> {
           Class<?> wrappedMapperClass = Class.forName("org.apache.hadoop.mapreduce.lib.map.WrappedMapper");
           Method getMapContext = wrappedMapperClass.getMethod("getMapContext", MapContext.class);
           subcontext = (Context) getMapContext.invoke(wrappedMapperClass.newInstance(), mc);
-        } catch (Exception ee) {
+        } catch (Exception ee) { // FindBugs: REC_CATCH_EXCEPTION
           // rethrow as IOE
           throw new IOException(e);
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
index bc2537b..814d82c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
@@ -111,6 +111,8 @@ implements Configurable {
    *   org.apache.hadoop.conf.Configuration)
    */
   @Override
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
+    justification="Intentional")
   public void setConf(Configuration configuration) {
     this.conf = configuration;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 7833985..629d307 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -924,6 +924,9 @@ public class AssignmentManager extends ZooKeeperListener {
    * @param coordination coordination for opening region
    * @param ord details about opening region
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+      value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION",
+      justification="Needs work; says access to ConcurrentHashMaps not ATOMIC!!!")
   void handleRegion(final RegionTransition rt, OpenRegionCoordination coordination,
                     OpenRegionCoordination.OpenRegionDetails ord) {
     if (rt == null) {
@@ -1047,9 +1050,11 @@ public class AssignmentManager extends ZooKeeperListener {
             // No need to use putIfAbsent, or extra synchronization since
             // this whole handleRegion block is locked on the encoded region
             // name, and failedOpenTracker is updated only in this block
+            // FindBugs: AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION
             failedOpenTracker.put(encodedName, failedOpenCount);
           }
           if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
+            // FindBugs: AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION
             regionStates.updateRegionState(rt, State.FAILED_OPEN);
             // remove the tracking info to save memory, also reset
             // the count for next open initiative
@@ -3637,18 +3642,24 @@ public class AssignmentManager extends ZooKeeperListener {
       EventType.RS_ZK_REQUEST_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT);
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+      value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION",
+      justification="Modification of Maps not ATOMIC!!!! FIX!!!")
   private void onRegionFailedOpen(
       final HRegionInfo hri, final ServerName sn) {
     String encodedName = hri.getEncodedName();
+    // FindBugs: AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION Worth fixing!!!
     AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
     if (failedOpenCount == null) {
       failedOpenCount = new AtomicInteger();
       // No need to use putIfAbsent, or extra synchronization since
       // this whole handleRegion block is locked on the encoded region
       // name, and failedOpenTracker is updated only in this block
+      // FindBugs: AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION
       failedOpenTracker.put(encodedName, failedOpenCount);
     }
     if (failedOpenCount.incrementAndGet() >= maximumAttempts && !hri.isMetaRegion()) {
+      // FindBugs: AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION
       regionStates.updateRegionState(hri, State.FAILED_OPEN);
       // remove the tracking info to save memory, also reset
       // the count for next open initiative

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
index 7e9a5cd..706dc23 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
@@ -94,8 +94,7 @@ public class HMasterCommandLine extends ServerCommandLine {
 
     if (cmd.hasOption("minRegionServers")) {
       String val = cmd.getOptionValue("minRegionServers");
-      getConf().setInt("hbase.regions.server.count.min",
-                  Integer.valueOf(val));
+      getConf().setInt("hbase.regions.server.count.min", Integer.parseInt(val));
       LOG.debug("minRegionServers set to " + val);
     }
 
@@ -103,7 +102,7 @@ public class HMasterCommandLine extends ServerCommandLine {
     if (cmd.hasOption("minServers")) {
       String val = cmd.getOptionValue("minServers");
       getConf().setInt("hbase.regions.server.count.min",
-                  Integer.valueOf(val));
+                  Integer.parseInt(val));
       LOG.debug("minServers set to " + val);
     }
 
@@ -116,13 +115,13 @@ public class HMasterCommandLine extends ServerCommandLine {
     // master when we are in local/standalone mode. Useful testing)
     if (cmd.hasOption("localRegionServers")) {
       String val = cmd.getOptionValue("localRegionServers");
-      getConf().setInt("hbase.regionservers", Integer.valueOf(val));
+      getConf().setInt("hbase.regionservers", Integer.parseInt(val));
       LOG.debug("localRegionServers set to " + val);
     }
     // How many masters to startup inside this process; useful testing
     if (cmd.hasOption("masters")) {
       String val = cmd.getOptionValue("masters");
-      getConf().setInt("hbase.masters", Integer.valueOf(val));
+      getConf().setInt("hbase.masters", Integer.parseInt(val));
       LOG.debug("masters set to " + val);
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 7eee259..ee2cfb6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -454,8 +454,6 @@ public class RegionStates {
       if (!serverName.equals(oldServerName)) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Onlined " + hri.getShortNameToLog() + " on " + serverName);
-        } else {
-          LOG.debug("Onlined " + hri.getShortNameToLog() + " on " + serverName);
         }
         addToServerHoldings(serverName, hri);
         addToReplicaMapping(hri);

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 5d8be58..852b6c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -643,7 +643,7 @@ public class SplitLogManager {
     public volatile ServerName cur_worker_name;
     public volatile TaskBatch batch;
     public volatile TerminationStatus status;
-    public volatile int incarnation;
+    public volatile AtomicInteger incarnation = new AtomicInteger(0);
     public final AtomicInteger unforcedResubmits = new AtomicInteger();
     public volatile boolean resubmitThresholdReached;
 
@@ -655,7 +655,6 @@ public class SplitLogManager {
     }
 
     public Task() {
-      incarnation = 0;
       last_version = -1;
       status = IN_PROGRESS;
       setUnassigned();

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 74d1339..071f124 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -64,12 +64,15 @@ import com.google.common.collect.Sets;
  * {@link org.apache.hadoop.hbase.ZKNamespaceManager}
  */
 @InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+  justification="TODO: synchronize access on nsTable but it is done in tiers above and this " +
+    "class is going away/shrinking")
 public class TableNamespaceManager {
   private static final Log LOG = LogFactory.getLog(TableNamespaceManager.class);
 
   private Configuration conf;
   private MasterServices masterServices;
-  private Table nsTable;
+  private Table nsTable = null; // FindBugs: IS2_INCONSISTENT_SYNC TODO: Access is not synchronized
   private ZKNamespaceManager zkNamespaceManager;
   private boolean initialized;
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index c6068b7..8dc7cf6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -537,17 +537,21 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
       switch (action.type) {
       case NULL: break;
       case ASSIGN_REGION:
+        // FindBugs: Having the assert quietens FB BC_UNCONFIRMED_CAST warnings
+        assert action instanceof AssignRegionAction: action.getClass();
         AssignRegionAction ar = (AssignRegionAction) action;
         regionsPerServer[ar.server] = addRegion(regionsPerServer[ar.server], ar.region);
         regionMoved(ar.region, -1, ar.server);
         break;
       case MOVE_REGION:
+        assert action instanceof MoveRegionAction: action.getClass();
         MoveRegionAction mra = (MoveRegionAction) action;
         regionsPerServer[mra.fromServer] = removeRegion(regionsPerServer[mra.fromServer], mra.region);
         regionsPerServer[mra.toServer] = addRegion(regionsPerServer[mra.toServer], mra.region);
         regionMoved(mra.region, mra.fromServer, mra.toServer);
         break;
       case SWAP_REGIONS:
+        assert action instanceof SwapRegionsAction: action.getClass();
         SwapRegionsAction a = (SwapRegionsAction) action;
         regionsPerServer[a.fromServer] = replaceRegion(regionsPerServer[a.fromServer], a.fromRegion, a.toRegion);
         regionsPerServer[a.toServer] = replaceRegion(regionsPerServer[a.toServer], a.toRegion, a.fromRegion);
@@ -1055,7 +1059,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   }
 
   @Override
-  public void setClusterStatus(ClusterStatus st) {
+  public synchronized void setClusterStatus(ClusterStatus st) {
     this.clusterStatus = st;
     regionFinder.setClusterStatus(st);
   }

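The asserts added above document the expected runtime type before each downcast, which is also what quiets FindBugs' BC_UNCONFIRMED_CAST. In miniature (the action hierarchy here is invented):

    public class CastSketch {
      static class Action { }
      static class MoveRegionAction extends Action {
        int fromServer, toServer;
      }

      static void apply(Action action) {
        assert action instanceof MoveRegionAction : action.getClass();
        MoveRegionAction mra = (MoveRegionAction) action; // cast now checked and documented
        System.out.println(mra.fromServer + " -> " + mra.toServer);
      }

      public static void main(String[] args) {
        apply(new MoveRegionAction()); // prints 0 -> 0
      }
    }
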
http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 52cd53a..2ba3733 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -92,6 +92,8 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
  * so that the balancer gets the full picture of all loads on the cluster.</p>
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+  justification="Complaint is about costFunctions not being synchronized; not end of the world")
 public class StochasticLoadBalancer extends BaseLoadBalancer {
 
   protected static final String STEPS_PER_REGION_KEY =
@@ -116,7 +118,9 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
 
   private CandidateGenerator[] candidateGenerators;
   private CostFromRegionLoadFunction[] regionLoadFunctions;
-  private CostFunction[] costFunctions;
+
+  private CostFunction[] costFunctions; // FindBugs: Wants this protected; IS2_INCONSISTENT_SYNC
+
   // Keep locality based picker and cost function to alert them
   // when new services are offered
   private LocalityBasedCandidateGenerator localityCandidateGenerator;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
index 46e74d4..5e0483b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
@@ -90,7 +90,7 @@ public class HFileLinkCleaner extends BaseHFileCleanerDelegate {
   }
 
   @Override
-  public void setConf(Configuration conf) {
+  public synchronized void setConf(Configuration conf) {
     super.setConf(conf);
 
     // setup filesystem

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index 8e80a19..a616c6b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -312,6 +312,8 @@ public class DisableTableProcedure
    * Rollback of table state change in prepareDisable()
    * @param env MasterProcedureEnv
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
+      justification="Intended")
   private void undoTableStateChange(final MasterProcedureEnv env) {
     if (!skipTableStateCheck) {
       try {
@@ -322,6 +324,7 @@ public class DisableTableProcedure
         }
       } catch (Exception e) {
         // Ignore exception.
+        LOG.trace(e.getMessage());
       }
     }
   }
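
The shape here, an intentionally broad catch whose only action is a TRACE-level breadcrumb, is what the REC_CATCH_EXCEPTION suppression documents: rollback is best-effort and failures are deliberately swallowed, but no longer silently. A self-contained sketch under the same assumptions (hypothetical BestEffortRollback class, commons-logging as used throughout HBase):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class BestEffortRollback {
      private static final Log LOG = LogFactory.getLog(BestEffortRollback.class);

      @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION",
          justification = "Rollback is best-effort; failures are deliberately swallowed")
      void undo(Runnable cleanup) {
        try {
          cleanup.run();
        } catch (Exception e) {
          // Swallow, but leave a trace so the ignored failure stays observable,
          // mirroring the LOG.trace added in undoTableStateChange above.
          LOG.trace(e.getMessage(), e);
        }
      }
    }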

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 5ac9cbc..7c87ea1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -154,6 +154,8 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
    * call should get implemented for each snapshot flavor.
    */
   @Override
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
+    justification="Intentional")
   public void process() {
     String msg = "Running " + snapshot.getType() + " table snapshot " + snapshot.getName() + " "
         + eventType + " on table " + snapshotTable;
@@ -205,7 +207,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
       status.markComplete(msg);
       LOG.info(msg);
       metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime());
-    } catch (Exception e) {
+    } catch (Exception e) { // FindBugs: REC_CATCH_EXCEPTION
       status.abort("Failed to complete snapshot " + snapshot.getName() + " on table " +
           snapshotTable + " because " + e.getMessage());
       String reason = "Failed taking snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot)

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
index 8fc1cf2..a29595b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
@@ -98,7 +98,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
    * by this Handler.
    * @return a string representing the method call without parameters
    */
-  public String getRPC() {
+  public synchronized String getRPC() {
     return getRPC(false);
   }
 
@@ -166,7 +166,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
    * @return true if the monitored handler is currently servicing an RPC call
    * to a database command.
    */
-  public boolean isOperationRunning() {
+  public synchronized boolean isOperationRunning() {
     if(!isRPCRunning()) {
       return false;
     }
@@ -212,7 +212,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
   }
 
   @Override
-  public void markComplete(String status) {
+  public synchronized void markComplete(String status) {
     super.markComplete(status);
     this.params = null;
     this.packet = null;
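
These getters gain synchronized because markComplete() clears several fields as one unit under the lock; an unsynchronized reader could otherwise observe the task half-reset. In miniature, with a hypothetical TaskStatus class:

    class TaskStatus {
      private Object params;
      private Object packet;

      synchronized void markComplete() {
        // Cleared together under the lock.
        params = null;
        packet = null;
      }

      synchronized boolean isOperationRunning() {
        // Taking the same lock guarantees a consistent view: both fields set
        // or both cleared, never a mixture.
        return params != null && packet != null;
      }
    }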

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java
index c015b24..3cf5d25 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java
@@ -21,6 +21,9 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+  justification="FindBugs seems confused; says globalLimiter and lastUpdate " +
+  "are mostly synchronized...but to me it looks like they are totally synchronized")
 public class QuotaState {
   private long lastUpdate = 0;
   private long lastQuery = 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java
index 45089e8..3b407bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java
@@ -38,6 +38,9 @@ import com.google.common.annotations.VisibleForTesting;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+  justification="FindBugs seems confused; says limit and tlimit " +
+  "are mostly synchronized...but to me it looks like they are totally synchronized")
 public abstract class RateLimiter {
   public static final String QUOTA_RATE_LIMITER_CONF_KEY = "hbase.quota.rate.limiter";
   private long tunit = 1000;           // Timeunit factor for translating to ms.
@@ -66,7 +69,7 @@ public abstract class RateLimiter {
    * @param limit The max value available resource units can be refilled to.
    * @param timeUnit Timeunit factor for translating to ms.
    */
-  public void set(final long limit, final TimeUnit timeUnit) {
+  public synchronized void set(final long limit, final TimeUnit timeUnit) {
     switch (timeUnit) {
     case MILLISECONDS:
       tunit = 1;
@@ -92,10 +95,11 @@ public abstract class RateLimiter {
 
   public String toString() {
     String rateLimiter = this.getClass().getSimpleName();
-    if (limit == Long.MAX_VALUE) {
+    if (getLimit() == Long.MAX_VALUE) {
       return rateLimiter + "(Bypass)";
     }
-    return rateLimiter + "(avail=" + avail + " limit=" + limit + " tunit=" + tunit + ")";
+    return rateLimiter + "(avail=" + getAvailable() + " limit=" + getLimit() +
+        " tunit=" + getTimeUnitInMillis() + ")";
   }
 
   /**
@@ -113,7 +117,7 @@ public abstract class RateLimiter {
   }
 
   public synchronized boolean isBypass() {
-    return limit == Long.MAX_VALUE;
+    return getLimit() == Long.MAX_VALUE;
   }
 
   public synchronized long getLimit() {
@@ -124,7 +128,7 @@ public abstract class RateLimiter {
     return avail;
   }
 
-  protected long getTimeUnitInMillis() {
+  protected synchronized long getTimeUnitInMillis() {
     return tunit;
   }
 
@@ -188,7 +192,7 @@ public abstract class RateLimiter {
    */
   public synchronized long waitInterval(final long amount) {
     // TODO Handle over quota?
-    return (amount <= avail) ? 0 : getWaitInterval(limit, avail, amount);
+    return (amount <= avail) ? 0 : getWaitInterval(getLimit(), avail, amount);
   }
 
   // These two method are for strictly testing purpose only
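
The change here routes even the unsynchronized methods (toString in particular) through the synchronized getters, so every access to limit, avail, and tunit happens under the same lock, narrowing what the class-level IS2_INCONSISTENT_SYNC suppression has to excuse. The discipline in a compact, hypothetical SimpleLimiter:

    public class SimpleLimiter {
      private long limit = Long.MAX_VALUE;
      private long avail = Long.MAX_VALUE;

      public synchronized void set(long newLimit) {
        limit = newLimit;
        avail = Math.min(avail, newLimit);
      }

      public synchronized long getLimit() { return limit; }

      public synchronized long getAvailable() { return avail; }

      @Override
      public String toString() {
        // toString itself is unsynchronized, but it reaches the fields only
        // through the synchronized getters, so all visible accesses are locked.
        return "SimpleLimiter(avail=" + getAvailable() + " limit=" + getLimit() + ")";
      }
    }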

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java
index eb201cb..e6e45e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java
@@ -27,6 +27,9 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+  justification="FindBugs seems confused; says bypassGlobals, namepaceLimiters, and " +
+    "tableLimiters are mostly synchronized...but to me it looks like they are totally synchronized")
 public class UserQuotaState extends QuotaState {
   private Map<String, QuotaLimiter> namespaceLimiters = null;
   private Map<TableName, QuotaLimiter> tableLimiters = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index cfd057a..6862410 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -389,7 +389,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     // Set when a flush has been requested.
     volatile boolean flushRequested = false;
     // Number of compactions running.
-    volatile int compacting = 0;
+    AtomicInteger compacting = new AtomicInteger(0);
     // Gets set in close. If set, cannot compact or flush again.
     volatile boolean writesEnabled = true;
     // Set if region is read-only
@@ -823,7 +823,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
     this.writestate.flushRequested = false;
-    this.writestate.compacting = 0;
+    this.writestate.compacting.set(0);
 
     if (this.writestate.writesEnabled) {
       // Remove temporary data left over from old regions
@@ -1367,6 +1367,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     this.closing.set(closing);
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK_EXCEPTION_PATH",
+      justification="I think FindBugs is confused")
   private Map<byte[], List<StoreFile>> doClose(final boolean abort, MonitoredTask status)
       throws IOException {
     if (isClosed()) {
@@ -1404,7 +1406,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
 
     // block waiting for the lock for closing
-    lock.writeLock().lock();
+    lock.writeLock().lock(); // FindBugs: Complains UL_UNRELEASED_LOCK_EXCEPTION_PATH but seems fine
     this.closing.set(true);
     status.setStatus("Disabling writes for close");
     try {
@@ -1533,7 +1535,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       }
       boolean interrupted = false;
       try {
-        while (writestate.compacting > 0 || writestate.flushing) {
+        while (writestate.compacting.get() > 0 || writestate.flushing) {
           LOG.debug("waiting for " + writestate.compacting + " compactions"
             + (writestate.flushing ? " & cache flush" : "") + " to complete for region " + this);
           try {
@@ -1818,7 +1820,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         synchronized (writestate) {
           if (writestate.writesEnabled) {
             wasStateSet = true;
-            ++writestate.compacting;
+            writestate.compacting.incrementAndGet();
           } else {
             String msg = "NOT compacting region " + this + ". Writes disabled.";
             LOG.info(msg);
@@ -1844,8 +1846,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       } finally {
         if (wasStateSet) {
           synchronized (writestate) {
-            --writestate.compacting;
-            if (writestate.compacting <= 0) {
+            writestate.compacting.decrementAndGet();
+            if (writestate.compacting.get() <= 0) {
               writestate.notifyAll();
             }
           }
@@ -2092,6 +2094,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DLS_DEAD_LOCAL_STORE",
+      justification="FindBugs seems confused about trxId")
   protected PrepareFlushResult internalPrepareFlushCache(final WAL wal, final long myseqid,
       final Collection<Store> storesToFlush, MonitoredTask status, boolean writeFlushWalMarker)
   throws IOException {
@@ -2323,6 +2327,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return false;
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY",
+      justification="Intentional; notify is about completed flush")
   protected FlushResult internalFlushCacheAndCommit(
         final WAL wal, MonitoredTask status, final PrepareFlushResult prepareResult,
         final Collection<Store> storesToFlush)
@@ -4392,6 +4398,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
   @VisibleForTesting
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY",
+    justification="Intentional; post memstore flush")
   void replayWALFlushCommitMarker(FlushDescriptor flush) throws IOException {
     MonitoredTask status = TaskMonitor.get().createStatus("Committing flush " + this);
 
@@ -4628,6 +4636,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return prepareFlushResult;
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY",
+      justification="Intentional; cleared the memstore")
   void replayWALRegionEventMarker(RegionEventDescriptor regionEvent) throws IOException {
     checkTargetRegion(regionEvent.getEncodedRegionName().toByteArray(),
       "RegionEvent marker from WAL ", regionEvent);
@@ -4858,6 +4868,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return refreshStoreFiles(false);
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY",
+      justification="Notify is about post replay. Intentional")
   protected boolean refreshStoreFiles(boolean force) throws IOException {
     if (!force && ServerRegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) {
       return false; // if primary nothing to do
@@ -7852,6 +7864,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
   @Override
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH",
+    justification="Intentional")
   public void startRegionOperation(Operation op) throws IOException {
     switch (op) {
     case GET:  // read operations
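
The writestate.compacting conversion is the most substantive fix in this file: ++ and -- on a volatile int are non-atomic read-modify-write sequences, so two compactions finishing together could lose an update. AtomicInteger makes each step atomic while the existing synchronized/notifyAll handshake is kept. A stripped-down sketch of the resulting shape (hypothetical WriteState mirroring the fields above):

    import java.util.concurrent.atomic.AtomicInteger;

    class WriteState {
      final AtomicInteger compacting = new AtomicInteger(0);

      void compactionStarted() {
        compacting.incrementAndGet(); // atomic replacement for ++compacting
      }

      synchronized void compactionFinished() {
        if (compacting.decrementAndGet() <= 0) {
          notifyAll(); // wake threads waiting for compactions to drain
        }
      }

      synchronized void waitForCompactions() throws InterruptedException {
        while (compacting.get() > 0) {
          wait();
        }
      }
    }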

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2aadea2..68c5ddf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1962,7 +1962,6 @@ public class HRegionServer extends HasThread implements
   public boolean reportRegionStateTransition(final RegionStateTransitionContext context) {
     TransitionCode code = context.getCode();
     long openSeqNum = context.getOpenSeqNum();
-    long masterSystemTime = context.getMasterSystemTime();
     HRegionInfo[] hris = context.getHris();
 
     ReportRegionStateTransitionRequest.Builder builder =

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
index b68868e..8975ac7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
@@ -189,7 +189,7 @@ implements HeapSize, Map<K,V> {
    *
    * @return currently available bytes
    */
-  public long getMemFree() {
+  public synchronized long getMemFree() {
     return memFree;
   }
 
@@ -208,7 +208,7 @@ implements HeapSize, Map<K,V> {
    * @return currently used memory in bytes
    */
   public long getMemUsed() {
-    return (memTotal - memFree); // FindBugs IS2_INCONSISTENT_SYNC
+    return (memTotal - getMemFree()); // FindBugs IS2_INCONSISTENT_SYNC
   }
 
   /**
@@ -227,7 +227,7 @@ implements HeapSize, Map<K,V> {
    *
    * @return number of misses
    */
-  public long getMissCount() {
+  public synchronized long getMissCount() {
     return missCount; // FindBugs IS2_INCONSISTENT_SYNC
   }
 
@@ -239,7 +239,7 @@ implements HeapSize, Map<K,V> {
    */
   public double getHitRatio() {
     return (double)((double)hitCount/
-      ((double)(hitCount+missCount)));
+      ((double)(hitCount + getMissCount())));
   }
 
   /**
@@ -269,7 +269,7 @@ implements HeapSize, Map<K,V> {
    * @return memory usage of map in bytes
    */
   public long heapSize() {
-    return (memTotal - memFree);
+    return (memTotal - getMemFree());
   }
 
   //--------------------------------------------------------------------------
@@ -824,6 +824,8 @@ implements HeapSize, Map<K,V> {
    *
    * @return Set of entries in hash
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+      justification="Unused debugging function that reads only")
   public Set<Entry<K,V>> entryTableSet() {
     Set<Entry<K,V>> entrySet = new HashSet<Entry<K,V>>();
     Entry [] table = entries; // FindBugs IS2_INCONSISTENT_SYNC

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
index 0566dca..6285060 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
@@ -60,7 +60,7 @@ public class MemStoreChunkPool {
   final static float POOL_INITIAL_SIZE_DEFAULT = 0.0f;
 
   // Static reference to the MemStoreChunkPool
-  private static MemStoreChunkPool globalInstance;
+  private static MemStoreChunkPool GLOBAL_INSTANCE;
   /** Boolean whether we have disabled the memstore chunk pool entirely. */
   static boolean chunkPoolDisabled = false;
 
@@ -179,12 +179,14 @@ public class MemStoreChunkPool {
    * @param conf
    * @return the global MemStoreChunkPool instance
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DC_DOUBLECHECK",
+      justification="Intentional")
   static MemStoreChunkPool getPool(Configuration conf) {
-    if (globalInstance != null) return globalInstance;
+    if (GLOBAL_INSTANCE != null) return GLOBAL_INSTANCE;
 
     synchronized (MemStoreChunkPool.class) {
       if (chunkPoolDisabled) return null;
-      if (globalInstance != null) return globalInstance;
+      if (GLOBAL_INSTANCE != null) return GLOBAL_INSTANCE;
       float poolSizePercentage = conf.getFloat(CHUNK_POOL_MAXSIZE_KEY, POOL_MAX_SIZE_DEFAULT);
       if (poolSizePercentage <= 0) {
         chunkPoolDisabled = true;
@@ -210,8 +212,8 @@ public class MemStoreChunkPool {
       int initialCount = (int) (initialCountPercentage * maxCount);
       LOG.info("Allocating MemStoreChunkPool with chunk size " + StringUtils.byteDesc(chunkSize)
           + ", max count " + maxCount + ", initial count " + initialCount);
-      globalInstance = new MemStoreChunkPool(conf, chunkSize, maxCount, initialCount);
-      return globalInstance;
+      GLOBAL_INSTANCE = new MemStoreChunkPool(conf, chunkSize, maxCount, initialCount);
+      return GLOBAL_INSTANCE;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index e952372..814370c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -277,7 +277,7 @@ public class RegionCoprocessorHost
               continue;
             }
             int priority = matcher.group(3).trim().isEmpty() ?
-                Coprocessor.PRIORITY_USER : Integer.valueOf(matcher.group(3));
+                Coprocessor.PRIORITY_USER : Integer.parseInt(matcher.group(3));
             String cfgSpec = null;
             try {
               cfgSpec = matcher.group(4);
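
The valueOf-to-parseInt switch avoids parsing into a boxed Integer that is immediately auto-unboxed back to an int. The parsed result is identical; only the needless allocation round trip goes away:

    public class ParseDemo {
      public static void main(String[] args) {
        int boxed = Integer.valueOf("30");   // parses, boxes, then unboxes
        int direct = Integer.parseInt("30"); // parses straight to the primitive
        System.out.println(boxed == direct); // true
      }
    }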

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
index de0ae7e..50072c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
@@ -278,6 +278,8 @@ public class RegionServerCoprocessorHost extends
 
     private RegionServerServices regionServerServices;
 
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BC_UNCONFIRMED_CAST",
+        justification="Intentional; FB has trouble detecting isAssignableFrom")
     public RegionServerEnvironment(final Class<?> implClass,
         final Coprocessor impl, final int priority, final int seq,
         final Configuration conf, final RegionServerServices services) {
@@ -285,7 +287,7 @@ public class RegionServerCoprocessorHost extends
       this.regionServerServices = services;
       for (Object itf : ClassUtils.getAllInterfaces(implClass)) {
         Class<?> c = (Class<?>) itf;
-        if (SingletonCoprocessorService.class.isAssignableFrom(c)) {
+        if (SingletonCoprocessorService.class.isAssignableFrom(c)) {// FindBugs: BC_UNCONFIRMED_CAST
           this.regionServerServices.registerService(
             ((SingletonCoprocessorService) impl).getService());
           break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 033f895..46a64f2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -570,6 +570,8 @@ public class StoreFile {
     return sb.toString();
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG",
+      justification="Will not overflow")
   public static class WriterBuilder {
     private final Configuration conf;
     private final CacheConfig cacheConf;
@@ -582,7 +584,6 @@ public class StoreFile {
     private Path filePath;
     private InetSocketAddress[] favoredNodes;
     private HFileContext fileContext;
-    private boolean shouldDropCacheBehind = false;
 
     public WriterBuilder(Configuration conf, CacheConfig cacheConf,
         FileSystem fs) {
@@ -650,8 +651,8 @@ public class StoreFile {
       return this;
     }
 
-    public WriterBuilder withShouldDropCacheBehind(boolean shouldDropCacheBehind) {
-      this.shouldDropCacheBehind = shouldDropCacheBehind;
+    public WriterBuilder withShouldDropCacheBehind(boolean shouldDropCacheBehind/*NOT USED!!*/) {
+      // TODO: HAS NO EFFECT!!! FIX!!
       return this;
     }
     /**
@@ -757,9 +758,6 @@ public class StoreFile {
     private Cell lastDeleteFamilyCell = null;
     private long deleteFamilyCnt = 0;
 
-    /** Bytes per Checksum */
-    protected int bytesPerChecksum;
-
     TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
     /* isTimeRangeTrackerSet keeps track if the timeRange has already been set
      * When flushing a memstore, we set TimeRange and use this variable to

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
index 0044634..beadde6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
@@ -91,6 +91,8 @@ public class TimeRangeTracker implements Writable {
    * If required, update the current TimestampRange to include timestamp
    * @param timestamp the timestamp value to include
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MT_CORRECTNESS",
+      justification="Intentional")
   void includeTimestamp(final long timestamp) {
     // Do test outside of synchronization block.  Synchronization in here can be problematic
     // when many threads writing one Store -- they can all pile up trying to add in here.
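
includeTimestamp() tests outside the synchronization block on purpose, as its comment explains, and the MT_CORRECTNESS suppression records that the racy pre-check is accepted in exchange for less lock pile-up on hot write paths. The idiom with a hypothetical RangeTracker; note that the unlocked read of a long can be stale or even torn, which is precisely the hazard being waved through:

    class RangeTracker {
      private long minimum = Long.MAX_VALUE;
      private long maximum = Long.MIN_VALUE;

      void include(long ts) {
        // Unsynchronized pre-check: most timestamps already fall inside the
        // current range, so most writers skip the lock entirely.
        if (ts < minimum || ts > maximum) {
          synchronized (this) {
            // Re-check under the lock before updating.
            if (ts < minimum) {
              minimum = ts;
            }
            if (ts > maximum) {
              maximum = ts;
            }
          }
        }
      }
    }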

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
index 27f019a..7f2d2f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
@@ -43,18 +43,20 @@ import org.apache.zookeeper.KeeperException.SessionExpiredException;
  * target cluster is an HBase cluster.
  */
 @InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MT_CORRECTNESS",
+  justification="Thinks zkw needs to be synchronized access but should be fine as is.")
 public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
   implements Abortable {
 
   private static final Log LOG = LogFactory.getLog(HBaseReplicationEndpoint.class);
 
-  private ZooKeeperWatcher zkw = null;
+  private ZooKeeperWatcher zkw = null; // FindBugs: MT_CORRECTNESS
 
   private List<ServerName> regionServers = new ArrayList<ServerName>(0);
-  private volatile long lastRegionServerUpdate;
+  private long lastRegionServerUpdate;
 
   protected void disconnect() {
-    if (zkw != null){
+    if (zkw != null) {
       zkw.close();
     }
   }
@@ -181,7 +183,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
    * Set the list of region servers for that peer
    * @param regionServers list of addresses for the region servers
    */
-  public void setRegionServers(List<ServerName> regionServers) {
+  public synchronized void setRegionServers(List<ServerName> regionServers) {
     this.regionServers = regionServers;
     lastRegionServerUpdate = System.currentTimeMillis();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index db3b87d..5a3c51d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -116,7 +116,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
    * Skips the entries which has original seqId. Only entries persisted via distributed log replay
    * have their original seq Id fields set.
    */
-  private class SkipReplayedEditsFilter extends BaseWALEntryFilter {
+  private static class SkipReplayedEditsFilter extends BaseWALEntryFilter {
     @Override
     public Entry filter(Entry entry) {
       // if orig seq id is set, skip replaying the entry
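
Marking SkipReplayedEditsFilter static drops the hidden reference every non-static inner class keeps to its enclosing instance; the filter never touches the endpoint's state, so the reference was pure overhead that also kept the outer object reachable. In miniature, with hypothetical names:

    public class Endpoint {
      // 'static' removes the implicit Endpoint.this reference, shrinking each
      // filter instance and letting the outer object be collected independently.
      private static class SkipFilter {
        Long filter(Long seqId) {
          // Uses only its own parameter; nothing from Endpoint is needed.
          return (seqId != null && seqId.longValue() >= 0) ? null : seqId;
        }
      }
    }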

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4bcaa3f/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
index dc6b151..ab27cfc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
@@ -279,7 +279,6 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService
             Configuration conf = env.getConfiguration();
             fs = FileSystem.get(conf);
             for(Pair<byte[], String> el: familyPaths) {
-              Path p = new Path(el.getSecond());
               Path stageFamily = new Path(bulkToken, Bytes.toString(el.getFirst()));
               if(!fs.exists(stageFamily)) {
                 fs.mkdirs(stageFamily);

