From: apurtell@apache.org
To: commits@hbase.apache.org
Date: Sat, 18 Nov 2017 01:22:51 -0000
Subject: [23/30] hbase git commit: HBASE-19239 Fix findbugs and error-prone issues

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 1739553..1e69dbf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -41,6 +41,7 @@
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.Future;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -139,8 +140,8 @@ public class HStore implements Store {
   volatile boolean forceMajor = false;
   /* how many bytes to write between status checks */
   static int closeCheckInterval = 0;
-  private volatile long storeSize = 0L;
-  private volatile long totalUncompressedBytes = 0L;
+  private AtomicLong storeSize = new AtomicLong();
+  private AtomicLong totalUncompressedBytes = new AtomicLong();
 
   /**
    * RWLock for store operations.
@@ -200,13 +201,13 @@ public class HStore implements Store {
 
   private Encryption.Context cryptoContext = Encryption.Context.NONE;
 
-  private volatile long flushedCellsCount = 0;
-  private volatile long compactedCellsCount = 0;
-  private volatile long majorCompactedCellsCount = 0;
-  private volatile long flushedCellsSize = 0;
-  private volatile long flushedOutputFileSize = 0;
-  private volatile long compactedCellsSize = 0;
-  private volatile long majorCompactedCellsSize = 0;
+  private AtomicLong flushedCellsCount = new AtomicLong();
+  private AtomicLong compactedCellsCount = new AtomicLong();
+  private AtomicLong majorCompactedCellsCount = new AtomicLong();
+  private AtomicLong flushedCellsSize = new AtomicLong();
+  private AtomicLong flushedOutputFileSize = new AtomicLong();
+  private AtomicLong compactedCellsSize = new AtomicLong();
+  private AtomicLong majorCompactedCellsSize = new AtomicLong();
 
   /**
    * Constructor
@@ -549,8 +550,8 @@
       StoreFile storeFile = future.get();
       if (storeFile != null) {
         long length = storeFile.getReader().length();
-        this.storeSize += length;
-        this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
+        this.storeSize.addAndGet(length);
+        this.totalUncompressedBytes.addAndGet(storeFile.getReader().getTotalUncompressedBytes());
         if (LOG.isDebugEnabled()) {
           LOG.debug("loaded " + storeFile.toStringDetailed());
         }
@@ -844,8 +845,8 @@
 
   private void bulkLoadHFile(StoreFile sf) throws IOException {
     StoreFile.Reader r = sf.getReader();
-    this.storeSize += r.length();
-    this.totalUncompressedBytes += r.getTotalUncompressedBytes();
+    this.storeSize.addAndGet(r.length());
+    this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());
 
     // Append the new storefile into the list
     this.lock.writeLock().lock();
@@ -1014,8 +1015,8 @@
 
     StoreFile sf = createStoreFileAndReader(dstPath);
     StoreFile.Reader r = sf.getReader();
-    this.storeSize += r.length();
-    this.totalUncompressedBytes += r.getTotalUncompressedBytes();
+    this.storeSize.addAndGet(r.length());
+    this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());
 
     if (LOG.isInfoEnabled()) {
       LOG.info("Added " + sf + ", entries=" + r.getEntries() +
@@ -1348,11 +1349,11 @@
       writeCompactionWalRecord(filesToCompact, sfs);
       replaceStoreFiles(filesToCompact, sfs);
       if (cr.isMajor()) {
-        majorCompactedCellsCount += getCompactionProgress().totalCompactingKVs;
-        majorCompactedCellsSize += getCompactionProgress().totalCompactedSize;
+        majorCompactedCellsCount.addAndGet(getCompactionProgress().totalCompactingKVs);
+        majorCompactedCellsSize.addAndGet(getCompactionProgress().totalCompactedSize);
       } else {
-        compactedCellsCount += getCompactionProgress().totalCompactingKVs;
-        compactedCellsSize += getCompactionProgress().totalCompactedSize;
+        compactedCellsCount.addAndGet(getCompactionProgress().totalCompactingKVs);
+        compactedCellsSize.addAndGet(getCompactionProgress().totalCompactedSize);
       }
 
       for (StoreFile sf : sfs) {
@@ -1475,7 +1476,7 @@
       }
     }
     message.append("total size for store is ")
-      .append(StringUtils.TraditionalBinaryPrefix.long2String(storeSize, "", 1))
+      .append(StringUtils.TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1))
       .append(". This selection was in queue for ")
       .append(StringUtils.formatTimeDiff(compactionStartTime, cr.getSelectionTime()))
       .append(", and took ").append(StringUtils.formatTimeDiff(now, compactionStartTime))
@@ -1812,7 +1813,7 @@
     completeCompaction(delSfs);
     LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in "
         + this + " of " + this.getRegionInfo().getRegionNameAsString()
-        + "; total size for store is " + TraditionalBinaryPrefix.long2String(storeSize, "", 1));
+        + "; total size for store is " + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1));
   }
 
   @Override
@@ -1892,16 +1893,16 @@
   protected void completeCompaction(final Collection compactedFiles, boolean removeFiles)
       throws IOException {
     LOG.debug("Completing compaction...");
-    this.storeSize = 0L;
-    this.totalUncompressedBytes = 0L;
+    this.storeSize.set(0L);
+    this.totalUncompressedBytes.set(0L);
     for (StoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) {
       StoreFile.Reader r = hsf.getReader();
       if (r == null) {
         LOG.warn("StoreFile " + hsf + " has a null Reader");
         continue;
       }
-      this.storeSize += r.length();
-      this.totalUncompressedBytes += r.getTotalUncompressedBytes();
+      this.storeSize.addAndGet(r.length());
+      this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());
     }
   }
@@ -2147,7 +2148,7 @@
 
   @Override
   public long getSize() {
-    return storeSize;
+    return storeSize.get();
   }
 
   @Override
@@ -2275,7 +2276,7 @@
 
   @Override
   public long getStoreSizeUncompressed() {
-    return this.totalUncompressedBytes;
+    return this.totalUncompressedBytes.get();
   }
 
   @Override
@@ -2491,9 +2492,9 @@
         committedFiles.add(sf.getPath());
       }
 
-      HStore.this.flushedCellsCount += cacheFlushCount;
-      HStore.this.flushedCellsSize += cacheFlushSize;
-      HStore.this.flushedOutputFileSize += outputFileSize;
+      HStore.this.flushedCellsCount.addAndGet(cacheFlushCount);
+      HStore.this.flushedCellsSize.addAndGet(cacheFlushSize);
+      HStore.this.flushedOutputFileSize.addAndGet(outputFileSize);
 
       // Add new file to store files. Clear snapshot too while we have the Store write lock.
       return HStore.this.updateStorefiles(storeFiles, snapshot.getId());
@@ -2526,8 +2527,9 @@
         StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file);
         StoreFile storeFile = createStoreFileAndReader(storeFileInfo);
         storeFiles.add(storeFile);
-        HStore.this.storeSize += storeFile.getReader().length();
-        HStore.this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
+        HStore.this.storeSize.addAndGet(storeFile.getReader().length());
+        HStore.this.totalUncompressedBytes.addAndGet(
+            storeFile.getReader().getTotalUncompressedBytes());
         if (LOG.isInfoEnabled()) {
           LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() +
             " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() +
@@ -2567,7 +2569,7 @@
   }
 
   public static final long FIXED_OVERHEAD =
-      ClassSize.align(ClassSize.OBJECT + (17 * ClassSize.REFERENCE) + (11 * Bytes.SIZEOF_LONG)
+      ClassSize.align(ClassSize.OBJECT + (26 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG)
           + (5 * Bytes.SIZEOF_INT) + (2 * Bytes.SIZEOF_BOOLEAN));
 
   public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD
@@ -2606,37 +2608,37 @@
 
   @Override
   public long getFlushedCellsCount() {
-    return flushedCellsCount;
+    return flushedCellsCount.get();
   }
 
   @Override
   public long getFlushedCellsSize() {
-    return flushedCellsSize;
+    return flushedCellsSize.get();
   }
 
   @Override
   public long getFlushedOutputFileSize() {
-    return flushedOutputFileSize;
+    return flushedOutputFileSize.get();
   }
 
   @Override
   public long getCompactedCellsCount() {
-    return compactedCellsCount;
+    return compactedCellsCount.get();
   }
 
   @Override
   public long getCompactedCellsSize() {
-    return compactedCellsSize;
+    return compactedCellsSize.get();
   }
 
   @Override
   public long getMajorCompactedCellsCount() {
-    return majorCompactedCellsCount;
+    return majorCompactedCellsCount.get();
   }
 
   @Override
   public long getMajorCompactedCellsSize() {
-    return majorCompactedCellsSize;
+    return majorCompactedCellsSize.get();
   }
 
   /**
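The storeSize/totalUncompressedBytes change above is the heart of the findbugs complaint: `count += n` on a volatile long is a non-atomic read-modify-write, so concurrent flush and compaction threads can lose increments; volatile gives visibility, not atomicity. A minimal standalone sketch of the hazard (hypothetical demo class, not part of this patch):

    import java.util.concurrent.atomic.AtomicLong;

    public class LostUpdateDemo {
      static volatile long volatileCounter;                      // visible, but += is not atomic
      static final AtomicLong atomicCounter = new AtomicLong();  // atomic read-modify-write

      public static void main(String[] args) throws InterruptedException {
        Runnable task = () -> {
          for (int i = 0; i < 100_000; i++) {
            volatileCounter += 1;        // three steps: read, add, write; updates can be lost
            atomicCounter.addAndGet(1);  // single atomic operation
          }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start(); t2.start();
        t1.join(); t2.join();
        // volatileCounter typically prints less than 200000; atomicCounter is always 200000
        System.out.println("volatile=" + volatileCounter + " atomic=" + atomicCounter.get());
      }
    }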
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
index ff68110..61bf0c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
@@ -91,7 +91,7 @@ public class IncreasingToUpperBoundRegionSplitPolicy extends ConstantSizeRegionSplitPolicy
       }
     }
 
-    return foundABigStore | force;
+    return foundABigStore || force;
   }
 
   /**
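For boolean operands `|` and `||` produce the same value, so this hunk is behavior-preserving; the difference is that `|` always evaluates its right-hand side while `||` short-circuits, which is almost always what a return statement like this intends, and is why error-prone flags the non-short-circuit form. A small sketch (hypothetical method names):

    public class ShortCircuitDemo {
      static boolean expensiveCheck() {
        System.out.println("expensiveCheck ran");
        return true;
      }

      public static void main(String[] args) {
        boolean force = true;
        boolean eager = force | expensiveCheck();   // right side always runs
        boolean lazy  = force || expensiveCheck();  // right side skipped when force is true
        System.out.println(eager + " " + lazy);     // both print true
      }
    }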
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
index a255b85..2a94182 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
@@ -65,5 +65,6 @@ public interface InternalScanner extends Closeable {
    * Closes the scanner and releases any resources it has allocated
    * @throws IOException
    */
+  @Override
   void close() throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
index 30dc2c1..c0ba844 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
@@ -97,6 +97,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
     }
   }
 
+  @Override
   public Cell peek() {
     if (this.current == null) {
       return null;
@@ -104,6 +105,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
     return this.current.peek();
   }
 
+  @Override
   public Cell next() throws IOException {
     if(this.current == null) {
       return null;
@@ -180,6 +182,8 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
     public KVScannerComparator(KVComparator kvComparator) {
       this.kvComparator = kvComparator;
     }
+
+    @Override
     public int compare(KeyValueScanner left, KeyValueScanner right) {
       int comparison = compare(left.peek(), right.peek());
       if (comparison != 0) {
@@ -208,6 +212,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
     }
   }
 
+  @Override
   public void close() {
     if (this.current != null) {
       this.current.close();
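The @Override annotations added in this and the neighboring files are not cosmetic: they make the compiler verify that a method really overrides or implements something, so a signature typo becomes a compile error instead of a silently unused method. A sketch of the failure mode they guard against (hypothetical names):

    interface Scanner {
      void close();
    }

    class LeakyScanner implements Scanner {
      // Typo: 'cloes' compiles fine without @Override and is silently never called.
      public void cloes() { }

      // With @Override the same typo would be rejected:
      // @Override public void cloes() { }   // error: method does not override or implement
      @Override
      public void close() { }
    }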
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
index eb8bd06..02ebd97 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
@@ -288,11 +288,13 @@ public class Leases extends HasThread {
       return this.leaseName.hashCode();
     }
 
+    @Override
     public long getDelay(TimeUnit unit) {
       return unit.convert(this.expirationTime - EnvironmentEdgeManager.currentTime(),
           TimeUnit.MILLISECONDS);
     }
 
+    @Override
     public int compareTo(Delayed o) {
       long delta = this.getDelay(TimeUnit.MILLISECONDS) -
           o.getDelay(TimeUnit.MILLISECONDS);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
index 0e5f284..246c02c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
@@ -37,10 +37,6 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.HasThread;
 
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.ReentrantLock;
-
 import com.google.common.annotations.VisibleForTesting;
 
 /**
@@ -54,6 +50,8 @@ import com.google.common.annotations.VisibleForTesting;
  */
 @InterfaceAudience.Private
 @VisibleForTesting
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="JLM_JSR166_UTILCONCURRENT_MONITORENTER",
+    justification="Use of an atomic type both as monitor and condition variable is intended")
 public class LogRoller extends HasThread {
   private static final Log LOG = LogFactory.getLog(LogRoller.class);
   private final ReentrantLock rollLock = new ReentrantLock();
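JLM_JSR166_UTILCONCURRENT_MONITORENTER fires when code synchronizes on a java.util.concurrent atomic, because that usually signals confusion between locking and atomic updates. LogRoller does it deliberately, using the atomic both as a flag and as the monitor it waits on, so the warning is suppressed rather than "fixed". Roughly the pattern being blessed, in a simplified sketch (not the actual LogRoller code):

    import java.util.concurrent.atomic.AtomicBoolean;

    class RollSignal {
      private final AtomicBoolean rollLog = new AtomicBoolean(false);

      void requestRoll() {
        synchronized (rollLog) {        // the atomic doubles as the monitor...
          rollLog.set(true);
          rollLog.notifyAll();          // ...and as the condition being signalled
        }
      }

      void awaitRollRequest() throws InterruptedException {
        synchronized (rollLog) {
          while (!rollLog.get()) {
            rollLog.wait();
          }
          rollLog.set(false);
        }
      }
    }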
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 3bca175..2da782e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -29,6 +29,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.concurrent.BlockingQueue;
@@ -700,8 +701,13 @@ class MemStoreFlusher implements FlushRequester {
     }
 
     @Override
+    public int hashCode() {
+      return System.identityHashCode(this);
+    }
+
+    @Override
     public boolean equals(Object obj) {
-      return (this == obj);
+      return Objects.equals(this, obj);
     }
   }
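The point of this hunk is the equals/hashCode contract: findbugs flags classes that define one without the other. Since this entry type compares by identity, the matching hashCode is the identity hash, so equal objects are guaranteed to hash alike. In sketch form (hypothetical class, with the identity comparison written directly):

    final class QueueEntry {
      // Identity semantics: two entries are equal only if they are the same object.
      @Override
      public boolean equals(Object obj) {
        return this == obj;
      }

      // equals() and hashCode() must agree, so identity equality is paired with
      // the identity hash rather than any field-based hash.
      @Override
      public int hashCode() {
        return System.identityHashCode(this);
      }
    }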
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
index 57d6356..cc68b03 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
@@ -76,7 +76,7 @@ public class MultiVersionConcurrencyControl {
     while (true) {
       long seqId = this.getWritePoint();
       if (seqId >= newStartPoint) break;
-      if (this.tryAdvanceTo(/* newSeqId = */ newStartPoint, /* expected = */ seqId)) break;
+      if (this.tryAdvanceTo(newStartPoint, seqId)) break;
     }
   }
 
@@ -245,6 +245,7 @@ public class MultiVersionConcurrencyControl {
   }
 
   @VisibleForTesting
+  @Override
   public String toString() {
     return Objects.toStringHelper(this)
         .add("readPoint", readPoint)

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 266b4f3..597c665 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -1057,10 +1057,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       Class rpcSchedulerFactoryClass = rs.conf.getClass(
           REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
           SimpleRpcSchedulerFactory.class);
-      rpcSchedulerFactory = ((RpcSchedulerFactory) rpcSchedulerFactoryClass.newInstance());
-    } catch (InstantiationException e) {
-      throw new IllegalArgumentException(e);
-    } catch (IllegalAccessException e) {
+      rpcSchedulerFactory = (RpcSchedulerFactory)
+          rpcSchedulerFactoryClass.getDeclaredConstructor().newInstance();
+    } catch (Exception e) {
       throw new IllegalArgumentException(e);
     }
     // Server to handle client requests.
@@ -1651,6 +1650,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
    */
   @Override
   @QosPriority(priority=HConstants.ADMIN_QOS)
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+      value="JLM_JSR166_UTILCONCURRENT_MONITORENTER",
+      justification="We double up use of an atomic both as monitor and condition variable")
   public OpenRegionResponse openRegion(final RpcController controller,
       final OpenRegionRequest request) throws ServiceException {
     requestCount.increment();
@@ -2581,7 +2583,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     private static final long serialVersionUID = -4305297078988180130L;
 
     @Override
-    public Throwable fillInStackTrace() {
+    public synchronized Throwable fillInStackTrace() {
       return this;
     }
   };
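Class.newInstance() is discouraged (and deprecated in later JDKs) because it propagates any exception the no-arg constructor throws, including checked ones, without wrapping; getDeclaredConstructor().newInstance() surfaces constructor failures as InvocationTargetException instead, which is the swap this hunk and several below make. A minimal sketch of the replacement pattern (hypothetical factory, not the HBase call site):

    import java.lang.reflect.InvocationTargetException;

    public class ReflectiveFactory {
      public static <T> T create(Class<T> clazz) {
        try {
          // Preferred: constructor failures arrive wrapped in InvocationTargetException
          return clazz.getDeclaredConstructor().newInstance();
        } catch (NoSuchMethodException | InstantiationException
            | IllegalAccessException | InvocationTargetException e) {
          throw new IllegalArgumentException("cannot instantiate " + clazz, e);
        }
      }

      public static void main(String[] args) {
        StringBuilder sb = create(StringBuilder.class);
        System.out.println(sb.append("ok"));
      }
    }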
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index dbe8521..f29397d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -71,7 +71,6 @@ import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.metrics.MetricRegistry;
 import org.apache.hadoop.hbase.regionserver.DeleteTracker;
 import org.apache.hadoop.hbase.regionserver.Region.Operation;
@@ -88,11 +87,6 @@ import com.google.common.collect.Lists;
 import com.google.protobuf.Message;
 import com.google.protobuf.Service;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.protobuf.Message;
-import com.google.protobuf.Service;
-
 /**
  * Implements the coprocessor environment and runtime support for coprocessors
  * loaded within a {@link Region}.
@@ -160,6 +154,7 @@ public class RegionCoprocessorHost
       return rsServices;
     }
 
+    @Override
     public void shutdown() {
       super.shutdown();
       MetricsCoprocessor.removeRegistry(this.metricRegistry);
@@ -525,6 +520,7 @@ public class RegionCoprocessorHost
       throws IOException {
     oserver.postClose(ctx, abortRequested);
   }
+  @Override
   public void postEnvCall(RegionEnvironment env) {
     shutdown(env);
   }
@@ -1704,10 +1700,12 @@ public class RegionCoprocessorHost
     public abstract void call(RegionObserver observer,
         ObserverContext ctx) throws IOException;
 
+    @Override
     public boolean hasCall(Coprocessor observer) {
       return observer instanceof RegionObserver;
     }
 
+    @Override
     public void call(Coprocessor observer, ObserverContext ctx)
         throws IOException {
       call((RegionObserver)observer, ctx);
@@ -1724,10 +1722,12 @@ public class RegionCoprocessorHost
     public abstract void call(EndpointObserver observer,
         ObserverContext ctx) throws IOException;
 
+    @Override
     public boolean hasCall(Coprocessor observer) {
       return observer instanceof EndpointObserver;
     }
 
+    @Override
     public void call(Coprocessor observer, ObserverContext ctx)
         throws IOException {
       call((EndpointObserver)observer, ctx);
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 6831c91..77d0e35 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -425,8 +425,8 @@ public class ScannerContext {
     TIME_LIMIT_REACHED_MID_ROW(true, true),
     BATCH_LIMIT_REACHED(true, true);
 
-    private boolean moreValues;
-    private boolean limitReached;
+    private final boolean moreValues;
+    private final boolean limitReached;
 
     private NextState(boolean moreValues, boolean limitReached) {
       this.moreValues = moreValues;
@@ -482,7 +482,7 @@ public class ScannerContext {
      * limits, the checker must know their own scope (i.e. are they checking the limits between
      * rows, between cells, etc...)
      */
-    int depth;
+    final int depth;
 
     LimitScope(int depth) {
       this.depth = depth;
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
index 874acb2..1115528 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
@@ -83,9 +83,9 @@ public class SplitLogWorker implements Runnable {
   }
 
   public SplitLogWorker(final Server hserver, final Configuration conf,
-      final RegionServerServices server, final LastSequenceId sequenceIdChecker,
+      final RegionServerServices rsServices, final LastSequenceId sequenceIdChecker,
       final WALFactory factory) {
-    this(server, conf, server, new TaskExecutor() {
+    this(hserver, conf, rsServices, new TaskExecutor() {
       @Override
       public Status exec(String filename, RecoveryMode mode, CancelableProgressable p) {
         Path walDir;
@@ -102,7 +102,7 @@ public class SplitLogWorker implements Runnable {
         // encountered a bad non-retry-able persistent error.
         try {
           if (!WALSplitter.splitLogFile(walDir, fs.getFileStatus(new Path(walDir, filename)),
-            fs, conf, p, sequenceIdChecker, server.getCoordinatedStateManager(), mode, factory)) {
+            fs, conf, p, sequenceIdChecker, rsServices.getCoordinatedStateManager(), mode, factory)) {
             return Status.PREEMPTED;
           }
         } catch (InterruptedIOException iioe) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
index 3576478..a3eea6d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
@@ -170,6 +170,7 @@ public class SplitTransactionImpl implements SplitTransaction {
    * @return true if the region is splittable else
    * false if it is not (e.g. its already closed, etc.).
    */
+  @Override
  public boolean prepare() throws IOException {
    if (!this.parent.isSplittable()) return false;
    // Split key can be null if this region is unsplittable; i.e. has refs.
@@ -886,6 +887,7 @@ public class SplitTransactionImpl implements SplitTransaction {
       this.family = family;
     }
 
+    @Override
     public Pair call() throws IOException {
       return splitStoreFile(family, sf);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 28c0892..c27cf40 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -1045,6 +1045,7 @@ public class StoreFile {
       }
     }
 
+    @Override
     public void append(final Cell cell) throws IOException {
       appendGeneralBloomfilter(cell);
       appendDeleteFamilyBloomFilter(cell);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
index 08259de..12da6b7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
@@ -502,37 +502,49 @@ public class StoreFileInfo {
   }
 
   @Override
-  public boolean equals(Object that) {
-    if (this == that) return true;
-    if (that == null) return false;
-
-    if (!(that instanceof StoreFileInfo)) return false;
-
-    StoreFileInfo o = (StoreFileInfo)that;
-    if (initialPath != null && o.initialPath == null) return false;
-    if (initialPath == null && o.initialPath != null) return false;
-    if (initialPath != o.initialPath && initialPath != null
-        && !initialPath.equals(o.initialPath)) return false;
-
-    if (reference != null && o.reference == null) return false;
-    if (reference == null && o.reference != null) return false;
-    if (reference != o.reference && reference != null
-        && !reference.equals(o.reference)) return false;
-
-    if (link != null && o.link == null) return false;
-    if (link == null && o.link != null) return false;
-    if (link != o.link && link != null && !link.equals(o.link)) return false;
-
-    return true;
-  };
-
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((initialPath == null) ? 0 : initialPath.hashCode());
+    result = prime * result + ((link == null) ? 0 : link.hashCode());
+    result = prime * result + ((reference == null) ? 0 : reference.hashCode());
+    return result;
+  }
 
   @Override
-  public int hashCode() {
-    int hash = 17;
-    hash = hash * 31 + ((reference == null) ? 0 : reference.hashCode());
-    hash = hash * 31 + ((initialPath == null) ? 0 : initialPath.hashCode());
-    hash = hash * 31 + ((link == null) ? 0 : link.hashCode());
-    return hash;
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    StoreFileInfo other = (StoreFileInfo) obj;
+    if (initialPath == null) {
+      if (other.initialPath != null) {
+        return false;
+      }
+    } else if (!initialPath.equals(other.initialPath)) {
+      return false;
+    }
+    if (link == null) {
+      if (other.link != null) {
+        return false;
+      }
+    } else if (!link.equals(other.link)) {
+      return false;
+    }
+    if (reference == null) {
+      if (other.reference != null) {
+        return false;
+      }
+    } else if (!reference.equals(other.reference)) {
+      return false;
+    }
+    return true;
   }
+
 }
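The rewritten StoreFileInfo.equals/hashCode follow the standard IDE-generated shape: null-safe comparison of the same three fields in both methods, so equal objects always hash alike. Since Java 7 the same logic can be written more compactly with java.util.Objects; an equivalent sketch (hypothetical class, not the form the patch uses):

    import java.util.Objects;

    final class FileIdentity {
      private final String initialPath;
      private final String link;
      private final String reference;

      FileIdentity(String initialPath, String link, String reference) {
        this.initialPath = initialPath;
        this.link = link;
        this.reference = reference;
      }

      @Override
      public boolean equals(Object obj) {
        if (this == obj) {
          return true;
        }
        if (!(obj instanceof FileIdentity)) {
          return false;
        }
        FileIdentity other = (FileIdentity) obj;
        // Objects.equals is null-safe: it replaces the three-branch null dance per field.
        return Objects.equals(initialPath, other.initialPath)
            && Objects.equals(link, other.link)
            && Objects.equals(reference, other.reference);
      }

      @Override
      public int hashCode() {
        // Hashes exactly the fields equals() compares, as the contract requires.
        return Objects.hash(initialPath, link, reference);
      }
    }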
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index f5eb74f..8132365 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -150,14 +150,17 @@ public class StoreFileScanner implements KeyValueScanner {
         matcher, readPt, true);
   }
 
+  @Override
   public String toString() {
     return "StoreFileScanner[" + hfs.toString() + ", cur=" + cur + "]";
   }
 
+  @Override
   public Cell peek() {
     return cur;
   }
 
+  @Override
   public Cell next() throws IOException {
     Cell retKey = cur;
 
@@ -178,6 +181,7 @@ public class StoreFileScanner implements KeyValueScanner {
     return retKey;
   }
 
+  @Override
   public boolean seek(Cell key) throws IOException {
     if (seekCount != null) seekCount.incrementAndGet();
 
@@ -205,6 +209,7 @@ public class StoreFileScanner implements KeyValueScanner {
     }
   }
 
+  @Override
   public boolean reseek(Cell key) throws IOException {
     if (seekCount != null) seekCount.incrementAndGet();
 
@@ -263,6 +268,7 @@ public class StoreFileScanner implements KeyValueScanner {
     return true;
   }
 
+  @Override
   public void close() {
     cur = null;
     if (closed) return;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
index 9b2a56a..12cc7bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
@@ -157,11 +157,13 @@ public class TimeRangeTracker implements Writable {
     return maximumTimestamp.get();
   }
 
+  @Override
   public void write(final DataOutput out) throws IOException {
     out.writeLong(minimumTimestamp.get());
     out.writeLong(maximumTimestamp.get());
   }
 
+  @Override
   public void readFields(final DataInput in) throws IOException {
     this.minimumTimestamp.set(in.readLong());
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
index 12a84eb..9d67af5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
@@ -26,9 +26,8 @@ import com.google.common.collect.Collections2;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Objects;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -43,7 +42,7 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.LimitedPrivate({ "coprocessor" })
 @InterfaceStability.Evolving
 public class CompactionRequest implements Comparable {
-  private static final Log LOG = LogFactory.getLog(CompactionRequest.class);
+
   // was this compaction promoted to an off-peak
   private boolean isOffPeak = false;
   private enum DisplayCompactionType { MINOR, ALL_FILES, MAJOR }
@@ -142,8 +141,13 @@ public class CompactionRequest implements Comparable {
   }
 
   @Override
+  public int hashCode() {
+    return System.identityHashCode(this);
+  }
+
+  @Override
   public boolean equals(Object obj) {
-    return (this == obj);
+    return Objects.equals(this, obj);
   }
 
   public Collection getFiles() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
index 2d8772c..9e11ecf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
@@ -207,6 +207,7 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy {
    * @param filesCompacting files being scheduled to compact.
    * @return true to schedule a request.
    */
+  @Override
   public boolean needsCompaction(final Collection storeFiles,
       final List filesCompacting) {
     int numCandidates = storeFiles.size() - filesCompacting.size();

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
index 77b0af8..4e8c35a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
@@ -106,6 +106,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
    * @param filesToCompact Files to compact. Can be null.
    * @return True if we should run a major compaction.
    */
+  @Override
   public abstract boolean shouldPerformMajorCompaction(final Collection filesToCompact)
       throws IOException;
 
@@ -148,6 +149,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
    * @param compactionSize Total size of some compaction
    * @return whether this should be a large or small compaction
    */
+  @Override
   public boolean throttleCompaction(long compactionSize) {
     return compactionSize > comConf.getThrottlePoint();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
index 69786b6..d64b789 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
@@ -43,6 +43,8 @@ import org.apache.hadoop.hbase.util.ConfigUtil;
  * This is executed after receiving an OPEN RPC from the master or client.
  */
 @InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="JLM_JSR166_UTILCONCURRENT_MONITORENTER",
+    justification="Use of an atomic type both as monitor and condition variable is intended")
 public class OpenRegionHandler extends EventHandler {
   private static final Log LOG = LogFactory.getLog(OpenRegionHandler.class);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index d5cf6bb..593132f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -1323,7 +1323,7 @@ public class FSHLog implements WAL {
       rollWriterLock.unlock();
     }
     try {
-      if (lowReplication || writer != null && writer.getLength() > logrollsize) {
+      if (lowReplication || (writer != null && writer.getLength() > logrollsize)) {
         requestLogRoll(lowReplication);
       }
     } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
index 69d1c59..69c0db7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
@@ -81,6 +81,7 @@ class FSWALEntry extends Entry {
     }
   }
 
+  @Override
   public String toString() {
     return "sequence=" + this.sequence + ", " + super.toString();
   };
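The FSHLog hunk only adds parentheses: && already binds tighter than ||, so `a || b && c` parses as `a || (b && c)` and behavior is unchanged; the change answers a findbugs/error-prone style warning that mixed ||/&& without grouping is easy to misread. A sketch of the precedence:

    public class PrecedenceDemo {
      public static void main(String[] args) {
        boolean a = true, b = false, c = false;
        boolean implicit = a || b && c;    // parsed as a || (b && c) -> true
        boolean explicit = a || (b && c);  // identical result -> true
        boolean grouped  = (a || b) && c;  // the easy misreading -> false
        System.out.println(implicit + " " + explicit + " " + grouped);
      }
    }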
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 9fd171f..5643174 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -100,10 +100,12 @@ public class ProtobufLogReader extends ReaderBase {
   public long trailerSize() {
     if (trailerPresent) {
       // sizeof PB_WAL_COMPLETE_MAGIC + sizof trailerSize + trailer
-      final long calculatedSize = PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT + trailer.getSerializedSize();
+      final long calculatedSize = (long) PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT
+          + trailer.getSerializedSize();
       final long expectedSize = fileLength - walEditsStopOffset;
       if (expectedSize != calculatedSize) {
-        LOG.warn("After parsing the trailer, we expect the total footer to be "+ expectedSize +" bytes, but we calculate it as being " + calculatedSize);
+        LOG.warn("After parsing the trailer, we expect the total footer to be "+ expectedSize
+            + " bytes, but we calculate it as being " + calculatedSize);
       }
       return expectedSize;
     } else {
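The (long) cast in the trailer-size hunk moves the widening to the first operand, so the whole sum is computed in 64-bit arithmetic; without it, the int additions can overflow in 32 bits first and the wrong value is then widened. A small demonstration:

    public class WideningDemo {
      public static void main(String[] args) {
        int bigA = Integer.MAX_VALUE;
        int bigB = 1;
        long wrong = bigA + bigB;          // int addition overflows, then the bad result widens
        long right = (long) bigA + bigB;   // first operand is long, so the sum is done in 64 bits
        System.out.println(wrong + " vs " + right);  // -2147483648 vs 2147483648
      }
    }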
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
index 25c2111..deb9959 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
@@ -109,6 +109,7 @@ class RingBufferTruck {
    * Factory for making a bunch of these. Needed by the ringbuffer/disruptor.
    */
   final static EventFactory EVENT_FACTORY = new EventFactory() {
+    @Override
     public RingBufferTruck newInstance() {
       return new RingBufferTruck();
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 3278f0c..10f2e7b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -589,7 +589,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterface
       if (replicationQueueInfo.isQueueRecovered() && getWorkerState() == WorkerState.FINISHED) {
         // use synchronize to make sure one last thread will clean the queue
-        synchronized (workerThreads) {
+        synchronized (this) {
           Threads.sleep(100);// wait a short while for other worker thread to fully exit
           boolean allOtherTaskDone = true;
           for (ReplicationSourceShipperThread worker : workerThreads.values()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
index 8ce1437..8660bd2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
@@ -334,6 +334,7 @@ public class AuthenticationTokenSecretManager
       interrupt();
     }
 
+    @Override
     public void run() {
       zkLeader.start();
       zkLeader.waitToBecomeLeader();

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java
index db3caff..4a34f5b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.security.visibility;
 
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Stack;
@@ -103,7 +104,8 @@ public class ExpressionParser {
         }
         index++;
       } while (index < endPos && !isEndOfLabel(exp[index]));
-      leafExp = new String(exp, labelOffset, index - labelOffset).trim();
+      leafExp = new String(exp, labelOffset, index - labelOffset,
+          StandardCharsets.UTF_8).trim();
       if (leafExp.isEmpty()) {
         throw new ParseException("Error parsing expression " + expS + " at column : " + index);
       }
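Both the ExpressionParser change here and the JSONBean change further down exist because new String(byte[]) and writer construction over an OutputStream fall back to the JVM's default platform charset, so the same bytes can decode differently per machine or locale; findbugs wants the charset stated explicitly. Sketch:

    import java.nio.charset.StandardCharsets;

    public class CharsetDemo {
      public static void main(String[] args) {
        byte[] raw = {(byte) 0xE2, (byte) 0x82, (byte) 0xAC};  // UTF-8 bytes of the euro sign

        String platform = new String(raw);                         // depends on file.encoding
        String explicit = new String(raw, StandardCharsets.UTF_8); // always decodes the same way

        System.out.println(platform + " / " + explicit);  // second is always a euro sign
      }
    }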
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java
index a15669f..1ace4c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java
@@ -58,6 +58,7 @@ public class LeafExpressionNode implements ExpressionNode {
     return true;
   }
 
+  @Override
   public LeafExpressionNode deepClone() {
     LeafExpressionNode clone = new LeafExpressionNode(this.identifier);
     return clone;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java
index 4399ecc..e926045 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java
@@ -91,6 +91,7 @@ public class NonLeafExpressionNode implements ExpressionNode {
     return this.op == Operator.NOT;
   }
 
+  @Override
   public NonLeafExpressionNode deepClone() {
     NonLeafExpressionNode clone = new NonLeafExpressionNode(this.op);
     for (ExpressionNode exp : this.childExps) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java
index 1025ca9..ace34ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java
@@ -23,12 +23,13 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 public enum Operator {
   AND('&'), OR('|'), NOT('!');
 
-  private char rep;
+  private final char rep;
 
   private Operator(char rep) {
     this.rep = rep;
   }
 
+  @Override
   public String toString() {
     return String.valueOf(this.rep);
   };

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index 37a02bd..0824189 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -573,6 +573,7 @@ public class ExportSnapshot extends Configured implements Tool {
       final List> files, final int ngroups) {
     // Sort files by size, from small to big
     Collections.sort(files, new Comparator>() {
+      @Override
       public int compare(Pair a, Pair b) {
         long r = a.getSecond() - b.getSecond();
         return (r < 0) ? -1 : ((r > 0) ? 1 : 0);
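The ExportSnapshot comparator computes the difference as a long and then maps it to -1/0/1; the classic bug that shape avoids is returning the truncated difference directly, where overflow can invert the ordering. Since Java 7, Integer.compare/Long.compare do the same mapping in one call; a sketch of the trap and the safe form:

    public class CompareDemo {
      public static void main(String[] args) {
        int a = Integer.MAX_VALUE, b = -1;
        int broken = a - b;                // overflows to Integer.MIN_VALUE: claims a < b
        int safe = Integer.compare(a, b);  // 1: a > b, as expected
        System.out.println(broken + " vs " + safe);

        long x = 9_000_000_000L, y = 2;
        System.out.println(Long.compare(x, y));  // 1, the -1/0/1 mapping the patch writes by hand
      }
    }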
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index 47b3c34..75dac43 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -749,7 +749,7 @@ public class RestoreSnapshotHelper {
 
   public static void restoreSnapshotACL(SnapshotDescription snapshot, TableName newTableName,
       Configuration conf) throws IOException {
-    if (snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null) {
+    if (snapshot.hasUsersAndPermissions()) {
       LOG.info("Restore snapshot acl to table. snapshot: " + snapshot + ", table: " + newTableName);
       ListMultimap perms =
           ProtobufUtil.toUserTablePermissions(snapshot.getUsersAndPermissions());

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 06eb9ea..7e161ca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -382,7 +382,7 @@ public class FSTableDescriptors implements TableDescriptors {
     // Clean away old versions
     for (FileStatus file : status) {
       Path path = file.getPath();
-      if (file != mostCurrent) {
+      if (!file.equals(mostCurrent)) {
         if (!fs.delete(file.getPath(), false)) {
           LOG.warn("Failed cleanup of " + path);
         } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 3e2d230..5d850b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -1078,7 +1078,7 @@ public abstract class FSUtils {
   private static boolean isValidWALRootDir(Path walDir, final Configuration c)
       throws IOException {
     Path rootDir = FSUtils.getRootDir(c);
-    if (walDir != rootDir) {
+    if (!walDir.equals(rootDir)) {
       if (walDir.toString().startsWith(rootDir.toString() + "/")) {
         throw new IllegalStateException("Illegal WAL directory specified. "
             + "WAL directories are not permitted to be under the root directory if set.");
@@ -1343,6 +1343,7 @@ public abstract class FSUtils {
       super(fs, HConstants.HBASE_NON_TABLE_DIRS);
     }
 
+    @Override
     protected boolean isValidName(final String name) {
       if (!super.isValidName(name)) return false;
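The FSTableDescriptors and FSUtils hunks replace `!=` on objects with `!equals()`. Reference comparison only asks whether two variables point at the same instance, and two Path or FileStatus objects describing the same filesystem entry are routinely distinct instances, so the old checks could succeed or fail spuriously; findbugs flags such suspicious reference comparisons. A sketch using java.nio.file.Path as a stand-in for the Hadoop Path in the diff:

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class RefEqualityDemo {
      public static void main(String[] args) {
        Path a = Paths.get("/hbase/WALs");
        Path b = Paths.get("/hbase/WALs");   // same location, different object

        System.out.println(a != b);          // true: reference comparison says "different"
        System.out.println(a.equals(b));     // true: value comparison says "same"
      }
    }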
" + "WAL directories are not permitted to be under the root directory if set."); @@ -1343,6 +1343,7 @@ public abstract class FSUtils { super(fs, HConstants.HBASE_NON_TABLE_DIRS); } + @Override protected boolean isValidName(final String name) { if (!super.isValidName(name)) return false; http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index e5e4b5a..066f2b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -574,10 +574,12 @@ public class HBaseFsck extends Configured implements Closeable { errors.print("Number of regions: " + status.getRegionsCount()); Set rits = status.getRegionsInTransition(); - errors.print("Number of regions in transition: " + rits.size()); - if (details) { - for (RegionState state: rits) { - errors.print(" " + state.toDescriptiveString()); + if (rits != null) { + errors.print("Number of regions in transition: " + rits.size()); + if (details) { + for (RegionState state: rits) { + errors.print(" " + state.toDescriptiveString()); + } } } @@ -3798,7 +3800,7 @@ public class HBaseFsck extends Configured implements Closeable { @Override public int hashCode() { int hash = Arrays.hashCode(getRegionName()); - hash ^= getRegionId(); + hash = (int) (hash ^ getRegionId()); hash ^= Arrays.hashCode(getStartKey()); hash ^= Arrays.hashCode(getEndKey()); hash ^= Boolean.valueOf(isOffline()).hashCode(); @@ -3806,7 +3808,7 @@ public class HBaseFsck extends Configured implements Closeable { if (regionServer != null) { hash ^= regionServer.hashCode(); } - hash ^= modTime; + hash = (int) (hash ^ modTime); return hash; } } http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java index 98ce80d..f55f8cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java @@ -80,6 +80,9 @@ public class IdReadWriteLock { } @VisibleForTesting + @edu.umd.cs.findbugs.annotations.SuppressWarnings( + value="JLM_JSR166_UTILCONCURRENT_MONITORENTER", + justification="Synchronization on rwlock is intentional") public void waitForWaiters(long id, int numWaiters) throws InterruptedException { for (ReentrantReadWriteLock readWriteLock;;) { readWriteLock = lockPool.get(id); http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java index 0739e91..a05d7cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java @@ -16,12 +16,15 @@ */ package org.apache.hadoop.hbase.util; +import 
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java
index 0739e91..a05d7cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java
@@ -16,12 +16,15 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import java.io.BufferedWriter;
 import java.io.Closeable;
 import java.io.IOException;
+import java.io.OutputStreamWriter;
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.lang.management.ManagementFactory;
 import java.lang.reflect.Array;
+import java.nio.charset.StandardCharsets;
 import java.util.Iterator;
 import java.util.Set;
 
@@ -371,7 +374,8 @@ public class JSONBean {
    * @throws MalformedObjectNameException
    */
   public static void dumpAllBeans() throws IOException, MalformedObjectNameException {
-    try (PrintWriter writer = new PrintWriter(System.out)) {
+    try (PrintWriter writer = new PrintWriter(new BufferedWriter(
+        new OutputStreamWriter(System.out, StandardCharsets.UTF_8)))) {
       JSONBean dumper = new JSONBean();
       try (JSONBean.Writer jsonBeanWriter = dumper.open(writer)) {
         MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index d0f01f8..89a1c56 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -1086,6 +1086,8 @@ public class RegionSplitter {
     }
 
     @Override
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
+        justification="Preconditions checks insure we are not going to dereference a null value")
     public byte[][] split(int numRegions) {
       Preconditions.checkArgument(
           Bytes.compareTo(lastRowBytes, firstRowBytes) > 0,

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
index 3fa38b9..53a9681 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
@@ -18,9 +18,6 @@
  */
 package org.apache.hadoop.hbase.wal;
 
-import java.io.Closeable;
-import java.io.DataInput;
-import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -382,7 +379,7 @@ public class DefaultWALProvider implements WALProvider {
         ProtobufLogWriter.class, Writer.class);
     Writer writer = null;
     try {
-      writer = logWriterClass.newInstance();
+      writer = logWriterClass.getDeclaredConstructor().newInstance();
       writer.init(fs, path, conf, overwritable);
       return writer;
     } catch (Exception e) {
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -38,7 +36,6 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;

 import com.google.common.annotations.VisibleForTesting;

@@ -103,6 +100,7 @@ public interface WAL extends Closeable {
    * underlying resources after this call; i.e. filesystem based WALs can archive or
    * delete files.
    */
+  @Override
  void close() throws IOException;

  /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 5452742..f5723a7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -79,7 +79,8 @@ public class WALFactory {
     filesystem(DefaultWALProvider.class),
     multiwal(RegionGroupingProvider.class);

-    Class clazz;
+    final Class clazz;
+
     Providers(Class clazz) {
       this.clazz = clazz;
     }
@@ -142,17 +143,13 @@
       List listeners, String providerId) throws IOException {
     LOG.info("Instantiating WALProvider of type " + clazz);
     try {
-      final WALProvider result = clazz.newInstance();
+      final WALProvider result = clazz.getDeclaredConstructor().newInstance();
       result.init(this, conf, listeners, providerId);
       return result;
-    } catch (InstantiationException exception) {
-      LOG.error("couldn't set up WALProvider, the configured class is " + clazz);
-      LOG.debug("Exception details for failure to load WALProvider.", exception);
-      throw new IOException("couldn't set up WALProvider", exception);
-    } catch (IllegalAccessException exception) {
+    } catch (Exception e) {
       LOG.error("couldn't set up WALProvider, the configured class is " + clazz);
-      LOG.debug("Exception details for failure to load WALProvider.", exception);
-      throw new IOException("couldn't set up WALProvider", exception);
+      LOG.debug("Exception details for failure to load WALProvider.", e);
+      throw new IOException("couldn't set up WALProvider", e);
     }
   }

@@ -299,7 +296,7 @@
     try {
       if (lrClass != ProtobufLogReader.class) {
         // User is overriding the WAL reader, let them.
-        reader = lrClass.newInstance();
+        reader = lrClass.getDeclaredConstructor().newInstance();
         reader.init(fs, path, conf, null);
         return reader;
       } else {
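[The DefaultWALProvider and WALFactory hunks above all swap Class.newInstance() for getDeclaredConstructor().newInstance(). Error-prone flags the former because it propagates the constructor's checked exceptions without declaring them; the reflective form wraps them in InvocationTargetException so callers handle failures uniformly. A minimal standalone sketch of the pattern, not taken from the patch; the create() helper and the use of StringBuilder are illustrative only:

import java.lang.reflect.InvocationTargetException;

public class ReflectiveFactory {
  // Hypothetical helper: instantiate an implementation class via its no-arg constructor.
  static <T> T create(Class<? extends T> clazz) {
    try {
      return clazz.getDeclaredConstructor().newInstance();
    } catch (NoSuchMethodException | InstantiationException
        | IllegalAccessException | InvocationTargetException e) {
      // All reflective failure modes surface here as ordinary checked exceptions.
      throw new IllegalStateException("couldn't set up " + clazz, e);
    }
  }

  public static void main(String[] args) {
    StringBuilder sb = ReflectiveFactory.create(StringBuilder.class);
    System.out.println(sb.append("instantiated reflectively"));
  }
}
]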
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
index 5cc7567..4eb79b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
@@ -476,8 +476,8 @@ public class WALKey implements SequenceId, Comparable {
   @Override
   public int hashCode() {
     int result = Bytes.hashCode(this.encodedRegionName);
-    result ^= this.logSeqNum;
-    result ^= this.writeTime;
+    result = (int) (result ^ this.logSeqNum);
+    result = (int) (result ^ this.writeTime);
     return result;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 005e948..7c74649 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -132,6 +132,8 @@ import com.google.protobuf.TextFormat;
  * region to replay on startup. Delete the old log files when finished.
  */
 @InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="JLM_JSR166_UTILCONCURRENT_MONITORENTER",
+    justification="Synchronization on concurrent map is intended")
 public class WALSplitter {
   private static final Log LOG = LogFactory.getLog(WALSplitter.class);

@@ -1145,7 +1147,7 @@
     protected PipelineController controller;
     protected EntryBuffers entryBuffers;

-    protected Map writers = Collections
+    protected final Map writers = Collections
         .synchronizedMap(new TreeMap(Bytes.BYTES_COMPARATOR));

     protected final Map regionMaximumEditLogSeqNum = Collections
@@ -1710,21 +1712,21 @@
     private long waitRegionOnlineTimeOut;
     private final Set recoveredRegions = Collections.synchronizedSet(new HashSet());
-    private final Map writers =
+    private final Map rsWriters =
         new ConcurrentHashMap();
     // online encoded region name -> region location map
     private final Map onlineRegions =
         new ConcurrentHashMap();

-    private Map tableNameToHConnectionMap = Collections
+    private final Map tableNameToHConnectionMap = Collections
         .synchronizedMap(new TreeMap());
     /**
      * Map key -> value layout
      * : -> Queue
      */
-    private Map>> serverToBufferQueueMap =
+    private final Map>> serverToBufferQueueMap =
         new ConcurrentHashMap>>();
-    private List thrown = new ArrayList();
+    private final List thrown = new ArrayList();
     // The following sink is used in distributedLogReplay mode for entries of regions in a disabling
     // table. It's a limitation of distributedLogReplay. Because log replay needs a region is
@@ -2124,7 +2126,7 @@
     @Override
     int getNumOpenWriters() {
-      return this.writers.size() + this.logRecoveredEditsOutputSink.getNumOpenWriters();
+      return this.rsWriters.size() + this.logRecoveredEditsOutputSink.getNumOpenWriters();
     }

     private List closeRegionServerWriters() throws IOException {
@@ -2146,8 +2148,8 @@
           }
         }
       } finally {
-        synchronized (writers) {
-          for (Map.Entry entry : writers.entrySet()) {
+        synchronized (rsWriters) {
+          for (Map.Entry entry : rsWriters.entrySet()) {
             String locationKey = entry.getKey();
             RegionServerWriter tmpW = entry.getValue();
             try {
@@ -2182,8 +2184,8 @@
     @Override
     public Map getOutputCounts() {
       TreeMap ret = new TreeMap(Bytes.BYTES_COMPARATOR);
-      synchronized (writers) {
-        for (Map.Entry entry : writers.entrySet()) {
+      synchronized (rsWriters) {
+        for (Map.Entry entry : rsWriters.entrySet()) {
           ret.put(Bytes.toBytes(entry.getKey()), entry.getValue().editsWritten);
         }
       }
@@ -2201,7 +2203,7 @@
      * @return null if this region shouldn't output any logs
      */
     private RegionServerWriter getRegionServerWriter(String loc) throws IOException {
-      RegionServerWriter ret = writers.get(loc);
+      RegionServerWriter ret = rsWriters.get(loc);
       if (ret != null) {
         return ret;
       }
@@ -2212,11 +2214,11 @@
       }

       HConnection hconn = getConnectionByTableName(tableName);
-      synchronized (writers) {
-        ret = writers.get(loc);
+      synchronized (rsWriters) {
+        ret = rsWriters.get(loc);
         if (ret == null) {
           ret = new RegionServerWriter(conf, tableName, hconn);
-          writers.put(loc, ret);
+          rsWriters.put(loc, ret);
         }
       }
       return ret;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
index 6cd5a28..4fbce90 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
@@ -28,6 +28,7 @@ import java.io.Reader;
 import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.Socket;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
@@ -405,7 +406,7 @@ public class MiniZooKeeperCluster {
     Socket sock = new Socket("localhost", port);
     try {
       OutputStream outstream = sock.getOutputStream();
-      outstream.write("stat".getBytes());
+      outstream.write("stat".getBytes(StandardCharsets.UTF_8));
       outstream.flush();
     } finally {
       sock.close();
@@ -435,10 +436,10 @@ public class MiniZooKeeperCluster {
     BufferedReader reader = null;
     try {
       OutputStream outstream = sock.getOutputStream();
-      outstream.write("stat".getBytes());
+      outstream.write("stat".getBytes(StandardCharsets.UTF_8));
       outstream.flush();

-      Reader isr = new InputStreamReader(sock.getInputStream());
+      Reader isr = new InputStreamReader(sock.getInputStream(), StandardCharsets.UTF_8);
       reader = new BufferedReader(isr);
       String line = reader.readLine();
       if (line != null && line.startsWith("Zookeeper version:")) {
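[This MiniZooKeeperCluster hunk and the JSONBean change earlier apply the same findbugs fix: never rely on the JVM's platform default charset, which varies with -Dfile.encoding. A small self-contained sketch, with illustrative names, of how an explicit charset makes the byte round-trip deterministic:

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public class CharsetDemo {
  public static void main(String[] args) throws IOException {
    // Same bytes on every platform, unlike the no-arg getBytes().
    byte[] cmd = "stat".getBytes(StandardCharsets.UTF_8);
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(
        new ByteArrayInputStream(cmd), StandardCharsets.UTF_8))) {
      System.out.println(reader.readLine()); // prints: stat
    }
  }
}
]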
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
index 1d2f394..0fe60e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
@@ -51,7 +51,7 @@ import org.apache.zookeeper.KeeperException;
 @InterfaceAudience.Private
 public class RegionServerTracker extends ZooKeeperListener {
   private static final Log LOG = LogFactory.getLog(RegionServerTracker.class);
-  private NavigableMap regionServers =
+  private final NavigableMap regionServers =
       new TreeMap();
   private ServerManager serverManager;
   private MasterServices server;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
index dceed8e..ad7996b 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
[Note: the archive stripped the HTML table markup from this hunk; only the JSP scriptlet and expression lines survive and are reflowed below.]
@@ -112,30 +112,32 @@
   Shared Storefile Size
   Archived Storefile Size

-  <%for (SnapshotDescription snapshotDesc : snapshots) { %>
-    <%
-      TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable());
-      SnapshotInfo.SnapshotStats stats = SnapshotInfo.getSnapshotStats(master.getConfiguration(),
-        snapshotDesc, filesMap);
-      totalUnsharedArchivedSize += stats.getNonSharedArchivedStoreFilesSize();
-      tableExists = admin.tableExists(snapshotTable);
-    %>
-    <%= snapshotDesc.getName() %>
-    <% if (tableExists) { %>
-      <%= snapshotTable.getNameAsString() %>
-    <% } else { %>
-      <%= snapshotTable.getNameAsString() %>
-    <% } %>
-    <%= new Date(snapshotDesc.getCreationTime()) %>
-    <%= StringUtils.humanReadableInt(stats.getSharedStoreFilesSize()) %>
-    <%= StringUtils.humanReadableInt(stats.getArchivedStoreFileSize()) %>
-      (<%= StringUtils.humanReadableInt(stats.getNonSharedArchivedStoreFilesSize()) %>)
-  <% } %>
+  <% if (snapshots != null) { %>
+    <% for (SnapshotDescription snapshotDesc : snapshots) { %>
+      <%
+        TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable());
+        SnapshotInfo.SnapshotStats stats = SnapshotInfo.getSnapshotStats(master.getConfiguration(),
+          snapshotDesc, filesMap);
+        totalUnsharedArchivedSize += stats.getNonSharedArchivedStoreFilesSize();
+        tableExists = admin.tableExists(snapshotTable);
+      %>
+      <%= snapshotDesc.getName() %>
+      <% if (tableExists) { %>
+        <%= snapshotTable.getNameAsString() %>
+      <% } else { %>
+        <%= snapshotTable.getNameAsString() %>
+      <% } %>
+      <%= new Date(snapshotDesc.getCreationTime()) %>
+      <%= StringUtils.humanReadableInt(stats.getSharedStoreFilesSize()) %>
+      <%= StringUtils.humanReadableInt(stats.getArchivedStoreFileSize()) %>
+        (<%= StringUtils.humanReadableInt(stats.getNonSharedArchivedStoreFilesSize()) %>)
+    <% } %>
+  <% } %>

   <%= snapshots.size() %> snapshot(s) in set.

   Total Storefile Size: <%= StringUtils.humanReadableInt(totalSize) %>
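[The snapshotsStats.jsp hunk above, like the HBaseFsck regions-in-transition hunk earlier, guards a possibly-null collection before calling size() or iterating it. A minimal sketch of the pattern, with hypothetical names:

import java.util.Collections;
import java.util.Set;

public class NullGuardDemo {
  static void printInTransition(Set<String> rits) {
    if (rits != null) { // same shape as the HBaseFsck fix
      System.out.println("Number of regions in transition: " + rits.size());
      for (String state : rits) {
        System.out.println("  " + state);
      }
    }
  }

  public static void main(String[] args) {
    printInTransition(null);                        // silently skipped, no NPE
    printInTransition(Collections.singleton("r1")); // prints one entry
  }
}
]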
http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 19e66e1..86a5a76 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -22,6 +22,7 @@
   import="com.google.protobuf.ByteString"
   import="java.net.URLEncoder"
   import="java.util.ArrayList"
+  import="java.util.HashMap"
   import="java.util.TreeMap"
   import="java.util.List"
   import="java.util.LinkedHashMap"
@@ -252,8 +253,14 @@ if ( fqtn != null ) {
   }
 %>
+<%
+  String metaLocationString = metaLocation != null ?
+      StringEscapeUtils.escapeHtml(metaLocation.getHostname().toString()) +
+      ":" + master.getRegionServerInfoPort(metaLocation) :
+      "(null)";
+%>
   <%= escapeXml(meta.getRegionNameAsString()) %>
-  <%= StringEscapeUtils.escapeHtml(metaLocation.getHostname().toString()) + ":" + master.getRegionServerInfoPort(metaLocation) %>
+  <%= metaLocationString %>
   <%= readReq%>
   <%= writeReq%>
   <%= fileSize%>
@@ -355,8 +362,11 @@
   String urlRegionServer = null;
   Map regDistribution = new TreeMap();
   Map primaryRegDistribution = new TreeMap();
-  Map regions = table.getRegionLocations();
   Map regionsToLoad = new LinkedHashMap();
+  Map regions = table.getRegionLocations();
+  if (regions == null) {
+    regions = new HashMap();
+  }
   for (Map.Entry hriEntry : regions.entrySet()) {
     HRegionInfo regionInfo = hriEntry.getKey();
     ServerName addr = hriEntry.getValue();
@@ -811,7 +821,7 @@
 var showWhole='<%= showWhole %>';
 if(showWhole=='true')document.getElementById("showWhole").checked=true;
 function reloadAsSort(){
-  var url="?name="+'<%= URLEncoder.encode(fqtn) %>';
+  var url="?name="+'<%= fqtn != null ? URLEncoder.encode(fqtn) : "" %>';
   if(document.getElementById("sel").selectedIndex>0){
     url=url+"&sort="+document.getElementById("sel").value;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d80d3fa4/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
index cd35ad1..04d54a5 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
@@ -37,7 +37,7 @@
   Configuration conf = rs.getConfiguration();

   Region region = rs.getFromOnlineRegions(regionName);
-  String displayName = region.getRegionInfo().getRegionNameAsString();
+  String displayName = region != null ? region.getRegionInfo().getRegionNameAsString() : "(null)";
 %>
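[table.jsp takes a slightly different tack from the guards above: it normalizes the null to an empty map once, so the rest of the page logic stays unconditional. A standalone sketch of that choice; fetchRegionLocations is a hypothetical stand-in for the table.getRegionLocations() call in the hunk:

import java.util.Collections;
import java.util.Map;

public class EmptyDefaultDemo {
  static Map<String, String> fetchRegionLocations() {
    return null; // simulate an API that returns null instead of an empty map
  }

  public static void main(String[] args) {
    Map<String, String> regions = fetchRegionLocations();
    if (regions == null) {
      regions = Collections.emptyMap(); // normalize once at the boundary
    }
    // Every later loop can now iterate without a null check.
    for (Map.Entry<String, String> e : regions.entrySet()) {
      System.out.println(e.getKey() + " -> " + e.getValue());
    }
    System.out.println("regions: " + regions.size());
  }
}
]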
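[The hashCode() hunks in HBaseFsck and WALKey make the long-to-int narrowing explicit, which is all error-prone asks for; the resulting hash value is unchanged and still discards the high 32 bits. A sketch, not part of the patch, contrasting that with Long.hashCode(), which folds the high bits in:

public class LongHashDemo {
  public static void main(String[] args) {
    long regionId = 0x1234_5678_9ABC_DEF0L;
    int hash = 42;

    // What the patch does: same truncation as before, but now explicit.
    int explicitCast = (int) (hash ^ regionId);

    // Common alternative: Long.hashCode(v) == (int) (v ^ (v >>> 32)),
    // so the high 32 bits also contribute to the result.
    int mixedHalves = hash ^ Long.hashCode(regionId);

    System.out.printf("explicit cast: %08x%n", explicitCast);
    System.out.printf("mixed halves:  %08x%n", mixedHalves);
  }
}
]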
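[Finally, the JLM_JSR166_UTILCONCURRENT_MONITORENTER suppressions in IdReadWriteLock and WALSplitter cover cases where synchronizing on a java.util.concurrent collection is deliberate: the monitor makes a get-then-create-then-put sequence atomic, as in getRegionServerWriter above. A self-contained sketch of that pattern and of the computeIfAbsent alternative available on Java 8+ (branch-1 still targeted Java 7, hence the explicit lock in the patch; names here are illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class WriterCacheDemo {
  static final ConcurrentMap<String, StringBuilder> writers = new ConcurrentHashMap<>();

  // The locked double-check, shaped like getRegionServerWriter(...):
  static StringBuilder getWriterLocked(String loc) {
    StringBuilder ret = writers.get(loc);
    if (ret != null) {
      return ret;
    }
    synchronized (writers) { // intentional: this is what findbugs flags
      ret = writers.get(loc);
      if (ret == null) {
        ret = new StringBuilder(loc);
        writers.put(loc, ret);
      }
    }
    return ret;
  }

  // Equivalent atomic form that needs no monitor and no suppression:
  static StringBuilder getWriter(String loc) {
    return writers.computeIfAbsent(loc, StringBuilder::new);
  }

  public static void main(String[] args) {
    System.out.println(getWriterLocked("rs1") == getWriter("rs1")); // true
  }
}
]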