From commits-return-84925-apmail-hbase-commits-archive=hbase.apache.org@hbase.apache.org Fri Apr 12 06:29:31 2019
Date: Fri, 12 Apr 2019 06:29:29 +0000
From: zhangduo@apache.org
To: "commits@hbase.apache.org"
Reply-To: dev@hbase.apache.org
Subject: [hbase] branch master updated: HBASE-22202 Fix new findbugs issues after we upgrade hbase-thirdparty dependencies
Message-ID: <155505056952.11368.4840994985548882417@gitbox.apache.org>
X-Git-Repo: hbase
X-Git-Refname: refs/heads/master
X-Git-Reftype: branch
X-Git-Oldrev: 94d9dc1e84f57c722de60398f0fd7fc7f83726a0
X-Git-Newrev: f77bde37225d6787d99474ea4b8ac83e0d233a73
Auto-Submitted: auto-generated

This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
     new f77bde3  HBASE-22202 Fix new findbugs issues after we upgrade hbase-thirdparty dependencies
f77bde3 is described below

commit f77bde37225d6787d99474ea4b8ac83e0d233a73
Author: Duo Zhang
AuthorDate: Fri Apr 12 11:29:26 2019 +0800

    HBASE-22202 Fix new findbugs issues after we upgrade hbase-thirdparty dependencies
---
 .../apache/hadoop/hbase/backup/HFileArchiver.java  | 38 ++++++++++---------
 .../hbase/quotas/NamespaceQuotaSnapshotStore.java  | 13 ++-----
 .../hbase/quotas/TableQuotaSnapshotStore.java      | 16 +++-----
 .../hbase/regionserver/StoreFileComparators.java   | 43 +++++++++-------------
 .../compactions/RatioBasedCompactionPolicy.java    |  2 +-
 .../compactions/SortedCompactionPolicy.java        | 15 ++------
 .../master/ReplicationHFileCleaner.java            |  5 +++
 .../replication/master/ReplicationLogCleaner.java  |  5 +++
 8 files changed, 61 insertions(+), 76 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index 0cac629..1783f6b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -21,7 +21,6 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -31,7 +30,9 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
-
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -49,10 +50,8 @@ import org.apache.hadoop.io.MultipleIOException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.common.base.Function;
+
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.common.collect.Collections2;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 /**
  * Utility class to handle the removal of HFiles (or the respective {@link HStoreFile StoreFiles})
@@ -158,15 +157,15 @@ public class HFileArchiver {
     }
 
     // convert the files in the region to a File
-    toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
+    Stream.of(storeDirs).map(getAsFile).forEachOrdered(toArchive::add);
     LOG.debug("Archiving " + toArchive);
     List<File> failedArchive = resolveAndArchive(fs, regionArchiveDir, toArchive,
       EnvironmentEdgeManager.currentTime());
     if (!failedArchive.isEmpty()) {
-      throw new FailedArchiveException("Failed to archive/delete all the files for region:"
-          + regionDir.getName() + " into " + regionArchiveDir
-          + ". Something is probably awry on the filesystem.",
-          Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
+      throw new FailedArchiveException(
+        "Failed to archive/delete all the files for region:" + regionDir.getName() + " into " +
+          regionArchiveDir + ". Something is probably awry on the filesystem.",
+        failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList()));
     }
     // if that was successful, then we delete the region
     return deleteRegionWithoutArchiving(fs, regionDir);
@@ -269,7 +268,7 @@ public class HFileArchiver {
     }
 
     FileStatusConverter getAsFile = new FileStatusConverter(fs);
-    Collection<File> toArchive = Lists.transform(Arrays.asList(storeFiles), getAsFile);
+    Collection<File> toArchive = Stream.of(storeFiles).map(getAsFile).collect(Collectors.toList());
     Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family);
 
     // do the actual archive
@@ -279,7 +278,7 @@ public class HFileArchiver {
       throw new FailedArchiveException("Failed to archive/delete all the files for region:"
          + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family) + " into "
          + storeArchiveDir + ". Something is probably awry on the filesystem.",
-          Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
+          failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList()));
     }
   }
 
@@ -328,17 +327,18 @@ public class HFileArchiver {
 
     // Wrap the storefile into a File
     StoreToFile getStorePath = new StoreToFile(fs);
-    Collection<File> storeFiles = Collections2.transform(compactedFiles, getStorePath);
+    Collection<File> storeFiles =
+      compactedFiles.stream().map(getStorePath).collect(Collectors.toList());
 
     // do the actual archive
-    List<File> failedArchive = resolveAndArchive(fs, storeArchiveDir, storeFiles,
-      EnvironmentEdgeManager.currentTime());
+    List<File> failedArchive =
+      resolveAndArchive(fs, storeArchiveDir, storeFiles, EnvironmentEdgeManager.currentTime());
 
     if (!failedArchive.isEmpty()){
       throw new FailedArchiveException("Failed to archive/delete all the files for region:"
          + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family)
          + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.",
-          Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
+          failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList()));
     }
   }
@@ -698,8 +698,10 @@ public class HFileArchiver {
 
     @Override
     public Collection<File> getChildren() throws IOException {
-      if (fs.isFile(file)) return Collections.emptyList();
-      return Collections2.transform(Arrays.asList(fs.listStatus(file)), getAsFile);
+      if (fs.isFile(file)) {
+        return Collections.emptyList();
+      }
+      return Stream.of(fs.listStatus(file)).map(getAsFile).collect(Collectors.toList());
     }
 
     @Override
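The HFileArchiver changes above are representative of the whole patch: shaded Guava helpers (Lists.transform, Collections2.transform) are replaced with JDK 8 streams. One behavioral difference worth noting is that the Guava helpers return lazy views that re-apply the function on every access, whereas collect(Collectors.toList()) snapshots the result eagerly. A minimal, self-contained sketch of the pattern (class, variable, and path names here are illustrative, not from the HBase codebase):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class TransformSketch {
  public static void main(String[] args) {
    String[] storeDirs = { "cf1", "cf2" };

    // Before (Guava): Lists.transform(Arrays.asList(storeDirs), fn) -- a lazy,
    // function-re-applying view. After (JDK 8): an eager copy, with no shaded
    // Guava types left on the method signature.
    List<String> asPaths =
        Stream.of(storeDirs).map(dir -> "/archive/" + dir).collect(Collectors.toList());
    System.out.println(asPaths); // [/archive/cf1, /archive/cf2]
  }
}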
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NamespaceQuotaSnapshotStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NamespaceQuotaSnapshotStore.java
index b535775..3a97b33 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NamespaceQuotaSnapshotStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NamespaceQuotaSnapshotStore.java
@@ -23,14 +23,12 @@ import java.util.Objects;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
-
+import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Predicate;
-import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
@@ -101,12 +99,9 @@ public class NamespaceQuotaSnapshotStore implements QuotaSnapshotStore<String> {
   public Iterable<Entry<RegionInfo, Long>> filterBySubject(String namespace) {
     rlock.lock();
     try {
-      return Iterables.filter(regionUsage.entrySet(), new Predicate<Entry<RegionInfo, Long>>() {
-        @Override
-        public boolean apply(Entry<RegionInfo, Long> input) {
-          return namespace.equals(input.getKey().getTable().getNamespaceAsString());
-        }
-      });
+      return regionUsage.entrySet().stream()
+        .filter(entry -> namespace.equals(entry.getKey().getTable().getNamespaceAsString()))
+        .collect(Collectors.toList());
     } finally {
       rlock.unlock();
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java
index c376dfa..9f3fd0c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java
@@ -23,7 +23,7 @@ import java.util.Objects;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
-
+import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.TableName;
@@ -37,9 +37,9 @@ import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.common.base.Predicate;
-import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+
 import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
@@ -144,15 +144,11 @@ public class TableQuotaSnapshotStore implements QuotaSnapshotStore<TableName> {
   }
 
   @Override
-  public Iterable<Entry<RegionInfo,Long>> filterBySubject(TableName table) {
+  public Iterable<Entry<RegionInfo, Long>> filterBySubject(TableName table) {
     rlock.lock();
     try {
-      return Iterables.filter(regionUsage.entrySet(), new Predicate<Entry<RegionInfo, Long>>() {
-        @Override
-        public boolean apply(Entry<RegionInfo, Long> input) {
-          return table.equals(input.getKey().getTable());
-        }
-      });
+      return regionUsage.entrySet().stream()
+        .filter(entry -> table.equals(entry.getKey().getTable())).collect(Collectors.toList());
     } finally {
       rlock.unlock();
     }
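Both quota snapshot stores make the same substitution in filterBySubject: the anonymous Guava Predicate becomes a lambda, and the lazy Iterables.filter view becomes a list collected while the read lock is still held, so callers no longer iterate over the live map after rlock.unlock(). A small sketch of the pattern (string keys stand in for the RegionInfo keys of the real code):

import java.util.AbstractMap.SimpleEntry;
import java.util.Arrays;
import java.util.List;
import java.util.Map.Entry;
import java.util.stream.Collectors;

public class FilterSketch {
  public static void main(String[] args) {
    List<Entry<String, Long>> regionUsage = Arrays.asList(
        new SimpleEntry<>("ns1:table1", 1024L),
        new SimpleEntry<>("ns2:table2", 2048L));

    // collect(...) materializes the matches up front, unlike the old lazy view.
    List<Entry<String, Long>> ns1Usage = regionUsage.stream()
        .filter(entry -> entry.getKey().startsWith("ns1:"))
        .collect(Collectors.toList());
    System.out.println(ns1Usage); // [ns1:table1=1024]
  }
}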
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileComparators.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileComparators.java
index 99916d2..5a52adb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileComparators.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileComparators.java
@@ -17,12 +17,9 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Function;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-
 import java.util.Comparator;
-
+import java.util.function.Function;
+import java.util.function.ToLongFunction;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -37,32 +34,23 @@ final class StoreFileComparators {
   * ordering, then bulkLoadTime. If there are ties, the path name is used as a tie-breaker.
   */
  public static final Comparator<HStoreFile> SEQ_ID =
-    Ordering.compound(ImmutableList.of(Ordering.natural().onResultOf(new GetSeqId()),
-      Ordering.natural().onResultOf(new GetFileSize()).reverse(),
-      Ordering.natural().onResultOf(new GetBulkTime()),
-      Ordering.natural().onResultOf(new GetPathName())));
+    Comparator.comparingLong(HStoreFile::getMaxSequenceId)
+      .thenComparing(Comparator.comparingLong(new GetFileSize()).reversed())
+      .thenComparingLong(new GetBulkTime()).thenComparing(new GetPathName());
 
  /**
   * Comparator for time-aware compaction. SeqId is still the first ordering criterion to maintain
   * MVCC.
   */
  public static final Comparator<HStoreFile> SEQ_ID_MAX_TIMESTAMP =
-    Ordering.compound(ImmutableList.of(Ordering.natural().onResultOf(new GetSeqId()),
-      Ordering.natural().onResultOf(new GetMaxTimestamp()),
-      Ordering.natural().onResultOf(new GetFileSize()).reverse(),
-      Ordering.natural().onResultOf(new GetBulkTime()),
-      Ordering.natural().onResultOf(new GetPathName())));
+    Comparator.comparingLong(HStoreFile::getMaxSequenceId).thenComparingLong(new GetMaxTimestamp())
+      .thenComparing(Comparator.comparingLong(new GetFileSize()).reversed())
+      .thenComparingLong(new GetBulkTime()).thenComparing(new GetPathName());
 
-  private static class GetSeqId implements Function<HStoreFile, Long> {
-    @Override
-    public Long apply(HStoreFile sf) {
-      return sf.getMaxSequenceId();
-    }
-  }
+  private static class GetFileSize implements ToLongFunction<HStoreFile> {
 
-  private static class GetFileSize implements Function<HStoreFile, Long> {
    @Override
-    public Long apply(HStoreFile sf) {
+    public long applyAsLong(HStoreFile sf) {
      if (sf.getReader() != null) {
        return sf.getReader().length();
      } else {
@@ -73,23 +61,26 @@ final class StoreFileComparators {
    }
  }
 
-  private static class GetBulkTime implements Function<HStoreFile, Long> {
+  private static class GetBulkTime implements ToLongFunction<HStoreFile> {
+
    @Override
-    public Long apply(HStoreFile sf) {
+    public long applyAsLong(HStoreFile sf) {
      return sf.getBulkLoadTimestamp().orElse(Long.MAX_VALUE);
    }
  }
 
  private static class GetPathName implements Function<HStoreFile, String> {
+
    @Override
    public String apply(HStoreFile sf) {
      return sf.getPath().getName();
    }
  }
 
-  private static class GetMaxTimestamp implements Function<HStoreFile, Long> {
+  private static class GetMaxTimestamp implements ToLongFunction<HStoreFile> {
+
    @Override
-    public Long apply(HStoreFile sf) {
+    public long applyAsLong(HStoreFile sf) {
      return sf.getMaximumTimestamp().orElse(Long.MAX_VALUE);
    }
  }
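The StoreFileComparators rewrite swaps Guava's Ordering.compound for JDK 8 Comparator composition and turns the Function<HStoreFile, Long> extractors into ToLongFunction implementations, avoiding Long boxing; GetFileSize stays a class rather than a method reference because it branches on a possibly-missing reader. A compact sketch of the chaining pattern, using a hypothetical StoreFile stand-in rather than the real HStoreFile:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class ComparatorChainSketch {
  // Illustrative stand-in exposing the fields the comparator reads.
  static class StoreFile {
    final long seqId;
    final long size;
    final String name;

    StoreFile(long seqId, long size, String name) {
      this.seqId = seqId;
      this.size = size;
      this.name = name;
    }

    @Override
    public String toString() {
      return name;
    }
  }

  // Sequence id ascending, then file size descending, then name as tie-breaker.
  static final Comparator<StoreFile> SEQ_ID =
      Comparator.comparingLong((StoreFile sf) -> sf.seqId)
          .thenComparing(Comparator.comparingLong((StoreFile sf) -> sf.size).reversed())
          .thenComparing((StoreFile sf) -> sf.name);

  public static void main(String[] args) {
    List<StoreFile> files = Arrays.asList(
        new StoreFile(2, 100, "b"), new StoreFile(1, 50, "a"), new StoreFile(2, 200, "c"));
    files.sort(SEQ_ID);
    System.out.println(files); // [a, c, b] -- seqId ascending, then size descending
  }
}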
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
index e0be6cf..d2ba26a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
@@ -117,7 +117,7 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy {
      candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck)
      throws IOException {
    if (!tryingMajor) {
-      candidateSelection = filterBulk(candidateSelection);
+      filterBulk(candidateSelection);
      candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
      candidateSelection = checkMinFilesCriteria(candidateSelection,
        comConf.getMinFilesToCompact());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
index 3eb830a..9b30ab5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
@@ -16,7 +16,6 @@ import java.util.Collection;
 import java.util.List;
 import java.util.OptionalInt;
 import java.util.Random;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
@@ -24,9 +23,8 @@ import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.common.base.Predicate;
-import org.apache.hbase.thirdparty.com.google.common.collect.Collections2;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 /**
@@ -199,16 +197,9 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
 
  /**
   * @param candidates pre-filtrate
-   * @return filtered subset exclude all bulk load files if configured
   */
-  protected ArrayList<HStoreFile> filterBulk(ArrayList<HStoreFile> candidates) {
-    candidates.removeAll(Collections2.filter(candidates, new Predicate<HStoreFile>() {
-      @Override
-      public boolean apply(HStoreFile input) {
-        return input.excludeFromMinorCompaction();
-      }
-    }));
-    return candidates;
+  protected void filterBulk(ArrayList<HStoreFile> candidates) {
+    candidates.removeIf(HStoreFile::excludeFromMinorCompaction);
  }
 
  /**
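filterBulk now mutates the candidate list in place with removeIf, which is why its return type becomes void and why the RatioBasedCompactionPolicy caller above drops the re-assignment. The old version built a filtered copy via Collections2.filter and then removed it with removeAll, an extra pass plus a temporary collection. A trivial sketch of the replacement pattern (names illustrative):

import java.util.ArrayList;
import java.util.Arrays;

public class RemoveIfSketch {
  public static void main(String[] args) {
    ArrayList<String> candidates = new ArrayList<>(Arrays.asList("f1", "bulk-f2", "f3"));

    // Single in-place pass; no intermediate filtered collection needed.
    candidates.removeIf(name -> name.startsWith("bulk-"));
    System.out.println(candidates); // [f1, f3]
  }
}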
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
index 7b62169..3271696 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
@@ -69,6 +69,11 @@ public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate {
    return Iterables.filter(files, new Predicate<FileStatus>() {
      @Override
      public boolean apply(FileStatus file) {
+        // just for overriding the findbugs NP warnings, as the parameter is marked as Nullable in
+        // the guava Predicate.
+        if (file == null) {
+          return false;
+        }
        String hfile = file.getPath().getName();
        boolean foundHFileRefInQueue = hfileRefs.contains(hfile);
        if (LOG.isDebugEnabled()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
index b9ed8dd..1f3688e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
@@ -78,6 +78,11 @@ public class ReplicationLogCleaner extends BaseLogCleanerDelegate {
    return Iterables.filter(files, new Predicate<FileStatus>() {
      @Override
      public boolean apply(FileStatus file) {
+        // just for overriding the findbugs NP warnings, as the parameter is marked as Nullable in
+        // the guava Predicate.
+        if (file == null) {
+          return false;
+        }
        String wal = file.getPath().getName();
        boolean logInReplicationQueue = wals.contains(wal);
        if (logInReplicationQueue) {
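Unlike the rest of the patch, the two replication cleaners keep the Guava Predicate API and instead add an explicit null guard: the apply parameter is marked @Nullable in Guava's Predicate, so findbugs flags unconditional dereferences of it (the NP warnings named in the commit message). A runnable sketch of the same guard; note it uses stock Guava coordinates rather than the shaded org.apache.hbase.thirdparty packages, and the class and file names are illustrative:

import java.util.Arrays;
import java.util.List;

import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;

public class NullGuardSketch {
  public static void main(String[] args) {
    List<String> files = Arrays.asList("wal-1", null, "wal-2");

    Iterable<String> kept = Iterables.filter(files, new Predicate<String>() {
      @Override
      public boolean apply(String file) {
        // Guard first: the parameter is @Nullable in the Guava interface.
        if (file == null) {
          return false;
        }
        return file.startsWith("wal-");
      }
    });
    System.out.println(Lists.newArrayList(kept)); // [wal-1, wal-2]
  }
}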