From: cmccabe@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HADOOP-11470. Remove some uses of obsolete guava APIs from the hadoop codebase (Sangjin Lee via Colin P. McCabe) (cherry picked from commit ae91b13a4b1896b893268253104f935c3078d345)
Date: Fri, 9 Jan 2015 00:26:17 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f0acb7c2a -> a663faf74


HADOOP-11470. Remove some uses of obsolete guava APIs from the hadoop
codebase (Sangjin Lee via Colin P. McCabe)

(cherry picked from commit ae91b13a4b1896b893268253104f935c3078d345)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a663faf7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a663faf7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a663faf7

Branch: refs/heads/branch-2
Commit: a663faf74111556ea2c96241a44495f15b2faebf
Parents: f0acb7c
Author: Colin Patrick Mccabe
Authored: Thu Jan 8 16:09:44 2015 -0800
Committer: Colin Patrick Mccabe
Committed: Thu Jan 8 16:26:11 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt           |  3 +++
 .../apache/hadoop/hdfs/qjournal/server/Journal.java   | 15 +++++++--------
 .../hadoop/hdfs/TestDataTransferKeepalive.java        |  4 +---
 3 files changed, 11 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a663faf7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f7b479c..da763d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -225,6 +225,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
     (ozawa)
 
+    HADOOP-11470. Remove some uses of obsolete guava APIs from the hadoop
+    codebase. (Sangjin Lee via Colin P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
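A note on the range-API swap in the Journal.java diff below: Guava's Ranges.closed(a, b) builds a closed interval queried with Range.contains(x), and the commit replaces it with Commons Lang's LongRange, whose containsLong(x) has the same closed-interval semantics (both endpoints included) on primitive longs. A minimal standalone sketch of the equivalence follows; it is not part of the commit, the class name and transaction-id values are hypothetical, and it assumes commons-lang 2.x on the classpath:

import org.apache.commons.lang.math.LongRange;

public class RangeSwapSketch {
  public static void main(String[] args) {
    // Hypothetical segment bounds standing in for seg.getStartTxId() / seg.getEndTxId().
    long startTxId = 100L;
    long endTxId = 200L;

    // LongRange is a closed interval: both endpoints are members, matching the
    // semantics of the Guava call it replaces, Ranges.closed(startTxId, endTxId).
    LongRange txnRange = new LongRange(startTxId, endTxId);

    System.out.println(txnRange.containsLong(100L)); // true  (lower endpoint included)
    System.out.println(txnRange.containsLong(200L)); // true  (upper endpoint included)
    System.out.println(txnRange.containsLong(201L)); // false (outside the range)
  }
}

A side benefit of containsLong is that membership checks take a primitive long directly, avoiding the per-call autoboxing that Guava's generic Range required.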
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a663faf7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 8989aeb..389b5d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -29,6 +29,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.commons.lang.math.LongRange;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -61,15 +62,13 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StopWatch;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Range;
-import com.google.common.collect.Ranges;
 import com.google.protobuf.TextFormat;
-import org.apache.hadoop.util.StopWatch;
 
 /**
  * A JournalNode can manage journals for several clusters at once.
@@ -793,8 +792,8 @@ public class Journal implements Closeable {
     // Paranoid sanity check: if the new log is shorter than the log we
     // currently have, we should not end up discarding any transactions
     // which are already Committed.
-    if (txnRange(currentSegment).contains(committedTxnId.get()) &&
-        !txnRange(segment).contains(committedTxnId.get())) {
+    if (txnRange(currentSegment).containsLong(committedTxnId.get()) &&
+        !txnRange(segment).containsLong(committedTxnId.get())) {
       throw new AssertionError(
           "Cannot replace segment " +
           TextFormat.shortDebugString(currentSegment) +
@@ -812,7 +811,7 @@ public class Journal implements Closeable {
 
       // If we're shortening the log, update our highest txid
       // used for lag metrics.
-      if (txnRange(currentSegment).contains(highestWrittenTxId)) {
+      if (txnRange(currentSegment).containsLong(highestWrittenTxId)) {
         highestWrittenTxId = segment.getEndTxId();
       }
     }
@@ -856,10 +855,10 @@ public class Journal implements Closeable {
         TextFormat.shortDebugString(newData));
   }
 
-  private Range<Long> txnRange(SegmentStateProto seg) {
+  private LongRange txnRange(SegmentStateProto seg) {
     Preconditions.checkArgument(seg.hasEndTxId(),
         "invalid segment: %s", seg);
-    return Ranges.closed(seg.getStartTxId(), seg.getEndTxId());
+    return new LongRange(seg.getStartTxId(), seg.getEndTxId());
   }
 
   /**


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a663faf7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
index 08aa2c9..1563b72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -29,7 +29,6 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.InputStream;
-import java.io.PrintWriter;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -46,7 +45,6 @@ import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.base.Supplier;
-import com.google.common.io.NullOutputStream;
 
 public class TestDataTransferKeepalive {
   final Configuration conf = new HdfsConfiguration();
@@ -223,7 +221,7 @@ public class TestDataTransferKeepalive {
         stms[i] = fs.open(TEST_FILE);
       }
       for (InputStream stm : stms) {
-        IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
+        IOUtils.copyBytes(stm, new IOUtils.NullOutputStream(), 1024);
       }
     } finally {
       IOUtils.cleanup(null, stms);
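The TestDataTransferKeepalive change above swaps Guava's com.google.common.io.NullOutputStream for Hadoop's own org.apache.hadoop.io.IOUtils.NullOutputStream as the discard sink. A minimal sketch of that drain-and-discard pattern follows; it is not part of the commit, and a hypothetical in-memory stream stands in for the DFS input streams the test opens:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.IOUtils;

public class NullSinkSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical in-memory data; the test drains streams opened from HDFS instead.
    InputStream stm = new ByteArrayInputStream(new byte[4096]);
    try {
      // Copy with a 1024-byte buffer and throw the bytes away: IOUtils.NullOutputStream
      // ignores every write, so this simply forces the stream to be read to EOF.
      IOUtils.copyBytes(stm, new IOUtils.NullOutputStream(), 1024);
    } finally {
      IOUtils.cleanup(null, stm);
    }
  }
}

Using the in-tree null sink removes one more dependency on a Guava class, which is the point of HADOOP-11470: the fewer Guava APIs the codebase touches, the less painful future Guava upgrades become.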