accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From els...@apache.org
Subject [1/3] git commit: ACCUMULO-2665 Workaround for hadoop-2.4.0 requiring a <Guava-15.0
Date Tue, 15 Apr 2014 04:36:17 GMT
Repository: accumulo
Updated Branches:
  refs/heads/1.6.0-SNAPSHOT 77fe4da07 -> 126b6482a
  refs/heads/master 63f1e7678 -> 0f0acb68b


ACCUMULO-2665 Workaround for hadoop-2.4.0 requiring Guava < 15.0

Guava 15.0 removes the LimitInputStream class that hdfs uses internally. We need
to ensure that we have such a version on the classpath for tests that use MiniDFSCluster.
The test module is not prone to this because sisu-guava is pulled in from Maven -- we are avoiding
fixing that as well since this is a very ugly fix. This is a bandaid in the interim, pending a better
long-term fix.


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/126b6482
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/126b6482
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/126b6482

Branch: refs/heads/1.6.0-SNAPSHOT
Commit: 126b6482aa57c9956d547258a094d3f9daf486a1
Parents: 77fe4da
Author: Josh Elser <elserj@apache.org>
Authored: Tue Apr 15 00:33:00 2014 -0400
Committer: Josh Elser <elserj@apache.org>
Committed: Tue Apr 15 00:33:00 2014 -0400

----------------------------------------------------------------------
 start/pom.xml                                                 | 7 +++++++
 .../test/java/org/apache/accumulo/test/AccumuloDFSBase.java   | 6 ++++--
 2 files changed, 11 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/126b6482/start/pom.xml
----------------------------------------------------------------------
diff --git a/start/pom.xml b/start/pom.xml
index e164e82..8747930 100644
--- a/start/pom.xml
+++ b/start/pom.xml
@@ -46,6 +46,13 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-client</artifactId>
     </dependency>
+    <!-- Hadoop-2.4.0 MiniDFSCluster uses classes from <Guava-15.0 -->
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>14.0.1</version><!--$NO-MVN-MAN-VER$-->
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/126b6482/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
----------------------------------------------------------------------
diff --git a/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java b/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
index 05623a8..8e2d534 100644
--- a/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
+++ b/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
@@ -66,7 +66,7 @@ public class AccumuloDFSBase {
     
     try {
       cluster = new MiniDFSCluster(conf, 1, true, null);
-      cluster.waitActive();
+      cluster.waitClusterUp();
       // We can't assume that the hostname of "localhost" will still be "localhost" after
       // starting up the NameNode. We may get mapped into a FQDN via settings in /etc/hosts.
       HDFS_URI = cluster.getFileSystem().getUri();
@@ -123,7 +123,9 @@ public class AccumuloDFSBase {
 
   @AfterClass
   public static void tearDownMiniDfsCluster() {
-    cluster.shutdown();
+    if (null != cluster) {
+      cluster.shutdown();
+    }
   }
 
 }


Mime
View raw message