accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mmil...@apache.org
Subject [accumulo-examples] branch master updated: Fix and remove brittle broken tests
Date Tue, 29 Jan 2019 20:09:14 GMT
This is an automated email from the ASF dual-hosted git repository.

mmiller pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo-examples.git


The following commit(s) were added to refs/heads/master by this push:
     new 0b3174d  Fix and remove brittle broken tests
0b3174d is described below

commit 0b3174da0ebcd1deac7753deaf0da0ce7b8f4be2
Author: Mike Miller <mmiller@apache.org>
AuthorDate: Tue Jan 29 14:55:53 2019 -0500

    Fix and remove brittle broken tests
---
 .../org/apache/accumulo/examples/ExamplesIT.java   | 98 ----------------------
 .../accumulo/examples/mapreduce/MapReduceIT.java   |  8 +-
 2 files changed, 5 insertions(+), 101 deletions(-)

diff --git a/src/test/java/org/apache/accumulo/examples/ExamplesIT.java b/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
index 96c0852..ff7045e 100644
--- a/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
+++ b/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
@@ -33,10 +33,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map.Entry;
 import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 
-import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
 import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchScanner;
@@ -56,12 +53,8 @@ import org.apache.accumulo.examples.client.RandomBatchScanner;
 import org.apache.accumulo.examples.client.ReadWriteExample;
 import org.apache.accumulo.examples.client.RowOperations;
 import org.apache.accumulo.examples.client.SequentialBatchWriter;
-import org.apache.accumulo.examples.client.TraceDumpExample;
-import org.apache.accumulo.examples.client.TracingExample;
 import org.apache.accumulo.examples.combiner.StatsCombiner;
 import org.apache.accumulo.examples.constraints.MaxMutationSize;
-import org.apache.accumulo.examples.dirlist.Ingest;
-import org.apache.accumulo.examples.dirlist.QueryUtil;
 import org.apache.accumulo.examples.helloworld.Insert;
 import org.apache.accumulo.examples.helloworld.Read;
 import org.apache.accumulo.examples.isolation.InterferenceTest;
@@ -76,11 +69,8 @@ import org.apache.accumulo.examples.shard.Query;
 import org.apache.accumulo.examples.shard.Reverse;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl.LogWriter;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.tracer.TraceServer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -163,94 +153,6 @@ public class ExamplesIT extends AccumuloClusterHarness {
   }
 
   @Test
-  public void testTrace() throws Exception {
-    Process trace = null;
-    if (ClusterType.MINI == getClusterType()) {
-      MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
-      trace = impl.exec(TraceServer.class);
-      while (!c.tableOperations().exists("trace"))
-        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-    }
-    String[] args = new String[] {"-c", getClientPropsFile(), "--createtable", "--deletetable",
-        "--create"};
-    Entry<Integer,String> pair = cluster.getClusterControl().execWithStdout(TracingExample.class,
-        args);
-    Assert.assertEquals("Expected return code of zero. STDOUT=" + pair.getValue(), 0,
-        pair.getKey().intValue());
-    String result = pair.getValue();
-    Pattern pattern = Pattern.compile("TraceID: ([0-9a-f]+)");
-    Matcher matcher = pattern.matcher(result);
-    int count = 0;
-    while (matcher.find()) {
-      args = new String[] {"-c", getClientPropsFile(), "--traceid", matcher.group(1)};
-      pair = cluster.getClusterControl().execWithStdout(TraceDumpExample.class, args);
-      assertEquals(0, pair.getKey().intValue());
-      count++;
-    }
-    assertTrue(count > 0);
-    if (ClusterType.MINI == getClusterType() && null != trace) {
-      trace.destroy();
-    }
-  }
-
-  @Test
-  public void testDirList() throws Exception {
-    String[] names = getUniqueNames(3);
-    String dirTable = names[0], indexTable = names[1], dataTable = names[2];
-    String[] args;
-    String dirListDirectory;
-    switch (getClusterType()) {
-      case MINI:
-        dirListDirectory = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir()
-            .getAbsolutePath();
-        break;
-      case STANDALONE:
-        dirListDirectory = ((StandaloneAccumuloCluster) getCluster()).getAccumuloHome();
-        break;
-      default:
-        throw new RuntimeException("Unknown cluster type");
-    }
-    assumeTrue(new File(dirListDirectory).exists());
-    // Index a directory listing on /tmp. If this is running against a standalone cluster, we can't
-    // guarantee Accumulo source will be there.
-    args = new String[] {"-c", getClientPropsFile(), "--dirTable", dirTable, "--indexTable",
-        indexTable, "--dataTable", dataTable, "--vis", visibility, "--chunkSize",
-        Integer.toString(10000), dirListDirectory};
-
-    Entry<Integer,String> entry = getClusterControl().execWithStdout(Ingest.class, args);
-    assertEquals("Got non-zero return code. Stdout=" + entry.getValue(), 0,
-        entry.getKey().intValue());
-
-    String expectedFile;
-    switch (getClusterType()) {
-      case MINI:
-        // Should be present in a minicluster dir
-        expectedFile = "accumulo-site.xml";
-        break;
-      case STANDALONE:
-        // Should be in place on standalone installs (not having to follow symlinks)
-        expectedFile = "LICENSE";
-        break;
-      default:
-        throw new RuntimeException("Unknown cluster type");
-    }
-
-    args = new String[] {"-c", getClientPropsFile(), "-t", indexTable, "--auths", auths, "--search",
-        "--path", expectedFile};
-    entry = getClusterControl().execWithStdout(QueryUtil.class, args);
-    if (ClusterType.MINI == getClusterType()) {
-      MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
-      for (LogWriter writer : impl.getLogWriters()) {
-        writer.flush();
-      }
-    }
-
-    log.info("result " + entry.getValue());
-    assertEquals(0, entry.getKey().intValue());
-    assertTrue(entry.getValue().contains(expectedFile));
-  }
-
-  @Test
   public void testAgeoffFilter() throws Exception {
     String tableName = getUniqueNames(1)[0];
     c.tableOperations().create(tableName);
diff --git a/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java b/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
index d66aa0b..8eedb69 100644
--- a/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
+++ b/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
@@ -33,6 +33,7 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.examples.ExamplesIT;
+import org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.test.functional.ConfigurableMacBase;
 import org.apache.hadoop.conf.Configuration;
@@ -76,9 +77,10 @@ public class MapReduceIT extends ConfigurableMacBase {
         bw.addMutation(m);
       }
       bw.close();
-      Process hash = getCluster().exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg),
-          "-c", confFile, "-t", tablename, "--column", input_cfcq);
-      assertEquals(0, hash.waitFor());
+      MiniAccumuloClusterImpl.ProcessInfo hash = getCluster().exec(RowHash.class,
+          Collections.singletonList(hadoopTmpDirArg), "-c", confFile, "-t", tablename, "--column",
+          input_cfcq);
+      assertEquals(0, hash.getProcess().waitFor());
 
       Scanner s = client.createScanner(tablename, Authorizations.EMPTY);
       s.fetchColumn(new Text(input_cf), new Text(output_cq));


Mime
View raw message