accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mmil...@apache.org
Subject [accumulo] branch master updated: Move performance tests to accumulo-testing #1200 (#1264)
Date Tue, 17 Sep 2019 15:49:07 GMT
This is an automated email from the ASF dual-hosted git repository.

mmiller pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/master by this push:
     new 9d78365  Move performance tests to accumulo-testing #1200 (#1264)
9d78365 is described below

commit 9d78365d8f4d7a97a7ca4f104fa107e1f900e5f9
Author: Laura Schanno <lbschanno@gmail.com>
AuthorDate: Tue Sep 17 11:49:01 2019 -0400

    Move performance tests to accumulo-testing #1200 (#1264)
    
    * Move the following performance tests to accumulo-testing:
    * ManySplitsIT, BalanceFasterIT, DeleteTableDuringSplitIT, DurabilityIT, RollWALPerformanceIT
    * Remove the PerformanceTest annotation from DurabilityIT since the
    performance components were moved to DurabilityWriteSpeedPT.
    * Remove PerformanceTests.java since it is no longer being used by any
    classes.
    * Delete ContinuousIngest.java and ContinuousOpts.java. These classes are no longer in use.
---
 .../accumulo/test/categories/PerformanceTests.java |  27 --
 .../org/apache/accumulo/test/BalanceFasterIT.java  | 113 ---------
 .../java/org/apache/accumulo/test/ManySplitIT.java | 113 ---------
 .../test/functional/DeleteTableDuringSplitIT.java  | 119 ---------
 .../accumulo/test/functional/DurabilityIT.java     |  20 +-
 .../test/performance/ContinuousIngest.java         | 272 ---------------------
 .../accumulo/test/performance/ContinuousOpts.java  |  57 -----
 .../test/performance/RollWALPerformanceIT.java     | 123 ----------
 8 files changed, 9 insertions(+), 835 deletions(-)

diff --git a/start/src/main/java/org/apache/accumulo/test/categories/PerformanceTests.java
b/start/src/main/java/org/apache/accumulo/test/categories/PerformanceTests.java
deleted file mode 100644
index 5a0bd82..0000000
--- a/start/src/main/java/org/apache/accumulo/test/categories/PerformanceTests.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.categories;
-
-/**
- * Annotate integration tests which test performance-related aspects of Accumulo or are sensitive
to
- * timings and hardware capabilities.
- * <p>
- * Intended to be used with the JUnit Category annotation on integration test classes. The
Category
- * annotation should be placed at the class-level. Test class names should still be suffixed
with
- * 'IT' as the rest of the integration tests.
- */
-public interface PerformanceTests {}
diff --git a/test/src/main/java/org/apache/accumulo/test/BalanceFasterIT.java b/test/src/main/java/org/apache/accumulo/test/BalanceFasterIT.java
deleted file mode 100644
index 339b7d2..0000000
--- a/test/src/main/java/org/apache/accumulo/test/BalanceFasterIT.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeFalse;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.Accumulo;
-import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.categories.MiniClusterOnlyTests;
-import org.apache.accumulo.test.categories.PerformanceTests;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.test.mrit.IntegrationTestMapReduce;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-// ACCUMULO-2952
-@Category({MiniClusterOnlyTests.class, PerformanceTests.class})
-public class BalanceFasterIT extends ConfigurableMacBase {
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setNumTservers(3);
-  }
-
-  @BeforeClass
-  public static void checkMR() {
-    assumeFalse(IntegrationTestMapReduce.isMapReduce());
-  }
-
-  @Test(timeout = 90 * 1000)
-  public void test() throws Exception {
-    // create a table, add a bunch of splits
-    String tableName = getUniqueNames(1)[0];
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build())
{
-      client.tableOperations().create(tableName);
-      SortedSet<Text> splits = new TreeSet<>();
-      for (int i = 0; i < 1000; i++) {
-        splits.add(new Text("" + i));
-      }
-      client.tableOperations().addSplits(tableName, splits);
-      // give a short wait for balancing
-      sleepUninterruptibly(10, TimeUnit.SECONDS);
-      // find out where the tablets are
-      Iterator<Integer> i;
-      try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
-        s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
-        s.setRange(MetadataSchema.TabletsSection.getRange());
-        Map<String,Integer> counts = new HashMap<>();
-        while (true) {
-          int total = 0;
-          counts.clear();
-          for (Entry<Key,Value> kv : s) {
-            String host = kv.getValue().toString();
-            if (!counts.containsKey(host))
-              counts.put(host, 0);
-            counts.put(host, counts.get(host) + 1);
-            total++;
-          }
-          // are enough tablets online?
-          if (total > 1000)
-            break;
-        }
-
-        // should be on all three servers
-        assertEquals(3, counts.size());
-        // and distributed evenly
-        i = counts.values().iterator();
-      }
-
-      int a = i.next();
-      int b = i.next();
-      int c = i.next();
-      assertTrue(Math.abs(a - b) < 3);
-      assertTrue(Math.abs(a - c) < 3);
-      assertTrue(a > 330);
-    }
-  }
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/ManySplitIT.java b/test/src/main/java/org/apache/accumulo/test/ManySplitIT.java
deleted file mode 100644
index 9b30c17..0000000
--- a/test/src/main/java/org/apache/accumulo/test/ManySplitIT.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeFalse;
-
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.client.Accumulo;
-import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.categories.MiniClusterOnlyTests;
-import org.apache.accumulo.test.categories.PerformanceTests;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.test.mrit.IntegrationTestMapReduce;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({MiniClusterOnlyTests.class, PerformanceTests.class})
-public class ManySplitIT extends ConfigurableMacBase {
-
-  final int SPLITS = 10_000;
-
-  @BeforeClass
-  public static void checkMR() {
-    assumeFalse(IntegrationTestMapReduce.isMapReduce());
-  }
-
-  @Test(timeout = 4 * 60 * 1000)
-  public void test() throws Exception {
-    assumeFalse(IntegrationTestMapReduce.isMapReduce());
-
-    final String tableName = getUniqueNames(1)[0];
-
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build())
{
-
-      log.info("Creating table");
-      log.info("splitting metadata table");
-      client.tableOperations().create(tableName);
-      SortedSet<Text> splits = new TreeSet<>();
-      for (byte b : "123456789abcde".getBytes(UTF_8)) {
-        splits.add(new Text(new byte[] {'1', ';', b}));
-      }
-      client.tableOperations().addSplits(MetadataTable.NAME, splits);
-      splits.clear();
-      for (int i = 0; i < SPLITS; i++) {
-        splits.add(new Text(Integer.toHexString(i)));
-      }
-      log.info("Adding splits");
-      // print out the number of splits so we have some idea of what's going on
-      final AtomicBoolean stop = new AtomicBoolean(false);
-      Thread t = new Thread() {
-        @Override
-        public void run() {
-          while (!stop.get()) {
-            UtilWaitThread.sleep(1000);
-            try {
-              log.info("splits: " + client.tableOperations().listSplits(tableName).size());
-            } catch (TableNotFoundException | AccumuloException | AccumuloSecurityException
e) {
-              // TODO Auto-generated catch block
-              e.printStackTrace();
-            }
-          }
-        }
-      };
-      t.start();
-      long now = System.currentTimeMillis();
-      client.tableOperations().addSplits(tableName, splits);
-      long diff = System.currentTimeMillis() - now;
-      double splitsPerSec = SPLITS / (diff / 1000.);
-      log.info("Done: {} splits per second", splitsPerSec);
-      assertTrue("splits created too slowly", splitsPerSec > 100);
-      stop.set(true);
-      t.join();
-    }
-  }
-
-  @Override
-  protected void configure(MiniAccumuloConfigImpl cfg, Configuration hdfs) {
-    cfg.setNumTservers(1);
-    cfg.setMemory(ServerType.TABLET_SERVER, cfg.getMemory(ServerType.TABLET_SERVER) * 2,
-        MemoryUnit.BYTE);
-  }
-
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
deleted file mode 100644
index 2cc58ee..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.Future;
-
-import org.apache.accumulo.core.client.Accumulo;
-import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.SimpleThreadPool;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.categories.PerformanceTests;
-import org.apache.accumulo.test.categories.StandaloneCapableClusterTests;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-// ACCUMULO-2361
-@Category({StandaloneCapableClusterTests.class, PerformanceTests.class})
-public class DeleteTableDuringSplitIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 15 * 60;
-  }
-
-  @Test
-  public void test() throws Exception {
-
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
-
-      // 96 invocations, 8 at a time
-      int batches = 12, batchSize = 8;
-      String[] tableNames = getUniqueNames(batches * batchSize);
-      // make a bunch of tables
-      for (String tableName : tableNames) {
-        client.tableOperations().create(tableName);
-      }
-      final SortedSet<Text> splits = new TreeSet<>();
-      for (byte i = 0; i < 100; i++) {
-        splits.add(new Text(new byte[] {0, 0, i}));
-      }
-
-      List<Future<?>> results = new ArrayList<>();
-      List<Runnable> tasks = new ArrayList<>();
-      SimpleThreadPool es = new SimpleThreadPool(batchSize * 2, "concurrent-api-requests");
-      for (String tableName : tableNames) {
-        final String finalName = tableName;
-        tasks.add(new Runnable() {
-          @Override
-          public void run() {
-            try {
-              client.tableOperations().addSplits(finalName, splits);
-            } catch (TableNotFoundException ex) {
-              // expected, ignore
-            } catch (Exception ex) {
-              throw new RuntimeException(finalName, ex);
-            }
-          }
-        });
-        tasks.add(new Runnable() {
-          @Override
-          public void run() {
-            try {
-              UtilWaitThread.sleep(500);
-              client.tableOperations().delete(finalName);
-            } catch (Exception ex) {
-              throw new RuntimeException(ex);
-            }
-          }
-        });
-      }
-      Iterator<Runnable> itr = tasks.iterator();
-      for (int batch = 0; batch < batches; batch++) {
-        for (int i = 0; i < batchSize; i++) {
-          Future<?> f = es.submit(itr.next());
-          results.add(f);
-          f = es.submit(itr.next());
-          results.add(f);
-        }
-        for (Future<?> f : results) {
-          f.get();
-        }
-        results.clear();
-      }
-      // Shut down the ES
-      List<Runnable> queued = es.shutdownNow();
-      assertTrue("Had more tasks to run", queued.isEmpty());
-      assertFalse("Had more tasks that needed to be submitted", itr.hasNext());
-      for (String tableName : tableNames) {
-        assertFalse(client.tableOperations().exists(tableName));
-      }
-    }
-  }
-
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DurabilityIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DurabilityIT.java
index 496e2d1..69c37f5 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/DurabilityIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DurabilityIT.java
@@ -16,14 +16,7 @@
  */
 package org.apache.accumulo.test.functional;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeFalse;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
+import com.google.common.collect.Iterators;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -36,7 +29,6 @@ import org.apache.accumulo.minicluster.ServerType;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.miniclusterImpl.ProcessReference;
 import org.apache.accumulo.test.categories.MiniClusterOnlyTests;
-import org.apache.accumulo.test.categories.PerformanceTests;
 import org.apache.accumulo.test.mrit.IntegrationTestMapReduce;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.RawLocalFileSystem;
@@ -44,9 +36,15 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.google.common.collect.Iterators;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeFalse;
 
-@Category({MiniClusterOnlyTests.class, PerformanceTests.class})
+@Category({MiniClusterOnlyTests.class})
 public class DurabilityIT extends ConfigurableMacBase {
 
   @Override
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/ContinuousIngest.java
b/test/src/main/java/org/apache/accumulo/test/performance/ContinuousIngest.java
deleted file mode 100644
index 3e1d497..0000000
--- a/test/src/main/java/org/apache/accumulo/test/performance/ContinuousIngest.java
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.performance;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.security.SecureRandom;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-import java.util.UUID;
-import java.util.zip.CRC32;
-import java.util.zip.Checksum;
-
-import org.apache.accumulo.core.cli.ClientOpts;
-import org.apache.accumulo.core.client.Accumulo;
-import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.trace.TraceUtil;
-import org.apache.accumulo.core.util.FastFormat;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.wrappers.TraceProxy;
-
-import com.beust.jcommander.Parameter;
-
-public class ContinuousIngest {
-
-  private static final byte[] EMPTY_BYTES = new byte[0];
-
-  private static List<ColumnVisibility> visibilities;
-
-  private static void initVisibilities(ContinuousOpts opts) throws Exception {
-    if (opts.visFile == null) {
-      visibilities = Collections.singletonList(new ColumnVisibility());
-      return;
-    }
-
-    visibilities = new ArrayList<>();
-
-    FileSystem fs = FileSystem.get(new Configuration());
-    BufferedReader in =
-        new BufferedReader(new InputStreamReader(fs.open(new Path(opts.visFile)), UTF_8));
-
-    String line;
-
-    while ((line = in.readLine()) != null) {
-      visibilities.add(new ColumnVisibility(line));
-    }
-
-    in.close();
-  }
-
-  private static ColumnVisibility getVisibility(Random rand) {
-    return visibilities.get(rand.nextInt(visibilities.size()));
-  }
-
-  static class TestOpts extends ClientOpts {
-    @Parameter(names = "--table", description = "table to use")
-    String tableName = "ci";
-  }
-
-  public static void main(String[] args) throws Exception {
-
-    ContinuousOpts opts = new ContinuousOpts();
-    TestOpts clientOpts = new TestOpts();
-    try (TraceScope clientSpan =
-        clientOpts.parseArgsAndTrace(ContinuousIngest.class.getName(), args, opts)) {
-
-      initVisibilities(opts);
-
-      if (opts.min < 0 || opts.max < 0 || opts.max <= opts.min) {
-        throw new IllegalArgumentException("bad min and max");
-      }
-      try (AccumuloClient client = Accumulo.newClient().from(clientOpts.getClientProps()).build())
{
-
-        if (!client.tableOperations().exists(clientOpts.tableName)) {
-          throw new TableNotFoundException(null, clientOpts.tableName,
-              "Consult the README and create the table before starting ingest.");
-        }
-
-        BatchWriter bw = client.createBatchWriter(clientOpts.tableName);
-        bw = TraceProxy.trace(bw, TraceUtil.countSampler(1024));
-
-        Random r = new SecureRandom();
-
-        byte[] ingestInstanceId = UUID.randomUUID().toString().getBytes(UTF_8);
-
-        System.out.printf("UUID %d %s%n", System.currentTimeMillis(),
-            new String(ingestInstanceId, UTF_8));
-
-        long count = 0;
-        final int flushInterval = 1000000;
-        final int maxDepth = 25;
-
-        // always want to point back to flushed data. This way the previous item should
-        // always exist in accumulo when verifying data. To do this make insert N point
-        // back to the row from insert (N - flushInterval). The array below is used to keep
-        // track of this.
-        long[] prevRows = new long[flushInterval];
-        long[] firstRows = new long[flushInterval];
-        int[] firstColFams = new int[flushInterval];
-        int[] firstColQuals = new int[flushInterval];
-
-        long lastFlushTime = System.currentTimeMillis();
-
-        out: while (true) {
-          // generate first set of nodes
-          ColumnVisibility cv = getVisibility(r);
-
-          for (int index = 0; index < flushInterval; index++) {
-            long rowLong = genLong(opts.min, opts.max, r);
-            prevRows[index] = rowLong;
-            firstRows[index] = rowLong;
-
-            int cf = r.nextInt(opts.maxColF);
-            int cq = r.nextInt(opts.maxColQ);
-
-            firstColFams[index] = cf;
-            firstColQuals[index] = cq;
-
-            Mutation m =
-                genMutation(rowLong, cf, cq, cv, ingestInstanceId, count, null, opts.checksum);
-            count++;
-            bw.addMutation(m);
-          }
-
-          lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
-          if (count >= opts.num)
-            break out;
-
-          // generate subsequent sets of nodes that link to previous set of nodes
-          for (int depth = 1; depth < maxDepth; depth++) {
-            for (int index = 0; index < flushInterval; index++) {
-              long rowLong = genLong(opts.min, opts.max, r);
-              byte[] prevRow = genRow(prevRows[index]);
-              prevRows[index] = rowLong;
-              Mutation m = genMutation(rowLong, r.nextInt(opts.maxColF), r.nextInt(opts.maxColQ),
-                  cv, ingestInstanceId, count, prevRow, opts.checksum);
-              count++;
-              bw.addMutation(m);
-            }
-
-            lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
-            if (count >= opts.num)
-              break out;
-          }
-
-          // create one big linked list, this makes all of the first inserts
-          // point to something
-          for (int index = 0; index < flushInterval - 1; index++) {
-            Mutation m = genMutation(firstRows[index], firstColFams[index], firstColQuals[index],
-                cv, ingestInstanceId, count, genRow(prevRows[index + 1]), opts.checksum);
-            count++;
-            bw.addMutation(m);
-          }
-          lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
-          if (count >= opts.num)
-            break out;
-        }
-
-        bw.close();
-      }
-    }
-  }
-
-  private static long flush(BatchWriter bw, long count, final int flushInterval, long lastFlushTime)
-      throws MutationsRejectedException {
-    long t1 = System.currentTimeMillis();
-    bw.flush();
-    long t2 = System.currentTimeMillis();
-    System.out.printf("FLUSH %d %d %d %d %d%n", t2, (t2 - lastFlushTime), (t2 - t1), count,
-        flushInterval);
-    lastFlushTime = t2;
-    return lastFlushTime;
-  }
-
-  public static Mutation genMutation(long rowLong, int cfInt, int cqInt, ColumnVisibility
cv,
-      byte[] ingestInstanceId, long count, byte[] prevRow, boolean checksum) {
-    // Adler32 is supposed to be faster, but according to wikipedia is not good for small
data....
-    // so used CRC32 instead
-    CRC32 cksum = null;
-
-    byte[] rowString = genRow(rowLong);
-
-    byte[] cfString = FastFormat.toZeroPaddedString(cfInt, 4, 16, EMPTY_BYTES);
-    byte[] cqString = FastFormat.toZeroPaddedString(cqInt, 4, 16, EMPTY_BYTES);
-
-    if (checksum) {
-      cksum = new CRC32();
-      cksum.update(rowString);
-      cksum.update(cfString);
-      cksum.update(cqString);
-      cksum.update(cv.getExpression());
-    }
-
-    Mutation m = new Mutation(new Text(rowString));
-
-    m.put(new Text(cfString), new Text(cqString), cv,
-        createValue(ingestInstanceId, count, prevRow, cksum));
-    return m;
-  }
-
-  public static final long genLong(long min, long max, Random r) {
-    return ((r.nextLong() & 0x7fffffffffffffffL) % (max - min)) + min;
-  }
-
-  static final byte[] genRow(long min, long max, Random r) {
-    return genRow(genLong(min, max, r));
-  }
-
-  static final byte[] genRow(long rowLong) {
-    return FastFormat.toZeroPaddedString(rowLong, 16, 16, EMPTY_BYTES);
-  }
-
-  private static Value createValue(byte[] ingestInstanceId, long count, byte[] prevRow,
-      Checksum cksum) {
-    int dataLen = ingestInstanceId.length + 16 + (prevRow == null ? 0 : prevRow.length) +
3;
-    if (cksum != null)
-      dataLen += 8;
-    byte[] val = new byte[dataLen];
-    System.arraycopy(ingestInstanceId, 0, val, 0, ingestInstanceId.length);
-    int index = ingestInstanceId.length;
-    val[index++] = ':';
-    int added = FastFormat.toZeroPaddedString(val, index, count, 16, 16, EMPTY_BYTES);
-    if (added != 16)
-      throw new RuntimeException(" " + added);
-    index += 16;
-    val[index++] = ':';
-    if (prevRow != null) {
-      System.arraycopy(prevRow, 0, val, index, prevRow.length);
-      index += prevRow.length;
-    }
-
-    val[index++] = ':';
-
-    if (cksum != null) {
-      cksum.update(val, 0, index);
-      cksum.getValue();
-      FastFormat.toZeroPaddedString(val, index, cksum.getValue(), 8, 16, EMPTY_BYTES);
-    }
-
-    // System.out.println("val "+new String(val));
-
-    return new Value(val);
-  }
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/ContinuousOpts.java b/test/src/main/java/org/apache/accumulo/test/performance/ContinuousOpts.java
deleted file mode 100644
index dfbfafa..0000000
--- a/test/src/main/java/org/apache/accumulo/test/performance/ContinuousOpts.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.performance;
-
-import com.beust.jcommander.IStringConverter;
-import com.beust.jcommander.Parameter;
-
-/**
- * Common CLI arguments for the Continuous Ingest suite.
- */
-public class ContinuousOpts {
-
-  public static class ShortConverter implements IStringConverter<Short> {
-    @Override
-    public Short convert(String value) {
-      return Short.valueOf(value);
-    }
-  }
-
-  @Parameter(names = "--min", description = "lowest random row number to use")
-  long min = 0;
-
-  @Parameter(names = "--max", description = "maximum random row number to use")
-  long max = Long.MAX_VALUE;
-
-  @Parameter(names = "--num", description = "the number of entries to ingest")
-  long num = Long.MAX_VALUE;
-
-  @Parameter(names = "--maxColF", description = "maximum column family value to use",
-      converter = ShortConverter.class)
-  short maxColF = Short.MAX_VALUE;
-
-  @Parameter(names = "--maxColQ", description = "maximum column qualifier value to use",
-      converter = ShortConverter.class)
-  short maxColQ = Short.MAX_VALUE;
-
-  @Parameter(names = "--addCheckSum", description = "turn on checksums")
-  boolean checksum = false;
-
-  @Parameter(names = "--visibilities",
-      description = "read the visibilities to ingest with from a file")
-  String visFile = null;
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
b/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
deleted file mode 100644
index 9d6d41e..0000000
--- a/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.performance;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeFalse;
-
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Accumulo;
-import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.categories.MiniClusterOnlyTests;
-import org.apache.accumulo.test.categories.PerformanceTests;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.test.mrit.IntegrationTestMapReduce;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({MiniClusterOnlyTests.class, PerformanceTests.class})
-public class RollWALPerformanceIT extends ConfigurableMacBase {
-
-  @BeforeClass
-  public static void checkMR() {
-    assumeFalse(IntegrationTestMapReduce.isMapReduce());
-  }
-
-  @Override
-  protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setProperty(Property.TSERV_WAL_REPLICATION, "1");
-    cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "5M");
-    cfg.setProperty(Property.TSERV_WALOG_MAX_REFERENCED, "100");
-    cfg.setProperty(Property.GC_CYCLE_START, "1s");
-    cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
-    cfg.useMiniDFS(true);
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 5 * 60;
-  }
-
-  private long ingest(AccumuloClient c) throws Exception {
-    final String tableName = getUniqueNames(1)[0];
-
-    log.info("Creating the table");
-    c.tableOperations().create(tableName);
-
-    log.info("Splitting the table");
-    final long SPLIT_COUNT = 100;
-    final long distance = Long.MAX_VALUE / SPLIT_COUNT;
-    final SortedSet<Text> splits = new TreeSet<>();
-    for (int i = 1; i < SPLIT_COUNT; i++) {
-      splits.add(new Text(String.format("%016x", i * distance)));
-    }
-    c.tableOperations().addSplits(tableName, splits);
-
-    log.info("Waiting for balance");
-    c.instanceOperations().waitForBalance();
-
-    log.info("Starting ingest");
-    final long start = System.nanoTime();
-    // Load 50K 100 byte entries
-    ContinuousIngest.main(new String[] {"-c", cluster.getClientPropsPath(), "--table", tableName,
-        "--num", Long.toString(50 * 1000)});
-    final long result = System.nanoTime() - start;
-    log.debug(String.format("Finished in %,d ns", result));
-    log.debug("Dropping table");
-    c.tableOperations().delete(tableName);
-    return result;
-  }
-
-  private long getAverage(AccumuloClient c) throws Exception {
-    final int REPEAT = 3;
-    long totalTime = 0;
-    for (int i = 0; i < REPEAT; i++) {
-      totalTime += ingest(c);
-    }
-    return totalTime / REPEAT;
-  }
-
-  @Test
-  public void testWalPerformanceOnce() throws Exception {
-    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
-      // get time with a small WAL, which will cause many WAL roll-overs
-      long avg1 = getAverage(c);
-      // use a bigger WAL max size to eliminate WAL roll-overs
-      c.instanceOperations().setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), "1G");
-      c.tableOperations().flush(MetadataTable.NAME, null, null, true);
-      c.tableOperations().flush(RootTable.NAME, null, null, true);
-      getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
-      getCluster().start();
-      long avg2 = getAverage(c);
-      log.info(String.format("Average run time with small WAL %,d with large WAL %,d", avg1, avg2));
-      assertTrue(avg1 > avg2);
-      double percent = (100. * avg1) / avg2;
-      log.info(String.format("Percent of large log: %.2f%%", percent));
-    }
-  }
-
-}


Mime
View raw message