accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From els...@apache.org
Subject [25/50] [abbrv] accumulo git commit: ACCUMULO-3167 Reset configuration to the original value
Date Wed, 26 Nov 2014 04:23:41 GMT
ACCUMULO-3167 Reset configuration to the original value


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/91dcc95f
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/91dcc95f
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/91dcc95f

Branch: refs/heads/1.6
Commit: 91dcc95f0a571b7003689b9a19fbf6b9f4652266
Parents: 7691780
Author: Josh Elser <elserj@apache.org>
Authored: Mon Nov 24 13:23:04 2014 -0500
Committer: Josh Elser <elserj@apache.org>
Committed: Mon Nov 24 18:08:52 2014 -0500

----------------------------------------------------------------------
 .../accumulo/test/functional/BloomFilterIT.java | 158 ++++++++++---------
 1 file changed, 81 insertions(+), 77 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/91dcc95f/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
index 6c93665..d497d40 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
@@ -71,88 +71,92 @@ public class BloomFilterIT extends AccumuloClusterIT {
   @Test
   public void test() throws Exception {
     Connector c = getConnector();
+    final String readAhead = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey());
    c.instanceOperations().setProperty(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey(), "1");
-    Thread.sleep(1000);
-    final String[] tables = getUniqueNames(4);
-    for (String table : tables) {
-      TableOperations tops = c.tableOperations();
-      tops.create(table);
-      tops.setProperty(table, Property.TABLE_INDEXCACHE_ENABLED.getKey(), "false");
-      tops.setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "false");
-      tops.setProperty(table, Property.TABLE_BLOOM_SIZE.getKey(), "2000000");
-      tops.setProperty(table, Property.TABLE_BLOOM_ERRORRATE.getKey(), "1%");
-      tops.setProperty(table, Property.TABLE_BLOOM_LOAD_THRESHOLD.getKey(), "0");
-      tops.setProperty(table, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64K");
-    }
-    log.info("Writing");
-    write(c, tables[0], 1, 0, 2000000000, 500);
-    write(c, tables[1], 2, 0, 2000000000, 500);
-    write(c, tables[2], 3, 0, 2000000000, 500);
-    log.info("Writing complete");
-
-    // test inserting an empty key
-    BatchWriter bw = c.createBatchWriter(tables[3], new BatchWriterConfig());
-    Mutation m = new Mutation(new Text(""));
-    m.put(new Text(""), new Text(""), new Value("foo1".getBytes()));
-    bw.addMutation(m);
-    bw.close();
-    c.tableOperations().flush(tables[3], null, null, true);
+    try {
+      Thread.sleep(1000);
+      final String[] tables = getUniqueNames(4);
+      for (String table : tables) {
+        TableOperations tops = c.tableOperations();
+        tops.create(table);
+        tops.setProperty(table, Property.TABLE_INDEXCACHE_ENABLED.getKey(), "false");
+        tops.setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "false");
+        tops.setProperty(table, Property.TABLE_BLOOM_SIZE.getKey(), "2000000");
+        tops.setProperty(table, Property.TABLE_BLOOM_ERRORRATE.getKey(), "1%");
+        tops.setProperty(table, Property.TABLE_BLOOM_LOAD_THRESHOLD.getKey(), "0");
+        tops.setProperty(table, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64K");
+      }
+      log.info("Writing");
+      write(c, tables[0], 1, 0, 2000000000, 500);
+      write(c, tables[1], 2, 0, 2000000000, 500);
+      write(c, tables[2], 3, 0, 2000000000, 500);
+      log.info("Writing complete");
+
+      // test inserting an empty key
+      BatchWriter bw = c.createBatchWriter(tables[3], new BatchWriterConfig());
+      Mutation m = new Mutation(new Text(""));
+      m.put(new Text(""), new Text(""), new Value("foo1".getBytes()));
+      bw.addMutation(m);
+      bw.close();
+      c.tableOperations().flush(tables[3], null, null, true);
 
-    for (String table : Arrays.asList(tables[0], tables[1], tables[2])) {
-      c.tableOperations().compact(table, null, null, true, true);
-    }
+      for (String table : Arrays.asList(tables[0], tables[1], tables[2])) {
+        c.tableOperations().compact(table, null, null, true, true);
+      }
 
-    // ensure compactions are finished
-    for (String table : tables) {
-      FunctionalTestUtils.checkRFiles(c, table, 1, 1, 1, 1);
-    }
+      // ensure compactions are finished
+      for (String table : tables) {
+        FunctionalTestUtils.checkRFiles(c, table, 1, 1, 1, 1);
+      }
 
-    // these queries should only run quickly if bloom filters are working, so lets get a base
-    log.info("Base query");
-    long t1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
-    long t2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
-    long t3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
-    log.info("Base query complete");
-
-    log.info("Rewriting with bloom filters");
-    c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
-
-    c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnFamilyFunctor.class.getName());
-
-    c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnQualifierFunctor.class.getName());
-
-    c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
-
-    // ensure the updates to zookeeper propagate
-    UtilWaitThread.sleep(500);
-
-    c.tableOperations().compact(tables[3], null, null, false, true);
-    c.tableOperations().compact(tables[0], null, null, false, true);
-    c.tableOperations().compact(tables[1], null, null, false, true);
-    c.tableOperations().compact(tables[2], null, null, false, true);
-    log.info("Rewriting with bloom filters complete");
-
-    // these queries should only run quickly if bloom
-    // filters are working
-    log.info("Bloom query");
-    long tb1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
-    long tb2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
-    long tb3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
-    log.info("Bloom query complete");
-    timeCheck(t1 + t2 + t3, tb1 + tb2 + tb3);
-
-    // test querying for empty key
-    Scanner scanner = c.createScanner(tables[3], Authorizations.EMPTY);
-    scanner.setRange(new Range(new Text("")));
-
-    if (!scanner.iterator().next().getValue().toString().equals("foo1")) {
-      throw new Exception("Did not see foo1");
+      // these queries should only run quickly if bloom filters are working, so lets get a base
+      log.info("Base query");
+      long t1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
+      long t2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
+      long t3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
+      log.info("Base query complete");
+
+      log.info("Rewriting with bloom filters");
+      c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+      c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
+
+      c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+      c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnFamilyFunctor.class.getName());
+
+      c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+      c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnQualifierFunctor.class.getName());
+
+      c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+      c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
+
+      // ensure the updates to zookeeper propagate
+      UtilWaitThread.sleep(500);
+
+      c.tableOperations().compact(tables[3], null, null, false, true);
+      c.tableOperations().compact(tables[0], null, null, false, true);
+      c.tableOperations().compact(tables[1], null, null, false, true);
+      c.tableOperations().compact(tables[2], null, null, false, true);
+      log.info("Rewriting with bloom filters complete");
+
+      // these queries should only run quickly if bloom
+      // filters are working
+      log.info("Bloom query");
+      long tb1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
+      long tb2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
+      long tb3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
+      log.info("Bloom query complete");
+      timeCheck(t1 + t2 + t3, tb1 + tb2 + tb3);
+
+      // test querying for empty key
+      Scanner scanner = c.createScanner(tables[3], Authorizations.EMPTY);
+      scanner.setRange(new Range(new Text("")));
+
+      if (!scanner.iterator().next().getValue().toString().equals("foo1")) {
+        throw new Exception("Did not see foo1");
+      }
+    } finally {
+      c.instanceOperations().setProperty(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey(), readAhead);
     }
-
   }
 
   private void timeCheck(long t1, long t2) throws Exception {


Mime
View raw message