From: elserj@apache.org
To: commits@accumulo.apache.org
Reply-To: dev@accumulo.apache.org
Date: Tue, 02 Dec 2014 20:37:12 -0000
Subject: [24/50] [abbrv] accumulo git commit: ACCUMULO-3167 Reset configuration to the original value

ACCUMULO-3167 Reset configuration to the original value

Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/91dcc95f
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/91dcc95f
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/91dcc95f

Branch: refs/heads/metrics2
Commit: 91dcc95f0a571b7003689b9a19fbf6b9f4652266
Parents: 7691780
Author: Josh Elser
Authored: Mon Nov 24 13:23:04 2014 -0500
Committer: Josh Elser
Committed: Mon Nov 24 18:08:52 2014 -0500

----------------------------------------------------------------------
 .../accumulo/test/functional/BloomFilterIT.java | 158 ++++++++++---------
 1 file changed, 81 insertions(+), 77 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/91dcc95f/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
index 6c93665..d497d40 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
@@ -71,88 +71,92 @@ public class BloomFilterIT extends AccumuloClusterIT {
   @Test
   public void test() throws Exception {
     Connector c = getConnector();
+    final String readAhead = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey());
     c.instanceOperations().setProperty(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey(), "1");
-    Thread.sleep(1000);
-    final String[] tables = getUniqueNames(4);
-    for (String table : tables) {
-      TableOperations tops = c.tableOperations();
-      tops.create(table);
-      tops.setProperty(table, Property.TABLE_INDEXCACHE_ENABLED.getKey(), "false");
-      tops.setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "false");
-      tops.setProperty(table, Property.TABLE_BLOOM_SIZE.getKey(), "2000000");
-      tops.setProperty(table, Property.TABLE_BLOOM_ERRORRATE.getKey(), "1%");
-      tops.setProperty(table, Property.TABLE_BLOOM_LOAD_THRESHOLD.getKey(), "0");
-      tops.setProperty(table, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64K");
-    }
-    log.info("Writing");
-    write(c, tables[0], 1, 0, 2000000000, 500);
-    write(c, tables[1], 2, 0, 2000000000, 500);
-    write(c, tables[2], 3, 0, 2000000000, 500);
-    log.info("Writing complete");
-
-    // test inserting an empty key
-    BatchWriter bw = c.createBatchWriter(tables[3], new BatchWriterConfig());
-    Mutation m = new Mutation(new Text(""));
-    m.put(new Text(""), new Text(""), new Value("foo1".getBytes()));
-    bw.addMutation(m);
-    bw.close();
-    c.tableOperations().flush(tables[3], null, null, true);
+    try {
+      Thread.sleep(1000);
+      final String[] tables = getUniqueNames(4);
+      for (String table : tables) {
+        TableOperations tops = c.tableOperations();
+        tops.create(table);
+        tops.setProperty(table, Property.TABLE_INDEXCACHE_ENABLED.getKey(), "false");
+        tops.setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "false");
+        tops.setProperty(table, Property.TABLE_BLOOM_SIZE.getKey(), "2000000");
+        tops.setProperty(table, Property.TABLE_BLOOM_ERRORRATE.getKey(), "1%");
+        tops.setProperty(table, Property.TABLE_BLOOM_LOAD_THRESHOLD.getKey(), "0");
+        tops.setProperty(table, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64K");
+      }
+      log.info("Writing");
+      write(c, tables[0], 1, 0, 2000000000, 500);
+      write(c, tables[1], 2, 0, 2000000000, 500);
+      write(c, tables[2], 3, 0, 2000000000, 500);
+      log.info("Writing complete");
+
+      // test inserting an empty key
+      BatchWriter bw = c.createBatchWriter(tables[3], new BatchWriterConfig());
+      Mutation m = new Mutation(new Text(""));
+      m.put(new Text(""), new Text(""), new Value("foo1".getBytes()));
+      bw.addMutation(m);
+      bw.close();
+      c.tableOperations().flush(tables[3], null, null, true);
 
-    for (String table : Arrays.asList(tables[0], tables[1], tables[2])) {
-      c.tableOperations().compact(table, null, null, true, true);
-    }
+      for (String table : Arrays.asList(tables[0], tables[1], tables[2])) {
+        c.tableOperations().compact(table, null, null, true, true);
+      }
 
-    // ensure compactions are finished
-    for (String table : tables) {
-      FunctionalTestUtils.checkRFiles(c, table, 1, 1, 1, 1);
-    }
+      // ensure compactions are finished
+      for (String table : tables) {
+        FunctionalTestUtils.checkRFiles(c, table, 1, 1, 1, 1);
+      }
 
-    // these queries should only run quickly if bloom filters are working, so lets get a base
-    log.info("Base query");
-    long t1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
-    long t2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
-    long t3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
-    log.info("Base query complete");
-
-    log.info("Rewriting with bloom filters");
-    c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
-
-    c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnFamilyFunctor.class.getName());
-
-    c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnQualifierFunctor.class.getName());
-
-    c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
-
-    // ensure the updates to zookeeper propagate
-    UtilWaitThread.sleep(500);
-
-    c.tableOperations().compact(tables[3], null, null, false, true);
-    c.tableOperations().compact(tables[0], null, null, false, true);
-    c.tableOperations().compact(tables[1], null, null, false, true);
-    c.tableOperations().compact(tables[2], null, null, false, true);
-    log.info("Rewriting with bloom filters complete");
-
-    // these queries should only run quickly if bloom
-    // filters are working
-    log.info("Bloom query");
-    long tb1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
-    long tb2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
-    long tb3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
-    log.info("Bloom query complete");
-    timeCheck(t1 + t2 + t3, tb1 + tb2 + tb3);
-
-    // test querying for empty key
-    Scanner scanner = c.createScanner(tables[3], Authorizations.EMPTY);
-    scanner.setRange(new Range(new Text("")));
-
-    if (!scanner.iterator().next().getValue().toString().equals("foo1")) {
-      throw new Exception("Did not see foo1");
+      // these queries should only run quickly if bloom filters are working, so lets get a base
+      log.info("Base query");
+      long t1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
+      long t2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
+      long t3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
+      log.info("Base query complete");
+
+      log.info("Rewriting with bloom filters");
+      c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+      c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
+
+      c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+      c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnFamilyFunctor.class.getName());
+
+      c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+      c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnQualifierFunctor.class.getName());
+
+      c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+      c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
+
+      // ensure the updates to zookeeper propagate
+      UtilWaitThread.sleep(500);
+
+      c.tableOperations().compact(tables[3], null, null, false, true);
+      c.tableOperations().compact(tables[0], null, null, false, true);
+      c.tableOperations().compact(tables[1], null, null, false, true);
+      c.tableOperations().compact(tables[2], null, null, false, true);
+      log.info("Rewriting with bloom filters complete");
+
+      // these queries should only run quickly if bloom
+      // filters are working
+      log.info("Bloom query");
+      long tb1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
+      long tb2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
+      long tb3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
+      log.info("Bloom query complete");
+      timeCheck(t1 + t2 + t3, tb1 + tb2 + tb3);
+
+      // test querying for empty key
+      Scanner scanner = c.createScanner(tables[3], Authorizations.EMPTY);
+      scanner.setRange(new Range(new Text("")));
+
+      if (!scanner.iterator().next().getValue().toString().equals("foo1")) {
+        throw new Exception("Did not see foo1");
+      }
+    } finally {
+      c.instanceOperations().setProperty(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey(), readAhead);
     }
-
   }
 
   private void timeCheck(long t1, long t2) throws Exception {
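
For anyone writing similar ITs, the shape of the fix is a small save/override/restore idiom around a system-wide property: read the current value before overriding it, and put it back in a finally block so a failed assertion or timeout does not leave the lowered read-ahead setting in effect for every test that runs afterwards. A rough, self-contained sketch of that idiom follows; it assumes a JUnit harness that supplies a Connector, and the class name and getConnector() stub below are placeholders, not code from this commit:

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.conf.Property;
import org.junit.Test;

public class ReadAheadOverrideExample {

  @Test
  public void overrideAndRestore() throws Exception {
    Connector c = getConnector(); // assumed to come from the test harness
    String key = Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey();
    // Save the value currently in effect so it can be restored afterwards.
    String original = c.instanceOperations().getSystemConfiguration().get(key);
    c.instanceOperations().setProperty(key, "1");
    try {
      // ... exercise the tablet server with the reduced read-ahead setting ...
    } finally {
      // Restore the saved value even if the test body throws.
      c.instanceOperations().setProperty(key, original);
    }
  }

  private Connector getConnector() {
    // Placeholder: in BloomFilterIT this is provided by AccumuloClusterIT.
    throw new UnsupportedOperationException("supplied by the test harness");
  }
}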