From: mwalch@apache.org
To: commits@accumulo.apache.org
Reply-To: dev@accumulo.apache.org
Date: Wed, 25 Jan 2017 18:32:33 -0000
Subject: [5/6] accumulo git commit: ACCUMULO-4510 - Moved all remaining external test code to accumulo-testing repo

ACCUMULO-4510 - Moved all remaining external test code to accumulo-testing repo

Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/81f215c0
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/81f215c0
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/81f215c0

Branch: refs/heads/master
Commit: 81f215c0a399acd514bb5a6ad46367950e0e69cc
Parents: b31ce44
Author: Mike Walch
Authored: Tue Jan 24 15:50:45 2017 -0500
Committer: Mike Walch
Committed: Wed Jan 25 12:29:20 2017 -0500

----------------------------------------------------------------------
 TESTING.md | 3 +-
 assemble/src/main/assemblies/component.xml | 32 -
 test/compat/diffAPI.pl | 104 --
 test/compat/japi-compliance/README | 53 -
 test/compat/japi-compliance/exclude_classes.txt | 1 -
 .../japi-compliance/japi-accumulo-1.5.0.xml | 36 -
 .../japi-compliance/japi-accumulo-1.5.1.xml | 36 -
 .../japi-compliance/japi-accumulo-1.5.2.xml | 36 -
 .../japi-compliance/japi-accumulo-1.6.0.xml | 38 -
 .../japi-compliance/japi-accumulo-1.6.1.xml | 38 -
 .../japi-compliance/japi-accumulo-1.6.2.xml | 38 -
 .../japi-compliance/japi-accumulo-1.7.0.xml | 38 -
 .../japi-compliance/japi-accumulo-master.xml | 38 -
 test/scale/agitator.txt | 27 -
 test/scale/catastrophic.txt | 24 -
 test/scale/deleteLargeTable.txt | 16 -
 test/scale/restart.txt | 19 -
 .../accumulo/test/BulkImportDirectory.java | 68 --
 .../accumulo/test/mapreduce/TeraSortIngest.java | 399 -------
 .../test/replication/merkle/MerkleTree.java | 92 --
 .../test/replication/merkle/MerkleTreeNode.java | 131 ---
 .../replication/merkle/RangeSerialization.java | 72 --
 .../replication/merkle/cli/CompareTables.java | 176 ---
 .../replication/merkle/cli/ComputeRootHash.java | 100 --
 .../replication/merkle/cli/GenerateHashes.java | 287 -----
 .../merkle/cli/ManualComparison.java | 95 --
 .../merkle/ingest/RandomWorkload.java | 120 ---
 .../test/replication/merkle/package-info.java | 39 -
 .../replication/merkle/skvi/DigestIterator.java | 149 ---
 .../accumulo/test/scalability/Ingest.java | 143 ---
 .../apache/accumulo/test/scalability/Run.java | 97 --
 .../accumulo/test/scalability/ScaleTest.java | 88 --
 .../accumulo/test/stress/random/DataWriter.java | 50 -
 .../test/stress/random/IntArgValidator.java | 34 -
 .../test/stress/random/RandomByteArrays.java | 33 -
 .../test/stress/random/RandomMutations.java | 56 -
 .../test/stress/random/RandomWithinRange.java | 58 -
 .../accumulo/test/stress/random/Scan.java | 121 ---
 .../accumulo/test/stress/random/ScanOpts.java | 46 -
 .../accumulo/test/stress/random/Stream.java | 40 -
 .../accumulo/test/stress/random/Write.java | 77 --
 .../test/stress/random/WriteOptions.java | 169 ---
 .../test/stress/random/package-info.java | 36 -
 test/system/agitator/.gitignore | 3 -
 test/system/agitator/README.md | 39 -
 test/system/agitator/agitator.ini.example | 56 -
 test/system/agitator/agitator.py | 241 -----
 test/system/agitator/hosts.example | 16 -
 test/system/bench/README.md | 61 --
 test/system/bench/cloudstone1/__init__.py | 15 -
 test/system/bench/cloudstone1/cloudstone1.py | 44 -
 test/system/bench/cloudstone2/__init__.py | 15 -
 test/system/bench/cloudstone2/cloudstone2.py | 49 -
 test/system/bench/cloudstone3/__init__.py | 15 -
 test/system/bench/cloudstone3/cloudstone3.py | 50 -
 test/system/bench/cloudstone4/__init__.py | 15 -
 test/system/bench/cloudstone4/cloudstone4.py | 29 -
 test/system/bench/cloudstone5/__init__.py | 15 -
 test/system/bench/cloudstone5/cloudstone5.py | 29 -
 test/system/bench/cloudstone6/__init__.py | 15 -
 test/system/bench/cloudstone6/cloudstone6.py | 29 -
 test/system/bench/cloudstone7/__init__.py | 15 -
 test/system/bench/cloudstone7/cloudstone7.py | 29 -
 test/system/bench/cloudstone8/__init__.py | 15 -
 test/system/bench/cloudstone8/cloudstone8.py | 64 --
 test/system/bench/lib/Benchmark.py | 115 --
 test/system/bench/lib/CreateTablesBenchmark.py | 78 --
 test/system/bench/lib/IngestBenchmark.py | 94 --
 test/system/bench/lib/RowHashBenchmark.py | 136 ---
 test/system/bench/lib/TableSplitsBenchmark.py | 76 --
 test/system/bench/lib/TeraSortBenchmark.py | 110 --
 test/system/bench/lib/__init__.py | 15 -
 test/system/bench/lib/cloudshell.py | 33 -
 test/system/bench/lib/fastsplits | 300 ------
 test/system/bench/lib/mediumsplits | 650 ------------
 test/system/bench/lib/options.py | 39 -
 test/system/bench/lib/path.py | 38 -
 test/system/bench/lib/runner.py | 28 -
 test/system/bench/lib/slowsplits | 1000 ------------------
 test/system/bench/lib/splits | 190 ----
 test/system/bench/lib/tservers.py | 89 --
 test/system/bench/lib/util.py | 20 -
 test/system/bench/run.py | 116 --
 test/system/merkle-replication/README | 65 --
 .../merkle-replication/configure-replication.sh | 99 --
 test/system/merkle-replication/ingest-data.sh | 39 -
 test/system/merkle-replication/merkle-env.sh | 48 -
 test/system/merkle-replication/verify-data.sh | 91 --
 test/system/scalability/README.md | 57 -
 .../system/scalability/conf/Ingest.conf.example | 27 -
 test/system/scalability/conf/site.conf.example | 27 -
 test/system/scalability/run.py | 228 ----
 test/system/stress/README.md | 105 --
 test/system/stress/reader.sh | 39 -
 test/system/stress/readers | 17 -
 test/system/stress/start-readers.sh | 40 -
 test/system/stress/start-writers.sh | 40 -
 test/system/stress/stop-readers.sh | 36 -
 test/system/stress/stop-writers.sh | 36 -
 test/system/stress/stress-env.sh.example | 60 --
 test/system/stress/writer.sh | 44 -
 test/system/stress/writers | 17 -
 test/system/test1/README.md | 46 -
 test/system/test1/ingest_test.sh | 22 -
 test/system/test1/ingest_test_2.sh | 22 -
 test/system/test1/ingest_test_3.sh | 22 -
 test/system/test1/verify_test.sh | 22 -
 test/system/test1/verify_test_2.sh | 22 -
 test/system/test2/README.md | 27 -
 test/system/test2/concurrent.sh | 99 --
 test/system/test3/README.md | 22 -
 test/system/test3/bigrow.sh | 27 -
 test/system/test4/README.md | 26 -
 test/system/test4/bulk_import_test.sh | 72 --
 test/system/upgrade_test.sh | 77 --
 115 files changed, 1 insertion(+), 8959 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/TESTING.md
----------------------------------------------------------------------
diff --git a/TESTING.md b/TESTING.md
index f5c94fa..a9f5b3c 100644
--- a/TESTING.md
+++ b/TESTING.md
@@ -172,8 +172,7 @@ These files do exist in the build tree, but at different locations)
 # Manual Distributed Testing
 
 Apache Accumulo has a number of tests which are suitable for running against large clusters for hours to days at a time.
-Some of these test suites exist in the repository under `test/system` and contain their own README files for configuration.
-Others (like the Continuous Ingest and Random Walk tests) are in the [accumulo-testing repo][2].
+These test suites exist in the [accumulo-testing repo][2].
 
 [2]: https://github.com/apache/accumulo-testing
 [3]: https://maven.apache.org/guides/introduction/introduction-to-the-lifecycle.html

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/assemble/src/main/assemblies/component.xml
----------------------------------------------------------------------
diff --git a/assemble/src/main/assemblies/component.xml b/assemble/src/main/assemblies/component.xml
index 6d76d60..e34bccb 100644
--- a/assemble/src/main/assemblies/component.xml
+++ b/assemble/src/main/assemblies/component.xml
@@ -148,38 +148,6 @@
         <include>**/*.sh</include>
       </includes>
     </fileSet>
-    <fileSet>
-      <directory>../test</directory>
-      <outputDirectory>test</outputDirectory>
-      <directoryMode>0755</directoryMode>
-      <fileMode>0755</fileMode>
-      <includes>
-        <include>**/*.sh</include>
-        <include>**/*.py</include>
-        <include>**/*.pl</include>
-      </includes>
-      <excludes>
-        <exclude>src/**</exclude>
-        <exclude>target/**</exclude>
-      </excludes>
-    </fileSet>
-    <fileSet>
-      <directory>../test</directory>
-      <outputDirectory>test</outputDirectory>
-      <directoryMode>0755</directoryMode>
-      <fileMode>0644</fileMode>
-      <excludes>
-        <exclude>**/.*/**</exclude>
-        <exclude>pom.xml</exclude>
-        <exclude>src/**</exclude>
-        <exclude>target/**</exclude>
-        <exclude>**/*.sh</exclude>
-        <exclude>**/*.py</exclude>
-        <exclude>**/*.pl</exclude>
-        <exclude>**/*.pyc</exclude>
-        <exclude>**/*.pyo</exclude>
-      </excludes>
-    </fileSet>
     <fileSet>
       <directory>../proxy/target</directory>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/compat/diffAPI.pl
----------------------------------------------------------------------
diff --git a/test/compat/diffAPI.pl b/test/compat/diffAPI.pl
deleted file mode 100755
index 183655d..0000000
--- a/test/compat/diffAPI.pl
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/perl
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-sub trim($)
-{
-  my $string = shift;
-  $string =~ s/^\s+//;
-  $string =~ s/\s+$//;
-  return $string;
-}
-
-sub getDeprecated {
-  my($jar, $class) = @_;
-
-  open(JAVAP, "javap -verbose -public -classpath '$jar' '$class'|");
-
-  my $lastMethod = "";
-  my %deprecated;
-
-  while(<JAVAP>){
-    chomp();
-    if(/^public\s/){
-      $lastMethod = $_;
-    }
-    if(/Deprecated\:\strue/){
-      $lastMethod =~ s/\s+/ /g;
-      $deprecated{$lastMethod}="true";
-    }
-  }
-
-  close(JAVAP);
-
-  return %deprecated;
-}
-
-sub annotateDeprecated {
-  my($jar, $class, $deprecated, $outFile) = @_;
-  open(JAVAP, "javap -public -classpath '$jar' '$class'|");
-  open(OUT, ">$outFile");
-  my @javapOut = <JAVAP>;
-  @javapOut = sort(@javapOut);
-
-  for my $line (@javapOut){
-    my $trimLine = trim($line);
-    chomp($line);
-    $trimLine =~ s/\s+/ /g;
-    if($deprecated->{$trimLine}){
-      print OUT "$line DEPRECATED\n";
-    }else{
-      print OUT "$line\n";
-    }
-  }
-
-  close(JAVAP);
-  close(OUT);
-
-}
-
-if(scalar(@ARGV) != 2){
-  print "Usage : diffAPI.pl <jar1> <jar2>\n";
-  exit(-1);
-}
-
-$jar1 = $ARGV[0];
-$jar2 = $ARGV[1];
-
-$gtCmd = 'egrep "accumulo/core/client/.*class|accumulo/core/data/.*class" | grep -v accumulo/core/client/impl | grep -v accumulo/core/data/thrift | egrep -v "Impl.*class$" | tr / .';
-
-@classes1 = `jar tf $jar1 | $gtCmd`;
-@classes2 = `jar tf $jar2 | $gtCmd`;
-
-mkdir("diffWorkDir");
-mkdir("diffWorkDir/jar1");
-mkdir("diffWorkDir/jar2");
-
-for $class (@classes1){
-  $class = substr($class, 0, length($class) - 7);
-  %deprecated = getDeprecated($jar1, $class);
-  annotateDeprecated($jar1, $class, \%deprecated, "diffWorkDir/jar1/$class");
-}
-
-for $class (@classes2){
-  $class = substr($class, 0, length($class) - 7);
-  %deprecated = getDeprecated($jar2, $class);
-  annotateDeprecated($jar2, $class, \%deprecated, "diffWorkDir/jar2/$class");
-}
-
-system("diff -u diffWorkDir/jar1 diffWorkDir/jar2");
-system("rm -rf diffWorkDir");
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/compat/japi-compliance/README
----------------------------------------------------------------------
diff --git a/test/compat/japi-compliance/README b/test/compat/japi-compliance/README
deleted file mode 100644
index 0b77050..0000000
--- a/test/compat/japi-compliance/README
+++ /dev/null
@@ -1,53 +0,0 @@
-Title: Java API Compliance Checker Instructions
-Notice: Licensed to the Apache Software Foundation (ASF) under one
-        or more contributor license agreements. See the NOTICE file
-        distributed with this work for additional information
-        regarding copyright ownership. The ASF licenses this file
-        to you under the Apache License, Version 2.0 (the
-        "License"); you may not use this file except in compliance
-        with the License. You may obtain a copy of the License at
-        .
-        http://www.apache.org/licenses/LICENSE-2.0
-        .
-        Unless required by applicable law or agreed to in writing,
-        software distributed under the License is distributed on an
-        "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-        KIND, either express or implied. See the License for the
-        specific language governing permissions and limitations
-        under the License.
-
-There is a tool that can analyze the difference between APIs called
-japi-compliance[1]. This tool is useful for checking API compatibility of
-different Accumulo versions. To run this tool, edit the xml files to specify
-the location of accumulo core jars and set the library version. Then run the
-following command.
-
-  japi-compliance-checker.pl -skip-deprecated -old japi-accumulo-1.5.xml -new japi-accumulo-1.6.xml -l accumulo
-
-Optionally, you can use the --skip-classes argument with the provided exclude_classes.txt file to skip classes from
-org.apache.accumulo.core.data that aren't in the public API.
-
-This directory should have a library configuration file for each release on supported lines, as well as an in-progress
-one for whatever version is currently on the master branch. The examples below all make use of version-specific library definitions.
-
-When looking at a patch release, you should verify that changes introduced are forwards and backwards compatible, per
-semver.
-
-  # Backwards compatibility from x.y.z to x.y.(z+1)
-  japi-compliance-checker.pl -old japi-accumulo-1.6.1.xml -new japi-accumulo-1.6.2.xml -l accumulo --skip-classes=exclude_classes.txt
-  # Forwards compatibility from x.y.z to x.y.(z+1). Note that the old / new arguments have been swapped.
-  japi-compliance-checker.pl -new japi-accumulo-1.6.1.xml -old japi-accumulo-1.6.2.xml -l accumulo --skip-classes=exclude_classes.txt
-
-When looking at a minor release, you should verify that changes are backwards compatible, per semver.
-
-  # Backwards compatibility from x.y.z to x.(y+1).0
-  japi-compliance-checker.pl -old japi-accumulo-1.6.1.xml -new japi-accumulo-1.7.0.xml -l accumulo --skip-classes=exclude_classes.txt
-
-When looking at a major release, you should examine removals to make sure they are not capricious. Specifically, you should ensure that
-they have been deprecated for a full major version.
-
-  # Advisory backwards compatibility check from x.y.z to (x+1).0.0
-  japi-compliance-checker.pl -old japi-accumulo-1.7.0.xml -new japi-accumulo-2.0.0.xml -l accumulo --skip-classes=exclude_classes.txt
-
-[1]: http://ispras.linuxbase.org/index.php/Java_API_Compliance_Checker
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/compat/japi-compliance/exclude_classes.txt
----------------------------------------------------------------------
diff --git a/test/compat/japi-compliance/exclude_classes.txt b/test/compat/japi-compliance/exclude_classes.txt
deleted file mode 100644
index 94356b7..0000000
--- a/test/compat/japi-compliance/exclude_classes.txt
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.accumulo.core.data.KeyValue

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/compat/japi-compliance/japi-accumulo-1.5.0.xml
----------------------------------------------------------------------
diff --git a/test/compat/japi-compliance/japi-accumulo-1.5.0.xml b/test/compat/japi-compliance/japi-accumulo-1.5.0.xml
deleted file mode 100644
index f49dbb5..0000000
--- a/test/compat/japi-compliance/japi-accumulo-1.5.0.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<version>
-    1.5.0
-</version>
-
-<archives>
-    CHANGEME/accumulo-1.5.0/lib/accumulo-core.jar
-    CHANGEME/accumulo-1.5.0/lib/accumulo-minicluster.jar
-</archives>
-
-<skip_packages>
-    org.apache.accumulo.core.client.impl
-    org.apache.accumulo.core.data.thrift
-</skip_packages>
-
-<packages>
-    org.apache.accumulo.core.client
-    org.apache.accumulo.core.data
-    org.apache.accumulo.minicluster
-</packages>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/compat/japi-compliance/japi-accumulo-1.5.1.xml
----------------------------------------------------------------------
diff --git a/test/compat/japi-compliance/japi-accumulo-1.5.1.xml b/test/compat/japi-compliance/japi-accumulo-1.5.1.xml
deleted file mode 100644
index ff92506..0000000
--- a/test/compat/japi-compliance/japi-accumulo-1.5.1.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<version>
-    1.5.1
-</version>
-
-<archives>
-    CHANGEME/accumulo-1.5.1/lib/accumulo-core.jar
-    CHANGEME/accumulo-1.5.1/lib/accumulo-minicluster.jar
-</archives>
-
-<skip_packages>
-    org.apache.accumulo.core.client.impl
-    org.apache.accumulo.core.data.thrift
-</skip_packages>
-
-<packages>
-    org.apache.accumulo.core.client
-    org.apache.accumulo.core.data
-    org.apache.accumulo.minicluster
-</packages>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/compat/japi-compliance/japi-accumulo-1.5.2.xml
----------------------------------------------------------------------
diff --git a/test/compat/japi-compliance/japi-accumulo-1.5.2.xml b/test/compat/japi-compliance/japi-accumulo-1.5.2.xml
deleted file mode 100644
index 8d7a668..0000000
--- a/test/compat/japi-compliance/japi-accumulo-1.5.2.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<version>
-    1.5.2
-</version>
-
-<archives>
-    CHANGEME/accumulo-1.5.2/lib/accumulo-core.jar
-    CHANGEME/accumulo-1.5.2/lib/accumulo-minicluster.jar
-</archives>
-
-<skip_packages>
-    org.apache.accumulo.core.client.impl
-    org.apache.accumulo.core.data.thrift
-</skip_packages>
-
-<packages>
-    org.apache.accumulo.core.client
-    org.apache.accumulo.core.data
-    org.apache.accumulo.minicluster
-</packages>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/compat/japi-compliance/japi-accumulo-1.6.0.xml
----------------------------------------------------------------------
diff --git a/test/compat/japi-compliance/japi-accumulo-1.6.0.xml b/test/compat/japi-compliance/japi-accumulo-1.6.0.xml
deleted file mode 100644
index cb1c22f..0000000
--- a/test/compat/japi-compliance/japi-accumulo-1.6.0.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<version>
-    1.6.0
-</version>
-
-<archives>
-    CHANGEME/accumulo-1.6.0/core/target/accumulo-core-1.6.0.jar
-    CHANGEME/accumulo-1.6.0/minicluster/target/accumulo-minicluster-1.6.0.jar
-</archives>
-
-<skip_packages>
-    org.apache.accumulo.core.client.impl
-    org.apache.accumulo.core.client.lexicoders.impl
-    org.apache.accumulo.core.client.mapreduce.lib.impl
-    org.apache.accumulo.core.data.thrift
-    org.apache.accumulo.minicluster.impl
-</skip_packages>
-
-<packages>
-    org.apache.accumulo.core.client
-    org.apache.accumulo.core.data
-    org.apache.accumulo.minicluster
-</packages>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/compat/japi-compliance/japi-accumulo-1.6.1.xml
----------------------------------------------------------------------
diff --git a/test/compat/japi-compliance/japi-accumulo-1.6.1.xml b/test/compat/japi-compliance/japi-accumulo-1.6.1.xml
deleted file mode 100644
index 30e6068..0000000
--- a/test/compat/japi-compliance/japi-accumulo-1.6.1.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<version>
-    1.6.1
-</version>
-
-<archives>
-    CHANGEME/accumulo-1.6.1/core/target/accumulo-core-1.6.1.jar
-    CHANGEME/accumulo-1.6.1/minicluster/target/accumulo-minicluster-1.6.1.jar
-</archives>
-
-<skip_packages>
-    org.apache.accumulo.core.client.impl
-    org.apache.accumulo.core.client.lexicoders.impl
-    org.apache.accumulo.core.client.mapreduce.lib.impl
-    org.apache.accumulo.core.data.thrift
-    org.apache.accumulo.minicluster.impl
-</skip_packages>
-
-<packages>
-    org.apache.accumulo.core.client
-    org.apache.accumulo.core.data
-    org.apache.accumulo.minicluster
-</packages>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/compat/japi-compliance/japi-accumulo-1.6.2.xml
----------------------------------------------------------------------
diff --git a/test/compat/japi-compliance/japi-accumulo-1.6.2.xml b/test/compat/japi-compliance/japi-accumulo-1.6.2.xml
deleted file mode 100644
index 8327206..0000000
--- a/test/compat/japi-compliance/japi-accumulo-1.6.2.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<version>
-    1.6.2
-</version>
-
-<archives>
-    CHANGEME/accumulo-1.6.2/core/target/accumulo-core-1.6.2.jar
-    CHANGEME/accumulo-1.6.2/minicluster/target/accumulo-minicluster-1.6.2.jar
-</archives>
-
-<skip_packages>
-    org.apache.accumulo.core.client.impl
-    org.apache.accumulo.core.client.lexicoders.impl
-    org.apache.accumulo.core.client.mapreduce.lib.impl
-    org.apache.accumulo.core.data.thrift
-    org.apache.accumulo.minicluster.impl
-</skip_packages>
-
-<packages>
-    org.apache.accumulo.core.client
-    org.apache.accumulo.core.data
-    org.apache.accumulo.minicluster
-</packages>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/compat/japi-compliance/japi-accumulo-1.7.0.xml
----------------------------------------------------------------------
diff --git a/test/compat/japi-compliance/japi-accumulo-1.7.0.xml b/test/compat/japi-compliance/japi-accumulo-1.7.0.xml
deleted file mode 100644
index 6caee4d..0000000
--- a/test/compat/japi-compliance/japi-accumulo-1.7.0.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<version>
-    1.7.0
-</version>
-
-<archives>
-    ../../../core/target/accumulo-core-1.7.0.jar
-    ../../../minicluster/target/accumulo-minicluster-1.7.0.jar
-</archives>
-
-<skip_packages>
-    org.apache.accumulo.core.client.impl
-    org.apache.accumulo.core.client.lexicoders.impl
-    org.apache.accumulo.core.client.mapreduce.lib.impl
-    org.apache.accumulo.core.data.thrift
-    org.apache.accumulo.minicluster.impl
-</skip_packages>
-
-<packages>
-    org.apache.accumulo.core.client
-    org.apache.accumulo.core.data
-    org.apache.accumulo.minicluster
-</packages>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/compat/japi-compliance/japi-accumulo-master.xml
----------------------------------------------------------------------
diff --git a/test/compat/japi-compliance/japi-accumulo-master.xml b/test/compat/japi-compliance/japi-accumulo-master.xml
deleted file mode 100644
index 2fc184e..0000000
--- a/test/compat/japi-compliance/japi-accumulo-master.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<version>
-    1.7.0-SNAPSHOT
-</version>
-
-<archives>
-    ../../../core/target/accumulo-core-1.7.0-SNAPSHOT.jar
-    ../../../minicluster/target/accumulo-minicluster-1.7.0-SNAPSHOT.jar
-</archives>
-
-<skip_packages>
-    org.apache.accumulo.core.client.impl
-    org.apache.accumulo.core.client.lexicoders.impl
-    org.apache.accumulo.core.client.mapreduce.lib.impl
-    org.apache.accumulo.core.data.thrift
-    org.apache.accumulo.minicluster.impl
-</skip_packages>
-
-<packages>
-    org.apache.accumulo.core.client
-    org.apache.accumulo.core.data
-    org.apache.accumulo.minicluster
-</packages>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/scale/agitator.txt
----------------------------------------------------------------------
diff --git a/test/scale/agitator.txt b/test/scale/agitator.txt
deleted file mode 100644
index 9715bf0..0000000
--- a/test/scale/agitator.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-* run continuous ingest, continuous walkers, and agitator for 24 hours
-* stop continuous ingest, walkers, and agitator
-* run verification map reduce job
-
-This test should be run with the following configurations of the agitator.
-* agitator does not kill master and kills only one tablet server at a time
-* agitator does not kill master and kills multiple tablet servers at a time
-* agitator kills master and kills multiple tablet servers at a time
-* agitator kills master and does not kill tablet servers
-
-The agitator can also be run during verification. Another test is to lower
-a table's split threshold during verification.

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/scale/catastrophic.txt
----------------------------------------------------------------------
diff --git a/test/scale/catastrophic.txt b/test/scale/catastrophic.txt
deleted file mode 100644
index 322cab7..0000000
--- a/test/scale/catastrophic.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-* run continuous ingest and continuous walkers for 24 hours
-* stop continuous ingest and walkers
-* disable idle compactions
-* run verification map reduce job
-* ensure there are entries in memory and then kill -9 all tablet servers and master
-* restart accumulo
-* allow log recoveries to run
-* run verification map reduce job
-* compare counts from two verification runs

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/scale/deleteLargeTable.txt
----------------------------------------------------------------------
diff --git a/test/scale/deleteLargeTable.txt b/test/scale/deleteLargeTable.txt
deleted file mode 100644
index 59dd648..0000000
--- a/test/scale/deleteLargeTable.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-Create and delete a large table (do this with accumulo.metadata split a lot) see #2381

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/scale/restart.txt
----------------------------------------------------------------------
diff --git a/test/scale/restart.txt b/test/scale/restart.txt
deleted file mode 100644
index bab7cd9..0000000
--- a/test/scale/restart.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
- * create a continuous ingest table with 16k tablets
- * start continuous ingest
- * restart accumulo, but leave the ingesters running
- * after accumulo is up, ingest should quickly resume to pre-restart rates

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/src/main/java/org/apache/accumulo/test/BulkImportDirectory.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/BulkImportDirectory.java b/test/src/main/java/org/apache/accumulo/test/BulkImportDirectory.java
deleted file mode 100644
index 4cbba8e..0000000
--- a/test/src/main/java/org/apache/accumulo/test/BulkImportDirectory.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.server.cli.ClientOnRequiredTable;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import com.beust.jcommander.Parameter;
-
-public class BulkImportDirectory {
-  static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = {"-s", "--source"}, description = "directory to import from")
-    String source = null;
-    @Parameter(names = {"-f", "--failures"}, description = "directory to copy failures into: will be deleted before the bulk import")
-    String failures = null;
-    @Parameter(description = " ")
-    List<String> args = new ArrayList<>();
-  }
-
-  public static void main(String[] args) throws IOException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    final FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    Opts opts = new Opts();
-    if (args.length == 5) {
-      System.err.println("Deprecated syntax for BulkImportDirectory, please use the new style (see --help)");
-      final String user = args[0];
-      final byte[] pass = args[1].getBytes(UTF_8);
-      final String tableName = args[2];
-      final String dir = args[3];
-      final String failureDir = args[4];
-      final Path failureDirPath = new Path(failureDir);
-      fs.delete(failureDirPath, true);
-      fs.mkdirs(failureDirPath);
-      HdfsZooInstance.getInstance().getConnector(user, new PasswordToken(pass)).tableOperations().importDirectory(tableName, dir, failureDir, false);
-    } else {
-      opts.parseArgs(BulkImportDirectory.class.getName(), args);
-      fs.delete(new Path(opts.failures), true);
-      fs.mkdirs(new Path(opts.failures));
-      opts.getConnector().tableOperations().importDirectory(opts.getTableName(), opts.source, opts.failures, false);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java b/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java
deleted file mode 100644
index 28762e0..0000000
--- a/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.test.mapreduce;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.mapreduce.InputFormat;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Generate the *almost* official terasort input data set. (See below) The user specifies the number of rows and the output directory and this class runs a
- * map/reduce program to generate the data. The format of the data is:
- * <ul>
- * <li>(10 bytes key) (10 bytes rowid) (78 bytes filler) \r \n
- * <li>The keys are random characters from the set ' ' .. '~'.
- * <li>The rowid is the right justified row id as an int.
- * <li>The filler consists of 7 runs of 10 characters from 'A' to 'Z'.
- * </ul>
- *
- * This TeraSort is slightly modified to allow for variable length key sizes and value sizes. The row length isn't variable. To generate a terabyte of data in
- * the same way TeraSort does, use 10000000000 rows and 10/10 byte key length and 78/78 byte value length. Along with the 10 byte row id and \r\n this gives you
- * 100 byte row * 10000000000 rows = 1tb. Min/Max ranges for key and value parameters are inclusive/inclusive respectively.
- */
-public class TeraSortIngest extends Configured implements Tool {
-  /**
-   * An input format that assigns ranges of longs to each mapper.
-   */
-  static class RangeInputFormat extends InputFormat<LongWritable,NullWritable> {
-    /**
-     * An input split consisting of a range on numbers.
-     */
-    static class RangeInputSplit extends InputSplit implements Writable {
-      long firstRow;
-      long rowCount;
-
-      public RangeInputSplit() {}
-
-      public RangeInputSplit(long offset, long length) {
-        firstRow = offset;
-        rowCount = length;
-      }
-
-      @Override
-      public long getLength() throws IOException {
-        return 0;
-      }
-
-      @Override
-      public String[] getLocations() throws IOException {
-        return new String[] {};
-      }
-
-      @Override
-      public void readFields(DataInput in) throws IOException {
-        firstRow = WritableUtils.readVLong(in);
-        rowCount = WritableUtils.readVLong(in);
-      }
-
-      @Override
-      public void write(DataOutput out) throws IOException {
-        WritableUtils.writeVLong(out, firstRow);
-        WritableUtils.writeVLong(out, rowCount);
-      }
-    }
-
-    /**
-     * A record reader that will generate a range of numbers.
-     */
-    static class RangeRecordReader extends RecordReader<LongWritable,NullWritable> {
-      long startRow;
-      long finishedRows;
-      long totalRows;
-
-      public RangeRecordReader(RangeInputSplit split) {
-        startRow = split.firstRow;
-        finishedRows = 0;
-        totalRows = split.rowCount;
-      }
-
-      @Override
-      public void close() throws IOException {}
-
-      @Override
-      public float getProgress() throws IOException {
-        return finishedRows / (float) totalRows;
-      }
-
-      @Override
-      public LongWritable getCurrentKey() throws IOException, InterruptedException {
-        return new LongWritable(startRow + finishedRows);
-      }
-
-      @Override
-      public NullWritable getCurrentValue() throws IOException, InterruptedException {
-        return NullWritable.get();
-      }
-
-      @Override
-      public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {}
-
-      @Override
-      public boolean nextKeyValue() throws IOException, InterruptedException {
-        if (finishedRows < totalRows) {
-          ++finishedRows;
-          return true;
-        }
-        return false;
-      }
-    }
-
-    @Override
-    public RecordReader<LongWritable,NullWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
-      // reporter.setStatus("Creating record reader");
-      return new RangeRecordReader((RangeInputSplit) split);
-    }
-
-    /**
-     * Create the desired number of splits, dividing the number of rows between the mappers.
-     */
-    @Override
-    public List<InputSplit> getSplits(JobContext job) {
-      long totalRows = job.getConfiguration().getLong(NUMROWS, 0);
-      int numSplits = job.getConfiguration().getInt(NUMSPLITS, 1);
-      long rowsPerSplit = totalRows / numSplits;
-      System.out.println("Generating " + totalRows + " using " + numSplits + " maps with step of " + rowsPerSplit);
-      ArrayList<InputSplit> splits = new ArrayList<>(numSplits);
-      long currentRow = 0;
-      for (int split = 0; split < numSplits - 1; ++split) {
-        splits.add(new RangeInputSplit(currentRow, rowsPerSplit));
-        currentRow += rowsPerSplit;
-      }
-      splits.add(new RangeInputSplit(currentRow, totalRows - currentRow));
-      System.out.println("Done Generating.");
-      return splits;
-    }
-
-  }
-
-  private static String NUMSPLITS = "terasort.overridesplits";
-  private static String NUMROWS = "terasort.numrows";
-
-  static class RandomGenerator {
-    private long seed = 0;
-    private static final long mask32 = (1l << 32) - 1;
-    /**
-     * The number of iterations separating the precomputed seeds.
-     */
-    private static final int seedSkip = 128 * 1024 * 1024;
-    /**
-     * The precomputed seed values after every seedSkip iterations. There should be enough values so that 2**32 iterations are covered.
-     */
-    private static final long[] seeds = new long[] {0L, 4160749568L, 4026531840L, 3892314112L, 3758096384L, 3623878656L, 3489660928L, 3355443200L, 3221225472L,
-        3087007744L, 2952790016L, 2818572288L, 2684354560L, 2550136832L, 2415919104L, 2281701376L, 2147483648L, 2013265920L, 1879048192L, 1744830464L,
-        1610612736L, 1476395008L, 1342177280L, 1207959552L, 1073741824L, 939524096L, 805306368L, 671088640L, 536870912L, 402653184L, 268435456L, 134217728L,};
-
-    /**
-     * Start the random number generator on the given iteration.
-     *
-     * @param initalIteration
-     *          the iteration number to start on
-     */
-    RandomGenerator(long initalIteration) {
-      int baseIndex = (int) ((initalIteration & mask32) / seedSkip);
-      seed = seeds[baseIndex];
-      for (int i = 0; i < initalIteration % seedSkip; ++i) {
-        next();
-      }
-    }
-
-    RandomGenerator() {
-      this(0);
-    }
-
-    long next() {
-      seed = (seed * 3141592621l + 663896637) & mask32;
-      return seed;
-    }
-  }
-
-  /**
-   * The Mapper class that given a row number, will generate the appropriate output line.
-   */
-  public static class SortGenMapper extends Mapper<LongWritable,NullWritable,Text,Mutation> {
-    private Text tableName = null;
-    private int minkeylength = 0;
-    private int maxkeylength = 0;
-    private int minvaluelength = 0;
-    private int maxvaluelength = 0;
-
-    private Text key = new Text();
-    private Text value = new Text();
-    private RandomGenerator rand;
-    private byte[] keyBytes; // = new byte[12];
-    private byte[] spaces = "          ".getBytes();
-    private byte[][] filler = new byte[26][];
-    {
-      for (int i = 0; i < 26; ++i) {
-        filler[i] = new byte[10];
-        for (int j = 0; j < 10; ++j) {
-          filler[i][j] = (byte) ('A' + i);
-        }
-      }
-    }
-
-    /**
-     * Add a random key to the text
-     */
-    private Random random = new Random();
-
-    private void addKey() {
-      int range = random.nextInt(maxkeylength - minkeylength + 1);
-      int keylen = range + minkeylength;
-      int keyceil = keylen + (4 - (keylen % 4));
-      keyBytes = new byte[keyceil];
-
-      long temp = 0;
-      for (int i = 0; i < keyceil / 4; i++) {
-        temp = rand.next() / 52;
-        keyBytes[3 + 4 * i] = (byte) (' ' + (temp % 95));
-        temp /= 95;
-        keyBytes[2 + 4 * i] = (byte) (' ' + (temp % 95));
-        temp /= 95;
-        keyBytes[1 + 4 * i] = (byte) (' ' + (temp % 95));
-        temp /= 95;
-        keyBytes[4 * i] = (byte) (' ' + (temp % 95));
-      }
-      key.set(keyBytes, 0, keylen);
-    }
-
-    /**
-     * Add the rowid to the row.
-     */
-    private Text getRowIdString(long rowId) {
-      Text paddedRowIdString = new Text();
-      byte[] rowid = Integer.toString((int) rowId).getBytes();
-      int padSpace = 10 - rowid.length;
-      if (padSpace > 0) {
-        paddedRowIdString.append(spaces, 0, 10 - rowid.length);
-      }
-      paddedRowIdString.append(rowid, 0, Math.min(rowid.length, 10));
-      return paddedRowIdString;
-    }
-
-    /**
-     * Add the required filler bytes. Each row consists of 7 blocks of 10 characters and 1 block of 8 characters.
-     *
-     * @param rowId
-     *          the current row number
-     */
-    private void addFiller(long rowId) {
-      int base = (int) ((rowId * 8) % 26);
-
-      // Get Random var
-      Random random = new Random(rand.seed);
-
-      int range = random.nextInt(maxvaluelength - minvaluelength + 1);
-      int valuelen = range + minvaluelength;
-
-      while (valuelen > 10) {
-        value.append(filler[(base + valuelen) % 26], 0, 10);
-        valuelen -= 10;
-      }
-
-      if (valuelen > 0)
-        value.append(filler[(base + valuelen) % 26], 0, valuelen);
-    }
-
-    @Override
-    public void map(LongWritable row, NullWritable ignored, Context context) throws IOException, InterruptedException {
-      context.setStatus("Entering");
-      long rowId = row.get();
-      if (rand == null) {
-        // we use 3 random numbers per a row
-        rand = new RandomGenerator(rowId * 3);
-      }
-      addKey();
-      value.clear();
-      // addRowId(rowId);
-      addFiller(rowId);
-
-      // New
-      Mutation m = new Mutation(key);
-      m.put(new Text("c"), // column family
-          getRowIdString(rowId), // column qual
-          new Value(value.toString().getBytes())); // data
-
-      context.setStatus("About to add to accumulo");
-      context.write(tableName, m);
-      context.setStatus("Added to accumulo " + key.toString());
-    }
-
-    @Override
-    public void setup(Context job) {
-      minkeylength = job.getConfiguration().getInt("cloudgen.minkeylength", 0);
-      maxkeylength = job.getConfiguration().getInt("cloudgen.maxkeylength", 0);
-      minvaluelength = job.getConfiguration().getInt("cloudgen.minvaluelength", 0);
-      maxvaluelength = job.getConfiguration().getInt("cloudgen.maxvaluelength", 0);
-      tableName = new Text(job.getConfiguration().get("cloudgen.tablename"));
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    ToolRunner.run(new Configuration(), new TeraSortIngest(), args);
-  }
-
-  static class Opts extends MapReduceClientOnRequiredTable {
-    @Parameter(names = "--count", description = "number of rows to ingest", required = true)
-    long numRows;
-    @Parameter(names = {"-nk", "--minKeySize"}, description = "minimum key size", required = true)
-    int minKeyLength;
-    @Parameter(names = {"-xk", "--maxKeySize"}, description = "maximum key size", required = true)
-    int maxKeyLength;
-    @Parameter(names = {"-nv", "--minValueSize"}, description = "minimum value size", required = true)
-    int minValueLength;
-    @Parameter(names = {"-xv", "--maxValueSize"}, description = "maximum value size", required = true)
-    int maxValueLength;
-    @Parameter(names = "--splits", description = "number of splits to create in the table")
-    int splits = 0;
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    Job job = Job.getInstance(getConf());
-    job.setJobName("TeraSortCloud");
-    job.setJarByClass(this.getClass());
-    Opts opts = new Opts();
-    opts.parseArgs(TeraSortIngest.class.getName(), args);
-
-    job.setInputFormatClass(RangeInputFormat.class);
-    job.setMapperClass(SortGenMapper.class);
-    job.setMapOutputKeyClass(Text.class);
-    job.setMapOutputValueClass(Mutation.class);
-
-    job.setNumReduceTasks(0);
-
-    job.setOutputFormatClass(AccumuloOutputFormat.class);
-    opts.setAccumuloConfigs(job);
-    BatchWriterConfig bwConfig = new BatchWriterConfig().setMaxMemory(10L * 1000 * 1000);
-    AccumuloOutputFormat.setBatchWriterOptions(job, bwConfig);
-
-    Configuration conf = job.getConfiguration();
-    conf.setLong(NUMROWS, opts.numRows);
-    conf.setInt("cloudgen.minkeylength", opts.minKeyLength);
-    conf.setInt("cloudgen.maxkeylength", opts.maxKeyLength);
-    conf.setInt("cloudgen.minvaluelength", opts.minValueLength);
-    conf.setInt("cloudgen.maxvaluelength",
-        opts.maxValueLength);
-    conf.set("cloudgen.tablename", opts.getTableName());
-
-    if (args.length > 10)
-      conf.setInt(NUMSPLITS, opts.splits);
-
-    job.waitForCompletion(true);
-    return job.isSuccessful() ? 0 : 1;
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/src/main/java/org/apache/accumulo/test/replication/merkle/MerkleTree.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/merkle/MerkleTree.java b/test/src/main/java/org/apache/accumulo/test/replication/merkle/MerkleTree.java
deleted file mode 100644
index 9a4b127..0000000
--- a/test/src/main/java/org/apache/accumulo/test/replication/merkle/MerkleTree.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication.merkle;
-
-import java.security.NoSuchAlgorithmException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.accumulo.core.util.Pair;
-
-import com.google.common.collect.Iterables;
-
-/**
- * Simple implementation of a Merkle tree
- */
-public class MerkleTree {
-  protected List<MerkleTreeNode> leaves;
-  protected String digestAlgorithm;
-
-  public MerkleTree(List<MerkleTreeNode> leaves, String digestAlgorithm) {
-    this.leaves = leaves;
-    this.digestAlgorithm = digestAlgorithm;
-  }
-
-  public MerkleTreeNode getRootNode() throws NoSuchAlgorithmException {
-    ArrayList<MerkleTreeNode> buffer = new ArrayList<>(leaves.size());
-    buffer.addAll(leaves);
-
-    while (buffer.size() > 1) {
-      // Find two nodes that we want to roll up
-      Pair<Integer,Integer> pairToJoin = findNextPair(buffer);
-
-      // Make a parent node from them
-      MerkleTreeNode parent = new MerkleTreeNode(Arrays.asList(buffer.get(pairToJoin.getFirst()), buffer.get(pairToJoin.getSecond())), digestAlgorithm);
-
-      // Insert it back into the "tree" at the position of the first child
-      buffer.set(pairToJoin.getFirst(), parent);
-
-      // Remove the second child completely
-      buffer.remove(pairToJoin.getSecond().intValue());
-
-      // "recurse"
-    }
-
-    return Iterables.getOnlyElement(buffer);
-  }
-
-  protected Pair<Integer,Integer> findNextPair(List<MerkleTreeNode> nodes) {
-    int i = 0, j = 1;
-    while (i < nodes.size() && j < nodes.size()) {
-      MerkleTreeNode left = nodes.get(i), right = nodes.get(j);
-
-      // At the same level
-      if (left.getLevel() == right.getLevel()) {
-        return new Pair<>(i, j);
-      }
-
-      // Peek to see if we have another element
-      if (j + 1 < nodes.size()) {
-        // If we do, try to match those
-        i++;
-        j++;
-      } else {
-        // Otherwise, the last two elements must be paired
-        return new Pair<>(i, j);
-      }
-    }
-
-    if (2 < nodes.size()) {
-      throw new IllegalStateException("Should not have exited loop without pairing two elements when we have at least 3 nodes");
-    } else if (2 == nodes.size()) {
-      return new Pair<>(0, 1);
-    } else {
-      throw new IllegalStateException("Must have at least two nodes to pair");
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/src/main/java/org/apache/accumulo/test/replication/merkle/MerkleTreeNode.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/merkle/MerkleTreeNode.java b/test/src/main/java/org/apache/accumulo/test/replication/merkle/MerkleTreeNode.java
deleted file mode 100644
index f392f12..0000000
--- a/test/src/main/java/org/apache/accumulo/test/replication/merkle/MerkleTreeNode.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication.merkle;
-
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.commons.codec.binary.Hex;
-import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Encapsulates the level (height) within the tree, the ranges that it covers, and the new hash
- */
-public class MerkleTreeNode {
-  private static final Logger log = LoggerFactory.getLogger(MerkleTreeNode.class);
-
-  private Range range;
-  private int level;
-  private List<Range> children;
-  private byte[] hash;
-
-  public MerkleTreeNode(Range range, int level, List<Range> children, byte[] hash) {
-    this.range = range;
-    this.level = level;
-    this.children = children;
-    this.hash = hash;
-  }
-
-  public MerkleTreeNode(Key k, Value v) {
-    range = RangeSerialization.toRange(k);
-    level = 0;
-    children = Collections.emptyList();
-    hash = v.get();
-  }
-
-  public MerkleTreeNode(List<MerkleTreeNode> children, String digestAlgorithm) throws NoSuchAlgorithmException {
-    level = 0;
-    this.children = new ArrayList<>(children.size());
-    MessageDigest digest = MessageDigest.getInstance(digestAlgorithm);
-
-    Range childrenRange = null;
-    for (MerkleTreeNode child : children) {
-      this.children.add(child.getRange());
-      level = Math.max(child.getLevel(), level);
-      digest.update(child.getHash());
-
-      if (null == childrenRange) {
-        childrenRange = child.getRange();
-      } else {
-        List<Range> overlappingRanges = Range.mergeOverlapping(Arrays.asList(childrenRange, child.getRange()));
-        if (1 != overlappingRanges.size()) {
-          log.error("Tried to merge non-contiguous ranges: {} {}", childrenRange, child.getRange());
-          throw new IllegalArgumentException("Ranges must be contiguous: " + childrenRange + ", " + child.getRange());
-        }
-
-        childrenRange = overlappingRanges.get(0);
-      }
-    }
-
-    // Our actual level is one more than the highest level of our children
-    level++;
-
-    // Roll the hash up the tree
-    hash = digest.digest();
-
-    // Set the range to be the merged result of the children
-    range = childrenRange;
-  }
-
-  public Range getRange() {
-    return range;
-  }
-
-  public int getLevel() {
-    return level;
-  }
-
-  public List<Range> getChildren() {
-    return children;
-  }
-
-  public byte[] getHash() {
-    return hash;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder(32);
-    sb.append("range=").append(range).append(" level=").append(level).append(" hash=").append(Hex.encodeHexString(hash)).append(" children=").append(children);
-    return sb.toString();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (o instanceof MerkleTreeNode) {
-      MerkleTreeNode other = (MerkleTreeNode) o;
-      return range.equals(other.getRange()) && level == other.getLevel() && children.equals(other.getChildren()) && Arrays.equals(hash, other.getHash());
-    }
-
-    return false;
-  }
-
-  @Override
-  public int hashCode() {
-    HashCodeBuilder hcb = new HashCodeBuilder(1395, 39532);
-    return hcb.append(range).append(level).append(children).append(hash).toHashCode();
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/src/main/java/org/apache/accumulo/test/replication/merkle/RangeSerialization.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/merkle/RangeSerialization.java b/test/src/main/java/org/apache/accumulo/test/replication/merkle/RangeSerialization.java
deleted file mode 100644
index 6b07f2f..0000000
--- a/test/src/main/java/org/apache/accumulo/test/replication/merkle/RangeSerialization.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication.merkle;
-
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-
-/**
- *
- */
-public class RangeSerialization {
-  private static final Text EMPTY = new Text(new byte[0]);
-
-  public static Range toRange(Key key) {
-    Text holder = new Text();
-    key.getRow(holder);
-    Key startKey;
-    if (0 == holder.getLength()) {
-      startKey = null;
-    } else {
-      startKey = new Key(holder);
-    }
-
-    key.getColumnQualifier(holder);
-    Key endKey;
-    if (0 == holder.getLength()) {
-      endKey = null;
-    } else {
-      endKey = new Key(holder);
-    }
-
-    // Don't be inclusive for no bounds on a Range
-    return new Range(startKey, startKey != null, endKey, endKey != null);
-  }
-
-  public static Key toKey(Range range) {
-    Text row = getRow(range);
-    return new Key(row, EMPTY, getColumnQualifier(range));
-  }
-
-  public static Mutation toMutation(Range range, Value v) {
-    Text row = getRow(range);
-    Mutation m = new Mutation(row);
-    m.put(EMPTY, getColumnQualifier(range), v);
-    return m;
-  }
-
-  public static Text getRow(Range range) {
-    return range.isInfiniteStartKey() ? EMPTY : range.getStartKey().getRow();
-  }
-
-  public static Text getColumnQualifier(Range range) {
-    return range.isInfiniteStopKey() ? EMPTY : range.getEndKey().getRow();
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/src/main/java/org/apache/accumulo/test/replication/merkle/cli/CompareTables.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/merkle/cli/CompareTables.java b/test/src/main/java/org/apache/accumulo/test/replication/merkle/cli/CompareTables.java
deleted file mode 100644
index 8e97beb..0000000
--- a/test/src/main/java/org/apache/accumulo/test/replication/merkle/cli/CompareTables.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication.merkle.cli;
-
-import java.io.FileNotFoundException;
-import java.security.NoSuchAlgorithmException;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOpts;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Range;
-import org.apache.commons.codec.binary.Hex;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Accepts a set of tables, computes the hashes for each, and prints the top-level hash for each table.
- * <p>
- * Automatically creates the output tables for intermediate hashes instead of requiring them to exist, and raises an exception if an output table is
- * already present.
- */
-public class CompareTables {
-  private static final Logger log = LoggerFactory.getLogger(CompareTables.class);
-
-  public static class CompareTablesOpts extends ClientOpts {
-    @Parameter(names = {"--tables"}, description = "Tables to compare", variableArity = true)
-    public List<String> tables;
-
-    @Parameter(names = {"-nt", "--numThreads"}, required = false, description = "number of concurrent threads calculating digests")
-    private int numThreads = 4;
-
-    @Parameter(names = {"-hash", "--hash"}, required = true, description = "type of hash to use")
-    private String hashName;
-
-    @Parameter(names = {"-iter", "--iterator"}, required = false, description = "Push down digest computation to iterators")
-    private boolean iteratorPushdown = false;
-
-    @Parameter(names = {"-s", "--splits"}, required = false, description = "File of splits to use for merkle tree")
-    private String splitsFile = null;
-
-    public List<String> getTables() {
-      return this.tables;
-    }
-
-    public void setTables(List<String> tables) {
-      this.tables = tables;
-    }
-
-    public int getNumThreads() {
-      return numThreads;
-    }
-
-    public void setNumThreads(int numThreads) {
-      this.numThreads = numThreads;
-    }
-
-    public String getHashName() {
-      return hashName;
-    }
-
-    public void setHashName(String hashName) {
-      this.hashName = hashName;
-    }
-
-    public boolean isIteratorPushdown() {
-      return iteratorPushdown;
-    }
-
-    public void setIteratorPushdown(boolean iteratorPushdown) {
-      this.iteratorPushdown = iteratorPushdown;
-    }
-
-    public String getSplitsFile() {
-      return splitsFile;
-    }
-
-    public void setSplitsFile(String splitsFile) {
-      this.splitsFile = splitsFile;
-    }
-  }
-
-  private CompareTablesOpts opts;
-
-  protected CompareTables() {}
-
-  public CompareTables(CompareTablesOpts opts) {
-    this.opts = opts;
-  }
-
-  public Map<String,String> computeAllHashes() throws AccumuloException, AccumuloSecurityException, TableExistsException, NoSuchAlgorithmException,
-      TableNotFoundException, FileNotFoundException {
-    final Connector conn = opts.getConnector();
-    final Map<String,String> hashesByTable = new HashMap<>();
-
-    for (String table : opts.getTables()) {
-      final String outputTableName = table + "_merkle";
-
-      if (conn.tableOperations().exists(outputTableName)) {
-        throw new IllegalArgumentException("Expected output table name to not yet exist: " + outputTableName);
-      }
-
-      conn.tableOperations().create(outputTableName);
-
-      GenerateHashes genHashes = new GenerateHashes();
-      Collection<Range> ranges = genHashes.getRanges(opts.getConnector(), table, opts.getSplitsFile());
-
-      try {
-        genHashes.run(opts.getConnector(), table, table + "_merkle", opts.getHashName(), opts.getNumThreads(), opts.isIteratorPushdown(), ranges);
-      } catch (Exception e) {
-        log.error("Error generating hashes for {}", table, e);
-        throw new RuntimeException(e);
-      }
-
-      ComputeRootHash computeRootHash = new ComputeRootHash();
-      String hash = Hex.encodeHexString(computeRootHash.getHash(conn, outputTableName, opts.getHashName()));
-
-      hashesByTable.put(table, hash);
-    }
-
-    return hashesByTable;
-  }
-
-  public static void main(String[] args) throws Exception {
-    CompareTablesOpts opts = new CompareTablesOpts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs("CompareTables", args, bwOpts);
-
-    if (opts.isIteratorPushdown() && null != opts.getSplitsFile()) {
-      throw new IllegalArgumentException("Cannot use iterator pushdown with anything other than table split points");
pushdown with anything other than table split points"); - } - - CompareTables compareTables = new CompareTables(opts); - Map tableToHashes = compareTables.computeAllHashes(); - - boolean hashesEqual = true; - String previousHash = null; - for (Entry entry : tableToHashes.entrySet()) { - // Set the previous hash if we dont' have one - if (null == previousHash) { - previousHash = entry.getValue(); - } else if (hashesEqual) { - // If the hashes are still equal, check that the new hash is also equal - hashesEqual = previousHash.equals(entry.getValue()); - } - - System.out.println(entry.getKey() + " " + entry.getValue()); - } - - System.exit(hashesEqual ? 0 : 1); - } -} http://git-wip-us.apache.org/repos/asf/accumulo/blob/81f215c0/test/src/main/java/org/apache/accumulo/test/replication/merkle/cli/ComputeRootHash.java ---------------------------------------------------------------------- diff --git a/test/src/main/java/org/apache/accumulo/test/replication/merkle/cli/ComputeRootHash.java b/test/src/main/java/org/apache/accumulo/test/replication/merkle/cli/ComputeRootHash.java deleted file mode 100644 index 56a5931..0000000 --- a/test/src/main/java/org/apache/accumulo/test/replication/merkle/cli/ComputeRootHash.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.accumulo.test.replication.merkle.cli; - -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map.Entry; - -import org.apache.accumulo.core.cli.ClientOnRequiredTable; -import org.apache.accumulo.core.client.AccumuloException; -import org.apache.accumulo.core.client.AccumuloSecurityException; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.Scanner; -import org.apache.accumulo.core.client.TableNotFoundException; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Range; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.security.Authorizations; -import org.apache.accumulo.test.replication.merkle.MerkleTree; -import org.apache.accumulo.test.replication.merkle.MerkleTreeNode; -import org.apache.accumulo.test.replication.merkle.RangeSerialization; -import org.apache.commons.codec.binary.Hex; - -import com.beust.jcommander.Parameter; - -/** - * Given a table created by {@link GenerateHashes} which contains the leaves of a Merkle tree, compute the root node of the Merkle tree which can be quickly - * compared to the root node of another Merkle tree to ascertain equality. 
- */
-public class ComputeRootHash {
-
-  public static class ComputeRootHashOpts extends ClientOnRequiredTable {
-    @Parameter(names = {"-hash", "--hash"}, required = true, description = "type of hash to use")
-    private String hashName;
-
-    public String getHashName() {
-      return hashName;
-    }
-
-    public void setHashName(String hashName) {
-      this.hashName = hashName;
-    }
-  }
-
-  public byte[] getHash(ComputeRootHashOpts opts) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, NoSuchAlgorithmException {
-    Connector conn = opts.getConnector();
-    String table = opts.getTableName();
-
-    return getHash(conn, table, opts.getHashName());
-  }
-
-  public byte[] getHash(Connector conn, String table, String hashName) throws TableNotFoundException, NoSuchAlgorithmException {
-    List<MerkleTreeNode> leaves = getLeaves(conn, table);
-
-    MerkleTree tree = new MerkleTree(leaves, hashName);
-
-    return tree.getRootNode().getHash();
-  }
-
-  protected ArrayList<MerkleTreeNode> getLeaves(Connector conn, String tableName) throws TableNotFoundException {
-    // TODO make this a bit more resilient to very large merkle trees by lazily reading more data from the table when necessary
-    final Scanner s = conn.createScanner(tableName, Authorizations.EMPTY);
-    final ArrayList<MerkleTreeNode> leaves = new ArrayList<>();
-
-    for (Entry<Key,Value> entry : s) {
-      Range range = RangeSerialization.toRange(entry.getKey());
-      byte[] hash = entry.getValue().get();
-
-      leaves.add(new MerkleTreeNode(range, 0, Collections.<MerkleTreeNode> emptyList(), hash));
-    }
-
-    return leaves;
-  }
-
-  public static void main(String[] args) throws Exception {
-    ComputeRootHashOpts opts = new ComputeRootHashOpts();
-    opts.parseArgs("ComputeRootHash", args);
-
-    ComputeRootHash computeRootHash = new ComputeRootHash();
-    byte[] rootHash = computeRootHash.getHash(opts);
-
-    System.out.println(Hex.encodeHexString(rootHash));
-  }
-}
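
For context, a minimal, self-contained sketch of the kind of root computation ComputeRootHash delegates to MerkleTree: hash the leaf digests pairwise, level by level, until a single root digest remains. The class name MerkleRootSketch, the left-to-right pairing, and the policy of hashing an unpaired trailing node alone are illustrative assumptions and may differ from the MerkleTree implementation removed in this commit; only java.security.MessageDigest is relied upon, with hashName being any digest algorithm name such as the value passed via the -hash option.

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch; not the removed MerkleTree class.
public class MerkleRootSketch {

  // Repeatedly digest adjacent pairs of child hashes until one root hash remains.
  public static byte[] rootHash(List<byte[]> leafHashes, String hashName) throws NoSuchAlgorithmException {
    if (leafHashes.isEmpty()) {
      throw new IllegalArgumentException("Cannot compute a root over zero leaves");
    }
    List<byte[]> level = new ArrayList<>(leafHashes);
    while (level.size() > 1) {
      List<byte[]> next = new ArrayList<>();
      for (int i = 0; i < level.size(); i += 2) {
        MessageDigest digest = MessageDigest.getInstance(hashName);
        digest.update(level.get(i));
        if (i + 1 < level.size()) {
          digest.update(level.get(i + 1)); // pair with the right sibling
        }
        // An unpaired trailing node is re-hashed alone (assumed policy)
        next.add(digest.digest());
      }
      level = next;
    }
    return level.get(0);
  }
}

Feeding rootHash() the leaf digests that getLeaves() reads from a table produced by GenerateHashes, with the same hashName, would yield a single digest comparable to the hex string that ComputeRootHash prints, under the assumptions noted above.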