Return-Path: X-Original-To: apmail-hbase-commits-archive@www.apache.org Delivered-To: apmail-hbase-commits-archive@www.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 02CDF10ECD for ; Mon, 23 Sep 2013 16:46:16 +0000 (UTC) Received: (qmail 58837 invoked by uid 500); 23 Sep 2013 16:46:14 -0000 Delivered-To: apmail-hbase-commits-archive@hbase.apache.org Received: (qmail 58798 invoked by uid 500); 23 Sep 2013 16:46:14 -0000 Mailing-List: contact commits-help@hbase.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: dev@hbase.apache.org Delivered-To: mailing list commits@hbase.apache.org Received: (qmail 58787 invoked by uid 99); 23 Sep 2013 16:46:14 -0000 Received: from athena.apache.org (HELO athena.apache.org) (140.211.11.136) by apache.org (qpsmtpd/0.29) with ESMTP; Mon, 23 Sep 2013 16:46:14 +0000 X-ASF-Spam-Status: No, hits=-2000.0 required=5.0 tests=ALL_TRUSTED X-Spam-Check-By: apache.org Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4) by apache.org (qpsmtpd/0.29) with ESMTP; Mon, 23 Sep 2013 16:46:10 +0000 Received: from eris.apache.org (localhost [127.0.0.1]) by eris.apache.org (Postfix) with ESMTP id D7B1B2388900; Mon, 23 Sep 2013 16:45:49 +0000 (UTC) Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: svn commit: r1525642 [1/4] - in /hbase/branches/0.96: ./ bin/ hbase-assembly/src/main/assembly/ hbase-it/ hbase-server/ hbase-server/src/main/ruby/ hbase-server/src/main/ruby/hbase/ hbase-server/src/main/ruby/irb/ hbase-server/src/main/ruby/shell/ hbas... 
Date: Mon, 23 Sep 2013 16:45:46 -0000 To: commits@hbase.apache.org From: nkeywal@apache.org X-Mailer: svnmailer-1.0.9 Message-Id: <20130923164549.D7B1B2388900@eris.apache.org> X-Virus-Checked: Checked by ClamAV on apache.org Author: nkeywal Date: Mon Sep 23 16:45:42 2013 New Revision: 1525642 URL: http://svn.apache.org/r1525642 Log: HBASE-9632 Put the shell in a maven sub module (hbase-shell) instead of hbase-server Added: hbase/branches/0.96/hbase-shell/ hbase/branches/0.96/hbase-shell/pom.xml hbase/branches/0.96/hbase-shell/src/ hbase/branches/0.96/hbase-shell/src/main/ hbase/branches/0.96/hbase-shell/src/main/ruby/ hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/ hbase/branches/0.96/hbase-shell/src/main/ruby/hbase.rb hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/admin.rb hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/hbase.rb hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/replication_admin.rb hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/security.rb hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/table.rb hbase/branches/0.96/hbase-shell/src/main/ruby/irb/ hbase/branches/0.96/hbase-shell/src/main/ruby/irb/hirb.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/ hbase/branches/0.96/hbase-shell/src/main/ruby/shell.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/ hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/add_peer.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/alter.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/alter_async.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/alter_namespace.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/alter_status.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/assign.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/balance_switch.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/balancer.rb 
hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_enabled.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_run.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_switch.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/close_region.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/compact.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/count.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/create.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/create_namespace.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/delete.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/delete_snapshot.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/deleteall.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/describe.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/describe_namespace.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/disable.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/disable_all.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/disable_peer.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/drop.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/drop_all.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/drop_namespace.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/enable.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/enable_all.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/enable_peer.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/exists.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/flush.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/get.rb 
hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/get_counter.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/get_table.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/grant.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/hlog_roll.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/incr.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/is_disabled.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/list.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/list_namespace.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/list_peers.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/list_replicated_tables.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/major_compact.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/merge_region.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/move.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/put.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/remove_peer.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/rename_snapshot.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/revoke.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/scan.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/show_filters.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/snapshot.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/split.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/status.rb 
hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/table_help.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/truncate.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/unassign.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/user_permission.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/version.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/whoami.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/commands/zk_dump.rb hbase/branches/0.96/hbase-shell/src/main/ruby/shell/formatter.rb hbase/branches/0.96/hbase-shell/src/test/ hbase/branches/0.96/hbase-shell/src/test/java/ hbase/branches/0.96/hbase-shell/src/test/java/org/ hbase/branches/0.96/hbase-shell/src/test/java/org/apache/ hbase/branches/0.96/hbase-shell/src/test/java/org/apache/hadoop/ hbase/branches/0.96/hbase-shell/src/test/java/org/apache/hadoop/hbase/ hbase/branches/0.96/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/ hbase/branches/0.96/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java hbase/branches/0.96/hbase-shell/src/test/ruby/ hbase/branches/0.96/hbase-shell/src/test/ruby/hbase/ hbase/branches/0.96/hbase-shell/src/test/ruby/hbase/admin_test.rb hbase/branches/0.96/hbase-shell/src/test/ruby/hbase/hbase_test.rb hbase/branches/0.96/hbase-shell/src/test/ruby/hbase/table_test.rb hbase/branches/0.96/hbase-shell/src/test/ruby/shell/ hbase/branches/0.96/hbase-shell/src/test/ruby/shell/commands_test.rb hbase/branches/0.96/hbase-shell/src/test/ruby/shell/formatter_test.rb hbase/branches/0.96/hbase-shell/src/test/ruby/shell/shell_test.rb hbase/branches/0.96/hbase-shell/src/test/ruby/test_helper.rb hbase/branches/0.96/hbase-shell/src/test/ruby/tests_runner.rb Removed: hbase/branches/0.96/hbase-server/src/main/ruby/hbase.rb hbase/branches/0.96/hbase-server/src/main/ruby/hbase/admin.rb 
hbase/branches/0.96/hbase-server/src/main/ruby/hbase/hbase.rb hbase/branches/0.96/hbase-server/src/main/ruby/hbase/replication_admin.rb hbase/branches/0.96/hbase-server/src/main/ruby/hbase/security.rb hbase/branches/0.96/hbase-server/src/main/ruby/hbase/table.rb hbase/branches/0.96/hbase-server/src/main/ruby/irb/hirb.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/add_peer.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/alter.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/alter_async.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/alter_namespace.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/alter_status.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/assign.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/balance_switch.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/balancer.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/catalogjanitor_enabled.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/catalogjanitor_run.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/catalogjanitor_switch.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/clone_snapshot.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/close_region.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/compact.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/count.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/create.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/create_namespace.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/delete.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/delete_snapshot.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/deleteall.rb 
hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/describe.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/describe_namespace.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/disable.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/disable_all.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/disable_peer.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/drop.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/drop_all.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/drop_namespace.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/enable.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/enable_all.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/enable_peer.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/exists.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/flush.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/get.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/get_counter.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/get_table.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/grant.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/hlog_roll.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/incr.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/is_disabled.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/is_enabled.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/list.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/list_namespace.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/list_namespace_tables.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/list_peers.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/list_replicated_tables.rb 
hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/list_snapshots.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/major_compact.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/merge_region.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/move.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/put.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/remove_peer.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/rename_snapshot.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/restore_snapshot.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/revoke.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/scan.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/show_filters.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/snapshot.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/split.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/status.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/table_help.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/truncate.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/truncate_preserve.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/unassign.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/user_permission.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/version.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/whoami.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/commands/zk_dump.rb hbase/branches/0.96/hbase-server/src/main/ruby/shell/formatter.rb hbase/branches/0.96/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShell.java hbase/branches/0.96/hbase-server/src/test/ruby/hbase/admin_test.rb hbase/branches/0.96/hbase-server/src/test/ruby/hbase/hbase_test.rb 
hbase/branches/0.96/hbase-server/src/test/ruby/hbase/table_test.rb hbase/branches/0.96/hbase-server/src/test/ruby/shell/commands_test.rb hbase/branches/0.96/hbase-server/src/test/ruby/shell/formatter_test.rb hbase/branches/0.96/hbase-server/src/test/ruby/shell/shell_test.rb hbase/branches/0.96/hbase-server/src/test/ruby/test_helper.rb hbase/branches/0.96/hbase-server/src/test/ruby/tests_runner.rb Modified: hbase/branches/0.96/bin/hbase hbase/branches/0.96/bin/hbase.cmd hbase/branches/0.96/hbase-assembly/src/main/assembly/components.xml hbase/branches/0.96/hbase-it/pom.xml hbase/branches/0.96/hbase-server/pom.xml hbase/branches/0.96/pom.xml Modified: hbase/branches/0.96/bin/hbase URL: http://svn.apache.org/viewvc/hbase/branches/0.96/bin/hbase?rev=1525642&r1=1525641&r2=1525642&view=diff ============================================================================== --- hbase/branches/0.96/bin/hbase (original) +++ hbase/branches/0.96/bin/hbase Mon Sep 23 16:45:42 2013 @@ -257,7 +257,7 @@ if [ "$COMMAND" = "shell" ] ; then if [ -d "$HBASE_HOME/lib/ruby" ]; then HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/lib/ruby" else - HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/hbase-server/src/main/ruby" + HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/hbase-shell/src/main/ruby" fi CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/bin/hirb.rb" elif [ "$COMMAND" = "hbck" ] ; then Modified: hbase/branches/0.96/bin/hbase.cmd URL: http://svn.apache.org/viewvc/hbase/branches/0.96/bin/hbase.cmd?rev=1525642&r1=1525641&r2=1525642&view=diff ============================================================================== --- hbase/branches/0.96/bin/hbase.cmd (original) +++ hbase/branches/0.96/bin/hbase.cmd Mon Sep 23 16:45:42 2013 @@ -305,7 +305,7 @@ goto :eof if EXIST %HBASE_HOME%\lib\ruby ( set HBASE_OPTS=%HBASE_OPTS% -Dhbase.ruby.sources=%HBASE_HOME%\lib\ruby ) else ( - set HBASE_OPTS=%HBASE_OPTS% 
-Dhbase.ruby.sources=%HBASE_HOME%\hbase-server\src\main\ruby + set HBASE_OPTS=%HBASE_OPTS% -Dhbase.ruby.sources=%HBASE_HOME%\hbase-shell\src\main\ruby ) set CLASS=org.jruby.Main -X+O %JRUBY_OPTS% %HBASE_HOME%\bin\hirb.rb Modified: hbase/branches/0.96/hbase-assembly/src/main/assembly/components.xml URL: http://svn.apache.org/viewvc/hbase/branches/0.96/hbase-assembly/src/main/assembly/components.xml?rev=1525642&r1=1525641&r2=1525642&view=diff ============================================================================== Binary files - no diff available. Modified: hbase/branches/0.96/hbase-it/pom.xml URL: http://svn.apache.org/viewvc/hbase/branches/0.96/hbase-it/pom.xml?rev=1525642&r1=1525641&r2=1525642&view=diff ============================================================================== --- hbase/branches/0.96/hbase-it/pom.xml (original) +++ hbase/branches/0.96/hbase-it/pom.xml Mon Sep 23 16:45:42 2013 @@ -177,6 +177,10 @@ org.apache.hbase + hbase-shell + + + org.apache.hbase hbase-server Modified: hbase/branches/0.96/hbase-server/pom.xml URL: http://svn.apache.org/viewvc/hbase/branches/0.96/hbase-server/pom.xml?rev=1525642&r1=1525641&r2=1525642&view=diff ============================================================================== --- hbase/branches/0.96/hbase-server/pom.xml (original) +++ hbase/branches/0.96/hbase-server/pom.xml Mon Sep 23 16:45:42 2013 @@ -370,10 +370,6 @@ zookeeper - org.jruby - jruby-complete - - org.mortbay.jetty jetty Added: hbase/branches/0.96/hbase-shell/pom.xml URL: http://svn.apache.org/viewvc/hbase/branches/0.96/hbase-shell/pom.xml?rev=1525642&view=auto ============================================================================== --- hbase/branches/0.96/hbase-shell/pom.xml (added) +++ hbase/branches/0.96/hbase-shell/pom.xml Mon Sep 23 16:45:42 2013 @@ -0,0 +1,472 @@ + + + + 4.0.0 + + hbase + org.apache.hbase + 0.96.0 + .. 
+ + hbase-shell + HBase - Shell + Shell for HBase + + + + + + ${project.build.directory} + + hbase-webapps/** + + + + + + src/test/resources + + **/** + + + + + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + + org/apache/hadoop/hbase/mapreduce/Driver + + + + + org/apache/jute/** + org/apache/zookeeper/** + **/*.jsp + hbase-site.xml + hdfs-site.xml + log4j.properties + mapred-queues.xml + mapred-site.xml + zoo.cfg + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + jspcSource-packageInfo-source + generate-sources + + add-source + + + + ${project.build.directory}/generated-jamon + ${project.build.directory}/generated-sources/java + + + + + + + + + org.codehaus.mojo + findbugs-maven-plugin + + + + maven-surefire-plugin + + + + listener + org.apache.hadoop.hbase.ServerResourceCheckerJUnitListener + + + + + + + + + + maven-surefire-plugin + ${surefire.version} + + + ${surefire.firstPartGroups} + + + + + + + + + org.apache.hbase + hbase-common + + + org.apache.hbase + hbase-protocol + + + org.apache.hbase + hbase-client + + + org.apache.hbase + hbase-prefix-tree + runtime + + + org.apache.hbase + hbase-server + + + org.apache.hbase + hbase-common + test-jar + + + org.apache.hbase + hbase-hadoop-compat + + + org.apache.hbase + hbase-hadoop-compat + test-jar + test + + + org.apache.hbase + ${compat.module} + ${project.version} + + + org.apache.hbase + hbase-server + test-jar + test + + + org.apache.hbase + ${compat.module} + ${project.version} + test-jar + test + + + + com.yammer.metrics + metrics-core + + + commons-logging + commons-logging + + + org.jruby + jruby-complete + + + + org.cloudera.htrace + htrace-core + + + org.cloudera.htrace + htrace-zipkin + + + + + + skipServerTests + + + skipServerTests + + + + true + true + + + + + hadoop-snappy + + false + + snappy + + + + + org.apache.hadoop 
+ hadoop-snappy + ${hadoop-snappy.version} + + + + + native + + false + + + + + org.apache.maven.plugins + maven-antrun-plugin + + + make + compile + run + + + + + + + + + + + + + + + + + + + + + + hadoop-1.1 + + + + !hadoop.profile + + + + + org.apache.hadoop + hadoop-core + + + org.apache.hadoop + hadoop-test + + + + + hadoop-1.0 + + + hadoop.profile + 1.0 + + + + + org.apache.hadoop + hadoop-core + + + org.apache.hadoop + hadoop-test + + + + + + hadoop-2.0 + + + + hadoop.profile2.0 + + + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-auth + + + org.apache.hadoop + hadoop-client + + + org.apache.hadoop + hadoop-mapreduce-client-core + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + test-jar + + + org.apache.hadoop + hadoop-hdfs + + + org.apache.hadoop + hadoop-hdfs + test-jar + + + org.apache.hadoop + hadoop-annotations + + + org.apache.hadoop + hadoop-minicluster + test + + + + + + maven-dependency-plugin + + + create-mrapp-generated-classpath + generate-test-resources + + build-classpath + + + + ${project.build.directory}/test-classes/mrapp-generated-classpath + + + + + + + + + + hadoop-3.0 + + + hadoop.profile + 3.0 + + + + 3.0-SNAPSHOT + + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-annotations + + + org.apache.hadoop + hadoop-minicluster + + + + + + maven-dependency-plugin + + + create-mrapp-generated-classpath + generate-test-resources + + build-classpath + + + + ${project.build.directory}/test-classes/mrapp-generated-classpath + + + + + + + + + Added: hbase/branches/0.96/hbase-shell/src/main/ruby/hbase.rb URL: http://svn.apache.org/viewvc/hbase/branches/0.96/hbase-shell/src/main/ruby/hbase.rb?rev=1525642&view=auto ============================================================================== --- hbase/branches/0.96/hbase-shell/src/main/ruby/hbase.rb (added) +++ hbase/branches/0.96/hbase-shell/src/main/ruby/hbase.rb Mon Sep 23 16:45:42 2013 @@ -0,0 +1,80 @@ +# +# +# Licensed to the Apache 
Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# HBase ruby classes. +# Has wrapper classes for org.apache.hadoop.hbase.client.HBaseAdmin +# and for org.apache.hadoop.hbase.client.HTable. Classes take +# Formatters on construction and outputs any results using +# Formatter methods. These classes are only really for use by +# the hirb.rb HBase Shell script; they don't make much sense elsewhere. +# For example, the exists method on Admin class prints to the formatter +# whether the table exists and returns nil regardless. 
+include Java + +include_class('java.lang.Integer') {|package,name| "J#{name}" } +include_class('java.lang.Long') {|package,name| "J#{name}" } +include_class('java.lang.Boolean') {|package,name| "J#{name}" } + +module HBaseConstants + COLUMN = "COLUMN" + COLUMNS = "COLUMNS" + TIMESTAMP = "TIMESTAMP" + TIMERANGE = "TIMERANGE" + NAME = org.apache.hadoop.hbase.HConstants::NAME + VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS + IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY + METADATA = org.apache.hadoop.hbase.HConstants::METADATA + STOPROW = "STOPROW" + STARTROW = "STARTROW" + ENDROW = STOPROW + RAW = "RAW" + LIMIT = "LIMIT" + METHOD = "METHOD" + MAXLENGTH = "MAXLENGTH" + CACHE_BLOCKS = "CACHE_BLOCKS" + REPLICATION_SCOPE = "REPLICATION_SCOPE" + INTERVAL = 'INTERVAL' + CACHE = 'CACHE' + FILTER = 'FILTER' + SPLITS = 'SPLITS' + SPLITS_FILE = 'SPLITS_FILE' + SPLITALGO = 'SPLITALGO' + NUMREGIONS = 'NUMREGIONS' + CONFIGURATION = org.apache.hadoop.hbase.HConstants::CONFIGURATION + + # Load constants from hbase java API + def self.promote_constants(constants) + # The constants to import are all in uppercase + constants.each do |c| + next if c =~ /DEFAULT_.*/ || c != c.upcase + next if eval("defined?(#{c})") + eval("#{c} = '#{c}'") + end + end + + promote_constants(org.apache.hadoop.hbase.HColumnDescriptor.constants) + promote_constants(org.apache.hadoop.hbase.HTableDescriptor.constants) +end + +# Include classes definition +require 'hbase/hbase' +require 'hbase/admin' +require 'hbase/table' +require 'hbase/replication_admin' +require 'hbase/security' Added: hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/admin.rb URL: http://svn.apache.org/viewvc/hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/admin.rb?rev=1525642&view=auto ============================================================================== --- hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/admin.rb (added) +++ hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/admin.rb Mon Sep 23 
16:45:42 2013 @@ -0,0 +1,824 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +include Java +java_import java.util.Arrays +java_import org.apache.hadoop.hbase.util.Pair +java_import org.apache.hadoop.hbase.util.RegionSplitter +java_import org.apache.hadoop.hbase.util.Bytes + +# Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin + +module Hbase + class Admin + include HBaseConstants + + def initialize(configuration, formatter) + @admin = org.apache.hadoop.hbase.client.HBaseAdmin.new(configuration) + connection = @admin.getConnection() + @conf = configuration + @zk_wrapper = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(configuration, + "admin", nil) + zk = @zk_wrapper.getRecoverableZooKeeper().getZooKeeper() + @zk_main = org.apache.zookeeper.ZooKeeperMain.new(zk) + @formatter = formatter + end + + #---------------------------------------------------------------------------------------------- + # Returns a list of tables in hbase + def list(regex = ".*") + Arrays.asList(@admin.getTableNames(regex)) + end + + #---------------------------------------------------------------------------------------------- + # Requests a table or region flush + def flush(table_or_region_name) + @admin.flush(table_or_region_name) + 
end + + #---------------------------------------------------------------------------------------------- + # Requests a table or region or column family compaction + def compact(table_or_region_name, family = nil) + if family == nil + @admin.compact(table_or_region_name) + else + # We are compacting a column family within a region. + @admin.compact(table_or_region_name, family) + end + end + + #---------------------------------------------------------------------------------------------- + # Requests a table or region or column family major compaction + def major_compact(table_or_region_name, family = nil) + if family == nil + @admin.majorCompact(table_or_region_name) + else + # We are major compacting a column family within a region or table. + @admin.majorCompact(table_or_region_name, family) + end + end + + #---------------------------------------------------------------------------------------------- + # Requests a regionserver's HLog roll + def hlog_roll(server_name) + @admin.rollHLogWriter(server_name) + end + + #---------------------------------------------------------------------------------------------- + # Requests a table or region split + def split(table_or_region_name, split_point) + if split_point == nil + @admin.split(table_or_region_name) + else + @admin.split(table_or_region_name, split_point) + end + end + + #---------------------------------------------------------------------------------------------- + # Requests a cluster balance + # Returns true if balancer ran + def balancer() + @admin.balancer() + end + + #---------------------------------------------------------------------------------------------- + # Enable/disable balancer + # Returns previous balancer switch setting. 
+ def balance_switch(enableDisable) + @admin.setBalancerRunning( + java.lang.Boolean::valueOf(enableDisable), java.lang.Boolean::valueOf(false)) + end + + #---------------------------------------------------------------------------------------------- + # Request a scan of the catalog table (for garbage collection) + # Returns an int signifying the number of entries cleaned + def catalogjanitor_run() + @admin.runCatalogScan() + end + + #---------------------------------------------------------------------------------------------- + # Enable/disable the catalog janitor + # Returns previous catalog janitor switch setting. + def catalogjanitor_switch(enableDisable) + @admin.enableCatalogJanitor(java.lang.Boolean::valueOf(enableDisable)) + end + + #---------------------------------------------------------------------------------------------- + # Query on the catalog janitor state (enabled/disabled?) + # Returns catalog janitor state (true signifies enabled). + def catalogjanitor_enabled() + @admin.isCatalogJanitorEnabled() + end + + #---------------------------------------------------------------------------------------------- + # Enables a table + def enable(table_name) + tableExists(table_name) + return if enabled?(table_name) + @admin.enableTable(table_name) + end + + #---------------------------------------------------------------------------------------------- + # Enables all tables matching the given regex + def enable_all(regex) + regex = regex.to_s + @admin.enableTables(regex) + end + + #---------------------------------------------------------------------------------------------- + # Disables a table + def disable(table_name) + tableExists(table_name) + return if disabled?(table_name) + @admin.disableTable(table_name) + end + + #---------------------------------------------------------------------------------------------- + # Disables all tables matching the given regex + def disable_all(regex) + regex = regex.to_s + @admin.disableTables(regex).map { |t| 
raise ArgumentError, "Table #{table_name} does not exist." unless exists?(table_name)
raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type") + end + + # First, handle all the cases where arg is a column family. + if arg.kind_of?(String) or arg.has_key?(NAME) + # If the arg is a string, default action is to add a column to the table. + # If arg has a name, it must also be a column descriptor. + htd.addFamily(hcd(arg, htd)) + has_columns = true + next + end + + # Get rid of the "METHOD", which is deprecated for create. + # We'll do whatever it used to do below if it's table_att. + if (method = arg.delete(METHOD)) + raise(ArgumentError, "table_att is currently the only supported method") unless method == 'table_att' + end + + # The hash is not a column family. Figure out what's in it. + # First, handle splits. + if arg.has_key?(SPLITS_FILE) + splits_file = arg.delete(SPLITS_FILE) + unless File.exist?(splits_file) + raise(ArgumentError, "Splits file #{splits_file} doesn't exist") + end + arg[SPLITS] = [] + File.foreach(splits_file) do |line| + arg[SPLITS].push(line.strip()) + end + end + + if arg.has_key?(SPLITS) + splits = Java::byte[][arg[SPLITS].size].new + idx = 0 + arg.delete(SPLITS).each do |split| + splits[idx] = org.apache.hadoop.hbase.util.Bytes.toBytesBinary(split) + idx = idx + 1 + end + elsif arg.has_key?(NUMREGIONS) or arg.has_key?(SPLITALGO) + # deprecated region pre-split API; if one of the above is specified, will be ignored. + raise(ArgumentError, "Number of regions must be specified") unless arg.has_key?(NUMREGIONS) + raise(ArgumentError, "Split algorithm must be specified") unless arg.has_key?(SPLITALGO) + raise(ArgumentError, "Number of regions must be greater than 1") unless arg[NUMREGIONS] > 1 + num_regions = arg.delete(NUMREGIONS) + split_algo = RegionSplitter.newSplitAlgoInstance(@conf, arg.delete(SPLITALGO)) + splits = split_algo.split(JInteger.valueOf(num_regions)) + end + + # Done with splits; apply formerly-table_att parameters. 
+ htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER] + htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE] + htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY] + htd.setCompactionEnabled(JBoolean.valueOf(arg[COMPACTION_ENABLED])) if arg[COMPACTION_ENABLED] + htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE] + htd.setDeferredLogFlush(JBoolean.valueOf(arg.delete(DEFERRED_LOG_FLUSH))) if arg[DEFERRED_LOG_FLUSH] + htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY] + set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA] + set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION] + + arg.each_key do |ignored_key| + puts("An argument ignored (unknown or overridden): %s" % [ ignored_key ]) + end + end + + # Fail if no column families defined + raise(ArgumentError, "Table must have at least one column family") if !has_columns + + if splits.nil? + # Perform the create table call + @admin.createTable(htd) + else + # Perform the create table call + @admin.createTable(htd, splits) + end + end + + #---------------------------------------------------------------------------------------------- + # Closes a region. + # If server name is nil, we presume region_name is full region name (HRegionInfo.getRegionName). 
+ # If server name is not nil, we presume it is the region's encoded name (HRegionInfo.getEncodedName) + def close_region(region_name, server) + if (server == nil || !closeEncodedRegion?(region_name, server)) + @admin.closeRegion(region_name, server) + end + end + + #---------------------------------------------------------------------------------------------- + #---------------------------------------------------------------------------------------------- + # Assign a region + def assign(region_name) + @admin.assign(region_name.to_java_bytes) + end + + #---------------------------------------------------------------------------------------------- + # Unassign a region + def unassign(region_name, force) + @admin.unassign(region_name.to_java_bytes, java.lang.Boolean::valueOf(force)) + end + + #---------------------------------------------------------------------------------------------- + # Move a region + def move(encoded_region_name, server = nil) + @admin.move(encoded_region_name.to_java_bytes, server ? 
# Truncates table while maintaining region boundaries (deletes all records by recreating the table)
raise(ArgumentError, "There should be at least one argument besides the table name") if args.empty?
+ method = arg.delete(METHOD) + if method == nil and arg.has_key?(NAME) + descriptor = hcd(arg, htd) + column_name = descriptor.getNameAsString + + # If column already exist, then try to alter it. Create otherwise. + if htd.hasFamily(column_name.to_java_bytes) + @admin.modifyColumn(table_name, descriptor) + else + @admin.addColumn(table_name, descriptor) + end + + if wait == true + puts "Updating all regions with the new schema..." + alter_status(table_name) + end + + # We bypass descriptor when adding column families; refresh it to apply other args correctly. + htd = @admin.getTableDescriptor(table_name.to_java_bytes) + next + end + + # 2) Method other than table_att, with some args. + name = arg.delete(NAME) + if method != nil and method != "table_att" + # Delete column family + if method == "delete" + raise(ArgumentError, "NAME parameter missing for delete method") unless name + @admin.deleteColumn(table_name, name) + # Unset table attributes + elsif method == "table_att_unset" + raise(ArgumentError, "NAME parameter missing for table_att_unset method") unless name + if (htd.getValue(name) == nil) + raise ArgumentError, "Can not find attribute: #{name}" + end + htd.remove(name) + @admin.modifyTable(table_name.to_java_bytes, htd) + # Unknown method + else + raise ArgumentError, "Unknown method: #{method}" + end + + arg.each_key do |unknown_key| + puts("Unknown argument ignored: %s" % [unknown_key]) + end + + if wait == true + puts "Updating all regions with the new schema..." + alter_status(table_name) + end + + if method == "delete" + # We bypass descriptor when deleting column families; refresh it to apply other args correctly. 
+ htd = @admin.getTableDescriptor(table_name.to_java_bytes) + end + next + end + + # 3) Some args for the table, optionally with METHOD => table_att (deprecated) + raise(ArgumentError, "NAME argument in an unexpected place") if name + htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER] + htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE] + htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY] + htd.setCompactionEnabled(JBoolean.valueOf(arg[COMPACTION_ENABLED])) if arg[COMPACTION_ENABLED] + htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE] + htd.setDeferredLogFlush(JBoolean.valueOf(arg.delete(DEFERRED_LOG_FLUSH))) if arg[DEFERRED_LOG_FLUSH] + htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY] + set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA] + set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION] + + # set a coprocessor attribute + valid_coproc_keys = [] + if arg.kind_of?(Hash) + arg.each do |key, value| + k = String.new(key) # prepare to strip + k.strip! + + if (k =~ /coprocessor/i) + # validate coprocessor specs + v = String.new(value) + v.strip! 
+ if !(v =~ /^([^\|]*)\|([^\|]+)\|[\s]*([\d]*)[\s]*(\|.*)?$/) + raise ArgumentError, "Coprocessor value doesn't match spec: #{v}" + end + + # generate a coprocessor ordinal by checking max id of existing cps + maxId = 0 + htd.getValues().each do |k1, v1| + attrName = org.apache.hadoop.hbase.util.Bytes.toString(k1.get()) + # a cp key is coprocessor$(\d) + if (attrName =~ /coprocessor\$(\d+)/i) + ids = attrName.scan(/coprocessor\$(\d+)/i) + maxId = ids[0][0].to_i if ids[0][0].to_i > maxId + end + end + maxId += 1 + htd.setValue(k + "\$" + maxId.to_s, value) + valid_coproc_keys << key + end + end + + valid_coproc_keys.each do |key| + arg.delete(key) + end + + @admin.modifyTable(table_name.to_java_bytes, htd) + + arg.each_key do |unknown_key| + puts("Unknown argument ignored: %s" % [unknown_key]) + end + + if wait == true + puts "Updating all regions with the new schema..." + alter_status(table_name) + end + next + end + end + end + + def status(format) + status = @admin.getClusterStatus() + if format == "detailed" + puts("version %s" % [ status.getHBaseVersion() ]) + # Put regions in transition first because usually empty + puts("%d regionsInTransition" % status.getRegionsInTransition().size()) + for k, v in status.getRegionsInTransition() + puts(" %s" % [v]) + end + master_coprocs = java.util.Arrays.toString(@admin.getMasterCoprocessors()) + if master_coprocs != nil + puts("master coprocessors: %s" % master_coprocs) + end + puts("%d live servers" % [ status.getServersSize() ]) + for server in status.getServers() + puts(" %s:%d %d" % \ + [ server.getHostname(), server.getPort(), server.getStartcode() ]) + puts(" %s" % [ status.getLoad(server).toString() ]) + for name, region in status.getLoad(server).getRegionsLoad() + puts(" %s" % [ region.getNameAsString().dump ]) + puts(" %s" % [ region.toString() ]) + end + end + puts("%d dead servers" % [ status.getDeadServers() ]) + for server in status.getDeadServerNames() + puts(" %s" % [ server ]) + end + elsif format == 
"simple" + load = 0 + regions = 0 + puts("%d live servers" % [ status.getServersSize() ]) + for server in status.getServers() + puts(" %s:%d %d" % \ + [ server.getHostname(), server.getPort(), server.getStartcode() ]) + puts(" %s" % [ status.getLoad(server).toString() ]) + load += status.getLoad(server).getNumberOfRequests() + regions += status.getLoad(server).getNumberOfRegions() + end + puts("%d dead servers" % [ status.getDeadServers() ]) + for server in status.getDeadServerNames() + puts(" %s" % [ server ]) + end + puts("Aggregate load: %d, regions: %d" % [ load , regions ] ) + else + puts "#{status.getServersSize} servers, #{status.getDeadServers} dead, #{'%.4f' % status.getAverageLoad} average load" + end + end + + #---------------------------------------------------------------------------------------------- + # + # Helper methods + # + + # Does table exist? + def exists?(table_name) + @admin.tableExists(table_name) + end + + #---------------------------------------------------------------------------------------------- + # Is table enabled + def enabled?(table_name) + @admin.isTableEnabled(table_name) + end + + #---------------------------------------------------------------------------------------------- + #Is supplied region name is encoded region name + def closeEncodedRegion?(region_name, server) + @admin.closeRegionWithEncodedRegionName(region_name, server) + end + + #---------------------------------------------------------------------------------------------- + # Return a new HColumnDescriptor made of passed args + def hcd(arg, htd) + # String arg, single parameter constructor + return org.apache.hadoop.hbase.HColumnDescriptor.new(arg) if arg.kind_of?(String) + + raise(ArgumentError, "Column family #{arg} must have a name") unless name = arg.delete(NAME) + + family = htd.getFamily(name.to_java_bytes) + # create it if it's a new family + family ||= org.apache.hadoop.hbase.HColumnDescriptor.new(name.to_java_bytes) + + 
family.setBlockCacheEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE) + family.setScope(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE) + family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY) + family.setTimeToLive(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL) + family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING) + family.setEncodeOnDisk(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK) + family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE) + family.setMaxVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS) + family.setMinVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS) + family.setKeepDeletedCells(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS) + family.setValue(COMPRESSION_COMPACT, arg.delete(COMPRESSION_COMPACT)) if arg.include?(COMPRESSION_COMPACT) + 
raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + org.apache.hadoop.hbase.regionserver.BloomType.constants.join(" "))
meta.get(g).value + + # Change region status + hri = org.apache.hadoop.hbase.util.Writables.getWritable(hri_bytes, org.apache.hadoop.hbase.HRegionInfo.new) + hri.setOffline(on_off) + + # Write it back + put = org.apache.hadoop.hbase.client.Put.new(region_bytes) + put.add(org.apache.hadoop.hbase.HConstants::CATALOG_FAMILY, org.apache.hadoop.hbase.HConstants::REGIONINFO_QUALIFIER, org.apache.hadoop.hbase.util.Writables.getBytes(hri)) + meta.put(put) + end + # Apply user metadata to table/column descriptor + def set_user_metadata(descriptor, metadata) + raise(ArgumentError, "#{METADATA} must be a Hash type") unless metadata.kind_of?(Hash) + for k,v in metadata + v = v.to_s unless v.nil? + descriptor.setValue(k, v) + end + end + + #---------------------------------------------------------------------------------------------- + # Take a snapshot of specified table + def snapshot(table, snapshot_name) + @admin.snapshot(snapshot_name.to_java_bytes, table.to_java_bytes) + end + + #---------------------------------------------------------------------------------------------- + # Restore specified snapshot + def restore_snapshot(snapshot_name) + @admin.restoreSnapshot(snapshot_name.to_java_bytes) + end + + #---------------------------------------------------------------------------------------------- + # Create a new table by cloning the snapshot content + def clone_snapshot(snapshot_name, table) + @admin.cloneSnapshot(snapshot_name.to_java_bytes, table.to_java_bytes) + end + + #---------------------------------------------------------------------------------------------- + # Rename specified snapshot + def rename_snapshot(old_snapshot_name, new_snapshot_name) + @admin.renameSnapshot(old_snapshot_name.to_java_bytes, new_snapshot_name.to_java_bytes) + end + + #---------------------------------------------------------------------------------------------- + # Delete specified snapshot + def delete_snapshot(snapshot_name) + @admin.deleteSnapshot(snapshot_name.to_java_bytes) + 
end + + #---------------------------------------------------------------------------------------------- + # Returns a list of snapshots + def list_snapshot + @admin.listSnapshots + end + + # Apply config specific to a table/column to its descriptor + def set_descriptor_config(descriptor, config) + raise(ArgumentError, "#{CONFIGURATION} must be a Hash type") unless config.kind_of?(Hash) + for k,v in config + v = v.to_s unless v.nil? + descriptor.setConfiguration(k, v) + end + end + + #---------------------------------------------------------------------------------------------- + # Returns namespace's structure description + def describe_namespace(namespace_name) + namespace = @admin.getNamespaceDescriptor(namespace_name) + + unless namespace.nil? + return namespace.to_s + end + + raise(ArgumentError, "Failed to find namespace named #{namespace_name}") + end + + #---------------------------------------------------------------------------------------------- + # Returns a list of namespaces in hbase + def list_namespace + @admin.listNamespaceDescriptors.map { |ns| ns.getName } + end + + #---------------------------------------------------------------------------------------------- + # Returns a list of tables in namespace + def list_namespace_tables(namespace_name) + unless namespace_name.nil? 
+ return @admin.listTableNamesByNamespace(namespace_name).map { |t| t.getQualifierAsString() } + end + + raise(ArgumentError, "Failed to find namespace named #{namespace_name}") + end + + #---------------------------------------------------------------------------------------------- + # Creates a namespace + def create_namespace(namespace_name, *args) + # Fail if table name is not a string + raise(ArgumentError, "Namespace name must be of type String") unless namespace_name.kind_of?(String) + + # Flatten params array + args = args.flatten.compact + + # Start defining the table + nsb = org.apache.hadoop.hbase.NamespaceDescriptor::create(namespace_name) + args.each do |arg| + unless arg.kind_of?(Hash) + raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type") + end + for k,v in arg + v = v.to_s unless v.nil? + nsb.addConfiguration(k, v) + end + end + @admin.createNamespace(nsb.build()); + end + + #---------------------------------------------------------------------------------------------- + # modify a namespace + def alter_namespace(namespace_name, *args) + # Fail if table name is not a string + raise(ArgumentError, "Namespace name must be of type String") unless namespace_name.kind_of?(String) + + nsd = @admin.getNamespaceDescriptor(namespace_name) + + unless nsd + raise(ArgumentError, "Namespace does not exist") + end + nsb = org.apache.hadoop.hbase.NamespaceDescriptor::create(nsd) + + # Flatten params array + args = args.flatten.compact + + # Start defining the table + args.each do |arg| + unless arg.kind_of?(Hash) + raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash type") + end + method = arg[METHOD] + if method == "unset" + nsb.removeConfiguration(arg[NAME]) + elsif method == "set" + arg.delete(METHOD) + for k,v in arg + v = v.to_s unless v.nil? 
# Drops a namespace
Human doesn't want to wait on N retries. + configuration.setInt("hbase.client.retries.number", 7) + configuration.setInt("ipc.client.connect.max.retries", 3) + end + end + + def admin(formatter) + ::Hbase::Admin.new(configuration, formatter) + end + + # Create new one each time + def table(table, shell) + ::Hbase::Table.new(configuration, table, shell) + end + + def replication_admin(formatter) + ::Hbase::RepAdmin.new(configuration, formatter) + end + + def security_admin(formatter) + ::Hbase::SecurityAdmin.new(configuration, formatter) + end + end +end Added: hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/replication_admin.rb URL: http://svn.apache.org/viewvc/hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/replication_admin.rb?rev=1525642&view=auto ============================================================================== --- hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/replication_admin.rb (added) +++ hbase/branches/0.96/hbase-shell/src/main/ruby/hbase/replication_admin.rb Mon Sep 23 16:45:42 2013 @@ -0,0 +1,76 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +include Java + +# Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin + +module Hbase + class RepAdmin + include HBaseConstants + + def initialize(configuration, formatter) + @replication_admin = org.apache.hadoop.hbase.client.replication.ReplicationAdmin.new(configuration) + @formatter = formatter + end + + #---------------------------------------------------------------------------------------------- + # Add a new peer cluster to replicate to + def add_peer(id, cluster_key) + @replication_admin.addPeer(id, cluster_key) + end + + #---------------------------------------------------------------------------------------------- + # Remove a peer cluster, stops the replication + def remove_peer(id) + @replication_admin.removePeer(id) + end + + + #--------------------------------------------------------------------------------------------- + # Show replcated tables/column families, and their ReplicationType + def list_replicated_tables + @replication_admin.listReplicated() + end + + #---------------------------------------------------------------------------------------------- + # List all peer clusters + def list_peers + @replication_admin.listPeers + end + + #---------------------------------------------------------------------------------------------- + # Get peer cluster state + def get_peer_state(id) + @replication_admin.getPeerState(id) ? "ENABLED" : "DISABLED" + end + + #---------------------------------------------------------------------------------------------- + # Restart the replication stream to the specified peer + def enable_peer(id) + @replication_admin.enablePeer(id) + end + + #---------------------------------------------------------------------------------------------- + # Stop the replication stream to the specified peer + def disable_peer(id) + @replication_admin.disablePeer(id) + end + end +end