hbase-commits mailing list archives

From: nkey...@apache.org
Subject: svn commit: r1525641 [1/4] - in /hbase/trunk: ./ bin/ hbase-assembly/src/main/assembly/ hbase-it/ hbase-server/ hbase-server/src/main/ruby/ hbase-server/src/main/ruby/hbase/ hbase-server/src/main/ruby/irb/ hbase-server/src/main/ruby/shell/ hbase-server...
Date: Mon, 23 Sep 2013 16:40:54 GMT
Author: nkeywal
Date: Mon Sep 23 16:40:51 2013
New Revision: 1525641

URL: http://svn.apache.org/r1525641
Log:
HBASE-9632 Put the shell in a maven sub module (hbase-shell) instead of hbase-server

Added:
    hbase/trunk/hbase-shell/
    hbase/trunk/hbase-shell/pom.xml
    hbase/trunk/hbase-shell/src/
    hbase/trunk/hbase-shell/src/main/
    hbase/trunk/hbase-shell/src/main/ruby/
    hbase/trunk/hbase-shell/src/main/ruby/hbase/
    hbase/trunk/hbase-shell/src/main/ruby/hbase.rb
    hbase/trunk/hbase-shell/src/main/ruby/hbase/admin.rb
    hbase/trunk/hbase-shell/src/main/ruby/hbase/hbase.rb
    hbase/trunk/hbase-shell/src/main/ruby/hbase/replication_admin.rb
    hbase/trunk/hbase-shell/src/main/ruby/hbase/security.rb
    hbase/trunk/hbase-shell/src/main/ruby/hbase/table.rb
    hbase/trunk/hbase-shell/src/main/ruby/irb/
    hbase/trunk/hbase-shell/src/main/ruby/irb/hirb.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/
    hbase/trunk/hbase-shell/src/main/ruby/shell.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/alter.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/alter_async.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/alter_namespace.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/alter_status.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/assign.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/balance_switch.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/balancer.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_enabled.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_run.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_switch.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/close_region.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/compact.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/count.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/create.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/create_namespace.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/delete.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/delete_snapshot.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/deleteall.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/describe.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/describe_namespace.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/disable.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/disable_all.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/disable_peer.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/drop.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/drop_all.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/drop_namespace.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/enable.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/enable_all.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/enable_peer.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/exists.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/flush.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/get.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/get_counter.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/get_table.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/grant.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/hlog_roll.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/incr.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/is_disabled.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/list.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/list_namespace.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/list_replicated_tables.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/major_compact.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/merge_region.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/move.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/put.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/remove_peer.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/rename_snapshot.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/revoke.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/scan.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/show_filters.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/snapshot.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/split.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/status.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/table_help.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/truncate.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/unassign.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/user_permission.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/version.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/whoami.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/commands/zk_dump.rb
    hbase/trunk/hbase-shell/src/main/ruby/shell/formatter.rb
    hbase/trunk/hbase-shell/src/test/
    hbase/trunk/hbase-shell/src/test/java/
    hbase/trunk/hbase-shell/src/test/java/org/
    hbase/trunk/hbase-shell/src/test/java/org/apache/
    hbase/trunk/hbase-shell/src/test/java/org/apache/hadoop/
    hbase/trunk/hbase-shell/src/test/java/org/apache/hadoop/hbase/
    hbase/trunk/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/
    hbase/trunk/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java
    hbase/trunk/hbase-shell/src/test/ruby/
    hbase/trunk/hbase-shell/src/test/ruby/hbase/
    hbase/trunk/hbase-shell/src/test/ruby/hbase/admin_test.rb
    hbase/trunk/hbase-shell/src/test/ruby/hbase/hbase_test.rb
    hbase/trunk/hbase-shell/src/test/ruby/hbase/table_test.rb
    hbase/trunk/hbase-shell/src/test/ruby/shell/
    hbase/trunk/hbase-shell/src/test/ruby/shell/commands_test.rb
    hbase/trunk/hbase-shell/src/test/ruby/shell/formatter_test.rb
    hbase/trunk/hbase-shell/src/test/ruby/shell/shell_test.rb
    hbase/trunk/hbase-shell/src/test/ruby/test_helper.rb
    hbase/trunk/hbase-shell/src/test/ruby/tests_runner.rb
Removed:
    hbase/trunk/hbase-server/src/main/ruby/hbase.rb
    hbase/trunk/hbase-server/src/main/ruby/hbase/admin.rb
    hbase/trunk/hbase-server/src/main/ruby/hbase/hbase.rb
    hbase/trunk/hbase-server/src/main/ruby/hbase/replication_admin.rb
    hbase/trunk/hbase-server/src/main/ruby/hbase/security.rb
    hbase/trunk/hbase-server/src/main/ruby/hbase/table.rb
    hbase/trunk/hbase-server/src/main/ruby/irb/hirb.rb
    hbase/trunk/hbase-server/src/main/ruby/shell.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/add_peer.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/alter.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/alter_async.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/alter_namespace.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/alter_status.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/assign.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/balance_switch.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/balancer.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/catalogjanitor_enabled.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/catalogjanitor_run.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/catalogjanitor_switch.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/clone_snapshot.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/close_region.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/compact.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/count.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/create.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/create_namespace.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/delete.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/delete_snapshot.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/deleteall.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/describe.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/describe_namespace.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/disable.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/disable_all.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/disable_peer.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/drop.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/drop_all.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/drop_namespace.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/enable.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/enable_all.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/enable_peer.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/exists.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/flush.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/get.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/get_counter.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/get_table.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/grant.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/hlog_roll.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/incr.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/is_disabled.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/is_enabled.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/list.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/list_namespace.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/list_namespace_tables.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/list_peers.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/list_replicated_tables.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/list_snapshots.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/major_compact.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/merge_region.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/move.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/put.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/remove_peer.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/rename_snapshot.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/restore_snapshot.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/revoke.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/scan.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/show_filters.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/snapshot.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/split.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/status.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/table_help.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/truncate.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/truncate_preserve.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/unassign.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/user_permission.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/version.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/whoami.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/commands/zk_dump.rb
    hbase/trunk/hbase-server/src/main/ruby/shell/formatter.rb
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShell.java
    hbase/trunk/hbase-server/src/test/ruby/hbase/admin_test.rb
    hbase/trunk/hbase-server/src/test/ruby/hbase/hbase_test.rb
    hbase/trunk/hbase-server/src/test/ruby/hbase/table_test.rb
    hbase/trunk/hbase-server/src/test/ruby/shell/commands_test.rb
    hbase/trunk/hbase-server/src/test/ruby/shell/formatter_test.rb
    hbase/trunk/hbase-server/src/test/ruby/shell/shell_test.rb
    hbase/trunk/hbase-server/src/test/ruby/test_helper.rb
    hbase/trunk/hbase-server/src/test/ruby/tests_runner.rb
Modified:
    hbase/trunk/bin/hbase
    hbase/trunk/bin/hbase.cmd
    hbase/trunk/hbase-assembly/src/main/assembly/components.xml
    hbase/trunk/hbase-it/pom.xml
    hbase/trunk/hbase-server/pom.xml
    hbase/trunk/pom.xml

Modified: hbase/trunk/bin/hbase
URL: http://svn.apache.org/viewvc/hbase/trunk/bin/hbase?rev=1525641&r1=1525640&r2=1525641&view=diff
==============================================================================
--- hbase/trunk/bin/hbase (original)
+++ hbase/trunk/bin/hbase Mon Sep 23 16:40:51 2013
@@ -257,7 +257,7 @@ if [ "$COMMAND" = "shell" ] ; then
   if [ -d "$HBASE_HOME/lib/ruby" ]; then
     HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/lib/ruby"
   else
-    HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/hbase-server/src/main/ruby"
+    HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/hbase-shell/src/main/ruby"
   fi
   CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/bin/hirb.rb"
 elif [ "$COMMAND" = "hbck" ] ; then

Modified: hbase/trunk/bin/hbase.cmd
URL: http://svn.apache.org/viewvc/hbase/trunk/bin/hbase.cmd?rev=1525641&r1=1525640&r2=1525641&view=diff
==============================================================================
--- hbase/trunk/bin/hbase.cmd (original)
+++ hbase/trunk/bin/hbase.cmd Mon Sep 23 16:40:51 2013
@@ -305,7 +305,7 @@ goto :eof
   if EXIST %HBASE_HOME%\lib\ruby (
     set HBASE_OPTS=%HBASE_OPTS% -Dhbase.ruby.sources=%HBASE_HOME%\lib\ruby
   ) else (
-    set HBASE_OPTS=%HBASE_OPTS% -Dhbase.ruby.sources=%HBASE_HOME%\hbase-server\src\main\ruby
+    set HBASE_OPTS=%HBASE_OPTS% -Dhbase.ruby.sources=%HBASE_HOME%\hbase-shell\src\main\ruby
   )
 
   set CLASS=org.jruby.Main -X+O %JRUBY_OPTS% %HBASE_HOME%\bin\hirb.rb
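
Both launcher scripts now point -Dhbase.ruby.sources at the new hbase-shell module when running from a source checkout. The JRuby sketch below shows one way such a property can be consumed on the Ruby side; the property name comes from the diffs above, but the load-path handling is an illustrative assumption, not the actual bin/hirb.rb.

    # Hypothetical launcher fragment (JRuby). Assumes only that the
    # hbase.ruby.sources system property names the directory holding
    # hbase.rb and friends, as set by bin/hbase and bin/hbase.cmd above.
    include Java

    ruby_sources = java.lang.System.get_property('hbase.ruby.sources')
    # Prepend the shell sources so `require 'hbase'` resolves, whether the
    # scripts ship in lib/ruby or live in hbase-shell/src/main/ruby.
    $LOAD_PATH.unshift(ruby_sources) if ruby_sources

    require 'hbase'  # loads the hbase.rb added later in this commit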

Modified: hbase/trunk/hbase-assembly/src/main/assembly/components.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-assembly/src/main/assembly/components.xml?rev=1525641&r1=1525640&r2=1525641&view=diff
==============================================================================
Binary files - no diff available.

Modified: hbase/trunk/hbase-it/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-it/pom.xml?rev=1525641&r1=1525640&r2=1525641&view=diff
==============================================================================
--- hbase/trunk/hbase-it/pom.xml (original)
+++ hbase/trunk/hbase-it/pom.xml Mon Sep 23 16:40:51 2013
@@ -177,6 +177,10 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-shell</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
     </dependency>
     <dependency>

Modified: hbase/trunk/hbase-server/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/pom.xml?rev=1525641&r1=1525640&r2=1525641&view=diff
==============================================================================
--- hbase/trunk/hbase-server/pom.xml (original)
+++ hbase/trunk/hbase-server/pom.xml Mon Sep 23 16:40:51 2013
@@ -370,10 +370,6 @@
       <artifactId>zookeeper</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.jruby</groupId>
-      <artifactId>jruby-complete</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.mortbay.jetty</groupId>
       <artifactId>jetty</artifactId>
     </dependency>

Added: hbase/trunk/hbase-shell/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-shell/pom.xml?rev=1525641&view=auto
==============================================================================
--- hbase/trunk/hbase-shell/pom.xml (added)
+++ hbase/trunk/hbase-shell/pom.xml Mon Sep 23 16:40:51 2013
@@ -0,0 +1,472 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <artifactId>hbase</artifactId>
+    <groupId>org.apache.hbase</groupId>
+    <version>0.97.0-SNAPSHOT</version>
+    <relativePath>..</relativePath>
+  </parent>
+  <artifactId>hbase-shell</artifactId>
+  <name>HBase - Shell</name>
+  <description>Shell for HBase</description>
+  <build>
+    <!-- Makes sure the resources get added before they are processed
+      by placing this first -->
+    <resources>
+      <!-- Add the built webapps to the classpath -->
+      <resource>
+        <directory>${project.build.directory}</directory>
+        <includes>
+          <include>hbase-webapps/**</include>
+        </includes>
+      </resource>
+    </resources>
+    <testResources>
+      <testResource>
+        <directory>src/test/resources</directory>
+        <includes>
+          <include>**/**</include>
+        </includes>
+      </testResource>
+    </testResources>
+    <plugins>
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-site-plugin</artifactId>
+          <configuration>
+            <skip>true</skip>
+          </configuration>
+        </plugin>
+      <!-- Run with -Dmaven.test.skip.exec=true to build -tests.jar without running
+        tests (this is needed for upstream projects whose tests need this jar simply for
+        compilation) -->
+      <plugin>
+        <!--Make it so assembly:single does nothing in here-->
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <skipAssembly>true</skipAssembly>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <configuration>
+          <archive>
+            <manifest>
+              <mainClass>org/apache/hadoop/hbase/mapreduce/Driver</mainClass>
+            </manifest>
+          </archive>
+          <!-- Exclude these 2 packages, because their dependency _binary_ files
+            include the sources, and Maven 2.2 appears to add them to the sources to compile,
+            weird -->
+          <excludes>
+            <exclude>org/apache/jute/**</exclude>
+            <exclude>org/apache/zookeeper/**</exclude>
+            <exclude>**/*.jsp</exclude>
+            <exclude>hbase-site.xml</exclude>
+            <exclude>hdfs-site.xml</exclude>
+            <exclude>log4j.properties</exclude>
+            <exclude>mapred-queues.xml</exclude>
+            <exclude>mapred-site.xml</exclude>
+            <exclude>zoo.cfg</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+      <!-- Make a jar and put the sources in the jar -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-source-plugin</artifactId>
+      </plugin>
+      <!-- General ant tasks, bound to different build phases -->
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <!-- Add the generated sources -->
+          <execution>
+            <id>jspcSource-packageInfo-source</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>${project.build.directory}/generated-jamon</source>
+                <source>${project.build.directory}/generated-sources/java</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <!-- General plugins -->
+      <!-- Run findbugs -->
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+      </plugin>
+      <!-- Testing plugins -->
+      <plugin>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.hbase.ServerResourceCheckerJUnitListener</value>
+            </property>
+          </properties>
+        </configuration>
+      </plugin>
+    </plugins>
+    <!-- General Resources -->
+    <pluginManagement>
+       <plugins>
+        <plugin>
+          <artifactId>maven-surefire-plugin</artifactId>
+          <version>${surefire.version}</version>
+          <configuration>
+            <!-- Have to set the groups here because we only do
+            split tests in this package, so groups only live in this module -->
+            <groups>${surefire.firstPartGroups}</groups>
+          </configuration>
+        </plugin>
+      </plugins>
+    </pluginManagement>
+  </build>
+  <dependencies>
+    <!-- Intra-project dependencies -->
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-prefix-tree</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>${compat.module}</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>${compat.module}</artifactId>
+      <version>${project.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <!-- General dependencies -->
+    <dependency>
+      <groupId>com.yammer.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.jruby</groupId>
+      <artifactId>jruby-complete</artifactId>
+    </dependency>
+    <!-- Test Dependencies -->
+    <dependency>
+      <groupId>org.cloudera.htrace</groupId>
+      <artifactId>htrace-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.cloudera.htrace</groupId>
+      <artifactId>htrace-zipkin</artifactId>
+    </dependency>
+  </dependencies>
+  <profiles>
+    <!-- Skip the tests in this module -->
+    <profile>
+      <id>skipServerTests</id>
+      <activation>
+        <property>
+          <name>skipServerTests</name>
+        </property>
+      </activation>
+      <properties>
+        <surefire.skipFirstPart>true</surefire.skipFirstPart>
+        <surefire.skipSecondPart>true</surefire.skipSecondPart>
+      </properties>
+    </profile>
+    <!-- Special builds -->
+    <profile>
+      <id>hadoop-snappy</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+        <property>
+          <name>snappy</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-snappy</artifactId>
+          <version>${hadoop-snappy.version}</version>
+        </dependency>
+      </dependencies>
+    </profile>
+    <profile>
+      <id>native</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>make</id>
+                <phase>compile</phase>
+                <goals><goal>run</goal></goals>
+                <configuration>
+                  <target>
+                    <mkdir dir="${project.build.directory}/native"/>
+                    <exec executable="cmake" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="${basedir}/src/main/native -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}"/>
+                    </exec>
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="VERBOSE=1"/>
+                    </exec>
+                  </target>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <!-- Profiles for building against different hadoop versions -->
+    <!-- There are a lot of common dependencies used here, should investigate
+    if we can combine these profiles somehow -->
+    <!-- profile against Hadoop 1.1.x: This is the default. It has to have the same
+    activation property as the parent Hadoop 1.1.x profile to make sure it gets run at
+    the same time. -->
+    <profile>
+      <id>hadoop-1.1</id>
+      <activation>
+        <property>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h1--><name>!hadoop.profile</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-test</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+    <profile>
+      <id>hadoop-1.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>1.0</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-test</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+    <!--
+      profile for building against Hadoop 2.0.0-alpha. Activate using:
+       mvn -Dhadoop.profile=2.0
+    -->
+    <profile>
+      <id>hadoop-2.0</id>
+      <activation>
+        <property>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h2--><name>hadoop.profile</name><value>2.0</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-auth</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+          <type>test-jar</type>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs</artifactId>
+          <type>test-jar</type>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-minicluster</artifactId>
+          <scope>test</scope>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-dependency-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>create-mrapp-generated-classpath</id>
+                <phase>generate-test-resources</phase>
+                <goals>
+                  <goal>build-classpath</goal>
+                </goals>
+                <configuration>
+                  <!-- needed to run the unit test for DS to generate
+                  the required classpath that is required in the env
+                  of the launch container in the mini mr/yarn cluster
+                  -->
+                  <outputFile>${project.build.directory}/test-classes/mrapp-generated-classpath</outputFile>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <!--
+      profile for building against Hadoop 3.0.x. Activate using:
+       mvn -Dhadoop.profile=3.0
+    -->
+    <profile>
+      <id>hadoop-3.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>3.0</value>
+        </property>
+      </activation>
+      <properties>
+        <hadoop.version>3.0-SNAPSHOT</hadoop.version>
+      </properties>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-minicluster</artifactId>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-dependency-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>create-mrapp-generated-classpath</id>
+                <phase>generate-test-resources</phase>
+                <goals>
+                  <goal>build-classpath</goal>
+                </goals>
+                <configuration>
+                  <!-- needed to run the unit test for DS to generate
+                  the required classpath that is required in the env
+                  of the launch container in the mini mr/yarn cluster
+                  -->
+                  <outputFile>${project.build.directory}/test-classes/mrapp-generated-classpath</outputFile>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>

Added: hbase/trunk/hbase-shell/src/main/ruby/hbase.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-shell/src/main/ruby/hbase.rb?rev=1525641&view=auto
==============================================================================
--- hbase/trunk/hbase-shell/src/main/ruby/hbase.rb (added)
+++ hbase/trunk/hbase-shell/src/main/ruby/hbase.rb Mon Sep 23 16:40:51 2013
@@ -0,0 +1,80 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# HBase ruby classes.
+# Has wrapper classes for org.apache.hadoop.hbase.client.HBaseAdmin
+# and for org.apache.hadoop.hbase.client.HTable.  Classes take
+# Formatters on construction and output any results using
+# Formatter methods.  These classes are only really for use by
+# the hirb.rb HBase Shell script; they don't make much sense elsewhere.
+# For example, the exists method on Admin class prints to the formatter
+# whether the table exists and returns nil regardless.
+include Java
+
+include_class('java.lang.Integer') {|package,name| "J#{name}" }
+include_class('java.lang.Long') {|package,name| "J#{name}" }
+include_class('java.lang.Boolean') {|package,name| "J#{name}" }
+
+module HBaseConstants
+  COLUMN = "COLUMN"
+  COLUMNS = "COLUMNS"
+  TIMESTAMP = "TIMESTAMP"
+  TIMERANGE = "TIMERANGE"
+  NAME = org.apache.hadoop.hbase.HConstants::NAME
+  VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS
+  IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY
+  METADATA = org.apache.hadoop.hbase.HConstants::METADATA
+  STOPROW = "STOPROW"
+  STARTROW = "STARTROW"
+  ENDROW = STOPROW
+  RAW = "RAW"
+  LIMIT = "LIMIT"
+  METHOD = "METHOD"
+  MAXLENGTH = "MAXLENGTH"
+  CACHE_BLOCKS = "CACHE_BLOCKS"
+  REPLICATION_SCOPE = "REPLICATION_SCOPE"
+  INTERVAL = 'INTERVAL'
+  CACHE = 'CACHE'
+  FILTER = 'FILTER'
+  SPLITS = 'SPLITS'
+  SPLITS_FILE = 'SPLITS_FILE'
+  SPLITALGO = 'SPLITALGO'
+  NUMREGIONS = 'NUMREGIONS'
+  CONFIGURATION = org.apache.hadoop.hbase.HConstants::CONFIGURATION
+
+  # Load constants from hbase java API
+  def self.promote_constants(constants)
+    # The constants to import are all in uppercase
+    constants.each do |c|
+      next if c =~ /DEFAULT_.*/ || c != c.upcase
+      next if eval("defined?(#{c})")
+      eval("#{c} = '#{c}'")
+    end
+  end
+
+  promote_constants(org.apache.hadoop.hbase.HColumnDescriptor.constants)
+  promote_constants(org.apache.hadoop.hbase.HTableDescriptor.constants)
+end
+
+# Include classes definition
+require 'hbase/hbase'
+require 'hbase/admin'
+require 'hbase/table'
+require 'hbase/replication_admin'
+require 'hbase/security'
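
The tail of hbase.rb above promotes selected Java constants into Ruby so that shell hashes can use bare names like COMPRESSION instead of string literals. A self-contained sketch of that mechanism follows; it mirrors promote_constants but is fed a hand-written list, since the real inputs are the HColumnDescriptor/HTableDescriptor constant lists (the sample names below are assumptions for illustration).

    # Plain-Ruby model of HBaseConstants.promote_constants: every
    # all-uppercase name that is not DEFAULT_* becomes a constant whose
    # value is its own name.
    module Demo
      def self.promote_constants(constants)
        constants.each do |c|
          next if c =~ /DEFAULT_.*/ || c != c.upcase  # skip defaults, mixed case
          next if eval("defined?(#{c})")              # keep existing definitions
          eval("#{c} = '#{c}'")
        end
      end

      promote_constants(%w[COMPRESSION BLOOMFILTER DEFAULT_VERSIONS versions])
    end

    puts Demo::COMPRESSION                          # => COMPRESSION
    puts Demo::BLOOMFILTER                          # => BLOOMFILTER
    puts defined?(Demo::DEFAULT_VERSIONS).inspect   # => nil (filtered out)

The requires at the end then pull in the wrapper classes (Hbase::Admin, the table wrapper, and the replication and security admins) that the per-command scripts under shell/commands/ call into.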

Added: hbase/trunk/hbase-shell/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-shell/src/main/ruby/hbase/admin.rb?rev=1525641&view=auto
==============================================================================
--- hbase/trunk/hbase-shell/src/main/ruby/hbase/admin.rb (added)
+++ hbase/trunk/hbase-shell/src/main/ruby/hbase/admin.rb Mon Sep 23 16:40:51 2013
@@ -0,0 +1,824 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include Java
+java_import java.util.Arrays
+java_import org.apache.hadoop.hbase.util.Pair
+java_import org.apache.hadoop.hbase.util.RegionSplitter
+java_import org.apache.hadoop.hbase.util.Bytes
+
+# Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin
+
+module Hbase
+  class Admin
+    include HBaseConstants
+
+    def initialize(configuration, formatter)
+      @admin = org.apache.hadoop.hbase.client.HBaseAdmin.new(configuration)
+      connection = @admin.getConnection()
+      @conf = configuration
+      @zk_wrapper = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(configuration,
+        "admin", nil)
+      zk = @zk_wrapper.getRecoverableZooKeeper().getZooKeeper()
+      @zk_main = org.apache.zookeeper.ZooKeeperMain.new(zk)
+      @formatter = formatter
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Returns a list of tables in hbase
+    def list(regex = ".*")
+      Arrays.asList(@admin.getTableNames(regex))
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Requests a table or region flush
+    def flush(table_or_region_name)
+      @admin.flush(table_or_region_name)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Requests a table or region or column family compaction
+    def compact(table_or_region_name, family = nil)
+      if family == nil
+        @admin.compact(table_or_region_name)
+      else
+        # We are compacting a column family within a region.
+        @admin.compact(table_or_region_name, family)
+      end
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Requests a table or region or column family major compaction
+    def major_compact(table_or_region_name, family = nil)
+      if family == nil
+        @admin.majorCompact(table_or_region_name)
+      else
+        # We are major compacting a column family within a region or table.
+        @admin.majorCompact(table_or_region_name, family)
+      end
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Requests a regionserver's HLog roll
+    def hlog_roll(server_name)
+      @admin.rollHLogWriter(server_name)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Requests a table or region split
+    def split(table_or_region_name, split_point)
+      if split_point == nil
+        @admin.split(table_or_region_name)
+      else
+        @admin.split(table_or_region_name, split_point)
+      end
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Requests a cluster balance
+    # Returns true if balancer ran
+    def balancer()
+      @admin.balancer()
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Enable/disable balancer
+    # Returns previous balancer switch setting.
+    def balance_switch(enableDisable)
+      @admin.setBalancerRunning(
+        java.lang.Boolean::valueOf(enableDisable), java.lang.Boolean::valueOf(false))
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Request a scan of the catalog table (for garbage collection)
+    # Returns an int signifying the number of entries cleaned
+    def catalogjanitor_run()
+      @admin.runCatalogScan()
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Enable/disable the catalog janitor
+    # Returns previous catalog janitor switch setting.
+    def catalogjanitor_switch(enableDisable)
+      @admin.enableCatalogJanitor(java.lang.Boolean::valueOf(enableDisable))
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Query on the catalog janitor state (enabled/disabled?)
+    # Returns catalog janitor state (true signifies enabled).
+    def catalogjanitor_enabled()
+      @admin.isCatalogJanitorEnabled()
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Enables a table
+    def enable(table_name)
+      tableExists(table_name)
+      return if enabled?(table_name)
+      @admin.enableTable(table_name)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Enables all tables matching the given regex
+    def enable_all(regex)
+      regex = regex.to_s
+      @admin.enableTables(regex)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Disables a table
+    def disable(table_name)
+      tableExists(table_name)
+      return if disabled?(table_name)
+      @admin.disableTable(table_name)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Disables all tables matching the given regex
+    def disable_all(regex)
+      regex = regex.to_s
+      @admin.disableTables(regex).map { |t| t.getTableName().getNameAsString }
+    end
+
+    #---------------------------------------------------------------------------------------------
+    # Throw exception if table doesn't exist
+    def tableExists(table_name)
+      raise ArgumentError, "Table #{table_name} does not exist." unless exists?(table_name)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Is table disabled?
+    def disabled?(table_name)
+      @admin.isTableDisabled(table_name)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Drops a table
+    def drop(table_name)
+      tableExists(table_name)
+      raise ArgumentError, "Table #{table_name} is enabled. Disable it first." if enabled?(table_name)
+
+      @admin.deleteTable(table_name)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Drops a table
+    def drop_all(regex)
+      regex = regex.to_s
+      failed  = @admin.deleteTables(regex).map { |t| t.getTableName().getNameAsString }
+      return failed
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Returns ZooKeeper status dump
+    def zk_dump
+      org.apache.hadoop.hbase.zookeeper.ZKUtil::dump(@zk_wrapper)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Creates a table
+    def create(table_name, *args)
+      # Fail if table name is not a string
+      raise(ArgumentError, "Table name must be of type String") unless table_name.kind_of?(String)
+
+      # Flatten params array
+      args = args.flatten.compact
+      has_columns = false
+
+      # Start defining the table
+      htd = org.apache.hadoop.hbase.HTableDescriptor.new(org.apache.hadoop.hbase.TableName.valueOf(table_name))
+      splits = nil
+      # Args are either columns or splits, add them to the table definition
+      # TODO: add table options support
+      args.each do |arg|
+        unless arg.kind_of?(String) || arg.kind_of?(Hash)
+          raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type")
+        end
+        
+        # First, handle all the cases where arg is a column family.
+        if arg.kind_of?(String) or arg.has_key?(NAME)
+          # If the arg is a string, default action is to add a column to the table.
+          # If arg has a name, it must also be a column descriptor.
+          htd.addFamily(hcd(arg, htd))
+          has_columns = true
+          next
+        end
+        
+        # Get rid of the "METHOD", which is deprecated for create.
+        # We'll do whatever it used to do below if it's table_att.
+        if (method = arg.delete(METHOD))
+            raise(ArgumentError, "table_att is currently the only supported method") unless method == 'table_att'
+        end
+        
+        # The hash is not a column family. Figure out what's in it.
+        # First, handle splits.
+        if arg.has_key?(SPLITS_FILE)
+          splits_file = arg.delete(SPLITS_FILE)
+          unless File.exist?(splits_file)
+            raise(ArgumentError, "Splits file #{splits_file} doesn't exist")
+          end
+          arg[SPLITS] = []
+          File.foreach(splits_file) do |line|
+            arg[SPLITS].push(line.strip())
+          end
+        end
+
+        if arg.has_key?(SPLITS) 
+          splits = Java::byte[][arg[SPLITS].size].new
+          idx = 0
+          arg.delete(SPLITS).each do |split|
+            splits[idx] = org.apache.hadoop.hbase.util.Bytes.toBytesBinary(split)
+            idx = idx + 1
+          end
+        elsif arg.has_key?(NUMREGIONS) or arg.has_key?(SPLITALGO)
+          # deprecated region pre-split API; if one of the above is specified, will be ignored.
+          raise(ArgumentError, "Number of regions must be specified") unless arg.has_key?(NUMREGIONS)
+          raise(ArgumentError, "Split algorithm must be specified") unless arg.has_key?(SPLITALGO)
+          raise(ArgumentError, "Number of regions must be greater than 1") unless arg[NUMREGIONS] > 1
+          num_regions = arg.delete(NUMREGIONS)
+          split_algo = RegionSplitter.newSplitAlgoInstance(@conf, arg.delete(SPLITALGO))
+          splits = split_algo.split(JInteger.valueOf(num_regions))
+        end
+        
+        # Done with splits; apply formerly-table_att parameters.
+        htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER] 
+        htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE]
+        htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY]
+        htd.setCompactionEnabled(JBoolean.valueOf(arg[COMPACTION_ENABLED])) if arg[COMPACTION_ENABLED]
+        htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE]
+        htd.setDeferredLogFlush(JBoolean.valueOf(arg.delete(DEFERRED_LOG_FLUSH))) if arg[DEFERRED_LOG_FLUSH]
+        htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY]
+        set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
+        set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
+        
+        arg.each_key do |ignored_key|
+          puts("An argument ignored (unknown or overridden): %s" % [ ignored_key ])
+        end
+      end
+      
+      # Fail if no column families defined
+      raise(ArgumentError, "Table must have at least one column family") if !has_columns
+      
+      if splits.nil?
+        # Perform the create table call
+        @admin.createTable(htd)
+      else
+        # Perform the create table call
+        @admin.createTable(htd, splits)
+      end
+    end
+    
+    #----------------------------------------------------------------------------------------------
+    # Closes a region.
+    # If server name is nil, we presume region_name is full region name (HRegionInfo.getRegionName).
+    # If server name is not nil, we presume it is the region's encoded name (HRegionInfo.getEncodedName)
+    def close_region(region_name, server)
+      if (server == nil || !closeEncodedRegion?(region_name, server))
+        @admin.closeRegion(region_name, server)
+      end
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Assign a region
+    def assign(region_name)
+      @admin.assign(region_name.to_java_bytes)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Unassign a region
+    def unassign(region_name, force)
+      @admin.unassign(region_name.to_java_bytes, java.lang.Boolean::valueOf(force))
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Move a region
+    def move(encoded_region_name, server = nil)
+      @admin.move(encoded_region_name.to_java_bytes, server ? server.to_java_bytes: nil)
+    end
+    
+    #----------------------------------------------------------------------------------------------
+    # Merge two regions
+    def merge_region(encoded_region_a_name, encoded_region_b_name, force)
+      @admin.mergeRegions(encoded_region_a_name.to_java_bytes, encoded_region_b_name.to_java_bytes, java.lang.Boolean::valueOf(force))
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Returns table's structure description
+    def describe(table_name)
+      @admin.getTableDescriptor(table_name.to_java_bytes).to_s
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Truncates table (deletes all records by recreating the table)
+    def truncate(table_name, conf = @conf)
+      h_table = org.apache.hadoop.hbase.client.HTable.new(conf, table_name)
+      table_description = h_table.getTableDescriptor()
+      raise ArgumentError, "Table #{table_name} is not enabled. Enable it first." unless enabled?(table_name)
+      yield 'Disabling table...' if block_given?
+      @admin.disableTable(table_name)
+
+      yield 'Dropping table...' if block_given?
+      @admin.deleteTable(table_name)
+
+      yield 'Creating table...' if block_given?
+      @admin.createTable(table_description)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Truncates table while maintaining region boundaries (deletes all records by recreating the table)
+    def truncate_preserve(table_name, conf = @conf)
+      h_table = org.apache.hadoop.hbase.client.HTable.new(conf, table_name)
+      splits = h_table.getRegionLocations().keys().map{|i| Bytes.toString(i.getStartKey)}.delete_if{|k| k == ""}.to_java :String
+      splits = org.apache.hadoop.hbase.util.Bytes.toByteArrays(splits)
+      table_description = h_table.getTableDescriptor()
+      yield 'Disabling table...' if block_given?
+      disable(table_name)
+
+      yield 'Dropping table...' if block_given?
+      drop(table_name)
+
+      yield 'Creating table with region boundaries...' if block_given?
+      @admin.createTable(table_description, splits)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Check the status of alter command (number of regions reopened)
+    def alter_status(table_name)
+      # Table name should be a string
+      raise(ArgumentError, "Table name must be of type String") unless table_name.kind_of?(String)
+
+      # Table should exist
+      raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
+
+      status = Pair.new()
+      begin
+        status = @admin.getAlterStatus(org.apache.hadoop.hbase.TableName.valueOf(table_name))
+        if status.getSecond() != 0
+          puts "#{status.getSecond() - status.getFirst()}/#{status.getSecond()} regions updated."
+        else
+          puts "All regions updated."
+        end
+        sleep 1
+      end while status != nil && status.getFirst() != 0
+      puts "Done."
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Change table structure or table options
+    def alter(table_name, wait = true, *args)
+      # Table name should be a string
+      raise(ArgumentError, "Table name must be of type String") unless table_name.kind_of?(String)
+
+      # Table should exist
+      raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
+
+      # There should be at least one argument
+      raise(ArgumentError, "There should be at least one argument besides the table name") if args.empty?
+
+      # Get table descriptor
+      htd = @admin.getTableDescriptor(table_name.to_java_bytes)
+
+      # Process all args
+      args.each do |arg|
+      
+      
+        # Normalize args to support column name only alter specs
+        arg = { NAME => arg } if arg.kind_of?(String)
+
+        # Normalize args to support shortcut delete syntax
+        arg = { METHOD => 'delete', NAME => arg['delete'] } if arg['delete']
+        
+        # There are 3 possible options.
+        # 1) Column family spec. Distinguished by having a NAME and no METHOD.
+        method = arg.delete(METHOD)
+        if method == nil and arg.has_key?(NAME)
+          descriptor = hcd(arg, htd)
+          column_name = descriptor.getNameAsString
+
+          # If column already exist, then try to alter it. Create otherwise.
+          if htd.hasFamily(column_name.to_java_bytes)
+            @admin.modifyColumn(table_name, descriptor)
+          else
+            @admin.addColumn(table_name, descriptor)
+          end
+
+          if wait == true
+            puts "Updating all regions with the new schema..."
+            alter_status(table_name)
+          end
+          
+          # We bypass descriptor when adding column families; refresh it to apply other args correctly.
+          htd = @admin.getTableDescriptor(table_name.to_java_bytes)
+          next
+        end
+          
+        # 2) Method other than table_att, with some args.
+        name = arg.delete(NAME)
+        if method != nil and method != "table_att"
+          # Delete column family
+          if method == "delete"
+            raise(ArgumentError, "NAME parameter missing for delete method") unless name
+            @admin.deleteColumn(table_name, name)
+          # Unset table attributes
+          elsif method == "table_att_unset"
+            raise(ArgumentError, "NAME parameter missing for table_att_unset method") unless name
+            if (htd.getValue(name) == nil)
+              raise ArgumentError, "Can not find attribute: #{name}"
+            end
+            htd.remove(name)
+            @admin.modifyTable(table_name.to_java_bytes, htd)
+          # Unknown method
+          else
+            raise ArgumentError, "Unknown method: #{method}"
+          end
+          
+          arg.each_key do |unknown_key|
+            puts("Unknown argument ignored: %s" % [unknown_key])
+          end
+          
+          if wait == true
+            puts "Updating all regions with the new schema..."
+            alter_status(table_name)
+          end
+          
+          if method == "delete"
+            # Deleting a column family bypasses our cached descriptor; refresh it so the remaining args apply correctly.
+            htd = @admin.getTableDescriptor(table_name.to_java_bytes)
+          end
+          next
+        end
+        
+        # 3) Some args for the table, optionally with METHOD => table_att (deprecated)
+        raise(ArgumentError, "NAME argument in an unexpected place") if name
+        htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER] 
+        htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE]
+        htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY]
+        htd.setCompactionEnabled(JBoolean.valueOf(arg.delete(COMPACTION_ENABLED))) if arg[COMPACTION_ENABLED]
+        htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE]
+        htd.setDeferredLogFlush(JBoolean.valueOf(arg.delete(DEFERRED_LOG_FLUSH))) if arg[DEFERRED_LOG_FLUSH]
+        htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY]
+        set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
+        set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
+
+        # set a coprocessor attribute
+        valid_coproc_keys = []
+        if arg.kind_of?(Hash)
+          arg.each do |key, value|
+            k = String.new(key) # prepare to strip
+            k.strip!
+
+            if (k =~ /coprocessor/i)
+              # validate coprocessor specs
+              v = String.new(value)
+              v.strip!
+              if !(v =~ /^([^\|]*)\|([^\|]+)\|[\s]*([\d]*)[\s]*(\|.*)?$/)
+                raise ArgumentError, "Coprocessor value doesn't match spec: #{v}"
+              end
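+              # The spec above matches values shaped like (hypothetical):
+              #   "hdfs:///foo.jar|com.example.FooObserver|1001|arg1=1,arg2=2"
+              # i.e. [jar path]|coprocessor class|[priority]|[key=value args]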
+
+              # generate a coprocessor ordinal by checking max id of existing cps
+              maxId = 0
+              htd.getValues().each do |k1, v1|
+                attrName = org.apache.hadoop.hbase.util.Bytes.toString(k1.get())
+                # a cp key is coprocessor$(\d)
+                if (attrName =~ /coprocessor\$(\d+)/i)
+                  ids = attrName.scan(/coprocessor\$(\d+)/i)
+                  maxId = ids[0][0].to_i if ids[0][0].to_i > maxId
+                end
+              end
+              maxId += 1
+              htd.setValue(k + "\$" + maxId.to_s, value)
+              valid_coproc_keys << key
+            end
+          end
+          
+          valid_coproc_keys.each do |key|
+            arg.delete(key)
+          end
+
+          @admin.modifyTable(table_name.to_java_bytes, htd)
+
+          arg.each_key do |unknown_key|
+            puts("Unknown argument ignored: %s" % [unknown_key])
+          end
+          
+          if wait == true
+            puts "Updating all regions with the new schema..."
+            alter_status(table_name)
+          end
+          next
+        end
+      end
+    end
+
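+    #----------------------------------------------------------------------------------------------
+    # Print cluster status: "detailed" and "simple" are handled below; any other
+    # format falls through to the one-line summary, e.g. (hypothetical): status('simple')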
+    def status(format)
+      status = @admin.getClusterStatus()
+      if format == "detailed"
+        puts("version %s" % [ status.getHBaseVersion() ])
+        # Put regions in transition first because usually empty
+        puts("%d regionsInTransition" % status.getRegionsInTransition().size())
+        for k, v in status.getRegionsInTransition()
+          puts("    %s" % [v])
+        end
+        master_coprocs = java.util.Arrays.toString(@admin.getMasterCoprocessors())
+        if master_coprocs != nil
+          puts("master coprocessors: %s" % master_coprocs)
+        end
+        puts("%d live servers" % [ status.getServersSize() ])
+        for server in status.getServers()
+          puts("    %s:%d %d" % \
+            [ server.getHostname(), server.getPort(), server.getStartcode() ])
+          puts("        %s" % [ status.getLoad(server).toString() ])
+          for name, region in status.getLoad(server).getRegionsLoad()
+            puts("        %s" % [ region.getNameAsString().dump ])
+            puts("            %s" % [ region.toString() ])
+          end
+        end
+        puts("%d dead servers" % [ status.getDeadServers() ])
+        for server in status.getDeadServerNames()
+          puts("    %s" % [ server ])
+        end
+      elsif format == "simple"
+        load = 0
+        regions = 0
+        puts("%d live servers" % [ status.getServersSize() ])
+        for server in status.getServers()
+          puts("    %s:%d %d" % \
+            [ server.getHostname(), server.getPort(), server.getStartcode() ])
+          puts("        %s" % [ status.getLoad(server).toString() ])
+          load += status.getLoad(server).getNumberOfRequests()
+          regions += status.getLoad(server).getNumberOfRegions()
+        end
+        puts("%d dead servers" % [ status.getDeadServers() ])
+        for server in status.getDeadServerNames()
+          puts("    %s" % [ server ])
+        end
+        puts("Aggregate load: %d, regions: %d" % [ load , regions ] )
+      else
+        puts "#{status.getServersSize} servers, #{status.getDeadServers} dead, #{'%.4f' % status.getAverageLoad} average load"
+      end
+    end
+
+    #----------------------------------------------------------------------------------------------
+    #
+    # Helper methods
+    #
+
+    # Does table exist?
+    def exists?(table_name)
+      @admin.tableExists(table_name)
+    end
+    
+    #----------------------------------------------------------------------------------------------
+    # Is the table enabled?
+    def enabled?(table_name)
+      @admin.isTableEnabled(table_name)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Close a region given its encoded region name; returns true if the
+    # region was closed
+    def closeEncodedRegion?(region_name, server)
+      @admin.closeRegionWithEncodedRegionName(region_name, server)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Return a new HColumnDescriptor made of passed args
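+    # Hypothetical examples of the accepted arg shapes:
+    #   hcd('f1', htd)                                                   # name-only spec
+    #   hcd({ NAME => 'f1', VERSIONS => 3, BLOOMFILTER => 'ROW' }, htd)  # full spec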
+    def hcd(arg, htd)
+      # String arg, single parameter constructor
+      return org.apache.hadoop.hbase.HColumnDescriptor.new(arg) if arg.kind_of?(String)
+
+      raise(ArgumentError, "Column family #{arg} must have a name") unless name = arg.delete(NAME)
+
+      family = htd.getFamily(name.to_java_bytes)
+      # create it if it's a new family
+      family ||= org.apache.hadoop.hbase.HColumnDescriptor.new(name.to_java_bytes)
+
+      family.setBlockCacheEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE)
+      family.setScope(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE)
+      family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
+      family.setTimeToLive(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
+      family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
+      family.setEncodeOnDisk(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK)
+      family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
+      family.setMaxVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS)
+      family.setMinVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS)
+      family.setKeepDeletedCells(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS)
+      family.setValue(COMPRESSION_COMPACT, arg.delete(COMPRESSION_COMPACT)) if arg.include?(COMPRESSION_COMPACT)
+      if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER)
+        bloomtype = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER).upcase
+        unless org.apache.hadoop.hbase.regionserver.BloomType.constants.include?(bloomtype)
+          raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.constants.join(" ")) 
+        else 
+          family.setBloomFilterType(org.apache.hadoop.hbase.regionserver.BloomType.valueOf(bloomtype))
+        end
+      end
+      if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION)
+        compression = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION).upcase
+        unless org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression)
+          raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(" "))
+        else
+          family.setCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
+        end
+      end
+
+      set_user_metadata(family, arg.delete(METADATA)) if arg[METADATA]
+      set_descriptor_config(family, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
+
+      arg.each_key do |unknown_key|
+        puts("Unknown argument ignored for column family %s: %s" % [name, unknown_key])
+      end
+      
+      return family
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Enables/disables a region by name
+    def online(region_name, on_off)
+      # Open meta table
+      meta = org.apache.hadoop.hbase.client.HTable.new(
+          org.apache.hadoop.hbase.TableName::META_TABLE_NAME)
+
+      # Read region info
+      # FIXME: fail gracefully if can't find the region
+      region_bytes = region_name.to_java_bytes
+      g = org.apache.hadoop.hbase.client.Get.new(region_bytes)
+      g.addColumn(org.apache.hadoop.hbase.HConstants::CATALOG_FAMILY, org.apache.hadoop.hbase.HConstants::REGIONINFO_QUALIFIER)
+      hri_bytes = meta.get(g).value
+
+      # Change region status
+      hri = org.apache.hadoop.hbase.util.Writables.getWritable(hri_bytes, org.apache.hadoop.hbase.HRegionInfo.new)
+      hri.setOffline(on_off)
+
+      # Write it back
+      put = org.apache.hadoop.hbase.client.Put.new(region_bytes)
+      put.add(org.apache.hadoop.hbase.HConstants::CATALOG_FAMILY, org.apache.hadoop.hbase.HConstants::REGIONINFO_QUALIFIER, org.apache.hadoop.hbase.util.Writables.getBytes(hri))
+      meta.put(put)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Apply user metadata to table/column descriptor
+    def set_user_metadata(descriptor, metadata)
+      raise(ArgumentError, "#{METADATA} must be a Hash type") unless metadata.kind_of?(Hash)
+      for k, v in metadata
+        v = v.to_s unless v.nil?
+        descriptor.setValue(k, v)
+      end
+    end
+    
+    #----------------------------------------------------------------------------------------------
+    # Take a snapshot of specified table
+    def snapshot(table, snapshot_name)
+      @admin.snapshot(snapshot_name.to_java_bytes, table.to_java_bytes)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Restore specified snapshot
+    def restore_snapshot(snapshot_name)
+      @admin.restoreSnapshot(snapshot_name.to_java_bytes)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Create a new table by cloning the snapshot content
+    def clone_snapshot(snapshot_name, table)
+      @admin.cloneSnapshot(snapshot_name.to_java_bytes, table.to_java_bytes)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Rename specified snapshot
+    def rename_snapshot(old_snapshot_name, new_snapshot_name)
+      @admin.renameSnapshot(old_snapshot_name.to_java_bytes, new_snapshot_name.to_java_bytes)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Delete specified snapshot
+    def delete_snapshot(snapshot_name)
+      @admin.deleteSnapshot(snapshot_name.to_java_bytes)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Returns a list of snapshots
+    def list_snapshot
+      @admin.listSnapshots
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Apply config specific to a table/column to its descriptor
+    def set_descriptor_config(descriptor, config)
+      raise(ArgumentError, "#{CONFIGURATION} must be a Hash type") unless config.kind_of?(Hash)
+      for k, v in config
+        v = v.to_s unless v.nil?
+        descriptor.setConfiguration(k, v)
+      end
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Returns namespace's structure description
+    def describe_namespace(namespace_name)
+      namespace = @admin.getNamespaceDescriptor(namespace_name)
+
+      unless namespace.nil?
+        return namespace.to_s
+      end
+
+      raise(ArgumentError, "Failed to find namespace named #{namespace_name}")
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Returns a list of namespaces in hbase
+    def list_namespace
+      @admin.listNamespaceDescriptors.map { |ns| ns.getName }
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Returns a list of tables in namespace
+    def list_namespace_tables(namespace_name)
+      unless namespace_name.nil?
+        return @admin.listTableNamesByNamespace(namespace_name).map { |t| t.getQualifierAsString() }
+      end
+
+      raise(ArgumentError, "Failed to find namespace named #{namespace_name}")
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Creates a namespace
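+    # Hypothetical examples; extra args are Hashes of namespace configuration:
+    #   create_namespace('ns1')
+    #   create_namespace('ns1', { 'PROPERTY_NAME' => 'PROPERTY_VALUE' })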
+    def create_namespace(namespace_name, *args)
+      # Fail if namespace name is not a string
+      raise(ArgumentError, "Namespace name must be of type String") unless namespace_name.kind_of?(String)
+
+      # Flatten params array
+      args = args.flatten.compact
+
+      # Build the namespace descriptor
+      nsb = org.apache.hadoop.hbase.NamespaceDescriptor::create(namespace_name)
+      args.each do |arg|
+        unless arg.kind_of?(Hash)
+          raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type")
+        end
+        for k,v in arg
+          v = v.to_s unless v.nil?
+          nsb.addConfiguration(k, v)
+        end
+      end
+      @admin.createNamespace(nsb.build())
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Modify a namespace
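+    # Hypothetical examples of the accepted arg shapes:
+    #   alter_namespace('ns1', { METHOD => 'set', 'PROPERTY_NAME' => 'PROPERTY_VALUE' })
+    #   alter_namespace('ns1', { METHOD => 'unset', NAME => 'PROPERTY_NAME' })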
+    def alter_namespace(namespace_name, *args)
+      # Fail if namespace name is not a string
+      raise(ArgumentError, "Namespace name must be of type String") unless namespace_name.kind_of?(String)
+
+      nsd = @admin.getNamespaceDescriptor(namespace_name)
+
+      unless nsd
+        raise(ArgumentError, "Namespace does not exist")
+      end
+      nsb = org.apache.hadoop.hbase.NamespaceDescriptor::create(nsd)
+
+      # Flatten params array
+      args = args.flatten.compact
+
+      # Apply each alteration spec
+      args.each do |arg|
+        unless arg.kind_of?(Hash)
+          raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash type")
+        end
+        method = arg[METHOD]
+        if method == "unset"
+          nsb.removeConfiguration(arg[NAME])
+        elsif method == "set"
+          arg.delete(METHOD)
+          for k,v in arg
+            v = v.to_s unless v.nil?
+
+            nsb.addConfiguration(k, v)
+          end
+        else
+          raise(ArgumentError, "Unknown method #{method}")
+        end
+      end
+      @admin.modifyNamespace(nsb.build())
+    end
+
+
+    #----------------------------------------------------------------------------------------------
+    # Drops a namespace
+    def drop_namespace(namespace_name)
+      @admin.deleteNamespace(namespace_name)
+    end
+
+  end
+end

Added: hbase/trunk/hbase-shell/src/main/ruby/hbase/hbase.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-shell/src/main/ruby/hbase/hbase.rb?rev=1525641&view=auto
==============================================================================
--- hbase/trunk/hbase-shell/src/main/ruby/hbase/hbase.rb (added)
+++ hbase/trunk/hbase-shell/src/main/ruby/hbase/hbase.rb Mon Sep 23 16:40:51 2013
@@ -0,0 +1,59 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include Java
+
+require 'hbase/admin'
+require 'hbase/table'
+require 'hbase/security'
+
+module Hbase
+  class Hbase
+    attr_accessor :configuration
+
+    def initialize(config = nil)
+      # Create configuration
+      if config
+        self.configuration = config
+      else
+        self.configuration = org.apache.hadoop.hbase.HBaseConfiguration.create
+        # Lower retries in hbase and ipc; a human at the shell doesn't want to wait on many retries.
+        configuration.setInt("hbase.client.retries.number", 7)
+        configuration.setInt("ipc.client.connect.max.retries", 3)
+      end
+    end
+
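+    # Hypothetical usage from the shell bootstrap (formatter is assumed to be
+    # a shell formatter instance):
+    #   hbase = Hbase::Hbase.new
+    #   admin = hbase.admin(formatter)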
+    def admin(formatter)
+      ::Hbase::Admin.new(configuration, formatter)
+    end
+
+    # Create new one each time
+    def table(table, shell)
+      ::Hbase::Table.new(configuration, table, shell)
+    end
+
+    def replication_admin(formatter)
+      ::Hbase::RepAdmin.new(configuration, formatter)
+    end
+
+    def security_admin(formatter)
+      ::Hbase::SecurityAdmin.new(configuration, formatter)
+    end
+  end
+end

Added: hbase/trunk/hbase-shell/src/main/ruby/hbase/replication_admin.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-shell/src/main/ruby/hbase/replication_admin.rb?rev=1525641&view=auto
==============================================================================
--- hbase/trunk/hbase-shell/src/main/ruby/hbase/replication_admin.rb (added)
+++ hbase/trunk/hbase-shell/src/main/ruby/hbase/replication_admin.rb Mon Sep 23 16:40:51 2013
@@ -0,0 +1,76 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include Java
+
+# Wrapper for org.apache.hadoop.hbase.client.replication.ReplicationAdmin
+
+module Hbase
+  class RepAdmin
+    include HBaseConstants
+
+    def initialize(configuration, formatter)
+      @replication_admin = org.apache.hadoop.hbase.client.replication.ReplicationAdmin.new(configuration)
+      @formatter = formatter
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Add a new peer cluster to replicate to
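+    # The cluster key is assumed to take the usual
+    # "quorum:client-port:znode" form, e.g. (hypothetical):
+    #   add_peer('1', "zk1.example.com,zk2.example.com:2181:/hbase")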
+    def add_peer(id, cluster_key)
+      @replication_admin.addPeer(id, cluster_key)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Remove a peer cluster; stops replication to it
+    def remove_peer(id)
+      @replication_admin.removePeer(id)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Show replicated tables/column families, and their ReplicationType
+    def list_replicated_tables
+      @replication_admin.listReplicated()
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # List all peer clusters
+    def list_peers
+      @replication_admin.listPeers
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Get peer cluster state
+    def get_peer_state(id)
+      @replication_admin.getPeerState(id) ? "ENABLED" : "DISABLED"
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Restart the replication stream to the specified peer
+    def enable_peer(id)
+      @replication_admin.enablePeer(id)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Stop the replication stream to the specified peer
+    def disable_peer(id)
+      @replication_admin.disablePeer(id)
+    end
+  end
+end

Added: hbase/trunk/hbase-shell/src/main/ruby/hbase/security.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-shell/src/main/ruby/hbase/security.rb?rev=1525641&view=auto
==============================================================================
--- hbase/trunk/hbase-shell/src/main/ruby/hbase/security.rb (added)
+++ hbase/trunk/hbase-shell/src/main/ruby/hbase/security.rb Mon Sep 23 16:40:51 2013
@@ -0,0 +1,233 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include Java
+
+# Wrapper for security (grant/revoke) operations, built on org.apache.hadoop.hbase.client.HBaseAdmin
+
+module Hbase
+  class SecurityAdmin
+    include HBaseConstants
+
+    def initialize(configuration, formatter)
+      @config = configuration
+      @admin = org.apache.hadoop.hbase.client.HBaseAdmin.new(configuration)
+      @formatter = formatter
+    end
+
+    #----------------------------------------------------------------------------------------------
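+    # Hypothetical examples (permissions is a string of action codes such as
+    # "RWXCA"; a leading '@' marks a namespace rather than a table, per
+    # isNamespace? below):
+    #   grant('bob', 'RW', 't1')              # whole table
+    #   grant('bob', 'RW', '@ns1')            # namespace
+    #   grant('bob', 'R',  't1', 'f1', 'q1')  # column qualifier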
+    def grant(user, permissions, table_name=nil, family=nil, qualifier=nil)
+      security_available?
+
+      # TODO: need to validate user name
+
+      begin
+        meta_table = org.apache.hadoop.hbase.client.HTable.new(@config,
+          org.apache.hadoop.hbase.security.access.AccessControlLists::ACL_TABLE_NAME)
+        service = meta_table.coprocessorService(
+          org.apache.hadoop.hbase.HConstants::EMPTY_START_ROW)
+
+        protocol = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos::
+          AccessControlService.newBlockingStub(service)
+        # Verify that the specified permission is valid before building it;
+        # calling to_java_bytes on a nil permissions string would fail first otherwise.
+        if (permissions == nil || permissions.length == 0)
+          raise(ArgumentError, "Invalid permission: no actions associated with user")
+        end
+
+        perm = org.apache.hadoop.hbase.security.access.Permission.new(
+          permissions.to_java_bytes)
+
+        if (table_name != nil)
+          tablebytes = table_name.to_java_bytes
+          # Check if the table name passed is actually a namespace
+          if (isNamespace?(table_name))
+            # Namespace should exist first.
+            namespace_name = table_name[1...table_name.length]
+            raise(ArgumentError, "Can't find a namespace: #{namespace_name}") unless namespace_exists?(namespace_name)
+
+            # We pass the namespace name along with "@" so that we can differentiate a namespace from a table.
+            # Invoke cp endpoint to perform access controls
+            org.apache.hadoop.hbase.protobuf.ProtobufUtil.grant(
+              protocol, user, tablebytes, perm.getActions())
+          else
+            # Table should exist
+            raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
+
+            tableName = org.apache.hadoop.hbase.TableName.valueOf(table_name.to_java_bytes)
+            htd = @admin.getTableDescriptor(tablebytes)
+
+            if (family != nil)
+              raise(ArgumentError, "Can't find a family: #{family}") unless htd.hasFamily(family.to_java_bytes)
+            end
+
+            fambytes = family.to_java_bytes if (family != nil)
+            qualbytes = qualifier.to_java_bytes if (qualifier != nil)
+
+            # Invoke cp endpoint to perform access controls
+            org.apache.hadoop.hbase.protobuf.ProtobufUtil.grant(
+              protocol, user, tableName, fambytes,
+              qualbytes, perm.getActions())
+          end
+        else
+          # Invoke cp endpoint to perform access controls
+          org.apache.hadoop.hbase.protobuf.ProtobufUtil.grant(
+            protocol, user, perm.getActions())
+        end
+
+      ensure
+        meta_table.close()
+      end
+    end
+
+    #----------------------------------------------------------------------------------------------
+    def revoke(user, table_name=nil, family=nil, qualifier=nil)
+      security_available?
+
+      # TODO: need to validate user name
+
+      begin
+        meta_table = org.apache.hadoop.hbase.client.HTable.new(@config,
+          org.apache.hadoop.hbase.security.access.AccessControlLists::ACL_TABLE_NAME)
+        service = meta_table.coprocessorService(
+          org.apache.hadoop.hbase.HConstants::EMPTY_START_ROW)
+
+        protocol = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos::
+          AccessControlService.newBlockingStub(service)
+
+        if (table_name != nil)
+          # Check if the table name passed is actually a namespace
+          if (isNamespace?(table_name))
+            # Namespace should exist first.
+            namespace_name = table_name[1...table_name.length]
+            raise(ArgumentError, "Can't find a namespace: #{namespace_name}") unless namespace_exists?(namespace_name)
+
+            # We pass the namespace name along with "@" so that we can differentiate a namespace from a table.
+            tablebytes = table_name.to_java_bytes
+            # Invoke cp endpoint to perform access controls
+            org.apache.hadoop.hbase.protobuf.ProtobufUtil.revoke(
+              protocol, user, tablebytes)
+          else
+            # Table should exist
+            raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
+
+            tableName = org.apache.hadoop.hbase.TableName.valueOf(table_name.to_java_bytes)
+            htd = @admin.getTableDescriptor(tableName)
+
+            if (family != nil)
+              raise(ArgumentError, "Can't find a family: #{family}") unless htd.hasFamily(family.to_java_bytes)
+            end
+
+            fambytes = family.to_java_bytes if (family != nil)
+            qualbytes = qualifier.to_java_bytes if (qualifier != nil)
+
+            # Invoke cp endpoint to perform access controls
+            org.apache.hadoop.hbase.protobuf.ProtobufUtil.revoke(
+              protocol, user, tableName, fambytes, qualbytes)
+          end
+        else
+          # Invoke cp endpoint to perform access controls
+          perm = org.apache.hadoop.hbase.security.access.Permission.new(''.to_java_bytes)
+          org.apache.hadoop.hbase.protobuf.ProtobufUtil.revoke(protocol, user, perm.getActions())
+        end
+      ensure
+        meta_table.close()
+      end
+    end
+
+    #----------------------------------------------------------------------------------------------
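+    # Lists user permissions: pass a table name, a '@namespace', or nothing for
+    # the global scope. Yields "user, table,family,qualifier: actions" rows when
+    # a block is given, otherwise returns a Hash.
+    # e.g. (hypothetical): user_permission('t1')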
+    def user_permission(table_name=nil)
+      security_available?
+
+      begin
+        meta_table = org.apache.hadoop.hbase.client.HTable.new(@config,
+          org.apache.hadoop.hbase.security.access.AccessControlLists::ACL_TABLE_NAME)
+        service = meta_table.coprocessorService(
+          org.apache.hadoop.hbase.HConstants::EMPTY_START_ROW)
+
+        protocol = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos::
+          AccessControlService.newBlockingStub(service)
+
+        if (table_name != nil)
+          # Check if a namespace is passed
+          if (isNamespace?(table_name))
+            # Namespace should exist first.
+            namespace_name = table_name[1...table_name.length]
+            raise(ArgumentError, "Can't find a namespace: #{namespace_name}") unless namespace_exists?(namespace_name)
+            # invoke cp endpoint to perform access controls
+            perms = org.apache.hadoop.hbase.protobuf.ProtobufUtil.getUserPermissions(
+              protocol, table_name.to_java_bytes)
+          else
+             raise(ArgumentError, "Can't find table: #{table_name}") unless exists?(table_name)
+             perms = org.apache.hadoop.hbase.protobuf.ProtobufUtil.getUserPermissions(
+               protocol, org.apache.hadoop.hbase.TableName.valueOf(table_name))
+          end
+        else
+          perms = org.apache.hadoop.hbase.protobuf.ProtobufUtil.getUserPermissions(protocol)
+        end
+      ensure
+        meta_table.close()
+      end
+
+      res = {}
+      count = 0
+      perms.each do |value|
+        user_name = String.from_java_bytes(value.getUser)
+        table = (value.getTable != nil) ? value.getTable.toString() : ''
+        family = (value.getFamily != nil) ? org.apache.hadoop.hbase.util.Bytes::toStringBinary(value.getFamily) : ''
+        qualifier = (value.getQualifier != nil) ? org.apache.hadoop.hbase.util.Bytes::toStringBinary(value.getQualifier) : ''
+
+        action = org.apache.hadoop.hbase.security.access.Permission.new value.getActions
+
+        if block_given?
+          yield(user_name, "#{table},#{family},#{qualifier}: #{action.to_s}")
+        else
+          res[user_name] ||= {}
+          res[user_name][family + ":" + qualifier] = action
+        end
+        count += 1
+      end
+
+      return ((block_given?) ? count : res)
+    end
+
+    # Does table exist?
+    def exists?(table_name)
+      @admin.tableExists(table_name)
+    end
+
+    def isNamespace?(table_name)
+      table_name.start_with?('@')
+    end
+
+    # Does the namespace exist?
+    def namespace_exists?(namespace_name)
+      namespaceDesc = @admin.getNamespaceDescriptor(namespace_name)
+      return namespaceDesc != nil
+    end
+
+    # Make sure that security tables are available
+    def security_available?()
+      raise(ArgumentError, "DISABLED: Security features are not available") \
+        unless exists?(org.apache.hadoop.hbase.security.access.AccessControlLists::ACL_TABLE_NAME)
+    end
+  end
+end


