hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From j...@apache.org
Subject svn commit: r540737 - in /lucene/hadoop/trunk/src/contrib/hbase: ./ src/java/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/
Date Tue, 22 May 2007 20:24:23 GMT
Author: jimk
Date: Tue May 22 13:24:22 2007
New Revision: 540737

URL: http://svn.apache.org/viewvc?view=rev&rev=540737
Log:
HADOOP-1418. HBase miscellaneous: unit test for HClient, client to do 'Performance Evaluation',
etc.

Added:
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java
Removed:
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLocking.java
Modified:
    lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java

Modified: lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt?view=diff&rev=540737&r1=540736&r2=540737
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt Tue May 22 13:24:22 2007
@@ -10,3 +10,5 @@
      java.util.concurrent.locks.ReentrantLock (Michael Stack)
   5. HADOOP-1403. HBase reliability - make master and region server more fault
      tolerant.
+  6. HADOOP-1418. HBase miscellaneous: unit test for HClient, client to do
+     'Performance Evaluation', etc.

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java?view=diff&rev=540737&r1=540736&r2=540737
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java Tue
May 22 13:24:22 2007
@@ -32,9 +32,9 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
 
-/*******************************************************************************
+/**
  * HClient manages a connection to a single HRegionServer.
- ******************************************************************************/
+ */
 public class HClient implements HConstants {
   private final Log LOG = LogFactory.getLog(this.getClass().getName());
   
@@ -44,7 +44,6 @@
   
   private static final Text EMPTY_START_ROW = new Text();
   
-  private boolean closed;
   private long clientTimeout;
   private int numTimeouts;
   private int numRetries;
@@ -82,7 +81,6 @@
 
   /** Creates a new HClient */
   public HClient(Configuration conf) {
-    this.closed = false;
     this.conf = conf;
 
     this.clientTimeout = conf.getLong("hbase.client.timeout.length", 30 * 1000);
@@ -102,15 +100,6 @@
   }
   
   /**
-   * Check client is open.
-   */
-  private void checkOpen() {
-    if (this.closed) {
-      throw new IllegalStateException("client is not open");
-    }
-  }
-  
-  /**
    * Find the address of the master and connect to it
    */
   private void checkMaster() {
@@ -119,7 +108,8 @@
     }
     for(int tries = 0; this.master == null && tries < numRetries; tries++) {
       HServerAddress masterLocation =
-        new HServerAddress(this.conf.get(MASTER_ADDRESS));
+        new HServerAddress(this.conf.get(MASTER_ADDRESS,
+          DEFAULT_MASTER_ADDRESS));
       
       try {
         HMasterInterface tryMaster =
@@ -159,21 +149,18 @@
       throw new IllegalArgumentException(desc.getName().toString()
           + " is a reserved table name");
     }
-    checkOpen();
     checkMaster();
     locateRootRegion();
     this.master.createTable(desc);
   }
 
   public synchronized void deleteTable(Text tableName) throws IOException {
-    checkOpen();
     checkMaster();
     locateRootRegion();
     this.master.deleteTable(tableName);
   }
   
   public synchronized void shutdown() throws IOException {
-    checkOpen();
     checkMaster();
     this.master.shutdown();
   }
@@ -182,7 +169,6 @@
     if(tableName == null || tableName.getLength() == 0) {
       throw new IllegalArgumentException("table name cannot be null or zero length");
     }
-    checkOpen();
     this.tableServers = tablesToServers.get(tableName);
     if(this.tableServers == null ) {            // We don't know where the table is
       findTableInMeta(tableName);               // Load the information from meta
@@ -322,7 +308,6 @@
     }
     
     if (rootRegionLocation == null) {
-      this.closed = true;
       throw new IOException("unable to locate root region server");
     }
   }
@@ -455,14 +440,6 @@
     return server;
   }
 
-  /** Close the connection */
-  public synchronized void close() throws IOException {
-    if(! this.closed) {
-      RPC.stopClient();
-      this.closed = true;
-    }
-  }
-
   /**
    * List all the userspace tables.  In other words, scan the META table.
    *
@@ -927,7 +904,7 @@
         " createTable webcrawl contents: anchors: 10");
   }
   
-  int doCommandLine(final String args[]) {
+  public int doCommandLine(final String args[]) {
     // Process command-line args. TODO: Better cmd-line processing
     // (but hopefully something not as painful as cli options).    
     int errCode = -1;

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java?view=auto&rev=540737
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java
(added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java
Tue May 22 13:24:22 2007
@@ -0,0 +1,414 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
+
+/**
+ * Client used for evaluating HBase performance and scalability.  Steps through
+ * one of a set of hardcoded tests or 'experiments' (e.g. a random reads test,
+ * a random writes test, etc.). Pass on the command-line which test to run,
+ * how many clients are participating in this experiment, and the row range
+ * this client instance is to operate on. Run
+ * <code>java EvaluationClient --help</code> to obtain usage.
+ * 
+ * <p>This class implements the client used in the
+ * <i>Performance Evaluation</i> benchmarks described in Section 7 of the <a
+ * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
+ * paper on pages 8-10.
+ */
+public class EvaluationClient implements HConstants {
+  private final Logger LOG = Logger.getLogger(this.getClass().getName());
+  
+  private static final int ROW_LENGTH = 1024;
+  
+
+  private static final int ONE_HUNDRED_MB = 1024 * 1024 * 1 /*100 RESTORE*/;
+  private static final int ROWS_PER_100_MB = ONE_HUNDRED_MB / ROW_LENGTH;
+  
+  private static final int ONE_GB = ONE_HUNDRED_MB * 10;
+  private static final int ROWS_PER_GB = ONE_GB / ROW_LENGTH;
+  
+  private static final Text COLUMN_NAME = new Text(COLUMN_FAMILY + "data");
+  
+  private static HTableDescriptor tableDescriptor;
+  
+  static {
+    tableDescriptor = new HTableDescriptor("TestTable", 1);
+    tableDescriptor.addFamily(COLUMN_FAMILY);
+  }
+  
+  private static enum Test {RANDOM_READ,
+    RANDOM_READ_MEM,
+    RANDOM_WRITE,
+    SEQUENTIAL_READ,
+    SEQUENTIAL_WRITE,
+    SCAN};
+  
+  private Random rand;
+  private Configuration conf;
+  private HClient client;
+  private boolean miniCluster;
+  private int N;                        // Number of clients and HRegionServers
+  private int range;                    // Row range for this client
+  private int R;                        // Total number of rows
+  
+  private EvaluationClient() {
+    this.rand = new Random(System.currentTimeMillis());
+    this.conf = new HBaseConfiguration();
+    this.miniCluster = false;
+    this.client = new HClient(conf);
+    this.N = 1;                                 // Default is one client
+    this.range = 0;                             // Range for this client
+    this.R = ROWS_PER_GB;                  // Default for one client
+  }
+  
+  private byte[] generateValue() {
+    StringBuilder val = new StringBuilder();
+    while(val.length() < ROW_LENGTH) {
+      val.append(Long.toString(rand.nextLong()));
+    }
+    return val.toString().getBytes();
+  }
+  
+  private long randomRead(int startRow, int nRows) throws Exception {
+    LOG.info("startRow: " + startRow + ", nRows: " + nRows);
+    client.openTable(tableDescriptor.getName());
+    
+    long startTime = System.currentTimeMillis();
+    int lastRow = startRow + nRows;
+    for(int i = startRow; i < lastRow; i++) {
+      client.get(new Text(Integer.toString(rand.nextInt() % R)), COLUMN_NAME);
+    }
+    return System.currentTimeMillis() - startTime;
+  }
+
+  private long randomWrite(int startRow, int nRows) throws Exception {
+    LOG.info("startRow: " + startRow + ", nRows: " + nRows);
+    client.openTable(tableDescriptor.getName());
+    
+    long startTime = System.currentTimeMillis();
+    int lastRow = startRow + nRows;
+    for(int i = startRow; i < lastRow; i++) {
+      long lockid = client.startUpdate(new Text(Integer.toString(rand.nextInt() % R)));
+      client.put(lockid, COLUMN_NAME, generateValue());
+      client.commit(lockid);
+    }
+    return System.currentTimeMillis() - startTime;
+  }
+  
+  private long scan(int startRow, int nRows) throws Exception {
+    LOG.info("startRow: " + startRow + ", nRows: " + nRows);
+    client.openTable(tableDescriptor.getName());
+    
+    HScannerInterface scanner = client.obtainScanner(new Text[] { COLUMN_NAME },
+        new Text(Integer.toString(startRow)));
+    
+    long startTime = System.currentTimeMillis();
+    
+    try {
+      int lastRow = startRow + nRows;
+      HStoreKey key = new HStoreKey();
+      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+      for(int i = startRow; i < lastRow; i++) {
+        scanner.next(key, results);
+        results.clear();
+      }
+      
+    } finally {
+      scanner.close();
+    }
+    
+    return System.currentTimeMillis() - startTime;
+  }
+
+  private long sequentialRead(int startRow, int nRows) throws Exception {
+    LOG.info("startRow: " + startRow + ", nRows: " + nRows);
+    client.openTable(tableDescriptor.getName());
+    
+    long startTime = System.currentTimeMillis();
+    int lastRow = startRow + nRows;
+    for(int i = startRow; i < lastRow; i++) {
+      client.get(new Text(Integer.toString(i)), COLUMN_NAME);
+    }
+    return System.currentTimeMillis() - startTime;
+  }
+
+  private long sequentialWrite(int startRow, int nRows) throws Exception {
+    LOG.info("startRow: " + startRow + ", nRows: " + nRows);
+    client.openTable(tableDescriptor.getName());
+    long startTime = System.currentTimeMillis();
+    int lastRow = startRow + nRows;
+    for(int i = startRow; i < lastRow; i++) {
+      long lockid = client.startUpdate(new Text(Integer.toString(i)));
+      client.put(lockid, COLUMN_NAME, generateValue());
+      client.commit(lockid);
+    }
+    return System.currentTimeMillis() - startTime;
+  }
+  
+  private void runNIsOne(Test test) throws IOException {
+    try {
+      client.createTable(tableDescriptor);
+
+      long totalElapsedTime = 0;
+      int nRows = R / (10 * N);
+
+      if (test == Test.RANDOM_READ || test == Test.RANDOM_READ_MEM ||
+          test == Test.SCAN || test == Test.SEQUENTIAL_READ ||
+          test == Test.SEQUENTIAL_WRITE) {
+
+        for(int range = 0; range < 10; range++) {
+          long elapsedTime = sequentialWrite(range * nRows, nRows);
+          if (test == Test.SEQUENTIAL_WRITE) {
+            totalElapsedTime += elapsedTime;
+          }
+        }
+      }
+
+      switch(test) {
+      
+      case RANDOM_READ:
+        for(int range = 0 ; range < 10; range++) {
+          long elapsedTime = randomRead(range * nRows, nRows);
+          totalElapsedTime += elapsedTime;
+        }
+        System.out.print("Random read of " + R + " rows completed in: ");
+        break;
+
+      case RANDOM_READ_MEM:
+        throw new UnsupportedOperationException("Not yet implemented");
+
+      case RANDOM_WRITE:
+        for(int range = 0 ; range < 10; range++) {
+          long elapsedTime = randomWrite(range * nRows, nRows);
+          totalElapsedTime += elapsedTime;
+        }
+        System.out.print("Random write of " + R + " rows completed in: ");
+        break;
+
+      case SCAN:
+        for(int range = 0 ; range < 10; range++) {
+          long elapsedTime = scan(range * nRows, nRows);
+          totalElapsedTime += elapsedTime;
+        }
+        System.out.print("Scan of " + R + " rows completed in: ");
+        break;
+
+      case SEQUENTIAL_READ:
+        for(int range = 0 ; range < 10; range++) {
+          long elapsedTime = sequentialRead(range * nRows, nRows);
+          totalElapsedTime += elapsedTime;
+        }
+        System.out.print("Sequential read of " + R + " rows completed in: ");
+        break;
+
+      case SEQUENTIAL_WRITE:                      // We already did it!
+        System.out.print("Sequential write of " + R + " rows completed in: ");
+        break;
+
+      default:
+        throw new IllegalArgumentException("Invalid command value: " + test);
+      }
+      System.out.println((totalElapsedTime / 1000.0));
+
+    } catch(Exception e) {
+      e.printStackTrace();
+      
+    } finally {
+      this.client.deleteTable(tableDescriptor.getName());
+    }
+  }
+  
+  private void runOneTest(Test cmd) {
+  }
+  
+  private void runTest(Test test) throws IOException {
+    if (test  == Test.RANDOM_READ_MEM) {
+      // For this one test, so all fits in memory, make R smaller (See
+      // pg. 9 of BigTable paper).
+      R = ROWS_PER_100_MB * N;
+    }
+    
+    MiniHBaseCluster hbaseMiniCluster = null;
+    if (this.miniCluster) {
+      hbaseMiniCluster = new MiniHBaseCluster(this.conf, N);
+    }
+    
+    try {
+      if (N == 1) {
+        // If there is only one client and one HRegionServer, we assume nothing
+        // has been set up at all.
+        runNIsOne(test);
+      } else {
+        // Else, run as one of N participating clients (runOneTest is
+        // currently an empty stub — multi-client mode not yet implemented).
+        runOneTest(test);
+      }
+    } finally {
+      if(this.miniCluster && hbaseMiniCluster != null) {
+        hbaseMiniCluster.shutdown();
+      }
+    }
+  }
+  
+  private void printUsage() {
+    printUsage(null);
+  }
+  
+  private void printUsage(final String message) {
+    if (message != null && message.length() > 0) {
+      System.err.println(message);
+    }
+    System.err.println("Usage: java " + this.getClass().getName() +
+        "[--master=host:port] [--miniCluster] <command> <args>");
+    System.err.println();
+    System.err.println("Options:");
+    System.err.println(" master          Specify host and port of HBase " +
+        "cluster master. If not present,");
+    System.err.println("                 address is read from configuration");
+    System.err.println(" miniCluster     Run the test on an HBaseMiniCluster");
+    System.err.println();
+    System.err.println("Commands:");
+    System.err.println(" randomRead      Run random read test");
+    System.err.println(" randomReadMem   Run random read test where table " +
+      "is in memory");
+    System.err.println(" randomWrite     Run random write test");
+    System.err.println(" sequentialRead  Run sequential read test");
+    System.err.println(" sequentialWrite Run sequential write test");
+    System.err.println(" scan            Run scan test");
+    System.err.println();
+    System.err.println("Args:");
+    System.err.println(" nclients        Integer. Required. Total number of " +
+      "clients (and HRegionServers)");
+    System.err.println("                 running: 1 <= value <= 500");
+    System.err.println(" range           Integer. Required. 0 <= value <= " +
+      "(nclients * 10) - 1");
+  }
+
+  private void getArgs(final int start, final String[] args) {
+    if(start + 1 > args.length) {
+      throw new IllegalArgumentException("must supply the number of clients " +
+        "and the range for this client.");
+    }
+    
+    N = Integer.parseInt(args[start]);
+    if (N > 500 || N < 1) {
+      throw new IllegalArgumentException("Number of clients must be between " +
+        "1 and 500.");
+    }
+   
+    R = ROWS_PER_GB * N;
+    
+    range = Integer.parseInt(args[start + 1]);
+    if (range < 0 || range > (N * 10) - 1) {
+      throw new IllegalArgumentException("Range must be between 0 and "
+          + ((N * 10) - 1));
+    }
+  }
+  
+  private int doCommandLine(final String[] args) {
+    // Process command-line args. TODO: Better cmd-line processing
+    // (but hopefully something not as painful as cli options).    
+    int errCode = -1;
+    if (args.length < 1) {
+      printUsage();
+      return errCode;
+    }
+    
+    try {
+      for (int i = 0; i < args.length; i++) {
+        String cmd = args[i];
+        if (cmd.equals("-h") || cmd.startsWith("--h")) {
+          printUsage();
+          errCode = 0;
+          break;
+        }
+        
+        final String masterArgKey = "--master=";
+        if (cmd.startsWith(masterArgKey)) {
+          this.conf.set(MASTER_ADDRESS, cmd.substring(masterArgKey.length()));
+          continue;
+        }
+       
+        final String miniClusterArgKey = "--miniCluster";
+        if (cmd.startsWith(miniClusterArgKey)) {
+          this.miniCluster = true;
+          continue;
+        }
+       
+        if (cmd.equals("randomRead")) {
+          getArgs(i + 1, args);
+          runTest(Test.RANDOM_READ);
+          errCode = 0;
+          break;
+        }
+        
+        if (cmd.equals("randomReadMem")) {
+          getArgs(i + 1, args);
+          runTest(Test.RANDOM_READ_MEM);
+          errCode = 0;
+          break;
+        }
+        
+        if (cmd.equals("randomWrite")) {
+          getArgs(i + 1, args);
+          runTest(Test.RANDOM_WRITE);
+          errCode = 0;
+          break;
+        }
+        
+        if (cmd.equals("sequentialRead")) {
+          getArgs(i + 1, args);
+          runTest(Test.SEQUENTIAL_READ);
+          errCode = 0;
+          break;
+        }
+        
+        if (cmd.equals("sequentialWrite")) {
+          getArgs(i + 1, args);
+          runTest(Test.SEQUENTIAL_WRITE);
+          errCode = 0;
+          break;
+        }
+        
+        if (cmd.equals("scan")) {
+          getArgs(i + 1, args);
+          runTest(Test.SCAN);
+          errCode = 0;
+          break;
+        }
+        
+        printUsage();
+        break;
+      }
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+    
+    return errCode;
+  }
+  
+  public static void main(final String[] args) {
+    StaticTestEnvironment.initialize();
+    System.exit(new EvaluationClient().doCommandLine(args));
+  }
+}
\ No newline at end of file

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java?view=diff&rev=540737&r1=540736&r2=540737
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
Tue May 22 13:24:22 2007
@@ -24,11 +24,11 @@
   final boolean miniHdfs;
   
   protected HBaseClusterTestCase() {
-    this(false);
+    this(true);
   }
 
   protected HBaseClusterTestCase(String name) {
-    this(name, false);
+    this(name, true);
   }
   
   protected HBaseClusterTestCase(final boolean miniHdfs) {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java?view=diff&rev=540737&r1=540736&r2=540737
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
Tue May 22 13:24:22 2007
@@ -58,9 +58,6 @@
 
   public void tearDown() throws Exception {
     super.tearDown();
-    if(client != null) {
-      client.close();
-    }
   }
 
   private static final int FIRST_ROW = 1;

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java?view=auto&rev=540737
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java
(added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java
Tue May 22 13:24:22 2007
@@ -0,0 +1,46 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * Test HClient.
+ * This class can only run one unit test while hadoop-1299 is outstanding.
+ */
+public class TestHClient extends HBaseClusterTestCase {
+  private Log LOG = LogFactory.getLog(this.getClass().getName());
+  private HClient client;
+
+  public void setUp() throws Exception {
+    super.setUp();
+    this.client = new HClient(this.conf);
+  }
+  
+  public void testCommandline() throws Exception {
+    final String m =  "--master=" + this.conf.get(HConstants.MASTER_ADDRESS);
+    LOG.info("Creating table");
+    // Assert each of below returns 0: i.e. success.
+    assertEquals("create table", 0,
+      this.client.doCommandLine(
+        new String [] {m, "createTable", getName(), "family:", "1"}));
+    assertEquals("list tables", 0,
+      this.client.doCommandLine(new String [] {m, "listTables"}));
+    assertEquals("delete table", 0,
+      this.client.doCommandLine(new String [] {m, "deleteTable", getName()}));
+  }
+}
\ No newline at end of file



Mime
View raw message