hbase-commits mailing list archives

From: raw...@apache.org
Subject: svn commit: r782178 [12/16] - in /hadoop/hbase/trunk: bin/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/client/tableindexed/ src/java/org/apache/hadoop/hbase/client/transactional/ src/java/...
Date: Sat, 06 Jun 2009 01:26:27 GMT
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java Sat Jun  6 01:26:21 2009
@@ -27,7 +27,7 @@
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -62,9 +62,10 @@
           k[0] = b1;
           k[1] = b2;
           k[2] = b3;
-          BatchUpdate update = new BatchUpdate(k);
-          update.put(columnName, k);
-          table.commit(update);
+          Put put = new Put(k);
+          byte [][] famAndQf = KeyValue.parseColumn(columnName);
+          put.add(famAndQf[0], famAndQf[1], k);
+          table.put(put);
         }
       }
     }
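
The hunk above replaces the removed BatchUpdate write path with the new Put API: KeyValue.parseColumn splits an old-style "family:qualifier" column name and the value is written through HTable.put. A minimal stand-alone sketch of that write path, assuming an already-open HTable; the class and method names here are illustrative, not part of this commit:

import java.io.IOException;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;

public class PutSketch {
  /** Writes value at row, where column is the old "family:qualifier" form. */
  static void putOldStyleColumn(HTable table, byte [] row, byte [] column,
      byte [] value) throws IOException {
    // parseColumn returns { family, qualifier } for a "family:qualifier" name.
    byte [][] famAndQf = KeyValue.parseColumn(column);
    Put put = new Put(row);
    put.add(famAndQf[0], famAndQf[1], value);
    table.put(put);
  }
}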

Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestGet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestGet.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestGet.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestGet.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,94 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Test gets
+ */
+public class TestGet extends HBaseClusterTestCase {
+  
+  private static final byte [] FAMILY = Bytes.toBytes("family");
+  
+  private static final byte [] ROW = Bytes.toBytes("row");
+  
+  private static final byte [] QUALIFIER = Bytes.toBytes("qualifier");
+  private static final byte [] VALUE = Bytes.toBytes("value");
+  
+  private static final byte [] MISSING_ROW = Bytes.toBytes("missingrow");
+  
+  private HTableDescriptor desc = null;
+  private HTable table = null;
+
+  /**
+   * @throws UnsupportedEncodingException
+   */
+  public TestGet() throws UnsupportedEncodingException {
+    super();
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    this.desc = new HTableDescriptor("testGet");
+    desc.addFamily(new HColumnDescriptor(FAMILY));
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    table = new HTable(conf, desc.getName());
+  }
+
+  public void testGet_EmptyTable() throws IOException {
+    
+    Get get = new Get(ROW);
+    get.addFamily(FAMILY);
+    Result r = table.get(get);
+    assertTrue(r.isEmpty());
+    
+  }
+  
+  public void testGet_NonExistentRow() throws IOException {
+    
+    Put put = new Put(ROW);
+    put.add(FAMILY, QUALIFIER, VALUE);
+    table.put(put);
+    System.out.println("Row put");
+    
+    Get get = new Get(ROW);
+    get.addFamily(FAMILY);
+    Result r = table.get(get);
+    assertFalse(r.isEmpty());
+    System.out.println("Row retrieved successfully");
+    
+    get = new Get(MISSING_ROW);
+    get.addFamily(FAMILY);
+    r = table.get(get);
+    assertTrue(r.isEmpty());
+    System.out.println("Row missing as it should be");
+    
+  }
+     
+}

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestGetRowVersions.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestGetRowVersions.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestGetRowVersions.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestGetRowVersions.java Sat Jun  6 01:26:21 2009
@@ -20,18 +20,13 @@
 
 package org.apache.hadoop.hbase.client;
 
-import java.util.Iterator;
-import java.util.Map;
+import java.util.NavigableMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -39,13 +34,14 @@
  */
 public class TestGetRowVersions extends HBaseClusterTestCase {
   private static final Log LOG = LogFactory.getLog(TestGetRowVersions.class);
+  
   private static final String TABLE_NAME = "test";
-  private static final String CONTENTS_STR = "contents:";
-  private static final String ROW = "row";
-  private static final String COLUMN = "contents:contents";
-  private static final long TIMESTAMP = System.currentTimeMillis();
-  private static final String VALUE1 = "value1";
-  private static final String VALUE2 = "value2";
+  private static final byte [] CONTENTS = Bytes.toBytes("contents");
+  private static final byte [] ROW = Bytes.toBytes("row");
+  private static final byte [] VALUE1 = Bytes.toBytes("value1");
+  private static final byte [] VALUE2 = Bytes.toBytes("value2");
+  private static final long TIMESTAMP1 = 100L;
+  private static final long TIMESTAMP2 = 200L;
   private HBaseAdmin admin = null;
   private HTable table = null;
 
@@ -53,7 +49,7 @@
   public void setUp() throws Exception {
     super.setUp();
     HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
-    desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
+    desc.addFamily(new HColumnDescriptor(CONTENTS));
     this.admin = new HBaseAdmin(conf);
     this.admin.createTable(desc);
     this.table = new HTable(conf, TABLE_NAME);
@@ -61,9 +57,10 @@
 
   /** @throws Exception */
   public void testGetRowMultipleVersions() throws Exception {
-    BatchUpdate b = new BatchUpdate(ROW, TIMESTAMP);
-    b.put(COLUMN, Bytes.toBytes(VALUE1));
-    this.table.commit(b);
+    Put put = new Put(ROW);
+    put.setTimeStamp(TIMESTAMP1);
+    put.add(CONTENTS, CONTENTS, VALUE1);
+    this.table.put(put);
     // Shut down and restart the HBase cluster
     this.cluster.shutdown();
     this.zooKeeperCluster.shutdown();
@@ -72,33 +69,35 @@
     // Make a new connection
     this.table = new HTable(conf, TABLE_NAME);
     // Overwrite previous value
-    b = new BatchUpdate(ROW, TIMESTAMP);
-    b.put(COLUMN, Bytes.toBytes(VALUE2));
-    this.table.commit(b);
+    put = new Put(ROW);
+    put.setTimeStamp(TIMESTAMP2);
+    put.add(CONTENTS, CONTENTS, VALUE2);
+    this.table.put(put);
     // Now verify that getRow(row, column, latest) works
-    RowResult r = table.getRow(ROW);
+    Get get = new Get(ROW);
+    // Should get one version by default
+    Result r = table.get(get);
     assertNotNull(r);
-    assertTrue(r.size() != 0);
-    Cell c = r.get(COLUMN);
-    assertNotNull(c);
-    assertTrue(c.getValue().length != 0);
-    String value = Bytes.toString(c.getValue());
-    assertTrue(value.compareTo(VALUE2) == 0);
+    assertFalse(r.isEmpty());
+    assertTrue(r.size() == 1);
+    byte [] value = r.getValue(CONTENTS, CONTENTS);
+    assertTrue(value.length != 0);
+    assertTrue(Bytes.equals(value, VALUE2));
     // Now check getRow with multiple versions
-    r = table.getRow(ROW, HConstants.ALL_VERSIONS);
-    for (Map.Entry<byte[], Cell> e: r.entrySet()) {
-      // Column name
-//      System.err.print("  " + Bytes.toString(e.getKey()));
-      c = e.getValue();
-      
-      // Need to iterate since there may be multiple versions
-      for (Iterator<Map.Entry<Long, byte[]>> it = c.iterator();
-            it.hasNext(); ) {
-        Map.Entry<Long, byte[]> v = it.next();
-        value = Bytes.toString(v.getValue());
-//        System.err.println(" = " + value);
-        assertTrue(VALUE2.compareTo(Bytes.toString(v.getValue())) == 0);
-      }
-    }
+    get = new Get(ROW);
+    get.setMaxVersions();
+    r = table.get(get);
+    assertTrue(r.size() == 2);
+    value = r.getValue(CONTENTS, CONTENTS);
+    assertTrue(value.length != 0);
+    assertTrue(Bytes.equals(value, VALUE2));
+    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map =
+      r.getMap();
+    NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap = 
+      map.get(CONTENTS);
+    NavigableMap<Long, byte[]> versionMap = familyMap.get(CONTENTS);
+    assertTrue(versionMap.size() == 2);
+    assertTrue(Bytes.equals(VALUE1, versionMap.get(TIMESTAMP1)));
+    assertTrue(Bytes.equals(VALUE2, versionMap.get(TIMESTAMP2)));
   }
 }
\ No newline at end of file
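
The rewritten TestGetRowVersions above fetches multiple cell versions by calling Get.setMaxVersions() and then walking Result.getMap(), a family -> qualifier -> timestamp -> value map. A minimal sketch of that read path, assuming an already-open HTable; the class and method names are illustrative:

import java.io.IOException;
import java.util.NavigableMap;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;

public class VersionsSketch {
  /** Returns every stored version of family:qualifier for row, keyed by timestamp. */
  static NavigableMap<Long, byte []> readAllVersions(HTable table, byte [] row,
      byte [] family, byte [] qualifier) throws IOException {
    Get get = new Get(row);
    // Without this, a Get returns only the newest version, as the test notes.
    get.setMaxVersions();
    Result r = table.get(get);
    // getMap() is family -> qualifier -> timestamp -> value.
    return r.getMap().get(family).get(qualifier);
  }
}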

Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHBaseAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHBaseAdmin.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHBaseAdmin.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHBaseAdmin.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,323 @@
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+public class TestHBaseAdmin extends HBaseClusterTestCase {
+  static final Log LOG = LogFactory.getLog(TestHBaseAdmin.class.getName());
+  
+  private String TABLE_STR = "testTable";
+  private byte [] TABLE = Bytes.toBytes(TABLE_STR);
+  private byte [] ROW = Bytes.toBytes("testRow");
+  private byte [] FAMILY = Bytes.toBytes("testFamily");
+  private byte [] QUALIFIER = Bytes.toBytes("testQualifier");
+  private byte [] VALUE = Bytes.toBytes("testValue");
+  
+  private HBaseAdmin admin = null;
+  private HConnection connection = null;
+  
+  /**
+   * Constructor does nothing special, start cluster.
+   */
+  public TestHBaseAdmin() throws Exception{
+    super();
+  }
+
+  
+  public void testCreateTable() throws IOException {
+    init();
+    
+    HTableDescriptor [] tables = connection.listTables();
+    int numTables = tables.length;
+    
+    createTable(TABLE, FAMILY);
+    tables = connection.listTables();
+    
+    assertEquals(numTables + 1, tables.length);
+  }
+  
+  
+  public void testDisableAndEnableTable() throws IOException {
+    init();
+    
+    HTable ht =  createTable(TABLE, FAMILY);
+    
+    Put put = new Put(ROW);
+    put.add(FAMILY, QUALIFIER, VALUE);
+    ht.put(put);
+    
+    admin.disableTable(TABLE);
+    
+    //Test that table is disabled
+    Get get = new Get(ROW);
+    get.addColumn(FAMILY, QUALIFIER);
+    boolean ok = false;
+    try {
+      ht.get(get);
+    } catch (RetriesExhaustedException e) {
+      ok = true;
+    }
+    assertEquals(true, ok);
+    
+    admin.enableTable(TABLE);
+    
+    //Test that table is enabled
+    try {
+      ht.get(get);
+    } catch (RetriesExhaustedException e) {
+      ok = false;
+    }
+    assertEquals(true, ok);
+  }
+  
+  
+  public void testTableExist() throws IOException {
+    init();
+    boolean exist = false;
+    
+    exist = admin.tableExists(TABLE);
+    assertEquals(false, exist);
+    
+    createTable(TABLE, FAMILY);
+    
+    exist = admin.tableExists(TABLE);
+    assertEquals(true, exist);    
+  }
+  
+
+//  public void testMajorCompact() throws Exception {
+//    init();
+//    
+//    int testTableCount = 0;
+//    int flushSleep = 1000;
+//    int majocCompactSleep = 7000;
+//    
+//    HTable ht = createTable(TABLE, FAMILY);
+//    byte [][] ROWS = makeN(ROW, 5);
+//    
+//    Put put = new Put(ROWS[0]);
+//    put.add(FAMILY, QUALIFIER, VALUE);
+//    ht.put(put);
+//    
+//    admin.flush(TABLE);
+//    Thread.sleep(flushSleep);
+//    
+//    put = new Put(ROWS[1]);
+//    put.add(FAMILY, QUALIFIER, VALUE);
+//    ht.put(put);
+//    
+//    admin.flush(TABLE);
+//    Thread.sleep(flushSleep);
+//    
+//    put = new Put(ROWS[2]);
+//    put.add(FAMILY, QUALIFIER, VALUE);
+//    ht.put(put);
+//    
+//    admin.flush(TABLE);
+//    Thread.sleep(flushSleep);
+//    
+//    put = new Put(ROWS[3]);
+//    put.add(FAMILY, QUALIFIER, VALUE);
+//    ht.put(put);
+//    
+//    admin.majorCompact(TABLE);
+//    Thread.sleep(majocCompactSleep);
+//    
+//    HRegion [] regions = null;
+//    
+//    regions = connection.getRegionServerWithRetries(
+//        new ServerCallable<HRegion []>(connection, TABLE, ROW) {
+//          public HRegion [] call() throws IOException {
+//            return server.getOnlineRegionsAsArray();
+//          }
+//        }
+//    );
+//    for(HRegion region : regions) {
+//      String table = Bytes.toString(region.getRegionName()).split(",")[0];
+//      if(table.equals(TABLE_STR)) {
+//        String output = "table: " + table;
+//        int i = 0;
+//        for(int j : region.getStoresSize()) {
+//          output += ", files in store " + i++ + "(" + j + ")";
+//          testTableCount = j; 
+//        }
+//        if (LOG.isDebugEnabled()) {
+//          LOG.debug(output);
+//        }
+//        System.out.println(output);
+//      }
+//    }
+//    assertEquals(1, testTableCount);
+//  }
+//  
+//
+//
+//  public void testFlush_TableName() throws Exception {
+//    init();
+//
+//    int initTestTableCount = 0;
+//    int testTableCount = 0;
+//    
+//    HTable ht = createTable(TABLE, FAMILY);
+//
+//    Put put = new Put(ROW);
+//    put.add(FAMILY, QUALIFIER, VALUE);
+//    ht.put(put);
+//    
+//    HRegion [] regions = null;
+//    
+//    regions = connection.getRegionServerWithRetries(
+//        new ServerCallable<HRegion []>(connection, TABLE, ROW) {
+//          public HRegion [] call() throws IOException {
+//            return server.getOnlineRegionsAsArray();
+//          }
+//        }
+//    );
+//    for(HRegion region : regions) {
+//      String table = Bytes.toString(region.getRegionName()).split(",")[0];
+//      if(table.equals(TABLE_STR)) {
+//        String output = "table: " + table;
+//        int i = 0;
+//        for(int j : region.getStoresSize()) {
+//          output += ", files in store " + i++ + "(" + j + ")";
+//          initTestTableCount = j; 
+//        }
+//        if (LOG.isDebugEnabled()) {
+//          LOG.debug(output);
+//        }
+//      }
+//    }
+//    
+//    //Flushing 
+//    admin.flush(TABLE);
+//    Thread.sleep(2000);
+//    
+//    regions = connection.getRegionServerWithRetries(
+//        new ServerCallable<HRegion []>(connection, TABLE, ROW) {
+//          public HRegion [] call() throws IOException {
+//            return server.getOnlineRegionsAsArray();
+//          }
+//        }
+//    );
+//    for(HRegion region : regions) {
+//      String table = Bytes.toString(region.getRegionName()).split(",")[0];
+//      if(table.equals(TABLE_STR)) {
+//        String output = "table: " + table;
+//        int i = 0;
+//        for(int j : region.getStoresSize()) {
+//          output += ", files in store " + i++ + "(" + j + ")";
+//          testTableCount = j; 
+//        }
+//        if (LOG.isDebugEnabled()) {
+//          LOG.debug(output);
+//        }
+//      }
+//    }
+//
+//    assertEquals(initTestTableCount + 1, testTableCount);
+//  }
+// 
+//
+//  public void testFlush_RegionName() throws Exception{
+//    init();
+//    int initTestTableCount = 0;
+//    int testTableCount = 0;
+//    String regionName = null;
+//    
+//    HTable ht = createTable(TABLE, FAMILY);
+//
+//    Put put = new Put(ROW);
+//    put.add(FAMILY, QUALIFIER, VALUE);
+//    ht.put(put);
+//    
+//    HRegion [] regions = null;
+//    
+//    regions = connection.getRegionServerWithRetries(
+//        new ServerCallable<HRegion []>(connection, TABLE, ROW) {
+//          public HRegion [] call() throws IOException {
+//            return server.getOnlineRegionsAsArray();
+//          }
+//        }
+//    );
+//    for(HRegion region : regions) {
+//      String reg = Bytes.toString(region.getRegionName());
+//      String table = reg.split(",")[0];
+//      if(table.equals(TABLE_STR)) {
+//        regionName = reg;
+//        String output = "table: " + table;
+//        int i = 0;
+//        for(int j : region.getStoresSize()) {
+//          output += ", files in store " + i++ + "(" + j + ")";
+//          initTestTableCount = j; 
+//        }
+//        if (LOG.isDebugEnabled()) {
+//          LOG.debug(output);
+//        }
+//      }
+//    }
+//    
+//    //Flushing 
+//    admin.flush(regionName);
+//    Thread.sleep(2000);
+//    
+//    regions = connection.getRegionServerWithRetries(
+//        new ServerCallable<HRegion []>(connection, TABLE, ROW) {
+//          public HRegion [] call() throws IOException {
+//            return server.getOnlineRegionsAsArray();
+//          }
+//        }
+//    );
+//    for(HRegion region : regions) {
+//      String table = Bytes.toString(region.getRegionName()).split(",")[0];
+//      if(table.equals(TABLE_STR)) {
+//        String output = "table: " + table;
+//        int i = 0;
+//        for(int j : region.getStoresSize()) {
+//          output += ", files in store " + i++ + "(" + j + ")";
+//          testTableCount = j; 
+//        }
+//        if (LOG.isDebugEnabled()) {
+//          LOG.debug(output);
+//        }
+//      }
+//    }
+//
+//    assertEquals(initTestTableCount + 1, testTableCount);
+//  }
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Helpers
+  //////////////////////////////////////////////////////////////////////////////
+  private byte [][] makeN(byte [] base, int n) {
+    byte [][] ret = new byte[n][];
+    for(int i=0;i<n;i++) {
+      ret[i] = Bytes.add(base, new byte[]{(byte)i});
+    }
+    return ret;
+  }
+
+  private HTable createTable(byte [] tableName, byte [] ... families) 
+  throws IOException {
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    for(byte [] family : families) {
+      desc.addFamily(new HColumnDescriptor(family));
+    }
+    admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    return new HTable(conf, tableName);
+  }
+  
+  private void init() throws IOException {
+    connection = new HBaseAdmin(conf).connection;
+    admin = new HBaseAdmin(conf);
+  }
+  
+}
\ No newline at end of file
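
The new TestHBaseAdmin exercises the table lifecycle calls on HBaseAdmin: createTable, disableTable, enableTable and tableExists (a Get against a disabled table fails with RetriesExhaustedException). A minimal sketch of that lifecycle, assuming the same HBaseConfiguration the tests pass to HBaseAdmin; the class name and the "demo" table and family names are illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class AdminSketch {
  static void createDisableEnable(HBaseConfiguration conf) throws IOException {
    byte [] tableName = Bytes.toBytes("demoTable");
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("demoFamily")));
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(desc);          // new tables start out enabled
    admin.disableTable(tableName);    // client calls fail until re-enabled
    admin.enableTable(tableName);
    boolean exists = admin.tableExists(tableName); // true whether enabled or disabled
  }
}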

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java Sat Jun  6 01:26:21 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -20,17 +20,13 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.util.Map;
 
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.io.RowResult;
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -38,7 +34,7 @@
  */
 public class TestHTable extends HBaseClusterTestCase implements HConstants {
   private static final HColumnDescriptor column =
-    new HColumnDescriptor(COLUMN_FAMILY);
+    new HColumnDescriptor(CATALOG_FAMILY);
 
   private static final byte [] nosuchTable = Bytes.toBytes("nosuchTable");
   private static final byte [] tableAname = Bytes.toBytes("tableA");
@@ -50,7 +46,10 @@
   private static final byte [] attrValue = Bytes.toBytes("somevalue");
 
 
-  public void testGetRow() {
+  
+  
+  
+  public void testGet() throws IOException {
     HTable table = null;
     try {
       HColumnDescriptor column2 =
@@ -63,42 +62,76 @@
       admin.createTable(testTableADesc);
       
       table = new HTable(conf, tableAname);
-      BatchUpdate batchUpdate = new BatchUpdate(row);
+      System.out.println("Adding row to table");
+      Put put = new Put(row);
+      
+      for(int i = 0; i < 5; i++) {
+        put.add(CATALOG_FAMILY, Bytes.toBytes(Integer.toString(i)), 
+            Bytes.toBytes(i));
+      }
+      
+      table.put(put);
+      
+//      Get get = new Get(row);
+//      get.addColumn(CATALOG_FAMILY,Bytes.toBytes(2));
+//      
+//      System.out.println("Getting data from table");
+//      Result res = table.get(get);
+//      System.out.println("Got data from table");
+//      System.out.println(res);
       
-      for(int i = 0; i < 5; i++)
-        batchUpdate.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i));
       
-      table.commit(batchUpdate);
 
-      assertTrue(table.exists(row));
-      for(int i = 0; i < 5; i++)
-        assertTrue(table.exists(row, Bytes.toBytes(COLUMN_FAMILY_STR+i)));
+//      assertTrue(table.exists(row));
+//      for(int i = 0; i < 5; i++)
+//        assertTrue(table.exists(row, Bytes.toBytes(CATALOG_FAMILY_STR + i)));
 
-      RowResult result = null;
-      result = table.getRow(row,  new byte[][] {COLUMN_FAMILY});
-      for(int i = 0; i < 5; i++)
-        assertTrue(result.containsKey(Bytes.toBytes(COLUMN_FAMILY_STR+i)));
+      Get get = null;
+      Result result = null;
       
-      result = table.getRow(row);
+      get = new Get(row);
+      get.addFamily(CATALOG_FAMILY);
+//      get.addColumn(CATALOG_FAMILY, Bytes.toBytes(Integer.toString(1)));
+      System.out.println("Getting row");
+      long start = System.nanoTime();
+      result = table.get(get);
+      long stop = System.nanoTime();
+      System.out.println("timer " +(stop-start));
+      System.out.println("result " +result);
       for(int i = 0; i < 5; i++)
-        assertTrue(result.containsKey(Bytes.toBytes(COLUMN_FAMILY_STR+i)));
+        assertTrue(result.containsColumn(CATALOG_FAMILY, 
+            Bytes.toBytes(Integer.toString(i))));
 
-      batchUpdate = new BatchUpdate(row);
-      batchUpdate.put("info2:a", Bytes.toBytes("a"));
-      table.commit(batchUpdate);
-      
-      result = table.getRow(row, new byte[][] { COLUMN_FAMILY,
-          Bytes.toBytes("info2:a") });
-      for(int i = 0; i < 5; i++)
-        assertTrue(result.containsKey(Bytes.toBytes(COLUMN_FAMILY_STR+i)));
-      assertTrue(result.containsKey(Bytes.toBytes("info2:a")));
+//      get = new Get(row);
+//      result = table.get(get);
+//      for(int i = 0; i < 5; i++)
+//        assertTrue(result.containsColumn(CATALOG_FAMILY, 
+//            Bytes.toBytes(Integer.toString(i))));
+//
+//      byte [] family = Bytes.toBytes("info2");
+//      byte [] qf = Bytes.toBytes("a");
+//      
+//      put = new Put(row);
+//      put.add(family, qf, qf);
+//      table.put(put);
+//      
+//      get = new Get(row);
+//      get.addFamily(CATALOG_FAMILY);
+//      get.addColumn(family, qf);
+//      result = table.get(get);
+//      for(int i = 0; i < 5; i++)
+//        assertTrue(result.containsColumn(CATALOG_FAMILY, 
+//            Bytes.toBytes(Integer.toString(i))));
+//      assertTrue(result.containsColumn(family, qf));
     } catch (IOException e) {
       e.printStackTrace();
       fail("Should not have any exception " +
         e.getClass());
-    }
+    }    
   }
 
+  
+
   /**
    * the test
    * @throws IOException
@@ -138,9 +171,9 @@
       a.getConnection().getHTableDescriptor(tableAdesc.getName());
     assertTrue(meta.equals(tableAdesc));
     
-    BatchUpdate batchUpdate = new BatchUpdate(row);
-    batchUpdate.put(COLUMN_FAMILY, value);
-    a.commit(batchUpdate);
+    Put put = new Put(row);
+    put.add(CATALOG_FAMILY, null, value);
+    a.put(put);
     
     // open a new connection to A and a connection to b
     
@@ -149,16 +182,18 @@
 
     // copy data from A to B
     
-    Scanner s =
-      newA.getScanner(COLUMN_FAMILY_ARRAY, EMPTY_START_ROW);
+    Scan scan = new Scan();
+    scan.addFamily(HConstants.CATALOG_FAMILY);
+
+    ResultScanner s = newA.getScanner(scan);
     
     try {
-      for (RowResult r : s) {
-        batchUpdate = new BatchUpdate(r.getRow());
-        for(Map.Entry<byte [], Cell> e: r.entrySet()) {
-          batchUpdate.put(e.getKey(), e.getValue().getValue());
+      for (Result r : s) {
+        put = new Put(r.getRow());
+        for(KeyValue kv : r.sorted()) {
+          put.add(kv);
         }
-        b.commit(batchUpdate);
+        b.put(put);
       }
     } finally {
       s.close();
@@ -168,7 +203,9 @@
 
     try {
       HTable anotherA = new HTable(conf, tableAname);
-      anotherA.get(row, COLUMN_FAMILY);
+      Get get = new Get(row);
+      get.addFamily(CATALOG_FAMILY);
+      anotherA.get(get);
     } catch (Exception e) {
       e.printStackTrace();
       fail();
@@ -191,7 +228,7 @@
       for (HColumnDescriptor c: desc.getFamilies())
         c.setValue(attrName, attrValue);
       // update metadata for all regions of this table
-      admin.modifyTable(tableAname, HConstants.MODIFY_TABLE_SET_HTD, desc);
+      admin.modifyTable(tableAname, HConstants.Modify.TABLE_SET_HTD, desc);
       // enable the table
       admin.enableTable(tableAname);
 
@@ -220,144 +257,6 @@
     }
   }
 
-  public void testCheckAndSave() throws IOException {
-    HTable table = null;
-    HColumnDescriptor column2 =
-      new HColumnDescriptor(Bytes.toBytes("info2:"));
-    HBaseAdmin admin = new HBaseAdmin(conf);
-    HTableDescriptor testTableADesc =
-      new HTableDescriptor(tableAname);
-    testTableADesc.addFamily(column);
-    testTableADesc.addFamily(column2);
-    admin.createTable(testTableADesc);
-    
-    table = new HTable(conf, tableAname);
-    BatchUpdate batchUpdate = new BatchUpdate(row);
-    BatchUpdate batchUpdate2 = new BatchUpdate(row);
-    BatchUpdate batchUpdate3 = new BatchUpdate(row);
-
-    // this row doesn't exist when checkAndSave is invoked
-    byte [] row1 = Bytes.toBytes("row1");
-    BatchUpdate batchUpdate4 = new BatchUpdate(row1);
-    
-    // to be used for a checkAndSave for expected empty columns
-    BatchUpdate batchUpdate5 = new BatchUpdate(row);
-
-    HbaseMapWritable<byte[],byte[]> expectedValues =
-      new HbaseMapWritable<byte[],byte[]>();
-    HbaseMapWritable<byte[],byte[]> badExpectedValues =
-      new HbaseMapWritable<byte[],byte[]>();
-    HbaseMapWritable<byte[],byte[]> expectedNoValues =
-      new HbaseMapWritable<byte[],byte[]>();
-    // the columns used here must not be updated on batchupate
-    HbaseMapWritable<byte[],byte[]> expectedNoValues1 =
-      new HbaseMapWritable<byte[],byte[]>();
-
-    for(int i = 0; i < 5; i++) {
-      // This batchupdate is our initial batch update,
-      // As such we also set our expected values to the same values
-      // since we will be comparing the two
-      batchUpdate.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i));
-      expectedValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i), Bytes.toBytes(i));
-      
-      badExpectedValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i),
-        Bytes.toBytes(500));
-
-      expectedNoValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i), new byte[] {});
-      // the columns used here must not be updated on batchupate
-      expectedNoValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+i+","+i), new byte[] {});
-
-
-      // This is our second batchupdate that we will use to update the initial
-      // batchupdate
-      batchUpdate2.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+1));
-      
-      // This final batch update is to check that our expected values (which
-      // are now wrong)
-      batchUpdate3.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+2));
-
-      // Batch update that will not happen because it is to happen with some 
-      // expected values, but the row doesn't exist
-      batchUpdate4.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i));
-
-      // Batch update will happen: the row exists, but the expected columns don't,
-      // just as the condition
-      batchUpdate5.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+3));
-    }
-    
-    // Initialize rows
-    table.commit(batchUpdate);
-    
-    // check if incorrect values are returned false
-    assertFalse(table.checkAndSave(batchUpdate2,badExpectedValues,null));
-    
-    // make sure first expected values are correct
-    assertTrue(table.checkAndSave(batchUpdate2, expectedValues,null));
-        
-    // make sure check and save truly saves the data after checking the expected
-    // values
-    RowResult r = table.getRow(row);
-    byte[][] columns = batchUpdate2.getColumns();
-    for(int i = 0;i < columns.length;i++) {
-      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate2.get(columns[i])));
-    }
-    
-    // make sure that the old expected values fail
-    assertFalse(table.checkAndSave(batchUpdate3, expectedValues,null));
-
-    // row doesn't exist, so doesn't matter the expected 
-    // values (unless they are empty) 
-    assertFalse(table.checkAndSave(batchUpdate4, badExpectedValues, null));
-
-    assertTrue(table.checkAndSave(batchUpdate4, expectedNoValues, null));
-    // make sure check and save saves the data when expected values were empty and the row
-    // didn't exist
-    r = table.getRow(row1);
-    columns = batchUpdate4.getColumns();
-    for(int i = 0; i < columns.length;i++) {
-      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate4.get(columns[i])));
-    }  
-
-    // since the row isn't empty anymore, those expected (empty) values 
-    // are not valid anymore, so check and save method doesn't save. 
-    assertFalse(table.checkAndSave(batchUpdate4, expectedNoValues, null));
-    
-    // the row exists, but the columns don't. since the expected values are 
-    // for columns without value, checkAndSave must be successful. 
-    assertTrue(table.checkAndSave(batchUpdate5, expectedNoValues1, null));
-    // make sure checkAndSave saved values for batchUpdate5.
-    r = table.getRow(row);
-    columns = batchUpdate5.getColumns();
-    for(int i = 0; i < columns.length;i++) {
-      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate5.get(columns[i])));
-    }  
-
-    // since the condition wasn't changed, the following checkAndSave 
-    // must also be successful.
-    assertTrue(table.checkAndSave(batchUpdate, expectedNoValues1, null));
-    // make sure checkAndSave saved values for batchUpdate1
-    r = table.getRow(row);
-    columns = batchUpdate.getColumns();
-    for(int i = 0; i < columns.length;i++) {
-      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate.get(columns[i])));
-    }
-
-    // one failing condition must make the following checkAndSave fail
-    // the failing condition is a column to be empty, however, it has a value.
-    HbaseMapWritable<byte[],byte[]> expectedValues1 =
-      new HbaseMapWritable<byte[],byte[]>();
-    expectedValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+0), new byte[] {});
-    expectedValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+"EMPTY+ROW"), new byte[] {});
-    assertFalse(table.checkAndSave(batchUpdate5, expectedValues1, null));
-
-    // assure the values on the row remain the same
-    r = table.getRow(row);
-    columns = batchUpdate.getColumns();
-    for(int i = 0; i < columns.length;i++) {
-      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate.get(columns[i])));
-    }    
-  }
-
   /**
    * For HADOOP-2579
    */
@@ -389,43 +288,42 @@
     byte[] beforeSecondRow = Bytes.toBytes("rov");
     
     HTable table = new HTable(conf, tableAname);
-    BatchUpdate batchUpdate = new BatchUpdate(firstRow);
-    BatchUpdate batchUpdate2 = new BatchUpdate(row);
+    Put put = new Put(firstRow);
+    Put put2 = new Put(row);
     byte[] zero = new byte[]{0};
     byte[] one = new byte[]{1};
-    byte[] columnFamilyBytes = Bytes.toBytes(COLUMN_FAMILY_STR);
     
-    batchUpdate.put(COLUMN_FAMILY_STR,zero);
-    batchUpdate2.put(COLUMN_FAMILY_STR,one);
+    put.add(CATALOG_FAMILY, null, zero);
+    put2.add(CATALOG_FAMILY, null, one);
     
-    table.commit(batchUpdate);
-    table.commit(batchUpdate2);
+    table.put(put);
+    table.put(put2);
     
-    RowResult result = null;
+    Result result = null;
     
     // Test before first that null is returned
-    result = table.getClosestRowBefore(beforeFirstRow, columnFamilyBytes);
+    result = table.getRowOrBefore(beforeFirstRow, CATALOG_FAMILY);
     assertTrue(result == null);
     
     // Test at first that first is returned
-    result = table.getClosestRowBefore(firstRow, columnFamilyBytes);
-    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
-    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), zero));
-    
-    // Test inbetween first and second that first is returned
-    result = table.getClosestRowBefore(beforeSecondRow, columnFamilyBytes);
-    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
-    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), zero));
+    result = table.getRowOrBefore(firstRow, CATALOG_FAMILY);
+    assertTrue(result.containsColumn(CATALOG_FAMILY, null));
+    assertTrue(Bytes.equals(result.getValue(CATALOG_FAMILY, null), zero));
+    
+    // Test in between first and second that first is returned
+    result = table.getRowOrBefore(beforeSecondRow, CATALOG_FAMILY);
+    assertTrue(result.containsColumn(CATALOG_FAMILY, null));
+    assertTrue(Bytes.equals(result.getValue(CATALOG_FAMILY, null), zero));
     
     // Test at second make sure second is returned
-    result = table.getClosestRowBefore(row, columnFamilyBytes);
-    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
-    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), one));
+    result = table.getRowOrBefore(row, CATALOG_FAMILY);
+    assertTrue(result.containsColumn(CATALOG_FAMILY, null));
+    assertTrue(Bytes.equals(result.getValue(CATALOG_FAMILY, null), one));
     
     // Test after second, make sure second is returned
-    result = table.getClosestRowBefore(Bytes.add(row,one), columnFamilyBytes);
-    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
-    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), one));
+    result = table.getRowOrBefore(Bytes.add(row,one), CATALOG_FAMILY);
+    assertTrue(result.containsColumn(CATALOG_FAMILY, null));
+    assertTrue(Bytes.equals(result.getValue(CATALOG_FAMILY, null), one));
   }
 
   /**
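
The copy loop in TestHTable above now scans table A with Scan/ResultScanner and rebuilds each row as a Put from the sorted KeyValues before writing it to table B. A minimal sketch of that copy, assuming both HTable instances are already open; the class and method names are illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;

public class CopySketch {
  /** Copies every row of the given family from src to dst. */
  static void copyFamily(HTable src, HTable dst, byte [] family) throws IOException {
    Scan scan = new Scan();
    scan.addFamily(family);
    ResultScanner scanner = src.getScanner(scan);
    try {
      for (Result r : scanner) {
        Put put = new Put(r.getRow());
        // Each KeyValue already carries family, qualifier, timestamp and value.
        for (KeyValue kv : r.sorted()) {
          put.add(kv);
        }
        dst.put(put);
      }
    } finally {
      scanner.close();
    }
  }
}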

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestListTables.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestListTables.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestListTables.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestListTables.java Sat Jun  6 01:26:21 2009
@@ -44,7 +44,7 @@
     super.setUp();
     admin = new HBaseAdmin(conf);
     HColumnDescriptor family =
-      new HColumnDescriptor(HConstants.COLUMN_FAMILY_STR);
+      new HColumnDescriptor(HConstants.CATALOG_FAMILY);
     for (int i = 0; i < TABLES.length; i++) {
       TABLES[i].addFamily(family);
       admin.createTable(TABLES[i]);

Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPIGetRowVersions.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPIGetRowVersions.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPIGetRowVersions.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPIGetRowVersions.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,107 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * 
+ */
+public class TestOldAPIGetRowVersions extends HBaseClusterTestCase {
+  private static final Log LOG = LogFactory.getLog(TestGetRowVersions.class);
+  private static final String TABLE_NAME = "test";
+  private static final String CONTENTS_STR = "contents:";
+  private static final String ROW = "row";
+  private static final String COLUMN = "contents:contents";
+  private static final long TIMESTAMP = System.currentTimeMillis();
+  private static final String VALUE1 = "value1";
+  private static final String VALUE2 = "value2";
+  private HBaseAdmin admin = null;
+  private HTable table = null;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
+    desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
+    this.admin = new HBaseAdmin(conf);
+    this.admin.createTable(desc);
+    this.table = new HTable(conf, TABLE_NAME);
+  }
+
+  /** @throws Exception */
+  public void testGetRowMultipleVersions() throws Exception {
+    BatchUpdate b = new BatchUpdate(ROW, TIMESTAMP);
+    b.put(COLUMN, Bytes.toBytes(VALUE1));
+    this.table.commit(b);
+    /* Taking out this recycle of the mini cluster -- it don't work well
+     * Debug it if fails in TestGetRowVersion, not this old api version.
+    // Shut down and restart the HBase cluster
+    this.cluster.shutdown();
+    this.zooKeeperCluster.shutdown();
+    LOG.debug("HBase cluster shut down -- restarting");
+    this.hBaseClusterSetup();
+    */
+    // Make a new connection
+    this.table = new HTable(conf, TABLE_NAME);
+    // Overwrite previous value
+    b = new BatchUpdate(ROW, TIMESTAMP);
+    b.put(COLUMN, Bytes.toBytes(VALUE2));
+    this.table.commit(b);
+    // Now verify that getRow(row, column, latest) works
+    RowResult r = table.getRow(ROW);
+    assertNotNull(r);
+    assertTrue(r.size() != 0);
+    Cell c = r.get(COLUMN);
+    assertNotNull(c);
+    assertTrue(c.getValue().length != 0);
+    String value = Bytes.toString(c.getValue());
+    assertTrue(value.compareTo(VALUE2) == 0);
+    // Now check getRow with multiple versions
+    r = table.getRow(ROW, HConstants.ALL_VERSIONS);
+    for (Map.Entry<byte[], Cell> e: r.entrySet()) {
+      // Column name
+//      System.err.print("  " + Bytes.toString(e.getKey()));
+      c = e.getValue();
+      
+      // Need to iterate since there may be multiple versions
+      for (Iterator<Map.Entry<Long, byte[]>> it = c.iterator();
+            it.hasNext(); ) {
+        Map.Entry<Long, byte[]> v = it.next();
+        value = Bytes.toString(v.getValue());
+//        System.err.println(" = " + value);
+        assertTrue(VALUE2.compareTo(Bytes.toString(v.getValue())) == 0);
+      }
+    }
+  }
+}
\ No newline at end of file

Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPIHTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPIHTable.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPIHTable.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPIHTable.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,459 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.HbaseMapWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Tests HTable
+ */
+public class TestOldAPIHTable extends HBaseClusterTestCase implements HConstants {
+  private static final String COLUMN_FAMILY_STR = "contents:";
+  private static final byte [] COLUMN_FAMILY = Bytes.toBytes(COLUMN_FAMILY_STR);
+  private static final byte [][] COLUMN_FAMILY_ARRAY = {COLUMN_FAMILY};
+  
+  private static final HColumnDescriptor column =
+    new HColumnDescriptor(COLUMN_FAMILY);
+
+  private static final byte [] nosuchTable = Bytes.toBytes("nosuchTable");
+  private static final byte [] tableAname = Bytes.toBytes("tableA");
+  private static final byte [] tableBname = Bytes.toBytes("tableB");
+  
+  private static final byte [] row = Bytes.toBytes("row");
+ 
+  private static final byte [] attrName = Bytes.toBytes("TESTATTR");
+  private static final byte [] attrValue = Bytes.toBytes("somevalue");
+
+
+  /**
+   * For HADOOP-2579
+   */
+  public void testTableNotFoundExceptionWithoutAnyTables() {
+    try {
+      new HTable(conf, "notATable");
+      fail("Should have thrown a TableNotFoundException");
+    } catch (TableNotFoundException e) {
+      // expected
+    } catch (IOException e) {
+      e.printStackTrace();
+      fail("Should have thrown a TableNotFoundException instead of a " +
+        e.getClass());
+    }
+  }
+
+  public void testGetClosestRowBefore() throws IOException {
+    HColumnDescriptor column2 =
+      new HColumnDescriptor(Bytes.toBytes("info2:"));
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    HTableDescriptor testTableADesc =
+      new HTableDescriptor(tableAname);
+    testTableADesc.addFamily(column);
+    testTableADesc.addFamily(column2);
+    admin.createTable(testTableADesc);
+    
+    byte[] firstRow = Bytes.toBytes("ro");
+    byte[] beforeFirstRow = Bytes.toBytes("rn");
+    byte[] beforeSecondRow = Bytes.toBytes("rov");
+    
+    HTable table = new HTable(conf, tableAname);
+    BatchUpdate batchUpdate = new BatchUpdate(firstRow);
+    BatchUpdate batchUpdate2 = new BatchUpdate(row);
+    byte[] zero = new byte[]{0};
+    byte[] one = new byte[]{1};
+    byte[] columnFamilyBytes = Bytes.toBytes(COLUMN_FAMILY_STR);
+    
+    batchUpdate.put(COLUMN_FAMILY_STR,zero);
+    batchUpdate2.put(COLUMN_FAMILY_STR,one);
+    
+    table.commit(batchUpdate);
+    table.commit(batchUpdate2);
+    
+    RowResult result = null;
+    
+    // Test before first that null is returned
+    result = table.getClosestRowBefore(beforeFirstRow, columnFamilyBytes);
+    assertTrue(result == null);
+    
+    // Test at first that first is returned
+    result = table.getClosestRowBefore(firstRow, columnFamilyBytes);
+    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
+    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), zero));
+    
+    // Test inbetween first and second that first is returned
+    result = table.getClosestRowBefore(beforeSecondRow, columnFamilyBytes);
+    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
+    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), zero));
+    
+    // Test at second make sure second is returned
+    result = table.getClosestRowBefore(row, columnFamilyBytes);
+    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
+    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), one));
+    
+    // Test after second, make sure second is returned
+    result = table.getClosestRowBefore(Bytes.add(row,one), columnFamilyBytes);
+    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
+    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), one));
+  }
+
+  /**
+   * For HADOOP-2579
+   */
+  public void testTableNotFoundExceptionWithATable() {
+   try {
+     HBaseAdmin admin = new HBaseAdmin(conf);
+     HTableDescriptor testTableADesc =
+       new HTableDescriptor("table");
+     testTableADesc.addFamily(column);
+     admin.createTable(testTableADesc);
+
+     // This should throw a TableNotFoundException, it has not been created
+     new HTable(conf, "notATable");
+     
+     fail("Should have thrown a TableNotFoundException");
+   } catch (TableNotFoundException e) {
+     // expected
+   } catch (IOException e) {
+     e.printStackTrace();
+     fail("Should have thrown a TableNotFoundException instead of a " +
+       e.getClass());
+   }
+   }
+
+  public void testGetRow() {
+    HTable table = null;
+    try {
+      HColumnDescriptor column2 =
+        new HColumnDescriptor(Bytes.toBytes("info2:"));
+      HBaseAdmin admin = new HBaseAdmin(conf);
+      HTableDescriptor testTableADesc =
+        new HTableDescriptor(tableAname);
+      testTableADesc.addFamily(column);
+      testTableADesc.addFamily(column2);
+      admin.createTable(testTableADesc);
+      
+      table = new HTable(conf, tableAname);
+      BatchUpdate batchUpdate = new BatchUpdate(row);
+      
+      for(int i = 0; i < 5; i++)
+        batchUpdate.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i));
+      
+      table.commit(batchUpdate);
+
+      assertTrue(table.exists(row));
+      for(int i = 0; i < 5; i++)
+        assertTrue(table.exists(row, Bytes.toBytes(COLUMN_FAMILY_STR+i)));
+
+      RowResult result = null;
+      result = table.getRow(row,  new byte[][] {COLUMN_FAMILY});
+      for(int i = 0; i < 5; i++)
+        assertTrue(result.containsKey(Bytes.toBytes(COLUMN_FAMILY_STR+i)));
+      
+      result = table.getRow(row);
+      for(int i = 0; i < 5; i++)
+        assertTrue(result.containsKey(Bytes.toBytes(COLUMN_FAMILY_STR+i)));
+
+      batchUpdate = new BatchUpdate(row);
+      batchUpdate.put("info2:a", Bytes.toBytes("a"));
+      table.commit(batchUpdate);
+      
+      result = table.getRow(row, new byte[][] { COLUMN_FAMILY,
+          Bytes.toBytes("info2:a") });
+      for(int i = 0; i < 5; i++)
+        assertTrue(result.containsKey(Bytes.toBytes(COLUMN_FAMILY_STR+i)));
+      assertTrue(result.containsKey(Bytes.toBytes("info2:a")));
+    } catch (IOException e) {
+      e.printStackTrace();
+      fail("Should not have any exception " +
+        e.getClass());
+    }
+  }
+
+  /**
+   * the test
+   * @throws IOException
+   */
+  public void testHTable() throws IOException {
+    byte[] value = "value".getBytes(UTF8_ENCODING);
+    
+    try {
+      new HTable(conf, nosuchTable);
+      
+    } catch (TableNotFoundException e) {
+      // expected
+
+    } catch (IOException e) {
+      e.printStackTrace();
+      fail();
+    }
+    
+    HTableDescriptor tableAdesc = new HTableDescriptor(tableAname);
+    tableAdesc.addFamily(column);
+    
+    HTableDescriptor tableBdesc = new HTableDescriptor(tableBname);
+    tableBdesc.addFamily(column);
+
+    // create a couple of tables
+    
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(tableAdesc);
+    admin.createTable(tableBdesc);
+    
+    // put some data into table A
+    
+    HTable a = new HTable(conf, tableAname);
+    
+    // Assert the metadata is good.
+    HTableDescriptor meta =
+      a.getConnection().getHTableDescriptor(tableAdesc.getName());
+    assertTrue(meta.equals(tableAdesc));
+    
+    BatchUpdate batchUpdate = new BatchUpdate(row);
+    batchUpdate.put(COLUMN_FAMILY, value);
+    a.commit(batchUpdate);
+    
+    // open a new connection to A and a connection to b
+    
+    HTable newA = new HTable(conf, tableAname);
+    HTable b = new HTable(conf, tableBname);
+
+    // copy data from A to B
+    
+    Scanner s =
+      newA.getScanner(COLUMN_FAMILY_ARRAY, EMPTY_START_ROW);
+    
+    try {
+      for (RowResult r : s) {
+        batchUpdate = new BatchUpdate(r.getRow());
+        for(Map.Entry<byte [], Cell> e: r.entrySet()) {
+          batchUpdate.put(e.getKey(), e.getValue().getValue());
+        }
+        b.commit(batchUpdate);
+      }
+    } finally {
+      s.close();
+    }
+    
+    // Opening a new connection to A will cause the tables to be reloaded
+
+    try {
+      HTable anotherA = new HTable(conf, tableAname);
+      anotherA.get(row, COLUMN_FAMILY);
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail();
+    }
+    
+    // We can still access A through newA because it has the table information
+    // cached. And if it needs to recalibrate, that will cause the information
+    // to be reloaded.
+
+    // Test user metadata
+
+    try {
+      // make a modifiable descriptor
+      HTableDescriptor desc = new HTableDescriptor(a.getTableDescriptor());
+      // offline the table
+      admin.disableTable(tableAname);
+      // add a user attribute to HTD
+      desc.setValue(attrName, attrValue);
+      // add a user attribute to HCD
+      for (HColumnDescriptor c: desc.getFamilies())
+        c.setValue(attrName, attrValue);
+      // update metadata for all regions of this table
+      admin.modifyTable(tableAname, HConstants.Modify.TABLE_SET_HTD, desc);
+      // enable the table
+      admin.enableTable(tableAname);
+
+      // test that attribute changes were applied
+      desc = a.getTableDescriptor();
+      if (Bytes.compareTo(desc.getName(), tableAname) != 0)
+        fail("wrong table descriptor returned");
+      // check HTD attribute
+      value = desc.getValue(attrName);
+      if (value == null)
+        fail("missing HTD attribute value");
+      if (Bytes.compareTo(value, attrValue) != 0)
+        fail("HTD attribute value is incorrect");
+      // check HCD attribute
+      for (HColumnDescriptor c: desc.getFamilies()) {
+        value = c.getValue(attrName);
+        if (value == null)
+          fail("missing HCD attribute value");
+        if (Bytes.compareTo(value, attrValue) != 0)
+          fail("HCD attribute value is incorrect");
+      }
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail();
+    }
+  }
+
+//  public void testCheckAndSave() throws IOException {
+//    HTable table = null;
+//    HColumnDescriptor column2 =
+//      new HColumnDescriptor(Bytes.toBytes("info2:"));
+//    HBaseAdmin admin = new HBaseAdmin(conf);
+//    HTableDescriptor testTableADesc =
+//      new HTableDescriptor(tableAname);
+//    testTableADesc.addFamily(column);
+//    testTableADesc.addFamily(column2);
+//    admin.createTable(testTableADesc);
+//    
+//    table = new HTable(conf, tableAname);
+//    BatchUpdate batchUpdate = new BatchUpdate(row);
+//    BatchUpdate batchUpdate2 = new BatchUpdate(row);
+//    BatchUpdate batchUpdate3 = new BatchUpdate(row);
+//
+//    // this row doesn't exist when checkAndSave is invoked
+//    byte [] row1 = Bytes.toBytes("row1");
+//    BatchUpdate batchUpdate4 = new BatchUpdate(row1);
+//    
+//    // to be used for a checkAndSave for expected empty columns
+//    BatchUpdate batchUpdate5 = new BatchUpdate(row);
+//
+//    HbaseMapWritable<byte[],byte[]> expectedValues =
+//      new HbaseMapWritable<byte[],byte[]>();
+//    HbaseMapWritable<byte[],byte[]> badExpectedValues =
+//      new HbaseMapWritable<byte[],byte[]>();
+//    HbaseMapWritable<byte[],byte[]> expectedNoValues =
+//      new HbaseMapWritable<byte[],byte[]>();
+//    // the columns used here must not be updated on batchupate
+//    HbaseMapWritable<byte[],byte[]> expectedNoValues1 =
+//      new HbaseMapWritable<byte[],byte[]>();
+//
+//    for(int i = 0; i < 5; i++) {
+//      // This batchupdate is our initial batch update,
+//      // As such we also set our expected values to the same values
+//      // since we will be comparing the two
+//      batchUpdate.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i));
+//      expectedValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i), Bytes.toBytes(i));
+//      
+//      badExpectedValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i),
+//        Bytes.toBytes(500));
+//
+//      expectedNoValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i), new byte[] {});
+//      // the columns used here must not be updated on batchupate
+//      expectedNoValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+i+","+i), new byte[] {});
+//
+//
+//      // This is our second batchupdate that we will use to update the initial
+//      // batchupdate
+//      batchUpdate2.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+1));
+//      
+//      // This final batch update is to check that our expected values (which
+//      // are now wrong)
+//      batchUpdate3.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+2));
+//
+//      // Batch update that will not happen because it is conditioned on some
+//      // expected values, but the row doesn't exist
+//      batchUpdate4.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i));
+//
+//      // Batch update will happen: the row exists, but the expected columns
+//      // don't, which is exactly what the condition requires
+//      batchUpdate5.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+3));
+//    }
+//    
+//    // Initialize rows
+//    table.commit(batchUpdate);
+//    
+//    // check that incorrect expected values make checkAndSave return false
+//    assertFalse(table.checkAndSave(batchUpdate2,badExpectedValues,null));
+//    
+//    // make sure first expected values are correct
+//    assertTrue(table.checkAndSave(batchUpdate2, expectedValues,null));
+//        
+//    // make sure check and save truly saves the data after checking the expected
+//    // values
+//    RowResult r = table.getRow(row);
+//    byte[][] columns = batchUpdate2.getColumns();
+//    for(int i = 0;i < columns.length;i++) {
+//      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate2.get(columns[i])));
+//    }
+//    
+//    // make sure that the old expected values fail
+//    assertFalse(table.checkAndSave(batchUpdate3, expectedValues,null));
+//
+//    // row doesn't exist, so the expected values don't matter
+//    // (unless they are empty)
+//    assertFalse(table.checkAndSave(batchUpdate4, badExpectedValues, null));
+//
+//    assertTrue(table.checkAndSave(batchUpdate4, expectedNoValues, null));
+//    // make sure check and save saves the data when expected values were empty and the row
+//    // didn't exist
+//    r = table.getRow(row1);
+//    columns = batchUpdate4.getColumns();
+//    for(int i = 0; i < columns.length;i++) {
+//      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate4.get(columns[i])));
+//    }  
+//
+//    // since the row isn't empty anymore, those expected (empty) values
+//    // are no longer valid, so the checkAndSave method doesn't save
+//    assertFalse(table.checkAndSave(batchUpdate4, expectedNoValues, null));
+//    
+//    // the row exists, but the columns don't. Since the expected values are
+//    // for columns without a value, checkAndSave must be successful.
+//    assertTrue(table.checkAndSave(batchUpdate5, expectedNoValues1, null));
+//    // make sure checkAndSave saved values for batchUpdate5.
+//    r = table.getRow(row);
+//    columns = batchUpdate5.getColumns();
+//    for(int i = 0; i < columns.length;i++) {
+//      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate5.get(columns[i])));
+//    }  
+//
+//    // since the condition wasn't changed, the following checkAndSave 
+//    // must also be successful.
+//    assertTrue(table.checkAndSave(batchUpdate, expectedNoValues1, null));
+//    // make sure checkAndSave saved values for batchUpdate
+//    r = table.getRow(row);
+//    columns = batchUpdate.getColumns();
+//    for(int i = 0; i < columns.length;i++) {
+//      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate.get(columns[i])));
+//    }
+//
+//    // one failing condition must make the following checkAndSave fail:
+//    // the failing condition expects a column to be empty, but it has a value.
+//    HbaseMapWritable<byte[],byte[]> expectedValues1 =
+//      new HbaseMapWritable<byte[],byte[]>();
+//    expectedValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+0), new byte[] {});
+//    expectedValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+"EMPTY+ROW"), new byte[] {});
+//    assertFalse(table.checkAndSave(batchUpdate5, expectedValues1, null));
+//
+//    // assure the values on the row remain the same
+//    r = table.getRow(row);
+//    columns = batchUpdate.getColumns();
+//    for(int i = 0; i < columns.length;i++) {
+//      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate.get(columns[i])));
+//    }    
+//  }
+
+}
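
For comparison, here is a minimal sketch of the same check-then-write idea expressed against the new client API. The atomic HTable.checkAndPut(row, family, qualifier, expectedValue, put) call is an assumption about the new client API rather than something confirmed by this commit, and the table, family and qualifier names below are illustrative only.

// Illustrative sketch, not part of this commit.  Assumes an atomic
// HTable.checkAndPut(row, family, qualifier, expectedValue, put) is
// (or becomes) available on the new client API.
import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndPutSketch {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(new HBaseConfiguration(), "test");
    byte [] row = Bytes.toBytes("row1");
    byte [] family = Bytes.toBytes("info");
    byte [] qualifier = Bytes.toBytes("0");

    // Unconditional write, analogous to table.commit(batchUpdate) above.
    Put initial = new Put(row);
    initial.add(family, qualifier, Bytes.toBytes(0));
    table.put(initial);

    // Conditional write, analogous to checkAndSave(update, expectedValues, lock):
    // only applied if the current value of info:0 equals the expected bytes.
    Put update = new Put(row);
    update.add(family, qualifier, Bytes.toBytes(1));
    boolean applied =
      table.checkAndPut(row, family, qualifier, Bytes.toBytes(0), update);
    System.out.println("conditional put applied: " + applied);
  }
}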

Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPITimestamp.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPITimestamp.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPITimestamp.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPITimestamp.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TimestampTestBase;
+
+/**
+ * Tests user-specifiable timestamps for puts, gets and scans.  Also tests
+ * the same in the presence of deletes.  Test cores are written so they can
+ * be run against an HRegion and against an HTable: i.e. both local and remote.
+ */
+public class TestOldAPITimestamp extends HBaseClusterTestCase {
+  private static final String COLUMN_NAME = "contents:";
+
+  /**
+   * Basic test of timestamps.
+   * Do the above tests from the client side.
+   * @throws IOException
+   */
+  public void testTimestamps() throws IOException {
+    HTable t = createTable();
+    Incommon incommon = new HTableIncommon(t);
+    TimestampTestBase.doTestDelete(incommon, new FlushCache() {
+      public void flushcache() throws IOException {
+        cluster.flushcache();
+      }
+     });
+    
+    // Perhaps drop and re-add the table between tests so the former does
+    // not pollute the latter?  Or put them into separate tests.
+    TimestampTestBase.doTestTimestampScanning(incommon, new FlushCache() {
+      public void flushcache() throws IOException {
+        cluster.flushcache();
+      }
+    });
+  }
+  
+  /* 
+   * Create a table named after this test.
+   * @return An instance of an HTable connected to the created table.
+   * @throws IOException
+   */
+  private HTable createTable() throws IOException {
+    HTableDescriptor desc = new HTableDescriptor(getName());
+    desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    return new HTable(conf, getName());
+  }
+}
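
The test above drives timestamps through the old Incommon/HTableIncommon wrapper and TimestampTestBase. For contrast, a minimal sketch of explicitly timestamped writes and reads with the new client API; the table, family and timestamp values are illustrative, and Get.setTimeStamp is assumed to pin the read to that single version.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class TimestampSketch {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(new HBaseConfiguration(), "test");
    byte [] row = Bytes.toBytes("row");
    byte [] family = Bytes.toBytes("contents");
    byte [] qualifier = Bytes.toBytes("basic");

    // Write two versions of the same cell with explicit timestamps.
    Put put = new Put(row);
    put.add(family, qualifier, 100L, Bytes.toBytes("old"));
    put.add(family, qualifier, 200L, Bytes.toBytes("new"));
    table.put(put);

    // Read back only the older version by pinning the timestamp.
    Get get = new Get(row);
    get.setTimeStamp(100L);
    Result result = table.get(get);
    System.out.println(Bytes.toString(result.getValue(family, qualifier)));
  }
}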

Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestPut.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestPut.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestPut.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestPut.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,202 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Test puts
+ */
+public class TestPut extends HBaseClusterTestCase {
+  private static final byte [] CONTENTS_FAMILY = Bytes.toBytes("contents");
+  private static final byte [] SMALL_FAMILY = Bytes.toBytes("smallfam");
+
+  private static final byte [] row1 = Bytes.toBytes("row1");
+  private static final byte [] row2 = Bytes.toBytes("row2");
+  
+  private static final int SMALL_LENGTH = 1;
+  private static final int NB_BATCH_ROWS = 10;
+  private byte [] value;
+  private byte [] smallValue;
+
+  private HTableDescriptor desc = null;
+  private HTable table = null;
+
+  /**
+   * @throws UnsupportedEncodingException
+   */
+  public TestPut() throws UnsupportedEncodingException {
+    super();
+    value = Bytes.toBytes("abcd");
+    smallValue = Bytes.toBytes("a");
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    this.desc = new HTableDescriptor("test");
+    desc.addFamily(new HColumnDescriptor(CONTENTS_FAMILY));
+    desc.addFamily(new HColumnDescriptor(SMALL_FAMILY, 
+        HColumnDescriptor.DEFAULT_VERSIONS, 
+        HColumnDescriptor.DEFAULT_COMPRESSION,
+        HColumnDescriptor.DEFAULT_IN_MEMORY, 
+        HColumnDescriptor.DEFAULT_BLOCKCACHE, SMALL_LENGTH, 
+        HColumnDescriptor.DEFAULT_TTL, HColumnDescriptor.DEFAULT_BLOOMFILTER));
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    table = new HTable(conf, desc.getName());
+  }
+
+  /**
+   * @throws IOException
+   */
+  public void testPut() throws IOException {
+    
+    Put put = new Put(row1);
+    put.add(CONTENTS_FAMILY, null, value);
+    table.put(put);
+
+    put = new Put(row2);
+    put.add(CONTENTS_FAMILY, null, value);
+    
+    assertEquals(put.size(), 1);
+    assertEquals(put.getFamilyMap().get(CONTENTS_FAMILY).size(), 1);
+    
+    KeyValue kv = put.getFamilyMap().get(CONTENTS_FAMILY).get(0);
+    
+    assertTrue(Bytes.equals(kv.getFamily(), CONTENTS_FAMILY));
+    // will it return null or an empty byte array?
+    assertTrue(Bytes.equals(kv.getQualifier(), new byte[0]));
+    
+    assertTrue(Bytes.equals(kv.getValue(), value));
+    
+    table.put(put);
+
+    Scan scan = new Scan();
+    scan.addColumn(CONTENTS_FAMILY, null);
+    ResultScanner scanner = table.getScanner(scan);
+    for (Result r : scanner) {
+      for(KeyValue key : r.sorted()) {
+        System.out.println(Bytes.toString(r.getRow()) + ": " + key.toString());
+      }
+    }
+  }
+  
+  public void testRowsPut() {
+    ArrayList<Put> rowsUpdate = new ArrayList<Put>();
+    for(int i = 0; i < NB_BATCH_ROWS; i++) {
+      byte [] row = Bytes.toBytes("row" + i);
+      Put put = new Put(row);
+      put.add(CONTENTS_FAMILY, null, value);
+      rowsUpdate.add(put);
+    }
+    try {
+      table.put(rowsUpdate);  
+    
+      Scan scan = new Scan();
+      scan.addFamily(CONTENTS_FAMILY);
+      ResultScanner scanner = table.getScanner(scan);
+      int nbRows = 0;
+      for(@SuppressWarnings("unused") Result row : scanner)
+        nbRows++;
+      assertEquals(NB_BATCH_ROWS, nbRows);
+    } catch (IOException e) {
+      fail("This is unexpected : " + e);
+    }
+  }
+  
+  public void testRowsPutBufferedOneFlush() {
+    table.setAutoFlush(false);
+    ArrayList<Put> rowsUpdate = new ArrayList<Put>();
+    for(int i = 0; i < NB_BATCH_ROWS*10; i++) {
+      byte [] row = Bytes.toBytes("row" + i);
+      Put put = new Put(row);
+      put.add(CONTENTS_FAMILY, null, value);
+      rowsUpdate.add(put);
+    }
+    try {
+      table.put(rowsUpdate);  
+    
+      Scan scan = new Scan();
+      scan.addFamily(CONTENTS_FAMILY);
+      ResultScanner scanner = table.getScanner(scan);
+      int nbRows = 0;
+      for(@SuppressWarnings("unused") Result row : scanner)
+        nbRows++;
+      assertEquals(0, nbRows);  
+      scanner.close();
+      
+      table.flushCommits();
+      
+      scan = new Scan();
+      scan.addFamily(CONTENTS_FAMILY);
+      scanner = table.getScanner(scan);
+      nbRows = 0;
+      for(@SuppressWarnings("unused") Result row : scanner)
+        nbRows++;
+      assertEquals(NB_BATCH_ROWS*10, nbRows);
+    } catch (IOException e) {
+      fail("This is unexpected : " + e);
+    }
+  }
+  
+  public void testRowsPutBufferedManyManyFlushes() {
+    table.setAutoFlush(false);
+    table.setWriteBufferSize(10);
+    ArrayList<Put> rowsUpdate = new ArrayList<Put>();
+    for(int i = 0; i < NB_BATCH_ROWS*10; i++) {
+      byte [] row = Bytes.toBytes("row" + i);
+      Put put = new Put(row);
+      put.add(CONTENTS_FAMILY, null, value);
+      rowsUpdate.add(put);
+    }
+    try {
+      table.put(rowsUpdate);
+      
+      table.flushCommits();
+      
+      Scan scan = new Scan();
+      scan.addFamily(CONTENTS_FAMILY);
+      ResultScanner scanner = table.getScanner(scan);
+      int nbRows = 0;
+      for(@SuppressWarnings("unused") Result row : scanner)
+        nbRows++;
+      assertEquals(NB_BATCH_ROWS*10, nbRows);
+    } catch (IOException e) {
+      fail("This is unexpected : " + e);
+    }
+  }
+  
+  
+}

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java Sat Jun  6 01:26:21 2009
@@ -26,9 +26,11 @@
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Test that verifies that scanners return a different timestamp for values that
@@ -36,9 +38,9 @@
  */
 public class TestScannerTimes extends HBaseClusterTestCase {
   private static final String TABLE_NAME = "hbase737";
-  private static final String FAM1 = "fam1:";
-  private static final String FAM2 = "fam2:";
-  private static final String ROW = "row";
+  private static final byte [] FAM1 = Bytes.toBytes("fam1");
+  private static final byte [] FAM2 = Bytes.toBytes("fam2");
+  private static final byte [] ROW = Bytes.toBytes("row");
   
   /**
    * test for HBASE-737
@@ -57,9 +59,9 @@
     HTable table = new HTable(conf, TABLE_NAME);
     
     // Insert some values
-    BatchUpdate b = new BatchUpdate(ROW);
-    b.put(FAM1 + "letters", "abcdefg".getBytes(HConstants.UTF8_ENCODING));
-    table.commit(b);
+    Put put = new Put(ROW);
+    put.add(FAM1, Bytes.toBytes("letters"), Bytes.toBytes("abcdefg"));
+    table.put(put);
     
     try {
       Thread.sleep(1000);
@@ -67,35 +69,34 @@
       //ignore
     }
     
-    b = new BatchUpdate(ROW);
-    b.put(FAM1 + "numbers", "123456".getBytes(HConstants.UTF8_ENCODING));
-    table.commit(b);
+    put = new Put(ROW);
+    put.add(FAM1, Bytes.toBytes("numbers"), Bytes.toBytes("123456"));
+    table.put(put);
     
     try {
       Thread.sleep(1000);
     } catch (InterruptedException i) {
       //ignore
     }
-    
-    b = new BatchUpdate(ROW);
-    b.put(FAM2 + "letters", "hijklmnop".getBytes(HConstants.UTF8_ENCODING));
-    table.commit(b);
+
+    put = new Put(ROW);
+    put.add(FAM2, Bytes.toBytes("letters"), Bytes.toBytes("hijklmnop"));
+    table.put(put);
     
     long times[] = new long[3];
-    byte[][] columns = new byte[][] {
-        FAM1.getBytes(HConstants.UTF8_ENCODING),
-        FAM2.getBytes(HConstants.UTF8_ENCODING)
-    };
     
     // First scan the memcache
     
-    Scanner s = table.getScanner(columns);
+    Scan scan = new Scan();
+    scan.addFamily(FAM1);
+    scan.addFamily(FAM2);
+    ResultScanner s = table.getScanner(scan);
     try {
       int index = 0;
-      RowResult r = null;
+      Result r = null;
       while ((r = s.next()) != null) {
-        for (Cell c: r.values()) {
-          times[index++] = c.getTimestamp();
+        for(KeyValue key : r.sorted()) {
+          times[index++] = key.getTimestamp();
         }
       }
     } finally {
@@ -107,23 +108,30 @@
       }
     }
     
-    // Fush data to disk and try again
+    // Flush data to disk and try again
     
     cluster.flushcache();
     
+    // Reset times
+    for(int i=0;i<times.length;i++) {
+      times[i] = 0;
+    }
+    
     try {
       Thread.sleep(1000);
     } catch (InterruptedException i) {
       //ignore
     }
-    
-    s = table.getScanner(columns);
+    scan = new Scan();
+    scan.addFamily(FAM1);
+    scan.addFamily(FAM2);
+    s = table.getScanner(scan);
     try {
       int index = 0;
-      RowResult r = null;
+      Result r = null;
       while ((r = s.next()) != null) {
-        for (Cell c: r.values()) {
-          times[index++] = c.getTimestamp();
+        for(KeyValue key : r.sorted()) {
+          times[index++] = key.getTimestamp();
         }
       }
     } finally {
@@ -134,6 +142,5 @@
         assertTrue(times[j] > times[i]);
       }
     }
-    
   }
 }

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterOnMultipleFamilies.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterOnMultipleFamilies.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterOnMultipleFamilies.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterOnMultipleFamilies.java Sat Jun  6 01:26:21 2009
@@ -32,9 +32,13 @@
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
@@ -71,10 +75,14 @@
     HTable table = new HTable(conf, TABLE_NAME);
 
     for (int i = 0; i < NUM_ROWS; i++) {
-      BatchUpdate b = new BatchUpdate("row_" + String.format("%1$05d", i));
-      b.put(TEXT_COLUMN1, VALUE);
-      b.put(TEXT_COLUMN2, String.format("%1$05d", i).getBytes());
-      table.commit(b);
+      Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
+      byte [][] famAndQf = KeyValue.parseColumn(TEXT_COLUMN1);
+      put.add(famAndQf[0], famAndQf[1], VALUE);
+      
+      famAndQf = KeyValue.parseColumn(TEXT_COLUMN2);
+      put.add(famAndQf[0], famAndQf[1], Bytes.toBytes(String.format("%1$05d", i)));
+      
+      table.put(put);
     }
 
     LOG.info("Print table contents using scanner before map/reduce for " + TABLE_NAME);
@@ -85,7 +93,9 @@
 
   private void scanTable(final String tableName, final boolean printValues) throws IOException {
     HTable table = new HTable(conf, tableName);
-    Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
+    Scan scan = new Scan();
+    scan.addColumns(columns);
+    ResultScanner scanner = table.getScanner(scan);
     int numFound = doScan(scanner, printValues);
     Assert.assertEquals(NUM_ROWS, numFound);
   }
@@ -96,21 +106,24 @@
     columnMap.put(TEXT_COLUMN1,
         new Cell(VALUE, HConstants.LATEST_TIMESTAMP));
     RegExpRowFilter filter = new RegExpRowFilter(null, columnMap);
-    Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW, filter);
+    Scan scan = new Scan();
+    scan.addColumns(columns);
+//    scan.setFilter(filter);
+    ResultScanner scanner = table.getScanner(scan);
     int numFound = doScan(scanner, printValues);
     Assert.assertEquals(NUM_ROWS, numFound);
   }
 
-  private int doScan(final Scanner scanner, final boolean printValues) throws IOException {
+  private int doScan(final ResultScanner scanner, final boolean printValues) throws IOException {
     {
       int count = 0;
 
       try {
-        for (RowResult result : scanner) {
+        for (Result result : scanner) {
           if (printValues) {
             LOG.info("row: " + Bytes.toString(result.getRow()));
 
-            for (Map.Entry<byte [], Cell> e : result.entrySet()) {
+            for (Map.Entry<byte [], Cell> e : result.getRowResult().entrySet()) {
               LOG.info(" column: " + e.getKey() + " value: "
                   + new String(e.getValue().getValue(), HConstants.UTF8_ENCODING));
             }
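
Note that RegExpRowFilter implements the old RowFilterInterface, which is why the scan.setFilter(filter) call above stays commented out. Assuming Scan exposes a setFilter for the new org.apache.hadoop.hbase.filter.Filter interface, as that commented-out line suggests, a minimal sketch of attaching a new-style filter looks like the following, using the PageFilter exercised by the test added below as a stand-in; the table and family names are illustrative.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilteredScanSketch {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(new HBaseConfiguration(), "testtable");
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("text"));
    // New-style filters implement org.apache.hadoop.hbase.filter.Filter and
    // are handed directly to the Scan; here a PageFilter caps the scan at
    // ten rows.
    scan.setFilter(new PageFilter(10));
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result result : scanner) {
        System.out.println(Bytes.toString(result.getRow()));
      }
    } finally {
      scanner.close();
    }
  }
}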

Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestPageFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestPageFilter.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestPageFilter.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestPageFilter.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,98 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.filter;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+import junit.framework.TestCase;
+
+/**
+ * Tests for the page filter
+ */
+public class TestPageFilter extends TestCase {
+  static final int ROW_LIMIT = 3;
+
+  /**
+   * test page size filter
+   * @throws Exception
+   */
+  public void testPageSize() throws Exception {
+    Filter f = new PageFilter(ROW_LIMIT);
+    pageSizeTests(f);
+  }
+  
+  /**
+   * Test filter serialization
+   * @throws Exception
+   */
+  public void testSerialization() throws Exception {
+    Filter f = new PageFilter(ROW_LIMIT);
+    // Decompose mainFilter to bytes.
+    ByteArrayOutputStream stream = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(stream);
+    f.write(out);
+    out.close();
+    byte[] buffer = stream.toByteArray();
+    // Recompose mainFilter.
+    DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer));
+    Filter newFilter = new PageFilter();
+    newFilter.readFields(in);
+    
+    // Ensure the serialization preserved the filter by running a full test.
+    pageSizeTests(newFilter);
+  }
+  
+  private void pageSizeTests(Filter f) throws Exception {
+    testFiltersBeyondPageSize(f, ROW_LIMIT);
+    // Test reset works by going in again.
+    f.reset();
+    testFiltersBeyondPageSize(f, ROW_LIMIT);
+  }
+  
+  private void testFiltersBeyondPageSize(final Filter f, final int pageSize) {
+    int count = 0;
+    for (int i = 0; i < (pageSize * 2); i++) {
+      byte [] bytes = Bytes.toBytes(Integer.toString(i) + ":tail");
+      KeyValue kv = new KeyValue(bytes, bytes);
+      boolean filterOut =
+        f.filterRowKey(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength());
+      if (!filterOut) {
+        assertFalse("Disagrees with 'filter'", f.filterAllRemaining());
+      } else {
+        // Once we have all for a page, calls to filterAllRemaining should
+        // stay true.
+        assertTrue("Disagrees with 'filter'", f.filterAllRemaining());
+        assertTrue(i >= pageSize);
+      }
+      count++;
+      if (Filter.ReturnCode.NEXT_ROW == f.filterKeyValue(kv)) {
+        break;
+      }
+    }
+    assertEquals(pageSize, count);
+  }
+}

Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowPrefixFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowPrefixFilter.java?rev=782178&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowPrefixFilter.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowPrefixFilter.java Sat Jun  6 01:26:21 2009
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.filter;
+
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.UnsupportedEncodingException;
+
+public class TestRowPrefixFilter extends TestCase {
+  Filter mainFilter;
+  static final char FIRST_CHAR = 'a';
+  static final char LAST_CHAR = 'e';
+  static final String HOST_PREFIX = "org.apache.site-";
+  static byte [] GOOD_BYTES = null;
+
+  static {
+    try {
+      GOOD_BYTES = "abc".getBytes(HConstants.UTF8_ENCODING);
+    } catch (UnsupportedEncodingException e) {
+      fail();
+    }
+  }
+
+  protected void setUp() throws Exception {
+    super.setUp();
+    this.mainFilter = new RowPrefixFilter(Bytes.toBytes(HOST_PREFIX));
+  }
+
+  public void testPrefixOnRow() throws Exception {
+    prefixRowTests(mainFilter);
+  }
+
+  public void testSerialization() throws Exception {
+    // Decompose mainFilter to bytes.
+    ByteArrayOutputStream stream = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(stream);
+    mainFilter.write(out);
+    out.close();
+    byte[] buffer = stream.toByteArray();
+
+    // Recompose filter.
+    DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer));
+    Filter newFilter = new RowPrefixFilter();
+    newFilter.readFields(in);
+
+    // Ensure the serialization preserved the filter by running all test.
+    prefixRowTests(newFilter);
+  }
+
+  private void prefixRowTests(Filter filter) throws Exception {
+    for (char c = FIRST_CHAR; c <= LAST_CHAR; c++) {
+      byte [] t = createRow(c);
+      assertFalse("Failed with characer " + c, filter.filterRowKey(t, 0, t.length));
+    }
+    String yahooSite = "com.yahoo.www";
+    byte [] yahooSiteBytes = Bytes.toBytes(yahooSite);
+    assertTrue("Failed with character " +
+      yahooSite, filter.filterRowKey(yahooSiteBytes, 0, yahooSiteBytes.length));
+  }
+
+  private byte [] createRow(final char c) {
+    return Bytes.toBytes(HOST_PREFIX + Character.toString(c));
+  }
+
+
+
+}

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java Sat Jun  6 01:26:21 2009
@@ -95,4 +95,4 @@
     dis.close();
     return product;
   }
-}
+}
\ No newline at end of file


