hbase-commits mailing list archives

From: apurt...@apache.org
Subject: svn commit: r784618 [9/11] - in /hadoop/hbase/trunk_on_hadoop-0.18.3/src: java/ java/org/apache/hadoop/hbase/ java/org/apache/hadoop/hbase/client/ java/org/apache/hadoop/hbase/client/tableindexed/ java/org/apache/hadoop/hbase/filter/ java/org/apache/ha...
Date: Sun, 14 Jun 2009 21:34:19 GMT
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TimestampTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TimestampTestBase.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TimestampTestBase.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TimestampTestBase.java Sun Jun 14 21:34:13 2009
@@ -19,13 +19,8 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -39,7 +34,7 @@
   private static final long T1 = 100L;
   private static final long T2 = 200L;
   
-  private static final String COLUMN_NAME = "contents:contents";
+  private static final String COLUMN_NAME = "contents:";
   
   private static final byte [] COLUMN = Bytes.toBytes(COLUMN_NAME);
   private static final byte [] ROW = Bytes.toBytes("row");
@@ -60,7 +55,6 @@
     put(incommon);
     // Verify that returned versions match passed timestamps.
     assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
-
     // If I delete w/o specifying a timestamp, this means I'm deleting the
     // latest.
     delete(incommon);
@@ -80,23 +74,14 @@
     // Flush everything out to disk and then retry
     flusher.flushcache();
     assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});
-
+    
     // Now try deleting all from T2 back inclusive (We first need to add T2
     // back into the mix and to make things a little interesting, delete and
     // then readd T1.
     put(incommon, T2);
     delete(incommon, T1);
     put(incommon, T1);
-
-    Delete delete = new Delete(ROW);
-    byte [][] famAndQf = KeyValue.parseColumn(COLUMN);
-    if (famAndQf[1].length == 0){
-      delete.deleteFamily(famAndQf[0], T2);
-    } else {
-      delete.deleteColumns(famAndQf[0], famAndQf[1], T2);
-    }
-    incommon.delete(delete, null, true);
- 
+    incommon.deleteAll(ROW, COLUMN, T2);
     // Should only be current value in set.  Assert this is so
     assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP);
     
@@ -108,16 +93,12 @@
   private static void assertOnlyLatest(final Incommon incommon,
     final long currentTime)
   throws IOException {
-    Get get = null;
-    byte [][] famAndQf = null;
-    get = new Get(ROW);
-    famAndQf = KeyValue.parseColumn(COLUMN);
-    get.addColumn(famAndQf[0], famAndQf[1]);
-    get.setMaxVersions(3);
-    Result result = incommon.get(get);
-    assertEquals(1, result.size());
-    long time = Bytes.toLong(result.sorted()[0].getValue());
+    Cell [] cellValues = incommon.get(ROW, COLUMN, 3/*Ask for too much*/);
+    assertEquals(1, cellValues.length);
+    long time = Bytes.toLong(cellValues[0].getValue());
     assertEquals(time, currentTime);
+    assertNull(incommon.get(ROW, COLUMN, T1, 3 /*Too many*/));
+    assertTrue(assertScanContentTimestamp(incommon, T1) == 0);
   }
   
   /*
@@ -131,49 +112,22 @@
   public static void assertVersions(final Incommon incommon, final long [] tss)
   throws IOException {
     // Assert that 'latest' is what we expect.
-    Get get = null;
-    byte [][] famAndQf = null;
-    get = new Get(ROW);
-    famAndQf = KeyValue.parseColumn(COLUMN);
-    get.addColumn(famAndQf[0], famAndQf[1]);
-    Result r = incommon.get(get);
-    byte [] bytes = r.getValue(famAndQf[0], famAndQf[1]);
-    long t = Bytes.toLong(bytes);
-    assertEquals(tss[0], t);
-
+    byte [] bytes = incommon.get(ROW, COLUMN).getValue();
+    assertEquals(Bytes.toLong(bytes), tss[0]);
     // Now assert that if we ask for multiple versions, that they come out in
     // order.
-    get = new Get(ROW);
-    famAndQf = KeyValue.parseColumn(COLUMN);
-    get.addColumn(famAndQf[0], famAndQf[1]);
-    get.setMaxVersions(tss.length);
-    Result result = incommon.get(get);
-    List<Cell> cells = new ArrayList<Cell>();
-    for(KeyValue kv : result.sorted()) {
-      cells.add(new Cell(kv.getValue(), kv.getTimestamp()));
-    }
-    assertEquals(tss.length, cells.size());
-    for (int i = 0; i < cells.size(); i++) {
-      long ts = Bytes.toLong(cells.get(i).getValue());
+    Cell[] cellValues = incommon.get(ROW, COLUMN, tss.length);
+    assertEquals(tss.length, cellValues.length);
+    for (int i = 0; i < cellValues.length; i++) {
+      long ts = Bytes.toLong(cellValues[i].getValue());
       assertEquals(ts, tss[i]);
     }
-    
     // Specify a timestamp get multiple versions.
-    get = new Get(ROW);
-    famAndQf = KeyValue.parseColumn(COLUMN);
-    get.addColumn(famAndQf[0], famAndQf[1]);
-    get.setTimeStamp(tss[0]);
-    get.setMaxVersions(cells.size() - 1);
-    result = incommon.get(get);
-    cells = new ArrayList<Cell>();
-    for(KeyValue kv : result.sorted()) {
-      cells.add(new Cell(kv.getValue(), kv.getTimestamp()));
-    }
-    for (int i = 1; i < cells.size(); i++) {
-      long ts = Bytes.toLong(cells.get(i).getValue());
+    cellValues = incommon.get(ROW, COLUMN, tss[0], cellValues.length - 1);
+    for (int i = 1; i < cellValues.length; i++) {
+      long ts = Bytes.toLong(cellValues[i].getValue());
       assertEquals(ts, tss[i]);
     }
-    
     // Test scanner returns expected version
     assertScanContentTimestamp(incommon, tss[0]);
   }
@@ -257,44 +211,20 @@
   public static void put(final Incommon loader, final byte [] bytes,
     final long ts)
   throws IOException {
-    Put put = new Put(ROW);
-    if(ts != HConstants.LATEST_TIMESTAMP) {
-      put.setTimeStamp(ts);
-    }
-    byte [][] famAndQf = KeyValue.parseColumn(COLUMN);
-    put.add(famAndQf[0], famAndQf[1], bytes);
-    loader.put(put);
+    BatchUpdate batchUpdate = ts == HConstants.LATEST_TIMESTAMP ? 
+      new BatchUpdate(ROW) : new BatchUpdate(ROW, ts);
+    batchUpdate.put(COLUMN, bytes);
+    loader.commit(batchUpdate);
   }
   
   public static void delete(final Incommon loader) throws IOException {
-    delete(loader, null);
-  }
-
-  public static void delete(final Incommon loader, final byte [] column)
-  throws IOException {
-    delete(loader, column, HConstants.LATEST_TIMESTAMP);
-  }
-
-  public static void delete(final Incommon loader, final long ts)
-  throws IOException {
-    delete(loader, null, ts);
-  }
-
-  public static void delete(final Incommon loader, final byte [] column,
-      final long ts)
-  throws IOException {
-    Delete delete = ts == HConstants.LATEST_TIMESTAMP?
-      new Delete(ROW): new Delete(ROW, ts, null);
-    byte [][] famAndQf = KeyValue.parseColumn(column == null? COLUMN: column);
-    if (famAndQf[1].length == 0) {
-      delete.deleteFamily(famAndQf[0], ts);
-    } else {
-      delete.deleteColumn(famAndQf[0], famAndQf[1], ts);
-    }
-    loader.delete(delete, null, true);
+    delete(loader, HConstants.LATEST_TIMESTAMP);
   }
 
-  public static Result get(final Incommon loader) throws IOException {
-    return loader.get(new Get(ROW));
+  public static void delete(final Incommon loader, final long ts) throws IOException {
+    BatchUpdate batchUpdate = ts == HConstants.LATEST_TIMESTAMP ? 
+      new BatchUpdate(ROW) : new BatchUpdate(ROW, ts);
+    batchUpdate.delete(COLUMN);
+    loader.commit(batchUpdate);  
   }
-}
\ No newline at end of file
+}
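
[Editor's note] The TimestampTestBase change above reverts the test from the 0.20-style Put/Delete/Get client objects back to the pre-0.20 BatchUpdate API, where the timestamp is fixed for the whole batch at construction time rather than set per mutation. A minimal sketch of that pattern, assuming the pre-0.20 client classes exercised in this diff (HTable, BatchUpdate); the table name "mytable" and the standalone main() scaffolding are illustrative only:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.io.BatchUpdate;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TimestampedUpdateSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(new HBaseConfiguration(), "mytable"); // illustrative table name
        byte[] column = Bytes.toBytes("contents:");                     // family with empty qualifier, as in the test
        long ts = 100L;

        // Write a cell at an explicit timestamp; with LATEST_TIMESTAMP no
        // timestamp is passed and the server's current time is used.
        BatchUpdate update = (ts == HConstants.LATEST_TIMESTAMP)
            ? new BatchUpdate("row") : new BatchUpdate("row", ts);
        update.put(column, Bytes.toBytes(ts));
        table.commit(update);

        // Deletes ride in a BatchUpdate too: mark the column deleted and commit.
        BatchUpdate del = new BatchUpdate("row", ts);
        del.delete(column);
        table.commit(del);
      }
    }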

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java Sun Jun 14 21:34:13 2009
@@ -75,6 +75,85 @@
     table = new HTable(conf, desc.getName());
   }
 
+  /**
+   * @throws IOException
+   */
+  public void testBatchUpdate() throws IOException {
+    BatchUpdate bu = new BatchUpdate("row1");
+    bu.put(CONTENTS, value);
+    bu.delete(CONTENTS);
+    table.commit(bu);
+
+    bu = new BatchUpdate("row2");
+    bu.put(CONTENTS, value);
+    byte[][] getColumns = bu.getColumns();
+    assertEquals(getColumns.length, 1);
+    assertTrue(Arrays.equals(getColumns[0], CONTENTS));
+    assertTrue(bu.hasColumn(CONTENTS));
+    assertFalse(bu.hasColumn(new byte[] {}));
+    byte[] getValue = bu.get(getColumns[0]);
+    assertTrue(Arrays.equals(getValue, value));
+    table.commit(bu);
+
+    byte [][] columns = { CONTENTS };
+    Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
+    for (RowResult r : scanner) {
+      for(Map.Entry<byte [], Cell> e: r.entrySet()) {
+        System.out.println(Bytes.toString(r.getRow()) + ": row: " + e.getKey() + " value: " + 
+            new String(e.getValue().getValue(), HConstants.UTF8_ENCODING));
+      }
+    }
+  }
+  
+  public void testBatchUpdateMaxLength() {
+    // Test for a single good value
+    BatchUpdate batchUpdate = new BatchUpdate("row1");
+    batchUpdate.put(SMALLFAM, value);
+    try {
+      table.commit(batchUpdate);
+      fail("Value is too long, should throw exception");
+    } catch (IOException e) {
+      // This is expected
+    }
+    // Try to see if it's still inserted
+    try {
+      Cell cell = table.get("row1", SMALLFAM_STR);
+      assertNull(cell);
+    } catch (IOException e) {
+      e.printStackTrace();
+      fail("This is unexpected");
+    }
+    // Try to put a good value
+    batchUpdate = new BatchUpdate("row1");
+    batchUpdate.put(SMALLFAM, smallValue);
+    try {
+      table.commit(batchUpdate);
+    } catch (IOException e) {
+      fail("Value is long enough, should not throw exception");
+    }
+  }
+  
+  public void testRowsBatchUpdate() {
+    ArrayList<BatchUpdate> rowsUpdate = new ArrayList<BatchUpdate>();
+    for(int i = 0; i < NB_BATCH_ROWS; i++) {
+      BatchUpdate batchUpdate = new BatchUpdate("row"+i);
+      batchUpdate.put(CONTENTS, value);
+      rowsUpdate.add(batchUpdate);
+    }
+    try {
+      table.commit(rowsUpdate);  
+    
+      byte [][] columns = { CONTENTS };
+      Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
+      int nbRows = 0;
+      for(@SuppressWarnings("unused") RowResult row : scanner)
+        nbRows++;
+      assertEquals(NB_BATCH_ROWS, nbRows);
+    } catch (IOException e) {
+      fail("This is unexpected : " + e);
+    }
+  }
+  
   public void testRowsBatchUpdateBufferedOneFlush() {
     table.setAutoFlush(false);
     ArrayList<BatchUpdate> rowsUpdate = new ArrayList<BatchUpdate>();
@@ -89,15 +168,17 @@
       byte [][] columns = { CONTENTS };
       Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
       int nbRows = 0;
-      for(@SuppressWarnings("unused") RowResult row : scanner) nbRows++;
+      for(@SuppressWarnings("unused") RowResult row : scanner)
+        nbRows++;
       assertEquals(0, nbRows);  
       scanner.close();
-
+      
       table.flushCommits();
       
       scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
       nbRows = 0;
-      for(@SuppressWarnings("unused") RowResult row : scanner) nbRows++;
+      for(@SuppressWarnings("unused") RowResult row : scanner)
+        nbRows++;
       assertEquals(NB_BATCH_ROWS*10, nbRows);
     } catch (IOException e) {
       fail("This is unexpected : " + e);
@@ -128,55 +209,6 @@
       fail("This is unexpected : " + e);
     }
   }
-
-  /**
-   * @throws IOException
-   */
-  public void testBatchUpdate() throws IOException {
-    BatchUpdate bu = new BatchUpdate("row1");
-    bu.put(CONTENTS, value);
-    // Can't do this in 0.20.0 mix and match put and delete -- bu.delete(CONTENTS);
-    table.commit(bu);
-
-    bu = new BatchUpdate("row2");
-    bu.put(CONTENTS, value);
-    byte[][] getColumns = bu.getColumns();
-    assertEquals(getColumns.length, 1);
-    assertTrue(Arrays.equals(getColumns[0], CONTENTS));
-    assertTrue(bu.hasColumn(CONTENTS));
-    assertFalse(bu.hasColumn(new byte[] {}));
-    byte[] getValue = bu.get(getColumns[0]);
-    assertTrue(Arrays.equals(getValue, value));
-    table.commit(bu);
-
-    byte [][] columns = { CONTENTS };
-    Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
-    for (RowResult r : scanner) {
-      for(Map.Entry<byte [], Cell> e: r.entrySet()) {
-        System.out.println(Bytes.toString(r.getRow()) + ": row: " + e.getKey() + " value: " + 
-            new String(e.getValue().getValue(), HConstants.UTF8_ENCODING));
-      }
-    }
-  }
   
-  public void testRowsBatchUpdate() {
-    ArrayList<BatchUpdate> rowsUpdate = new ArrayList<BatchUpdate>();
-    for(int i = 0; i < NB_BATCH_ROWS; i++) {
-      BatchUpdate batchUpdate = new BatchUpdate("row"+i);
-      batchUpdate.put(CONTENTS, value);
-      rowsUpdate.add(batchUpdate);
-    }
-    try {
-      table.commit(rowsUpdate);  
-    
-      byte [][] columns = { CONTENTS };
-      Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
-      int nbRows = 0;
-      for(@SuppressWarnings("unused") RowResult row : scanner)
-        nbRows++;
-      assertEquals(NB_BATCH_ROWS, nbRows);
-    } catch (IOException e) {
-      fail("This is unexpected : " + e);
-    }
-  }
+  
 }
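
[Editor's note] The re-added testBatchUpdate and testRowsBatchUpdate above commit a whole list of single-row BatchUpdates in one call and then count rows with the pre-0.20 Scanner API. A sketch of that batch-commit-then-scan pattern, assuming the same client classes; the table and column names are illustrative:

    import java.util.ArrayList;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Scanner;
    import org.apache.hadoop.hbase.io.BatchUpdate;
    import org.apache.hadoop.hbase.io.RowResult;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowsBatchUpdateSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(new HBaseConfiguration(), "mytable"); // illustrative
        byte[] column = Bytes.toBytes("contents:");                     // illustrative

        // Queue one BatchUpdate per row, then commit them all at once.
        ArrayList<BatchUpdate> updates = new ArrayList<BatchUpdate>();
        for (int i = 0; i < 10; i++) {
          BatchUpdate bu = new BatchUpdate("row" + i);
          bu.put(column, Bytes.toBytes(i));
          updates.add(bu);
        }
        table.commit(updates);

        // Scan the rows back; Scanner is iterable over RowResult.
        Scanner scanner = table.getScanner(new byte[][] { column }, HConstants.EMPTY_START_ROW);
        int rows = 0;
        try {
          for (RowResult r : scanner) {
            rows++;
          }
        } finally {
          scanner.close();
        }
        System.out.println("rows = " + rows);
      }
    }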

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java Sun Jun 14 21:34:13 2009
@@ -27,7 +27,7 @@
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -62,10 +62,9 @@
           k[0] = b1;
           k[1] = b2;
           k[2] = b3;
-          Put put = new Put(k);
-          byte [][] famAndQf = KeyValue.parseColumn(columnName);
-          put.add(famAndQf[0], famAndQf[1], k);
-          table.put(put);
+          BatchUpdate update = new BatchUpdate(k);
+          update.put(columnName, k);
+          table.commit(update);
         }
       }
     }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestGetRowVersions.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestGetRowVersions.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestGetRowVersions.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestGetRowVersions.java Sun Jun 14 21:34:13 2009
@@ -20,13 +20,18 @@
 
 package org.apache.hadoop.hbase.client;
 
-import java.util.NavigableMap;
+import java.util.Iterator;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -34,14 +39,13 @@
  */
 public class TestGetRowVersions extends HBaseClusterTestCase {
   private static final Log LOG = LogFactory.getLog(TestGetRowVersions.class);
-  
   private static final String TABLE_NAME = "test";
-  private static final byte [] CONTENTS = Bytes.toBytes("contents");
-  private static final byte [] ROW = Bytes.toBytes("row");
-  private static final byte [] VALUE1 = Bytes.toBytes("value1");
-  private static final byte [] VALUE2 = Bytes.toBytes("value2");
-  private static final long TIMESTAMP1 = 100L;
-  private static final long TIMESTAMP2 = 200L;
+  private static final String CONTENTS_STR = "contents:";
+  private static final String ROW = "row";
+  private static final String COLUMN = "contents:contents";
+  private static final long TIMESTAMP = System.currentTimeMillis();
+  private static final String VALUE1 = "value1";
+  private static final String VALUE2 = "value2";
   private HBaseAdmin admin = null;
   private HTable table = null;
 
@@ -49,7 +53,7 @@
   public void setUp() throws Exception {
     super.setUp();
     HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
-    desc.addFamily(new HColumnDescriptor(CONTENTS));
+    desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
     this.admin = new HBaseAdmin(conf);
     this.admin.createTable(desc);
     this.table = new HTable(conf, TABLE_NAME);
@@ -57,10 +61,9 @@
 
   /** @throws Exception */
   public void testGetRowMultipleVersions() throws Exception {
-    Put put = new Put(ROW);
-    put.setTimeStamp(TIMESTAMP1);
-    put.add(CONTENTS, CONTENTS, VALUE1);
-    this.table.put(put);
+    BatchUpdate b = new BatchUpdate(ROW, TIMESTAMP);
+    b.put(COLUMN, Bytes.toBytes(VALUE1));
+    this.table.commit(b);
     // Shut down and restart the HBase cluster
     this.cluster.shutdown();
     this.zooKeeperCluster.shutdown();
@@ -69,35 +72,33 @@
     // Make a new connection
     this.table = new HTable(conf, TABLE_NAME);
     // Overwrite previous value
-    put = new Put(ROW);
-    put.setTimeStamp(TIMESTAMP2);
-    put.add(CONTENTS, CONTENTS, VALUE2);
-    this.table.put(put);
+    b = new BatchUpdate(ROW, TIMESTAMP);
+    b.put(COLUMN, Bytes.toBytes(VALUE2));
+    this.table.commit(b);
     // Now verify that getRow(row, column, latest) works
-    Get get = new Get(ROW);
-    // Should get one version by default
-    Result r = table.get(get);
+    RowResult r = table.getRow(ROW);
     assertNotNull(r);
-    assertFalse(r.isEmpty());
-    assertTrue(r.size() == 1);
-    byte [] value = r.getValue(CONTENTS, CONTENTS);
-    assertTrue(value.length != 0);
-    assertTrue(Bytes.equals(value, VALUE2));
+    assertTrue(r.size() != 0);
+    Cell c = r.get(COLUMN);
+    assertNotNull(c);
+    assertTrue(c.getValue().length != 0);
+    String value = Bytes.toString(c.getValue());
+    assertTrue(value.compareTo(VALUE2) == 0);
     // Now check getRow with multiple versions
-    get = new Get(ROW);
-    get.setMaxVersions();
-    r = table.get(get);
-    assertTrue(r.size() == 2);
-    value = r.getValue(CONTENTS, CONTENTS);
-    assertTrue(value.length != 0);
-    assertTrue(Bytes.equals(value, VALUE2));
-    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map =
-      r.getMap();
-    NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap = 
-      map.get(CONTENTS);
-    NavigableMap<Long, byte[]> versionMap = familyMap.get(CONTENTS);
-    assertTrue(versionMap.size() == 2);
-    assertTrue(Bytes.equals(VALUE1, versionMap.get(TIMESTAMP1)));
-    assertTrue(Bytes.equals(VALUE2, versionMap.get(TIMESTAMP2)));
+    r = table.getRow(ROW, HConstants.ALL_VERSIONS);
+    for (Map.Entry<byte[], Cell> e: r.entrySet()) {
+      // Column name
+//      System.err.print("  " + Bytes.toString(e.getKey()));
+      c = e.getValue();
+      
+      // Need to iterate since there may be multiple versions
+      for (Iterator<Map.Entry<Long, byte[]>> it = c.iterator();
+            it.hasNext(); ) {
+        Map.Entry<Long, byte[]> v = it.next();
+        value = Bytes.toString(v.getValue());
+//        System.err.println(" = " + value);
+        assertTrue(VALUE2.compareTo(Bytes.toString(v.getValue())) == 0);
+      }
+    }
   }
 }
\ No newline at end of file
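
[Editor's note] TestGetRowVersions above goes back to reading versions through RowResult and Cell: getRow(row) returns the newest cell per column, while getRow(row, HConstants.ALL_VERSIONS) returns cells whose (timestamp, value) entries can be iterated. A sketch of that read path, assuming the same pre-0.20 classes; the table name, row key, and column are illustrative:

    import java.util.Iterator;
    import java.util.Map;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.hbase.io.RowResult;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowVersionsSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(new HBaseConfiguration(), "test"); // illustrative
        byte[] column = Bytes.toBytes("contents:contents");          // illustrative

        // Latest version only: one Cell per returned column.
        RowResult latest = table.getRow("row");
        Cell c = latest.get(column);
        if (c != null) {
          System.out.println("latest = " + Bytes.toString(c.getValue()));
        }

        // All stored versions: each Cell iterates its (timestamp, value) entries.
        RowResult all = table.getRow("row", HConstants.ALL_VERSIONS);
        for (Map.Entry<byte[], Cell> e : all.entrySet()) {
          for (Iterator<Map.Entry<Long, byte[]>> it = e.getValue().iterator(); it.hasNext();) {
            Map.Entry<Long, byte[]> version = it.next();
            System.out.println(Bytes.toString(e.getKey()) + " @ " + version.getKey()
                + " = " + Bytes.toString(version.getValue()));
          }
        }
      }
    }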

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestHTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestHTable.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestHTable.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestHTable.java Sun Jun 14 21:34:13 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2007 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -20,13 +20,17 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
+import java.util.Map;
 
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -34,7 +38,7 @@
  */
 public class TestHTable extends HBaseClusterTestCase implements HConstants {
   private static final HColumnDescriptor column =
-    new HColumnDescriptor(CATALOG_FAMILY);
+    new HColumnDescriptor(COLUMN_FAMILY);
 
   private static final byte [] nosuchTable = Bytes.toBytes("nosuchTable");
   private static final byte [] tableAname = Bytes.toBytes("tableA");
@@ -46,10 +50,7 @@
   private static final byte [] attrValue = Bytes.toBytes("somevalue");
 
 
-  
-  
-  
-  public void testGet() throws IOException {
+  public void testGetRow() {
     HTable table = null;
     try {
       HColumnDescriptor column2 =
@@ -62,76 +63,42 @@
       admin.createTable(testTableADesc);
       
       table = new HTable(conf, tableAname);
-      System.out.println("Adding row to table");
-      Put put = new Put(row);
-      
-      for(int i = 0; i < 5; i++) {
-        put.add(CATALOG_FAMILY, Bytes.toBytes(Integer.toString(i)), 
-            Bytes.toBytes(i));
-      }
-      
-      table.put(put);
-      
-//      Get get = new Get(row);
-//      get.addColumn(CATALOG_FAMILY,Bytes.toBytes(2));
-//      
-//      System.out.println("Getting data from table");
-//      Result res = table.get(get);
-//      System.out.println("Got data from table");
-//      System.out.println(res);
+      BatchUpdate batchUpdate = new BatchUpdate(row);
       
+      for(int i = 0; i < 5; i++)
+        batchUpdate.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i));
       
+      table.commit(batchUpdate);
 
-//      assertTrue(table.exists(row));
-//      for(int i = 0; i < 5; i++)
-//        assertTrue(table.exists(row, Bytes.toBytes(CATALOG_FAMILY_STR + i)));
+      assertTrue(table.exists(row));
+      for(int i = 0; i < 5; i++)
+        assertTrue(table.exists(row, Bytes.toBytes(COLUMN_FAMILY_STR+i)));
 
-      Get get = null;
-      Result result = null;
+      RowResult result = null;
+      result = table.getRow(row,  new byte[][] {COLUMN_FAMILY});
+      for(int i = 0; i < 5; i++)
+        assertTrue(result.containsKey(Bytes.toBytes(COLUMN_FAMILY_STR+i)));
       
-      get = new Get(row);
-      get.addFamily(CATALOG_FAMILY);
-//      get.addColumn(CATALOG_FAMILY, Bytes.toBytes(Integer.toString(1)));
-      System.out.println("Getting row");
-      long start = System.nanoTime();
-      result = table.get(get);
-      long stop = System.nanoTime();
-      System.out.println("timer " +(stop-start));
-      System.out.println("result " +result);
+      result = table.getRow(row);
       for(int i = 0; i < 5; i++)
-        assertTrue(result.containsColumn(CATALOG_FAMILY, 
-            Bytes.toBytes(Integer.toString(i))));
+        assertTrue(result.containsKey(Bytes.toBytes(COLUMN_FAMILY_STR+i)));
 
-//      get = new Get(row);
-//      result = table.get(get);
-//      for(int i = 0; i < 5; i++)
-//        assertTrue(result.containsColumn(CATALOG_FAMILY, 
-//            Bytes.toBytes(Integer.toString(i))));
-//
-//      byte [] family = Bytes.toBytes("info2");
-//      byte [] qf = Bytes.toBytes("a");
-//      
-//      put = new Put(row);
-//      put.add(family, qf, qf);
-//      table.put(put);
-//      
-//      get = new Get(row);
-//      get.addFamily(CATALOG_FAMILY);
-//      get.addColumn(family, qf);
-//      result = table.get(get);
-//      for(int i = 0; i < 5; i++)
-//        assertTrue(result.containsColumn(CATALOG_FAMILY, 
-//            Bytes.toBytes(Integer.toString(i))));
-//      assertTrue(result.containsColumn(family, qf));
+      batchUpdate = new BatchUpdate(row);
+      batchUpdate.put("info2:a", Bytes.toBytes("a"));
+      table.commit(batchUpdate);
+      
+      result = table.getRow(row, new byte[][] { COLUMN_FAMILY,
+          Bytes.toBytes("info2:a") });
+      for(int i = 0; i < 5; i++)
+        assertTrue(result.containsKey(Bytes.toBytes(COLUMN_FAMILY_STR+i)));
+      assertTrue(result.containsKey(Bytes.toBytes("info2:a")));
     } catch (IOException e) {
       e.printStackTrace();
       fail("Should not have any exception " +
         e.getClass());
-    }    
+    }
   }
 
-  
-
   /**
    * the test
    * @throws IOException
@@ -171,9 +138,9 @@
       a.getConnection().getHTableDescriptor(tableAdesc.getName());
     assertTrue(meta.equals(tableAdesc));
     
-    Put put = new Put(row);
-    put.add(CATALOG_FAMILY, null, value);
-    a.put(put);
+    BatchUpdate batchUpdate = new BatchUpdate(row);
+    batchUpdate.put(COLUMN_FAMILY, value);
+    a.commit(batchUpdate);
     
     // open a new connection to A and a connection to b
     
@@ -182,18 +149,16 @@
 
     // copy data from A to B
     
-    Scan scan = new Scan();
-    scan.addFamily(HConstants.CATALOG_FAMILY);
-
-    ResultScanner s = newA.getScanner(scan);
+    Scanner s =
+      newA.getScanner(COLUMN_FAMILY_ARRAY, EMPTY_START_ROW);
     
     try {
-      for (Result r : s) {
-        put = new Put(r.getRow());
-        for(KeyValue kv : r.sorted()) {
-          put.add(kv);
+      for (RowResult r : s) {
+        batchUpdate = new BatchUpdate(r.getRow());
+        for(Map.Entry<byte [], Cell> e: r.entrySet()) {
+          batchUpdate.put(e.getKey(), e.getValue().getValue());
         }
-        b.put(put);
+        b.commit(batchUpdate);
       }
     } finally {
       s.close();
@@ -203,9 +168,7 @@
 
     try {
       HTable anotherA = new HTable(conf, tableAname);
-      Get get = new Get(row);
-      get.addFamily(CATALOG_FAMILY);
-      anotherA.get(get);
+      anotherA.get(row, COLUMN_FAMILY);
     } catch (Exception e) {
       e.printStackTrace();
       fail();
@@ -228,7 +191,7 @@
       for (HColumnDescriptor c: desc.getFamilies())
         c.setValue(attrName, attrValue);
       // update metadata for all regions of this table
-      admin.modifyTable(tableAname, HConstants.Modify.TABLE_SET_HTD, desc);
+      admin.modifyTable(tableAname, HConstants.MODIFY_TABLE_SET_HTD, desc);
       // enable the table
       admin.enableTable(tableAname);
 
@@ -257,6 +220,144 @@
     }
   }
 
+  public void testCheckAndSave() throws IOException {
+    HTable table = null;
+    HColumnDescriptor column2 =
+      new HColumnDescriptor(Bytes.toBytes("info2:"));
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    HTableDescriptor testTableADesc =
+      new HTableDescriptor(tableAname);
+    testTableADesc.addFamily(column);
+    testTableADesc.addFamily(column2);
+    admin.createTable(testTableADesc);
+    
+    table = new HTable(conf, tableAname);
+    BatchUpdate batchUpdate = new BatchUpdate(row);
+    BatchUpdate batchUpdate2 = new BatchUpdate(row);
+    BatchUpdate batchUpdate3 = new BatchUpdate(row);
+
+    // this row doesn't exist when checkAndSave is invoked
+    byte [] row1 = Bytes.toBytes("row1");
+    BatchUpdate batchUpdate4 = new BatchUpdate(row1);
+    
+    // to be used for a checkAndSave for expected empty columns
+    BatchUpdate batchUpdate5 = new BatchUpdate(row);
+
+    HbaseMapWritable<byte[],byte[]> expectedValues =
+      new HbaseMapWritable<byte[],byte[]>();
+    HbaseMapWritable<byte[],byte[]> badExpectedValues =
+      new HbaseMapWritable<byte[],byte[]>();
+    HbaseMapWritable<byte[],byte[]> expectedNoValues =
+      new HbaseMapWritable<byte[],byte[]>();
+    // the columns used here must not be updated on batchupate
+    HbaseMapWritable<byte[],byte[]> expectedNoValues1 =
+      new HbaseMapWritable<byte[],byte[]>();
+
+    for(int i = 0; i < 5; i++) {
+      // This batchupdate is our initial batch update,
+      // As such we also set our expected values to the same values
+      // since we will be comparing the two
+      batchUpdate.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i));
+      expectedValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i), Bytes.toBytes(i));
+      
+      badExpectedValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i),
+        Bytes.toBytes(500));
+
+      expectedNoValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i), new byte[] {});
+      // the columns used here must not be updated on batchupate
+      expectedNoValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+i+","+i), new byte[] {});
+
+
+      // This is our second batchupdate that we will use to update the initial
+      // batchupdate
+      batchUpdate2.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+1));
+      
+      // This final batch update is to check that our expected values (which
+      // are now wrong)
+      batchUpdate3.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+2));
+
+      // Batch update that will not happen because it is to happen with some 
+      // expected values, but the row doesn't exist
+      batchUpdate4.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i));
+
+      // Batch update will happen: the row exists, but the expected columns don't,
+      // just as the condition
+      batchUpdate5.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+3));
+    }
+    
+    // Initialize rows
+    table.commit(batchUpdate);
+    
+    // check if incorrect values are returned false
+    assertFalse(table.checkAndSave(batchUpdate2,badExpectedValues,null));
+    
+    // make sure first expected values are correct
+    assertTrue(table.checkAndSave(batchUpdate2, expectedValues,null));
+        
+    // make sure check and save truly saves the data after checking the expected
+    // values
+    RowResult r = table.getRow(row);
+    byte[][] columns = batchUpdate2.getColumns();
+    for(int i = 0;i < columns.length;i++) {
+      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate2.get(columns[i])));
+    }
+    
+    // make sure that the old expected values fail
+    assertFalse(table.checkAndSave(batchUpdate3, expectedValues,null));
+
+    // row doesn't exist, so doesn't matter the expected 
+    // values (unless they are empty) 
+    assertFalse(table.checkAndSave(batchUpdate4, badExpectedValues, null));
+
+    assertTrue(table.checkAndSave(batchUpdate4, expectedNoValues, null));
+    // make sure check and save saves the data when expected values were empty and the row
+    // didn't exist
+    r = table.getRow(row1);
+    columns = batchUpdate4.getColumns();
+    for(int i = 0; i < columns.length;i++) {
+      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate4.get(columns[i])));
+    }  
+
+    // since the row isn't empty anymore, those expected (empty) values 
+    // are not valid anymore, so check and save method doesn't save. 
+    assertFalse(table.checkAndSave(batchUpdate4, expectedNoValues, null));
+    
+    // the row exists, but the columns don't. since the expected values are 
+    // for columns without value, checkAndSave must be successful. 
+    assertTrue(table.checkAndSave(batchUpdate5, expectedNoValues1, null));
+    // make sure checkAndSave saved values for batchUpdate5.
+    r = table.getRow(row);
+    columns = batchUpdate5.getColumns();
+    for(int i = 0; i < columns.length;i++) {
+      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate5.get(columns[i])));
+    }  
+
+    // since the condition wasn't changed, the following checkAndSave 
+    // must also be successful.
+    assertTrue(table.checkAndSave(batchUpdate, expectedNoValues1, null));
+    // make sure checkAndSave saved values for batchUpdate1
+    r = table.getRow(row);
+    columns = batchUpdate.getColumns();
+    for(int i = 0; i < columns.length;i++) {
+      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate.get(columns[i])));
+    }
+
+    // one failing condition must make the following checkAndSave fail
+    // the failing condition is a column to be empty, however, it has a value.
+    HbaseMapWritable<byte[],byte[]> expectedValues1 =
+      new HbaseMapWritable<byte[],byte[]>();
+    expectedValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+0), new byte[] {});
+    expectedValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+"EMPTY+ROW"), new byte[] {});
+    assertFalse(table.checkAndSave(batchUpdate5, expectedValues1, null));
+
+    // assure the values on the row remain the same
+    r = table.getRow(row);
+    columns = batchUpdate.getColumns();
+    for(int i = 0; i < columns.length;i++) {
+      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate.get(columns[i])));
+    }    
+  }
+
   /**
    * For HADOOP-2579
    */
@@ -288,42 +389,43 @@
     byte[] beforeSecondRow = Bytes.toBytes("rov");
     
     HTable table = new HTable(conf, tableAname);
-    Put put = new Put(firstRow);
-    Put put2 = new Put(row);
+    BatchUpdate batchUpdate = new BatchUpdate(firstRow);
+    BatchUpdate batchUpdate2 = new BatchUpdate(row);
     byte[] zero = new byte[]{0};
     byte[] one = new byte[]{1};
+    byte[] columnFamilyBytes = Bytes.toBytes(COLUMN_FAMILY_STR);
     
-    put.add(CATALOG_FAMILY, null, zero);
-    put2.add(CATALOG_FAMILY, null, one);
+    batchUpdate.put(COLUMN_FAMILY_STR,zero);
+    batchUpdate2.put(COLUMN_FAMILY_STR,one);
     
-    table.put(put);
-    table.put(put2);
+    table.commit(batchUpdate);
+    table.commit(batchUpdate2);
     
-    Result result = null;
+    RowResult result = null;
     
     // Test before first that null is returned
-    result = table.getRowOrBefore(beforeFirstRow, CATALOG_FAMILY);
+    result = table.getClosestRowBefore(beforeFirstRow, columnFamilyBytes);
     assertTrue(result == null);
     
     // Test at first that first is returned
-    result = table.getRowOrBefore(firstRow, CATALOG_FAMILY);
-    assertTrue(result.containsColumn(CATALOG_FAMILY, null));
-    assertTrue(Bytes.equals(result.getValue(CATALOG_FAMILY, null), zero));
-    
-    // Test in between first and second that first is returned
-    result = table.getRowOrBefore(beforeSecondRow, CATALOG_FAMILY);
-    assertTrue(result.containsColumn(CATALOG_FAMILY, null));
-    assertTrue(Bytes.equals(result.getValue(CATALOG_FAMILY, null), zero));
+    result = table.getClosestRowBefore(firstRow, columnFamilyBytes);
+    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
+    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), zero));
+    
+    // Test inbetween first and second that first is returned
+    result = table.getClosestRowBefore(beforeSecondRow, columnFamilyBytes);
+    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
+    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), zero));
     
     // Test at second make sure second is returned
-    result = table.getRowOrBefore(row, CATALOG_FAMILY);
-    assertTrue(result.containsColumn(CATALOG_FAMILY, null));
-    assertTrue(Bytes.equals(result.getValue(CATALOG_FAMILY, null), one));
+    result = table.getClosestRowBefore(row, columnFamilyBytes);
+    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
+    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), one));
     
     // Test after second, make sure second is returned
-    result = table.getRowOrBefore(Bytes.add(row,one), CATALOG_FAMILY);
-    assertTrue(result.containsColumn(CATALOG_FAMILY, null));
-    assertTrue(Bytes.equals(result.getValue(CATALOG_FAMILY, null), one));
+    result = table.getClosestRowBefore(Bytes.add(row,one), columnFamilyBytes);
+    assertTrue(result.containsKey(COLUMN_FAMILY_STR));
+    assertTrue(Bytes.equals(result.get(COLUMN_FAMILY_STR).getValue(), one));
   }
 
   /**
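
[Editor's note] The restored testCheckAndSave above exercises HTable.checkAndSave, the pre-0.20 compare-and-set primitive: the BatchUpdate is committed only if every column in the supplied HbaseMapWritable still holds its expected value (an empty byte[] expectation meaning the column must be absent). A sketch of a guarded counter bump, assuming that API; the table name, column, and values are illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.io.BatchUpdate;
    import org.apache.hadoop.hbase.io.HbaseMapWritable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndSaveSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(new HBaseConfiguration(), "tableA"); // illustrative
        byte[] column = Bytes.toBytes("info:counter");                 // illustrative

        // Seed the row with an initial value.
        BatchUpdate seed = new BatchUpdate("row");
        seed.put(column, Bytes.toBytes(1));
        table.commit(seed);

        // Commit the new value only if the current value still matches what
        // we last wrote; otherwise checkAndSave returns false.
        HbaseMapWritable<byte[], byte[]> expected = new HbaseMapWritable<byte[], byte[]>();
        expected.put(column, Bytes.toBytes(1));

        BatchUpdate bump = new BatchUpdate("row");
        bump.put(column, Bytes.toBytes(2));
        boolean saved = table.checkAndSave(bump, expected, null); // null: no explicit row lock
        System.out.println("saved = " + saved);
      }
    }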

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestListTables.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestListTables.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestListTables.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestListTables.java Sun Jun 14 21:34:13 2009
@@ -44,7 +44,7 @@
     super.setUp();
     admin = new HBaseAdmin(conf);
     HColumnDescriptor family =
-      new HColumnDescriptor(HConstants.CATALOG_FAMILY);
+      new HColumnDescriptor(HConstants.COLUMN_FAMILY_STR);
     for (int i = 0; i < TABLES.length; i++) {
       TABLES[i].addFamily(family);
       admin.createTable(TABLES[i]);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java Sun Jun 14 21:34:13 2009
@@ -26,11 +26,9 @@
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
-import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Test that verifies that scanners return a different timestamp for values that
@@ -38,9 +36,9 @@
  */
 public class TestScannerTimes extends HBaseClusterTestCase {
   private static final String TABLE_NAME = "hbase737";
-  private static final byte [] FAM1 = Bytes.toBytes("fam1");
-  private static final byte [] FAM2 = Bytes.toBytes("fam2");
-  private static final byte [] ROW = Bytes.toBytes("row");
+  private static final String FAM1 = "fam1:";
+  private static final String FAM2 = "fam2:";
+  private static final String ROW = "row";
   
   /**
    * test for HBASE-737
@@ -59,9 +57,9 @@
     HTable table = new HTable(conf, TABLE_NAME);
     
     // Insert some values
-    Put put = new Put(ROW);
-    put.add(FAM1, Bytes.toBytes("letters"), Bytes.toBytes("abcdefg"));
-    table.put(put);
+    BatchUpdate b = new BatchUpdate(ROW);
+    b.put(FAM1 + "letters", "abcdefg".getBytes(HConstants.UTF8_ENCODING));
+    table.commit(b);
     
     try {
       Thread.sleep(1000);
@@ -69,34 +67,35 @@
       //ignore
     }
     
-    put = new Put(ROW);
-    put.add(FAM1, Bytes.toBytes("numbers"), Bytes.toBytes("123456"));
-    table.put(put);
+    b = new BatchUpdate(ROW);
+    b.put(FAM1 + "numbers", "123456".getBytes(HConstants.UTF8_ENCODING));
+    table.commit(b);
     
     try {
       Thread.sleep(1000);
     } catch (InterruptedException i) {
       //ignore
     }
-
-    put = new Put(ROW);
-    put.add(FAM2, Bytes.toBytes("letters"), Bytes.toBytes("hijklmnop"));
-    table.put(put);
+    
+    b = new BatchUpdate(ROW);
+    b.put(FAM2 + "letters", "hijklmnop".getBytes(HConstants.UTF8_ENCODING));
+    table.commit(b);
     
     long times[] = new long[3];
+    byte[][] columns = new byte[][] {
+        FAM1.getBytes(HConstants.UTF8_ENCODING),
+        FAM2.getBytes(HConstants.UTF8_ENCODING)
+    };
     
     // First scan the memcache
     
-    Scan scan = new Scan();
-    scan.addFamily(FAM1);
-    scan.addFamily(FAM2);
-    ResultScanner s = table.getScanner(scan);
+    Scanner s = table.getScanner(columns);
     try {
       int index = 0;
-      Result r = null;
+      RowResult r = null;
       while ((r = s.next()) != null) {
-        for(KeyValue key : r.sorted()) {
-          times[index++] = key.getTimestamp();
+        for (Cell c: r.values()) {
+          times[index++] = c.getTimestamp();
         }
       }
     } finally {
@@ -108,30 +107,23 @@
       }
     }
     
-    // Flush data to disk and try again
+    // Fush data to disk and try again
     
     cluster.flushcache();
     
-    // Reset times
-    for(int i=0;i<times.length;i++) {
-      times[i] = 0;
-    }
-    
     try {
       Thread.sleep(1000);
     } catch (InterruptedException i) {
       //ignore
     }
-    scan = new Scan();
-    scan.addFamily(FAM1);
-    scan.addFamily(FAM2);
-    s = table.getScanner(scan);
+    
+    s = table.getScanner(columns);
     try {
       int index = 0;
-      Result r = null;
+      RowResult r = null;
       while ((r = s.next()) != null) {
-        for(KeyValue key : r.sorted()) {
-          times[index++] = key.getTimestamp();
+        for (Cell c: r.values()) {
+          times[index++] = c.getTimestamp();
         }
       }
     } finally {
@@ -142,5 +134,6 @@
         assertTrue(times[j] > times[i]);
       }
     }
+    
   }
 }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterOnMultipleFamilies.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterOnMultipleFamilies.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterOnMultipleFamilies.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterOnMultipleFamilies.java Sun Jun 14 21:34:13 2009
@@ -32,13 +32,9 @@
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scanner;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
@@ -75,14 +71,10 @@
     HTable table = new HTable(conf, TABLE_NAME);
 
     for (int i = 0; i < NUM_ROWS; i++) {
-      Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
-      byte [][] famAndQf = KeyValue.parseColumn(TEXT_COLUMN1);
-      put.add(famAndQf[0], famAndQf[1], VALUE);
-      
-      famAndQf = KeyValue.parseColumn(TEXT_COLUMN2);
-      put.add(famAndQf[0], famAndQf[1], Bytes.toBytes(String.format("%1$05d", i)));
-      
-      table.put(put);
+      BatchUpdate b = new BatchUpdate("row_" + String.format("%1$05d", i));
+      b.put(TEXT_COLUMN1, VALUE);
+      b.put(TEXT_COLUMN2, String.format("%1$05d", i).getBytes());
+      table.commit(b);
     }
 
     LOG.info("Print table contents using scanner before map/reduce for " + TABLE_NAME);
@@ -93,9 +85,7 @@
 
   private void scanTable(final String tableName, final boolean printValues) throws IOException {
     HTable table = new HTable(conf, tableName);
-    Scan scan = new Scan();
-    scan.addColumns(columns);
-    ResultScanner scanner = table.getScanner(scan);
+    Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
     int numFound = doScan(scanner, printValues);
     Assert.assertEquals(NUM_ROWS, numFound);
   }
@@ -106,24 +96,21 @@
     columnMap.put(TEXT_COLUMN1,
         new Cell(VALUE, HConstants.LATEST_TIMESTAMP));
     RegExpRowFilter filter = new RegExpRowFilter(null, columnMap);
-    Scan scan = new Scan();
-    scan.addColumns(columns);
-//    scan.setFilter(filter);
-    ResultScanner scanner = table.getScanner(scan);
+    Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW, filter);
     int numFound = doScan(scanner, printValues);
     Assert.assertEquals(NUM_ROWS, numFound);
   }
 
-  private int doScan(final ResultScanner scanner, final boolean printValues) throws IOException {
+  private int doScan(final Scanner scanner, final boolean printValues) throws IOException {
     {
       int count = 0;
 
       try {
-        for (Result result : scanner) {
+        for (RowResult result : scanner) {
           if (printValues) {
             LOG.info("row: " + Bytes.toString(result.getRow()));
 
-            for (Map.Entry<byte [], Cell> e : result.getRowResult().entrySet()) {
+            for (Map.Entry<byte [], Cell> e : result.entrySet()) {
               LOG.info(" column: " + e.getKey() + " value: "
                   + new String(e.getValue().getValue(), HConstants.UTF8_ENCODING));
             }
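
[Editor's note] DisabledTestRowFilterOnMultipleFamilies above returns to handing a row filter straight to getScanner() instead of setting it on a Scan. A sketch of a column-value filter with RegExpRowFilter, assuming the constructor and scanner overload used in the test; the table, columns, and value are illustrative, and keying the column map with a TreeMap over Bytes.BYTES_COMPARATOR is an assumption about how byte[] keys should be compared:

    import java.util.Map;
    import java.util.TreeMap;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Scanner;
    import org.apache.hadoop.hbase.filter.RegExpRowFilter;
    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.hbase.io.RowResult;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FilteredScanSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(new HBaseConfiguration(), "MultiFamTable");     // illustrative
        byte[][] columns = { Bytes.toBytes("text:1"), Bytes.toBytes("text:2") };  // illustrative

        // Only rows whose text:1 cell equals "VALUE" pass the filter; the null
        // first argument skips row-key regexp matching in this sketch.
        Map<byte[], Cell> columnMap = new TreeMap<byte[], Cell>(Bytes.BYTES_COMPARATOR);
        columnMap.put(columns[0], new Cell(Bytes.toBytes("VALUE"), HConstants.LATEST_TIMESTAMP));
        RegExpRowFilter filter = new RegExpRowFilter(null, columnMap);

        Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW, filter);
        try {
          for (RowResult r : scanner) {
            System.out.println(Bytes.toString(r.getRow()));
          }
        } finally {
          scanner.close();
        }
      }
    }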

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java Sun Jun 14 21:34:13 2009
@@ -95,4 +95,4 @@
     dis.close();
     return product;
   }
-}
\ No newline at end of file
+}

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java Sun Jun 14 21:34:13 2009
@@ -114,7 +114,7 @@
     Path ncTFile = new Path(ROOT_DIR, "basic.hfile");
     FSDataOutputStream fout = createFSOutput(ncTFile);
     Writer writer = new Writer(fout, minBlockSize,
-      Compression.getCompressionAlgorithmByName(codec), null);
+      Compression.getCompressionAlgorithmByName(codec), null, false);
     LOG.info(writer);
     writeRecords(writer);
     fout.close();
@@ -178,7 +178,7 @@
     Path mFile = new Path(ROOT_DIR, "meta.hfile");
     FSDataOutputStream fout = createFSOutput(mFile);
     Writer writer = new Writer(fout, minBlockSize,
-      Compression.getCompressionAlgorithmByName(compress), null);
+      Compression.getCompressionAlgorithmByName(compress), null, false);
     someTestingWithMetaBlock(writer);
     writer.close();
     fout.close();
@@ -204,7 +204,7 @@
     Path mFile = new Path(ROOT_DIR, "nometa.hfile");
     FSDataOutputStream fout = createFSOutput(mFile);
     Writer writer = new Writer(fout, minBlockSize,
-        Compression.Algorithm.NONE, null);
+        Compression.Algorithm.NONE, null, false);
     writer.append("foo".getBytes(), "value".getBytes());
     writer.close();
     fout.close();
@@ -226,7 +226,7 @@
   public void testComparator() throws IOException {
     Path mFile = new Path(ROOT_DIR, "meta.tfile");
     FSDataOutputStream fout = createFSOutput(mFile);
-    Writer writer = new Writer(fout, minBlockSize, (Compression.Algorithm) null,
+    Writer writer = new Writer(fout, minBlockSize, null,
       new RawComparator<byte []>() {
         @Override
         public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
@@ -238,7 +238,7 @@
         public int compare(byte[] o1, byte[] o2) {
           return compare(o1, 0, o1.length, o2, 0, o2.length);
         }
-      });
+      }, false);
     writer.append("3".getBytes(), "0".getBytes());
     writer.append("2".getBytes(), "0".getBytes());
     writer.append("1".getBytes(), "0".getBytes());

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java Sun Jun 14 21:34:13 2009
@@ -32,10 +32,7 @@
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scanner;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -105,15 +102,15 @@
   private byte [] createTableAndAddRow(final String tableName)
   throws IOException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
+    desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY));
     HBaseAdmin admin = new HBaseAdmin(conf);
     admin.createTable(desc);
     // put some values in the table
     this.table = new HTable(conf, tableName);
     byte [] row = Bytes.toBytes("row1");
-    Put put = new Put(row);
-    put.add(HConstants.CATALOG_FAMILY, null, Bytes.toBytes(tableName));
-    table.put(put);
+    BatchUpdate b = new BatchUpdate(row);
+    b.put(HConstants.COLUMN_FAMILY, Bytes.toBytes(tableName));
+    table.commit(b);
     return row;
   }
 
@@ -169,29 +166,27 @@
           // Now try to open a scanner on the meta table. Should stall until
           // meta server comes back up.
           HTable t = new HTable(conf, HConstants.META_TABLE_NAME);
-          Scan scan = new Scan();
-          scan.addFamily(HConstants.CATALOG_FAMILY);
-
-          ResultScanner s = t.getScanner(scan);
+          Scanner s =
+            t.getScanner(HConstants.COLUMN_FAMILY_ARRAY,
+              HConstants.EMPTY_START_ROW);
           s.close();
           
         } catch (IOException e) {
           LOG.fatal("could not re-open meta table because", e);
           fail();
         }
-        ResultScanner scanner = null;
+        Scanner scanner = null;
         try {
           // Verify that the client can find the data after the region has moved
           // to a different server
-          Scan scan = new Scan();
-          scan.addFamily(HConstants.CATALOG_FAMILY);
-
-          scanner = table.getScanner(scan);
+          scanner =
+            table.getScanner(HConstants.COLUMN_FAMILY_ARRAY,
+               HConstants.EMPTY_START_ROW);
           LOG.info("Obtained scanner " + scanner);
-          for (Result r : scanner) {
+          for (RowResult r : scanner) {
             assertTrue(Bytes.equals(r.getRow(), row));
             assertEquals(1, r.size());
-            byte[] bytes = r.getRowResult().get(HConstants.CATALOG_FAMILY).getValue();
+            byte[] bytes = r.get(HConstants.COLUMN_FAMILY).getValue();
             assertNotNull(bytes);
             assertTrue(tableName.equals(Bytes.toString(bytes)));
           }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java Sun Jun 14 21:34:13 2009
@@ -23,7 +23,7 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HServerAddress;
 
@@ -35,7 +35,7 @@
  * <code>${HBASE_HOME}/bin/hbase ./bin/hbase org.apache.hadoop.hbase.OOMERegionServer start</code>.
  */
 public class OOMERegionServer extends HRegionServer {
-  private List<Put> retainer = new ArrayList<Put>();
+  private List<BatchUpdate> retainer = new ArrayList<BatchUpdate>();
 
   public OOMERegionServer(HBaseConfiguration conf) throws IOException {
     super(conf);
@@ -46,12 +46,12 @@
     super(address, conf);
   }
   
-  public void put(byte [] regionName, Put put)
+  public void batchUpdate(byte [] regionName, BatchUpdate b)
   throws IOException {
-    super.put(regionName, put);
+    super.batchUpdate(regionName, b, -1L);
     for (int i = 0; i < 30; i++) {
       // Add the batch update 30 times to bring on the OOME faster.
-      this.retainer.add(put);
+      this.retainer.add(b);
     }
   }
   

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java Sun Jun 14 21:34:13 2009
@@ -23,14 +23,11 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -41,7 +38,7 @@
 public class TestCompaction extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
   private HRegion r = null;
-  private static final byte [] COLUMN_FAMILY = fam1;
+  private static final byte [] COLUMN_FAMILY = COLFAMILY_NAME1;
   private final byte [] STARTROW = Bytes.toBytes(START_KEY);
   private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
   private static final int COMPACTION_THRESHOLD = MAXVERSIONS;
@@ -94,16 +91,11 @@
     // Default is that there only 3 (MAXVERSIONS) versions allowed per column.
     // Assert == 3 when we ask for versions.
     addContent(new HRegionIncommon(r), Bytes.toString(COLUMN_FAMILY));
-
-    
     // FIX!!
-//    Cell[] cellValues =
-//      Cell.createSingleCellArray(r.get(STARTROW, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
-    Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
-
+    Cell[] cellValues = 
+      Cell.createSingleCellArray(r.get(STARTROW, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
     // Assert that I can get 3 versions since it is the max I should get
-    assertEquals(3, result.size());
-//    assertEquals(cellValues.length, 3);
+    assertEquals(cellValues.length, 3);
     r.flushcache();
     r.compactStores();
     // Always 3 versions if that is what max versions is.
@@ -111,49 +103,32 @@
     // Increment the least significant character so we get to next row.
     secondRowBytes[START_KEY_BYTES.length - 1]++;
     // FIX
-    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
-
-    // Assert that I can get 3 versions since it is the max I should get
-    assertEquals(3, result.size());
-//
-//    cellValues = Cell.createSingleCellArray(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100/*Too many*/));
-//    LOG.info("Count of " + Bytes.toString(secondRowBytes) + ": " +
-//      cellValues.length);
-//    assertTrue(cellValues.length == 3);
+    cellValues = Cell.createSingleCellArray(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100/*Too many*/));
+    LOG.info("Count of " + Bytes.toString(secondRowBytes) + ": " +
+      cellValues.length);
+    assertTrue(cellValues.length == 3);
 
     // Now add deletes to memcache and then flush it.  That will put us over
     // the compaction threshold of 3 store files.  Compacting these store files
     // should result in a compacted store file that has no references to the
     // deleted row.
-    Delete delete = new Delete(secondRowBytes, System.currentTimeMillis(), null);
-    byte [][] famAndQf = {COLUMN_FAMILY, null};
-    delete.deleteFamily(famAndQf[0]);
-    r.delete(delete, null, true);
-    
+    r.deleteAll(secondRowBytes, COLUMN_FAMILY_TEXT, System.currentTimeMillis(),
+      null);
     // Assert deleted.
-
-    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
-    assertTrue(result.isEmpty());
-
-
+    assertNull(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
     r.flushcache();
-    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
-    assertTrue(result.isEmpty());
-
+    assertNull(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
     // Add a bit of data and flush.  Start adding at 'bbb'.
     createSmallerStoreFile(this.r);
     r.flushcache();
     // Assert that the second row is still deleted.
-    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
-    assertTrue(result.isEmpty());
-
+    cellValues = Cell.createSingleCellArray(r.get(secondRowBytes,
+      COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
+    assertNull(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
     // Force major compaction.
     r.compactStores(true);
     assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 1);
-
-    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
-    assertTrue(result.isEmpty());
-
+    assertNull(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
     // Make sure the store files do have some 'aaa' keys in them -- exactly 3.
     // Also, that compacted store files do not have any secondRowBytes because
     // they were deleted.

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java?rev=784618&r1=784617&r2=784618&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java Sun Jun 14 21:34:13 2009
@@ -32,7 +32,6 @@
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile.Reader;
 
-
 /** JUnit test case for HLog */
 public class TestHLog extends HBaseTestCase implements HConstants {
   private Path dir;
@@ -170,7 +169,7 @@
         assertTrue(Bytes.equals(regionName, key.getRegionName()));
         assertTrue(Bytes.equals(tableName, key.getTablename()));
         assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
-        assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
+        assertTrue(Bytes.equals(HLog.METACOLUMN, val.getColumn()));
         assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
           val.getValue()));
         System.out.println(key + " " + val);


