hbase-commits mailing list archives

From apurt...@apache.org
Subject svn commit: r785076 [12/18] - in /hadoop/hbase/trunk_on_hadoop-0.18.3: ./ bin/ conf/ src/java/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/io/ src/...
Date Tue, 16 Jun 2009 04:34:02 GMT
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseTestCase.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseTestCase.java Tue Jun 16 04:33:56 2009
@@ -25,6 +25,7 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.NavigableMap;
 import java.util.SortedMap;
 
 import junit.framework.TestCase;
@@ -34,8 +35,13 @@
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
@@ -52,11 +58,11 @@
   /** configuration parameter name for test directory */
   public static final String TEST_DIRECTORY_KEY = "test.build.data";
 
-  protected final static byte [] COLFAMILY_NAME1 = Bytes.toBytes("colfamily1:");
-  protected final static byte [] COLFAMILY_NAME2 = Bytes.toBytes("colfamily2:");
-  protected final static byte [] COLFAMILY_NAME3 = Bytes.toBytes("colfamily3:");
-  protected static final byte [][] COLUMNS = {COLFAMILY_NAME1,
-    COLFAMILY_NAME2, COLFAMILY_NAME3};
+  protected final static byte [] fam1 = Bytes.toBytes("colfamily1");
+  protected final static byte [] fam2 = Bytes.toBytes("colfamily2");
+  protected final static byte [] fam3 = Bytes.toBytes("colfamily3");
+  protected static final byte [][] COLUMNS = {fam1,
+    fam2, fam3};
 
   private boolean localfs = false;
   protected Path testDir = null;
@@ -189,13 +195,13 @@
   protected HTableDescriptor createTableDescriptor(final String name,
       final int versions) {
     HTableDescriptor htd = new HTableDescriptor(name);
-    htd.addFamily(new HColumnDescriptor(COLFAMILY_NAME1, versions,
+    htd.addFamily(new HColumnDescriptor(fam1, versions,
       HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
       Integer.MAX_VALUE, HConstants.FOREVER, false));
-    htd.addFamily(new HColumnDescriptor(COLFAMILY_NAME2, versions,
+    htd.addFamily(new HColumnDescriptor(fam2, versions,
         HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
         Integer.MAX_VALUE, HConstants.FOREVER, false));
-    htd.addFamily(new HColumnDescriptor(COLFAMILY_NAME3, versions,
+    htd.addFamily(new HColumnDescriptor(fam3, versions,
         HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
         Integer.MAX_VALUE,  HConstants.FOREVER, false));
     return htd;
@@ -284,11 +290,14 @@
             break EXIT;
           }
           try {
-            BatchUpdate batchUpdate = ts == -1 ? 
-              new BatchUpdate(t) : new BatchUpdate(t, ts);
+            Put put = new Put(t);
+            if(ts != -1) {
+              put.setTimeStamp(ts);
+            }
             try {
-              batchUpdate.put(column, t);
-              updater.commit(batchUpdate);
+              byte[][] split = KeyValue.parseColumn(Bytes.toBytes(column));
+              put.add(split[0], split[1], t);
+              updater.put(put);
               count++;
             } catch (RuntimeException ex) {
               ex.printStackTrace();
@@ -331,44 +340,23 @@
    */
   public static interface Incommon {
     /**
-     * @param row
-     * @param column
-     * @return value for row/column pair
+     * Performs a delete.
+     * @param delete delete specifying what to remove
+     * @param lockid row lock id, or null if the row is not locked
+     * @param writeToWAL true if the delete should be written to the WAL
      * @throws IOException
      */
-    public Cell get(byte [] row, byte [] column) throws IOException;
-    /**
-     * @param row
-     * @param column
-     * @param versions
-     * @return value for row/column pair for number of versions requested
-     * @throws IOException
-     */
-    public Cell[] get(byte [] row, byte [] column, int versions) throws IOException;
-    /**
-     * @param row
-     * @param column
-     * @param ts
-     * @param versions
-     * @return value for row/column/timestamp tuple for number of versions
-     * @throws IOException
-     */
-    public Cell[] get(byte [] row, byte [] column, long ts, int versions)
+    public void delete(Delete delete,  Integer lockid, boolean writeToWAL)
     throws IOException;
-    /**
-     * @param row
-     * @param column
-     * @param ts
-     * @throws IOException
-     */
-    public void deleteAll(byte [] row, byte [] column, long ts) throws IOException;
 
     /**
-     * @param batchUpdate
+     * @param put put specifying the values to write
      * @throws IOException
      */
-    public void commit(BatchUpdate batchUpdate) throws IOException;
+    public void put(Put put) throws IOException;
 
+    public Result get(Get get) throws IOException;
+    
     /**
      * @param columns
      * @param firstRow
@@ -393,48 +381,46 @@
       this.region = HRegion;
     }
     
-    public void commit(BatchUpdate batchUpdate) throws IOException {
-      region.batchUpdate(batchUpdate, null);
+    public void put(Put put) throws IOException {
+      region.put(put);
     }
     
-    public void deleteAll(byte [] row, byte [] column, long ts)
+    public void delete(Delete delete,  Integer lockid, boolean writeToWAL)
     throws IOException {
-      this.region.deleteAll(row, column, ts, null);
+      this.region.delete(delete, lockid, writeToWAL);
     }
-
+    
+    public Result get(Get get) throws IOException {
+      return region.get(get, null);
+    }
+    
     public ScannerIncommon getScanner(byte [][] columns, byte [] firstRow,
       long ts) 
     throws IOException {
+      Scan scan = new Scan(firstRow);
+      scan.addColumns(columns);
+      scan.setTimeRange(0, ts);
       return new 
-        InternalScannerIncommon(region.getScanner(columns, firstRow, ts, null));
-    }
-
-    public Cell get(byte [] row, byte [] column) throws IOException {
-      // TODO: Fix profligacy converting from List to Cell [].
-      Cell[] result = Cell.createSingleCellArray(this.region.get(row, column, -1, -1));
-      return (result == null)? null : result[0];
-    }
-
-    public Cell[] get(byte [] row, byte [] column, int versions)
-    throws IOException {
-      // TODO: Fix profligacy converting from List to Cell [].
-      return Cell.createSingleCellArray(this.region.get(row, column, -1, versions));
-    }
-
-    public Cell[] get(byte [] row, byte [] column, long ts, int versions)
-    throws IOException {
-      // TODO: Fix profligacy converting from List to Cell [].
-      return Cell.createSingleCellArray(this.region.get(row, column, ts, versions));
+        InternalScannerIncommon(region.getScanner(scan));
     }
-
-    /**
-     * @param row
-     * @return values for each column in the specified row
-     * @throws IOException
-     */
-    public Map<byte [], Cell> getFull(byte [] row) throws IOException {
-      return region.getFull(row, null, HConstants.LATEST_TIMESTAMP, 1, null);
+    
+    // New: scanner overload taking an explicit family and qualifier list
+    public ScannerIncommon getScanner(byte [] family, byte [][] qualifiers,
+        byte [] firstRow, long ts) 
+      throws IOException {
+        Scan scan = new Scan(firstRow);
+        for(int i=0; i<qualifiers.length; i++){
+          scan.addColumn(family, qualifiers[i]);
+        }
+        scan.setTimeRange(0, ts);
+        return new 
+          InternalScannerIncommon(region.getScanner(scan));
+      }
+    
+    public Result get(Get get, Integer lockid) throws IOException{
+      return this.region.get(get, lockid);
     }
+    
 
     public void flushcache() throws IOException {
       this.region.flushcache();
@@ -455,33 +441,27 @@
       this.table = table;
     }
     
-    public void commit(BatchUpdate batchUpdate) throws IOException {
-      table.commit(batchUpdate);
+    public void put(Put put) throws IOException {
+      table.put(put);
     }
     
-    public void deleteAll(byte [] row, byte [] column, long ts)
-    throws IOException {
-      this.table.deleteAll(row, column, ts);
-    }
     
-    public ScannerIncommon getScanner(byte [][] columns, byte [] firstRow, long ts) 
+    public void delete(Delete delete,  Integer lockid, boolean writeToWAL)
     throws IOException {
-      return new 
-        ClientScannerIncommon(table.getScanner(columns, firstRow, ts, null));
+      this.table.delete(delete);
     }
     
-    public Cell get(byte [] row, byte [] column) throws IOException {
-      return this.table.get(row, column);
+    public Result get(Get get) throws IOException {
+      return table.get(get);
     }
     
-    public Cell[] get(byte [] row, byte [] column, int versions)
-    throws IOException {
-      return this.table.get(row, column, versions);
-    }
-    
-    public Cell[] get(byte [] row, byte [] column, long ts, int versions)
+    public ScannerIncommon getScanner(byte [][] columns, byte [] firstRow, long ts) 
     throws IOException {
-      return this.table.get(row, column, ts, versions);
+      Scan scan = new Scan(firstRow);
+      scan.addColumns(columns);
+      scan.setTimeStamp(ts);
+      return new 
+        ClientScannerIncommon(table.getScanner(scan));
     }
   }
   
@@ -494,22 +474,19 @@
   }
   
   public static class ClientScannerIncommon implements ScannerIncommon {
-    Scanner scanner;
-    public ClientScannerIncommon(Scanner scanner) {
+    ResultScanner scanner;
+    public ClientScannerIncommon(ResultScanner scanner) {
       this.scanner = scanner;
     }
     
     public boolean next(List<KeyValue> values)
     throws IOException {
-      RowResult results = scanner.next();
+      Result results = scanner.next();
       if (results == null) {
         return false;
       }
       values.clear();
-      for (Map.Entry<byte [], Cell> entry : results.entrySet()) {
-        values.add(new KeyValue(results.getRow(), entry.getKey(),
-          entry.getValue().getTimestamp(), entry.getValue().getValue()));
-      }
+      values.addAll(results.list());
       return true;
     }
     
@@ -544,25 +521,53 @@
     }
   }
   
-  protected void assertCellEquals(final HRegion region, final byte [] row,
-    final byte [] column, final long timestamp, final String value)
-  throws IOException {
-    Map<byte [], Cell> result = region.getFull(row, null, timestamp, 1, null);
-    Cell cell_value = result.get(column);
-    if (value == null) {
-      assertEquals(Bytes.toString(column) + " at timestamp " + timestamp, null,
-        cell_value);
-    } else {
-      if (cell_value == null) {
-        fail(Bytes.toString(column) + " at timestamp " + timestamp + 
-          "\" was expected to be \"" + value + " but was null");
-      }
-      if (cell_value != null) {
-        assertEquals(Bytes.toString(column) + " at timestamp " 
-            + timestamp, value, new String(cell_value.getValue()));
+//  protected void assertCellEquals(final HRegion region, final byte [] row,
+//    final byte [] column, final long timestamp, final String value)
+//  throws IOException {
+//    Map<byte [], Cell> result = region.getFull(row, null, timestamp, 1, null);
+//    Cell cell_value = result.get(column);
+//    if (value == null) {
+//      assertEquals(Bytes.toString(column) + " at timestamp " + timestamp, null,
+//        cell_value);
+//    } else {
+//      if (cell_value == null) {
+//        fail(Bytes.toString(column) + " at timestamp " + timestamp + 
+//          "\" was expected to be \"" + value + " but was null");
+//      }
+//      if (cell_value != null) {
+//        assertEquals(Bytes.toString(column) + " at timestamp " 
+//            + timestamp, value, new String(cell_value.getValue()));
+//      }
+//    }
+//  }
+
+  protected void assertResultEquals(final HRegion region, final byte [] row,
+      final byte [] family, final byte [] qualifier, final long timestamp,
+      final byte [] value)
+    throws IOException {
+      Get get = new Get(row);
+      get.setTimeStamp(timestamp);
+      Result res = region.get(get, null);
+      NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = 
+        res.getMap();
+      byte [] res_value = map.get(family).get(qualifier).get(timestamp);
+    
+      if (value == null) {
+        assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
+            " at timestamp " + timestamp, null, res_value);
+      } else {
+        if (res_value == null) {
+          fail(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
+              " at timestamp " + timestamp + " was expected to be \"" +
+              Bytes.toString(value) + "\" but was null");
+        }
+        if (res_value != null) {
+          assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
+              " at timestamp " + timestamp,
+              Bytes.toString(value), new String(res_value));
+        }
       }
     }
-  }
   
   /**
    * Initializes parameters used in the test environment:

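The HBaseTestCase changes above swap the deprecated BatchUpdate/Cell/RowResult
helpers for the new Put/Get/Delete/Result client API, splitting the old
"family:qualifier" column names into separate family and qualifier byte arrays.
A minimal sketch of the round trip the reworked Incommon interface now expects,
assuming a running cluster and a table "testtable" with family "colfamily1"
(names and values illustrative only):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutGetRoundTrip {
      public static void main(String [] args) throws IOException {
        HTable table = new HTable(new HBaseConfiguration(), "testtable");
        // Write: family and qualifier are separate arguments now, not one
        // "family:qualifier" byte array.
        Put put = new Put(Bytes.toBytes("row1"));
        put.add(Bytes.toBytes("colfamily1"), Bytes.toBytes("q"), Bytes.toBytes("value"));
        table.put(put);
        // Read: a Get replaces the old table.get(row, column) overloads.
        Get get = new Get(Bytes.toBytes("row1"));
        get.addColumn(Bytes.toBytes("colfamily1"), Bytes.toBytes("q"));
        Result result = table.get(get);
        System.out.println(Bytes.toString(
            result.getValue(Bytes.toBytes("colfamily1"), Bytes.toBytes("q"))));
      }
    }
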
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java Tue Jun 16 04:33:56 2009
@@ -33,6 +33,7 @@
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -187,7 +188,7 @@
     
     @Override
     void setUp() throws Exception {
-      writer = new HFile.Writer(this.fs, this.mf, RFILE_BLOCKSIZE, null, null);
+      writer = new HFile.Writer(this.fs, this.mf, RFILE_BLOCKSIZE, (Compression.Algorithm) null, null);
     }
     
     @Override

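The (Compression.Algorithm) null cast above is needed because HFile.Writer in
this tree takes the compression argument either as a String codec name or as a
Compression.Algorithm, so a bare null no longer selects an overload. Passing
the enum constant directly should be equivalent and avoids the cast (a sketch,
using the same fields as the test above):

    // Unambiguous: names the Compression.Algorithm overload explicitly.
    writer = new HFile.Writer(this.fs, this.mf, RFILE_BLOCKSIZE,
        Compression.Algorithm.NONE, null);
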
Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/KeyValueTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/KeyValueTestUtil.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/KeyValueTestUtil.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/KeyValueTestUtil.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class KeyValueTestUtil {
+
+  public static KeyValue create(
+      String row,
+      String family,
+      String qualifier,
+      long timestamp,
+      String value)
+  {
+    return create(row, family, qualifier, timestamp, KeyValue.Type.Put, value);
+  }
+
+  public static KeyValue create(
+      String row,
+      String family,
+      String qualifier,
+      long timestamp,
+      KeyValue.Type type,
+      String value)
+  {
+      return new KeyValue(
+          Bytes.toBytes(row),
+          Bytes.toBytes(family),
+          Bytes.toBytes(qualifier),
+          timestamp,
+          type,
+          Bytes.toBytes(value)
+      );
+  }
+}

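The new KeyValueTestUtil spares tests the repeated Bytes.toBytes conversions
when building KeyValues from string literals. A usage sketch (row, family,
qualifier, timestamps and values are illustrative):

    KeyValue kv = KeyValueTestUtil.create("row1", "fam", "qual", 1L, "value");
    // The longer overload also sets the type, e.g. a delete marker:
    KeyValue del = KeyValueTestUtil.create("row1", "fam", "qual", 2L,
        KeyValue.Type.Delete, "");
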
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Tue Jun 16 04:33:56 2009
@@ -26,6 +26,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -62,7 +63,7 @@
         } catch (BindException e) {
           //this port is already in use. try to use another (for multiple testing)
           int port = conf.getInt("hbase.master.port", DEFAULT_MASTER_PORT);
-          LOG.info("MiniHBaseCluster: Failed binding Master to port: " + port);
+          LOG.info("Failed binding Master to port: " + port, e);
           port++;
           conf.setInt("hbase.master.port", port);
           continue;
@@ -172,6 +173,7 @@
     if (this.hbaseCluster != null) {
       this.hbaseCluster.shutdown();
     }
+    HConnectionManager.deleteAllConnections(false);
   }
 
   /**

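The HConnectionManager.deleteAllConnections(false) call added to shutdown()
drops the HConnections cached per configuration, so a test that starts several
mini clusters in one JVM does not pick up a stale connection to an
already-stopped cluster. The intended lifecycle, in sketch form (constructor
arguments illustrative):

    MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 1);
    try {
      // ... run client code against the cluster ...
    } finally {
      // Stops master and regionservers, then clears cached connections.
      cluster.shutdown();
    }
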
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java Tue Jun 16 04:33:56 2009
@@ -35,20 +35,22 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
-import org.apache.hadoop.hbase.filter.PageRowFilter;
-import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.RowWhileMatchFilter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Hash;
 import org.apache.hadoop.hbase.util.MurmurHash;
+import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.FileInputFormat;
@@ -86,12 +88,13 @@
   private static final int ONE_GB = 1024 * 1024 * 1000;
   private static final int ROWS_PER_GB = ONE_GB / ROW_LENGTH;
   
-  static final byte [] COLUMN_NAME = Bytes.toBytes(COLUMN_FAMILY_STR + "data");
+  static final byte [] FAMILY_NAME = Bytes.toBytes("info");
+  static final byte [] QUALIFIER_NAME = Bytes.toBytes("data");
   
   protected static final HTableDescriptor TABLE_DESCRIPTOR;
   static {
     TABLE_DESCRIPTOR = new HTableDescriptor("TestTable");
-    TABLE_DESCRIPTOR.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
+    TABLE_DESCRIPTOR.addFamily(new HColumnDescriptor(CATALOG_FAMILY));
   }
   
   private static final String RANDOM_READ = "randomRead";
@@ -431,11 +434,12 @@
     
     @Override
     void testRow(final int i) throws IOException {
-      Scanner s = this.table.getScanner(new byte [][] {COLUMN_NAME},
-        getRandomRow(this.rand, this.totalRows),
-        new WhileMatchRowFilter(new PageRowFilter(120)));
+      Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
+      scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
+      scan.setFilter(new RowWhileMatchFilter(new PageFilter(120)));
+      ResultScanner s = this.table.getScanner(scan);
       //int count = 0;
-      for (RowResult rr = null; (rr = s.next()) != null;) {
+      for (Result rr = null; (rr = s.next()) != null;) {
         // LOG.info("" + count++ + " " + rr.toString());
       }
       s.close();
@@ -461,7 +465,9 @@
     
     @Override
     void testRow(final int i) throws IOException {
-      this.table.get(getRandomRow(this.rand, this.totalRows), COLUMN_NAME);
+      Get get = new Get(getRandomRow(this.rand, this.totalRows));
+      get.addColumn(FAMILY_NAME, QUALIFIER_NAME);
+      this.table.get(get);
     }
 
     @Override
@@ -485,9 +491,9 @@
     @Override
     void testRow(final int i) throws IOException {
       byte [] row = getRandomRow(this.rand, this.totalRows);
-      BatchUpdate b = new BatchUpdate(row);
-      b.put(COLUMN_NAME, generateValue(this.rand));
-      table.commit(b);
+      Put put = new Put(row);
+      put.add(FAMILY_NAME, QUALIFIER_NAME, generateValue(this.rand));
+      table.put(put);
     }
 
     @Override
@@ -497,7 +503,7 @@
   }
   
   class ScanTest extends Test {
-    private Scanner testScanner;
+    private ResultScanner testScanner;
     
     ScanTest(final HBaseConfiguration conf, final int startRow,
         final int perClientRunRows, final int totalRows, final Status status) {
@@ -507,8 +513,9 @@
     @Override
     void testSetup() throws IOException {
       super.testSetup();
-      this.testScanner = table.getScanner(new byte [][] {COLUMN_NAME},
-        format(this.startRow));
+      Scan scan = new Scan(format(this.startRow));
+      scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
+      this.testScanner = table.getScanner(scan);
     }
     
     @Override
@@ -539,7 +546,9 @@
     
     @Override
     void testRow(final int i) throws IOException {
-      table.get(format(i), COLUMN_NAME);
+      Get get = new Get(format(i));
+      get.addColumn(FAMILY_NAME, QUALIFIER_NAME);
+      table.get(get);
     }
 
     @Override
@@ -556,9 +565,9 @@
     
     @Override
     void testRow(final int i) throws IOException {
-      BatchUpdate b = new BatchUpdate(format(i));
-      b.put(COLUMN_NAME, generateValue(this.rand));
-      table.commit(b);
+      Put put = new Put(format(i));
+      put.add(FAMILY_NAME, QUALIFIER_NAME, generateValue(this.rand));
+      table.put(put);
     }
 
     @Override

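PerformanceEvaluation now exercises the migrated client API end to end: a Scan
carries the start row, columns and filter, and the filters themselves were
renamed (RowWhileMatchFilter wrapping a PageFilter replaces WhileMatchRowFilter
wrapping a PageRowFilter). The random-scan pattern from the hunk above, in
isolation:

    Scan scan = new Scan(startRow);
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
    scan.setFilter(new RowWhileMatchFilter(new PageFilter(120)));
    ResultScanner s = table.getScanner(scan);
    try {
      for (Result rr; (rr = s.next()) != null;) {
        // consume rr
      }
    } finally {
      s.close();
    }
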
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java Tue Jun 16 04:33:56 2009
@@ -23,9 +23,10 @@
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -44,9 +45,10 @@
       byte [] regionName = HRegionInfo.createRegionName(tableName,
         Bytes.toBytes(i == 0? "": Integer.toString(i)),
         Long.toString(System.currentTimeMillis()));
-      BatchUpdate b = new BatchUpdate(regionName);
-      b.put(HConstants.COL_SERVER, Bytes.toBytes("localhost:1234"));
-      t.commit(b);
+      Put put = new Put(regionName);
+      put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
+          Bytes.toBytes("localhost:1234"));
+      t.put(put);
     }
     long sleepTime =
       conf.getLong("hbase.master.meta.thread.rescanfrequency", 10000);
@@ -59,11 +61,18 @@
       } catch (InterruptedException e) {
         // ignore
       }
-      Scanner scanner = t.getScanner(HConstants.ALL_META_COLUMNS, tableName);
+      Scan scan = new Scan();
+      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
+      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
+      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
+      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
+      ResultScanner scanner = t.getScanner(scan);
       try {
         count = 0;
-        for (RowResult r: scanner) {
-          if (r.size() > 0) {
+        Result r;
+        while((r = scanner.next()) != null) {
+          if (!r.isEmpty()) {
             count += 1;
           }
         }

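With the old ALL_META_COLUMNS wildcard gone, the test enumerates the catalog
family/qualifier pairs it needs, and Result.isEmpty() replaces the old
RowResult.size() > 0 check. Where the whole family is wanted, a single
addFamily call should be the equivalent shorthand (a sketch):

    Scan scan = new Scan();
    scan.addFamily(HConstants.CATALOG_FAMILY);  // every qualifier in the family
    ResultScanner scanner = t.getScanner(scan);
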
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java Tue Jun 16 04:33:56 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -21,15 +21,16 @@
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.Iterator;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -75,18 +76,19 @@
 
   private static final int FIRST_ROW = 1;
   private static final int NUM_VALS = 1000;
-  private static final byte [] CONTENTS = Bytes.toBytes("contents:");
-  private static final String CONTENTS_BASIC_STR = "contents:basic";
-  private static final byte [] CONTENTS_BASIC = Bytes.toBytes(CONTENTS_BASIC_STR);
+  private static final byte [] CONTENTS_CF = Bytes.toBytes("contents");
+  private static final String CONTENTS_CQ_STR = "basic";
+  private static final byte [] CONTENTS_CQ = Bytes.toBytes(CONTENTS_CQ_STR);
   private static final String CONTENTSTR = "contentstr";
-  private static final byte [] ANCHOR = Bytes.toBytes("anchor:");
-  private static final String ANCHORNUM = "anchor:anchornum-";
-  private static final String ANCHORSTR = "anchorstr";
+  //
+  private static final byte [] ANCHOR_CF = Bytes.toBytes("anchor");
+  private static final String ANCHORNUM_CQ = "anchornum-";
+  private static final String ANCHORSTR_VALUE = "anchorstr";
 
   private void setup() throws IOException {
     desc = new HTableDescriptor("test");
-    desc.addFamily(new HColumnDescriptor(CONTENTS));
-    desc.addFamily(new HColumnDescriptor(ANCHOR));
+    desc.addFamily(new HColumnDescriptor(CONTENTS_CF));
+    desc.addFamily(new HColumnDescriptor(ANCHOR_CF));
     admin = new HBaseAdmin(conf);
     admin.createTable(desc);
     table = new HTable(conf, desc.getName());
@@ -100,10 +102,10 @@
     // Write out a bunch of values
 
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
-      BatchUpdate b = new BatchUpdate("row_" + k);
-      b.put(CONTENTS_BASIC, Bytes.toBytes(CONTENTSTR + k));
-      b.put(ANCHORNUM + k, Bytes.toBytes(ANCHORSTR + k));
-      table.commit(b);
+      Put put = new Put(Bytes.toBytes("row_" + k));
+      put.add(CONTENTS_CF, CONTENTS_CQ, Bytes.toBytes(CONTENTSTR + k));
+      put.add(ANCHOR_CF, Bytes.toBytes(ANCHORNUM_CQ + k), Bytes.toBytes(ANCHORSTR_VALUE + k));
+      table.put(put);
     }
     LOG.info("Write " + NUM_VALS + " rows. Elapsed time: "
         + ((System.currentTimeMillis() - startTime) / 1000.0));
@@ -117,21 +119,27 @@
       String rowlabelStr = "row_" + k;
       byte [] rowlabel = Bytes.toBytes(rowlabelStr);
 
-      byte bodydata[] = table.get(rowlabel, CONTENTS_BASIC).getValue();
-      assertNotNull("no data for row " + rowlabelStr + "/" + CONTENTS_BASIC_STR,
+      Get get = new Get(rowlabel);
+      get.addColumn(CONTENTS_CF, CONTENTS_CQ);
+      byte [] bodydata = table.get(get).getValue(CONTENTS_CF, CONTENTS_CQ);
+      assertNotNull("no data for row " + rowlabelStr + "/" + CONTENTS_CQ_STR,
           bodydata);
       String bodystr = new String(bodydata, HConstants.UTF8_ENCODING);
       String teststr = CONTENTSTR + k;
       assertTrue("Incorrect value for key: (" + rowlabelStr + "/" +
-          CONTENTS_BASIC_STR + "), expected: '" + teststr + "' got: '" +
+          CONTENTS_CQ_STR + "), expected: '" + teststr + "' got: '" +
           bodystr + "'", teststr.compareTo(bodystr) == 0);
       
-      String collabelStr = ANCHORNUM + k;
+      String collabelStr = ANCHORNUM_CQ + k;
       collabel = Bytes.toBytes(collabelStr);
-      bodydata = table.get(rowlabel, collabel).getValue();
+      
+      get = new Get(rowlabel);
+      get.addColumn(ANCHOR_CF, collabel);
+      
+      bodydata = table.get(get).getValue(ANCHOR_CF, collabel);
       assertNotNull("no data for row " + rowlabelStr + "/" + collabelStr, bodydata);
       bodystr = new String(bodydata, HConstants.UTF8_ENCODING);
-      teststr = ANCHORSTR + k;
+      teststr = ANCHORSTR_VALUE + k;
       assertTrue("Incorrect value for key: (" + rowlabelStr + "/" + collabelStr +
           "), expected: '" + teststr + "' got: '" + bodystr + "'",
           teststr.compareTo(bodystr) == 0);
@@ -142,47 +150,48 @@
   }
   
   private void scanner() throws IOException {
-    byte [][] cols = new byte [][] {Bytes.toBytes(ANCHORNUM + "[0-9]+"),
-      CONTENTS_BASIC};
     
     long startTime = System.currentTimeMillis();
     
-    Scanner s = table.getScanner(cols, HConstants.EMPTY_BYTE_ARRAY);
+    Scan scan = new Scan();
+    scan.addFamily(ANCHOR_CF);
+    scan.addColumn(CONTENTS_CF, CONTENTS_CQ);
+    ResultScanner s = table.getScanner(scan);
     try {
 
       int contentsFetched = 0;
       int anchorFetched = 0;
       int k = 0;
-      for (RowResult curVals : s) {
-        for (Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
-          byte [] col = it.next();
-          byte val[] = curVals.get(col).getValue();
-          String curval = Bytes.toString(val);
-          if (Bytes.compareTo(col, CONTENTS_BASIC) == 0) {
+      for (Result curVals : s) {
+        for(KeyValue kv : curVals.raw()) {
+          byte [] family = kv.getFamily();
+          byte [] qualifier = kv.getQualifier();
+          String strValue = new String(kv.getValue());
+          if(Bytes.equals(family, CONTENTS_CF)) {
             assertTrue("Error at:" + Bytes.toString(curVals.getRow()) 
-                + ", Value for " + Bytes.toString(col) + " should start with: " + CONTENTSTR
-                + ", but was fetched as: " + curval,
-                curval.startsWith(CONTENTSTR));
+                + ", Value for " + Bytes.toString(qualifier) + " should start with: " + CONTENTSTR
+                + ", but was fetched as: " + strValue,
+                strValue.startsWith(CONTENTSTR));
             contentsFetched++;
             
-          } else if (Bytes.toString(col).startsWith(ANCHORNUM)) {
-            assertTrue("Error at:" + Bytes.toString(curVals.getRow())
-                + ", Value for " + Bytes.toString(col) + " should start with: " + ANCHORSTR
-                + ", but was fetched as: " + curval,
-                curval.startsWith(ANCHORSTR));
+          } else if(Bytes.equals(family, ANCHOR_CF)) {
+            assertTrue("Error at:" + Bytes.toString(curVals.getRow()) 
+                + ", Value for " + Bytes.toString(qualifier) + " should start with: " + ANCHORSTR_VALUE
+                + ", but was fetched as: " + strValue,
+                strValue.startsWith(ANCHORSTR_VALUE));
             anchorFetched++;
             
           } else {
-            LOG.info(Bytes.toString(col));
+            LOG.info("Family: " + Bytes.toString(family) + ", Qualifier: " + Bytes.toString(qualifier));
           }
         }
         k++;
       }
       assertEquals("Expected " + NUM_VALS + " " +
-        Bytes.toString(CONTENTS_BASIC) + " values, but fetched " +
+        Bytes.toString(CONTENTS_CQ) + " values, but fetched " +
         contentsFetched,
         NUM_VALS, contentsFetched);
-      assertEquals("Expected " + NUM_VALS + " " + ANCHORNUM +
+      assertEquals("Expected " + NUM_VALS + " " + ANCHORNUM_CQ +
         " values, but fetched " + anchorFetched,
         NUM_VALS, anchorFetched);
 
@@ -201,7 +210,7 @@
     assertTrue(Bytes.equals(desc.getName(), tables[0].getName()));
     Collection<HColumnDescriptor> families = tables[0].getFamilies();
     assertEquals(2, families.size());
-    assertTrue(tables[0].hasFamily(CONTENTS));
-    assertTrue(tables[0].hasFamily(ANCHOR));
+    assertTrue(tables[0].hasFamily(CONTENTS_CF));
+    assertTrue(tables[0].hasFamily(ANCHOR_CF));
   }
 }
\ No newline at end of file

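The scanner verification above shows the new result-walking idiom: instead of
iterating a RowResult's column-to-Cell map, the test walks the KeyValues in
each Result and compares family and qualifier as separate byte arrays. The
core of the pattern:

    for (Result row : s) {
      for (KeyValue kv : row.raw()) {
        byte [] family = kv.getFamily();
        byte [] qualifier = kv.getQualifier();
        byte [] value = kv.getValue();
        if (Bytes.equals(family, CONTENTS_CF)) {
          // assert on Bytes.toString(value) ...
        }
      }
    }
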
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestKeyValue.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestKeyValue.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestKeyValue.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestKeyValue.java Tue Jun 16 04:33:56 2009
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
+import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.util.Bytes;
 
 public class TestKeyValue extends TestCase {
@@ -39,13 +40,21 @@
     final byte [] a = Bytes.toBytes("aaa");
     byte [] column1 = Bytes.toBytes("abc:def");
     byte [] column2 = Bytes.toBytes("abcd:ef");
-    KeyValue aaa = new KeyValue(a, column1, a);
-    assertFalse(KeyValue.COMPARATOR.
-      compareColumns(aaa, column2, 0, column2.length, 4) == 0);
+    byte [] family2 = Bytes.toBytes("abcd");
+    byte [] qualifier2 = Bytes.toBytes("ef"); 
+    KeyValue aaa = new KeyValue(a, column1, 0L, Type.Put, a);
+    assertFalse(aaa.matchingColumn(column2));
+    assertTrue(aaa.matchingColumn(column1));
+    aaa = new KeyValue(a, column2, 0L, Type.Put, a);
+    assertFalse(aaa.matchingColumn(column1));
+    assertTrue(aaa.matchingColumn(family2,qualifier2));
     column1 = Bytes.toBytes("abcd:");
-    aaa = new KeyValue(a, column1, a);
-    assertFalse(KeyValue.COMPARATOR.
-      compareColumns(aaa, column1, 0, column1.length, 4) == 0);
+    aaa = new KeyValue(a, column1, 0L, Type.Put, a);
+    assertTrue(aaa.matchingColumn(family2,null));
+    assertFalse(aaa.matchingColumn(family2,qualifier2));
+    // Previous test had an assertFalse that I don't understand
+    //    assertFalse(KeyValue.COMPARATOR.
+    //    compareColumns(aaa, column1, 0, column1.length, 4) == 0);
   }
 
   public void testBasics() throws Exception {
@@ -111,31 +120,31 @@
   public void testMoreComparisons() throws Exception {
     // Root compares
     long now = System.currentTimeMillis();
-    KeyValue a = new KeyValue(".META.,,99999999999999", now);
-    KeyValue b = new KeyValue(".META.,,1", now);
+    KeyValue a = new KeyValue(Bytes.toBytes(".META.,,99999999999999"), now);
+    KeyValue b = new KeyValue(Bytes.toBytes(".META.,,1"), now);
     KVComparator c = new KeyValue.RootComparator();
     assertTrue(c.compare(b, a) < 0);
-    KeyValue aa = new KeyValue(".META.,,1", now);
-    KeyValue bb = new KeyValue(".META.,,1", "info:regioninfo",
-      1235943454602L);
+    KeyValue aa = new KeyValue(Bytes.toBytes(".META.,,1"), now);
+    KeyValue bb = new KeyValue(Bytes.toBytes(".META.,,1"), 
+        Bytes.toBytes("info:regioninfo"), 1235943454602L);
     assertTrue(c.compare(aa, bb) < 0);
     
     // Meta compares
-    KeyValue aaa =
-      new KeyValue("TestScanMultipleVersions,row_0500,1236020145502", now);
-    KeyValue bbb = new KeyValue("TestScanMultipleVersions,,99999999999999",
-      now);
+    KeyValue aaa = new KeyValue(
+        Bytes.toBytes("TestScanMultipleVersions,row_0500,1236020145502"), now);
+    KeyValue bbb = new KeyValue(
+        Bytes.toBytes("TestScanMultipleVersions,,99999999999999"), now);
     c = new KeyValue.MetaComparator();
     assertTrue(c.compare(bbb, aaa) < 0);
     
-    KeyValue aaaa = new KeyValue("TestScanMultipleVersions,,1236023996656",
-      "info:regioninfo", 1236024396271L);
+    KeyValue aaaa = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,,1236023996656"),
+        Bytes.toBytes("info:regioninfo"), 1236024396271L);
     assertTrue(c.compare(aaaa, bbb) < 0);
     
-    KeyValue x = new KeyValue("TestScanMultipleVersions,row_0500,1236034574162",
-      "", 9223372036854775807L);
-    KeyValue y = new KeyValue("TestScanMultipleVersions,row_0500,1236034574162",
-      "info:regioninfo", 1236034574912L);
+    KeyValue x = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,row_0500,1236034574162"),
+        Bytes.toBytes(""), 9223372036854775807L);
+    KeyValue y = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,row_0500,1236034574162"),
+        Bytes.toBytes("info:regioninfo"), 1236034574912L);
     assertTrue(c.compare(x, y) < 0);
     comparisons(new KeyValue.MetaComparator());
     comparisons(new KeyValue.KVComparator());
@@ -151,53 +160,53 @@
   public void testKeyValueBorderCases() throws IOException {
     // % sorts before , so if we don't do special comparator, rowB would
     // come before rowA.
-    KeyValue rowA = new KeyValue("testtable,www.hbase.org/,1234",
-      "", Long.MAX_VALUE);
-    KeyValue rowB = new KeyValue("testtable,www.hbase.org/%20,99999",
-      "", Long.MAX_VALUE);
+    KeyValue rowA = new KeyValue(Bytes.toBytes("testtable,www.hbase.org/,1234"),
+      Bytes.toBytes(""), Long.MAX_VALUE);
+    KeyValue rowB = new KeyValue(Bytes.toBytes("testtable,www.hbase.org/%20,99999"),
+      Bytes.toBytes(""), Long.MAX_VALUE);
     assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0);
 
-    rowA = new KeyValue("testtable,,1234", "", Long.MAX_VALUE);
-    rowB = new KeyValue("testtable,$www.hbase.org/,99999", "", Long.MAX_VALUE);
+    rowA = new KeyValue(Bytes.toBytes("testtable,,1234"), Bytes.toBytes(""), Long.MAX_VALUE);
+    rowB = new KeyValue(Bytes.toBytes("testtable,$www.hbase.org/,99999"), Bytes.toBytes(""), Long.MAX_VALUE);
     assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0);
 
-    rowA = new KeyValue(".META.,testtable,www.hbase.org/,1234,4321", "",
+    rowA = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/,1234,4321"), Bytes.toBytes(""),
       Long.MAX_VALUE);
-    rowB = new KeyValue(".META.,testtable,www.hbase.org/%20,99999,99999", "",
+    rowB = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/%20,99999,99999"), Bytes.toBytes(""),
       Long.MAX_VALUE);
     assertTrue(KeyValue.ROOT_COMPARATOR.compare(rowA, rowB) < 0);
   }
 
   private void metacomparisons(final KeyValue.MetaComparator c) {
     long now = System.currentTimeMillis();
-    assertTrue(c.compare(new KeyValue(".META.,a,,0,1", now),
-      new KeyValue(".META.,a,,0,1", now)) == 0);
-    KeyValue a = new KeyValue(".META.,a,,0,1", now);
-    KeyValue b = new KeyValue(".META.,a,,0,2", now);
+    assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now),
+      new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now)) == 0);
+    KeyValue a = new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now);
+    KeyValue b = new KeyValue(Bytes.toBytes(".META.,a,,0,2"), now);
     assertTrue(c.compare(a, b) < 0);
-    assertTrue(c.compare(new KeyValue(".META.,a,,0,2", now),
-      new KeyValue(".META.,a,,0,1", now)) > 0);
+    assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,a,,0,2"), now),
+      new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now)) > 0);
   }
 
   private void comparisons(final KeyValue.KVComparator c) {
     long now = System.currentTimeMillis();
-    assertTrue(c.compare(new KeyValue(".META.,,1", now),
-      new KeyValue(".META.,,1", now)) == 0);
-    assertTrue(c.compare(new KeyValue(".META.,,1", now),
-      new KeyValue(".META.,,2", now)) < 0);
-    assertTrue(c.compare(new KeyValue(".META.,,2", now),
-      new KeyValue(".META.,,1", now)) > 0);
+    assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now),
+      new KeyValue(Bytes.toBytes(".META.,,1"), now)) == 0);
+    assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now),
+      new KeyValue(Bytes.toBytes(".META.,,2"), now)) < 0);
+    assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,2"), now),
+      new KeyValue(Bytes.toBytes(".META.,,1"), now)) > 0);
   }
 
   public void testBinaryKeys() throws Exception {
     Set<KeyValue> set = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
-    String column = "col:umn";
-    KeyValue [] keys = {new KeyValue("aaaaa,\u0000\u0000,2", column, 2),
-      new KeyValue("aaaaa,\u0001,3", column, 3),
-      new KeyValue("aaaaa,,1", column, 1),
-      new KeyValue("aaaaa,\u1000,5", column, 5),
-      new KeyValue("aaaaa,a,4", column, 4),
-      new KeyValue("a,a,0", column, 0),
+    byte [] column = Bytes.toBytes("col:umn");
+    KeyValue [] keys = {new KeyValue(Bytes.toBytes("aaaaa,\u0000\u0000,2"), column, 2),
+      new KeyValue(Bytes.toBytes("aaaaa,\u0001,3"), column, 3),
+      new KeyValue(Bytes.toBytes("aaaaa,,1"), column, 1),
+      new KeyValue(Bytes.toBytes("aaaaa,\u1000,5"), column, 5),
+      new KeyValue(Bytes.toBytes("aaaaa,a,4"), column, 4),
+      new KeyValue(Bytes.toBytes("a,a,0"), column, 0),
     };
     // Add to set with bad comparator
     for (int i = 0; i < keys.length; i++) {
@@ -226,12 +235,12 @@
     }
     // Make up -ROOT- table keys.
     KeyValue [] rootKeys = {
-        new KeyValue(".META.,aaaaa,\u0000\u0000,0,2", column, 2),
-        new KeyValue(".META.,aaaaa,\u0001,0,3", column, 3),
-        new KeyValue(".META.,aaaaa,,0,1", column, 1),
-        new KeyValue(".META.,aaaaa,\u1000,0,5", column, 5),
-        new KeyValue(".META.,aaaaa,a,0,4", column, 4),
-        new KeyValue(".META.,,0", column, 0),
+        new KeyValue(Bytes.toBytes(".META.,aaaaa,\u0000\u0000,0,2"), column, 2),
+        new KeyValue(Bytes.toBytes(".META.,aaaaa,\u0001,0,3"), column, 3),
+        new KeyValue(Bytes.toBytes(".META.,aaaaa,,0,1"), column, 1),
+        new KeyValue(Bytes.toBytes(".META.,aaaaa,\u1000,0,5"), column, 5),
+        new KeyValue(Bytes.toBytes(".META.,aaaaa,a,0,4"), column, 4),
+        new KeyValue(Bytes.toBytes(".META.,,0"), column, 0),
       };
     // This will output the keys incorrectly.
     set = new TreeSet<KeyValue>(new KeyValue.MetaComparator());
@@ -260,4 +269,11 @@
       assertTrue(count++ == k.getTimestamp());
     }
   }
+
+  public void testStackedUpKeyValue() {
+    // Test multiple KeyValues in a single blob.
+
+    // TODO actually write this test!
+    
+  }
 }
\ No newline at end of file

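TestKeyValue now builds keys only through the byte [] constructors (the String
convenience constructors are gone) and checks column membership with
matchingColumn rather than a raw comparator call. For example:

    KeyValue kv = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("abcd:ef"),
        0L, KeyValue.Type.Put, Bytes.toBytes("aaa"));
    boolean wholeColumn = kv.matchingColumn(Bytes.toBytes("abcd:ef"));        // true
    boolean famAndQual = kv.matchingColumn(Bytes.toBytes("abcd"),
        Bytes.toBytes("ef"));                                                 // true
    boolean other = kv.matchingColumn(Bytes.toBytes("abc"),
        Bytes.toBytes("def"));                                                // false
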
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestRegionRebalancing.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestRegionRebalancing.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestRegionRebalancing.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestRegionRebalancing.java Tue Jun 16 04:33:56 2009
@@ -28,6 +28,7 @@
 
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
 
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -223,9 +224,10 @@
   throws IOException {
     HRegion region = createNewHRegion(desc, startKey, endKey);
     byte [] keyToWrite = startKey == null ? Bytes.toBytes("row_000") : startKey;
-    BatchUpdate bu = new BatchUpdate(keyToWrite);
-    bu.put(COLUMN_NAME, "test".getBytes());
-    region.batchUpdate(bu, null);
+    Put put = new Put(keyToWrite);
+    byte [][] famAndQf = KeyValue.parseColumn(COLUMN_NAME);
+    put.add(famAndQf[0], famAndQf[1], Bytes.toBytes("test"));
+    region.put(put);
     region.close();
     region.getLog().closeAndDelete();
     return region;

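KeyValue.parseColumn is the bridge for tests still holding old-style
"family:qualifier" constants: it splits the byte array at the colon so the
pieces can feed the new Put API, exactly as the hunk above does:

    byte [][] famAndQf = KeyValue.parseColumn(COLUMN_NAME);  // {family, qualifier}
    Put put = new Put(keyToWrite);
    put.add(famAndQf[0], famAndQf[1], Bytes.toBytes("test"));
    region.put(put);
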
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java Tue Jun 16 04:33:56 2009
@@ -21,11 +21,12 @@
 package org.apache.hadoop.hbase;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -53,7 +54,7 @@
     // Create table description
     
     this.desc = new HTableDescriptor(TABLE_NAME);
-    this.desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY));
+    this.desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
 
     // Region 0 will contain the key range [,row_0500)
     INFOS[0] = new HRegionInfo(this.desc, HConstants.EMPTY_START_ROW,
@@ -70,9 +71,11 @@
         HRegion.createHRegion(this.INFOS[i], this.testDir, this.conf);
       // Insert data
       for (int j = 0; j < TIMESTAMPS.length; j++) {
-        BatchUpdate b = new BatchUpdate(ROWS[i], TIMESTAMPS[j]);
-        b.put(HConstants.COLUMN_FAMILY, Bytes.toBytes(TIMESTAMPS[j]));
-        REGIONS[i].batchUpdate(b, null);
+        Put put = new Put(ROWS[i]);
+        put.setTimeStamp(TIMESTAMPS[j]);
+        put.add(HConstants.CATALOG_FAMILY, null, TIMESTAMPS[j], 
+            Bytes.toBytes(TIMESTAMPS[j]));
+        REGIONS[i].put(put);
       }
       // Insert the region we created into the meta
       HRegion.addRegionToMETA(meta, REGIONS[i]);
@@ -93,19 +96,25 @@
     HTable t = new HTable(conf, TABLE_NAME);
     for (int i = 0; i < ROWS.length; i++) {
       for (int j = 0; j < TIMESTAMPS.length; j++) {
-        Cell [] cells =
-          t.get(ROWS[i], HConstants.COLUMN_FAMILY, TIMESTAMPS[j], 1);
-        assertTrue(cells != null && cells.length == 1);
-        System.out.println("Row=" + Bytes.toString(ROWS[i]) + ", cell=" +
-          cells[0]);
+        Get get = new Get(ROWS[i]);
+        get.addFamily(HConstants.CATALOG_FAMILY);
+        get.setTimeStamp(TIMESTAMPS[j]);
+        Result result = t.get(get);
+        int cellCount = 0;
+        for(@SuppressWarnings("unused")KeyValue kv : result.sorted()) {
+          cellCount++;
+        }
+        assertTrue(cellCount == 1);
       }
     }
     
     // Case 1: scan with LATEST_TIMESTAMP. Should get two rows
     int count = 0;
-    Scanner s = t.getScanner(HConstants.COLUMN_FAMILY_ARRAY);
+    Scan scan = new Scan();
+    scan.addFamily(HConstants.CATALOG_FAMILY);
+    ResultScanner s = t.getScanner(scan);
     try {
-      for (RowResult rr = null; (rr = s.next()) != null;) {
+      for (Result rr = null; (rr = s.next()) != null;) {
         System.out.println(rr.toString());
         count += 1;
       }
@@ -118,8 +127,11 @@
     // (in this case > 1000 and < LATEST_TIMESTAMP. Should get 2 rows.
     
     count = 0;
-    s = t.getScanner(HConstants.COLUMN_FAMILY_ARRAY, HConstants.EMPTY_START_ROW,
-        10000L);
+    scan = new Scan();
+    scan.setTimeRange(1000L, Long.MAX_VALUE);
+    scan.addFamily(HConstants.CATALOG_FAMILY);
+
+    s = t.getScanner(scan);
     try {
       while (s.next() != null) {
         count += 1;
@@ -133,8 +145,11 @@
     // (in this case == 1000. Should get 2 rows.
     
     count = 0;
-    s = t.getScanner(HConstants.COLUMN_FAMILY_ARRAY, HConstants.EMPTY_START_ROW,
-        1000L);
+    scan = new Scan();
+    scan.setTimeStamp(1000L);
+    scan.addFamily(HConstants.CATALOG_FAMILY);
+
+    s = t.getScanner(scan);
     try {
       while (s.next() != null) {
         count += 1;
@@ -148,8 +163,11 @@
     // second timestamp (100 < timestamp < 1000). Should get 2 rows.
     
     count = 0;
-    s = t.getScanner(HConstants.COLUMN_FAMILY_ARRAY, HConstants.EMPTY_START_ROW,
-        500L);
+    scan = new Scan();
+    scan.setTimeRange(100L, 1000L);
+    scan.addFamily(HConstants.CATALOG_FAMILY);
+
+    s = t.getScanner(scan);
     try {
       while (s.next() != null) {
         count += 1;
@@ -163,8 +181,11 @@
     // Should get 2 rows.
     
     count = 0;
-    s = t.getScanner(HConstants.COLUMN_FAMILY_ARRAY, HConstants.EMPTY_START_ROW,
-        100L);
+    scan = new Scan();
+    scan.setTimeStamp(100L);
+    scan.addFamily(HConstants.CATALOG_FAMILY);
+
+    s = t.getScanner(scan);
     try {
       while (s.next() != null) {
         count += 1;

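The rewritten scans make the timestamp semantics explicit where the old
getScanner(columns, startRow, ts) took a single cutoff: setTimeStamp(ts)
matches cells at exactly ts, while setTimeRange(min, max) takes a half-open
[min, max) interval, which is how the "100 < timestamp < 1000" case above
becomes setTimeRange(100L, 1000L). In sketch form:

    Scan scan = new Scan();
    scan.addFamily(HConstants.CATALOG_FAMILY);
    scan.setTimeStamp(1000L);         // only cells with ts == 1000
    // or, alternatively, an interval:
    scan.setTimeRange(100L, 1000L);   // cells with 100 <= ts < 1000
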
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestSerialization.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestSerialization.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestSerialization.java Tue Jun 16 04:33:56 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -20,13 +20,31 @@
 package org.apache.hadoop.hbase;
 
 
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.NavigableSet;
+
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RowLock;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.BatchOperation;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.Writable;
 
 /**
  * Test HBase Writables serializations
@@ -52,6 +70,7 @@
     assertTrue(KeyValue.COMPARATOR.compare(original, newone) == 0);
   }
 
+  @SuppressWarnings("unchecked")
   public void testHbaseMapWritable() throws Exception {
     HbaseMapWritable<byte [], byte []> hmw =
       new HbaseMapWritable<byte[], byte[]>();
@@ -157,7 +176,7 @@
     assertTrue(Bytes.equals(bu.getRow(), bubu.getRow()));
     // Assert has same number of BatchOperations.
     int firstCount = 0;
-    for (BatchOperation bo: bubu) {
+    for (@SuppressWarnings("unused")BatchOperation bo: bubu) {
       firstCount++;
     }
     // Now deserialize again into same instance to ensure we're not
@@ -166,9 +185,358 @@
     // Assert rows are same again.
     assertTrue(Bytes.equals(bu.getRow(), bububu.getRow()));
     int secondCount = 0;
-    for (BatchOperation bo: bububu) {
+    for (@SuppressWarnings("unused")BatchOperation bo: bububu) {
       secondCount++;
     }
     assertEquals(firstCount, secondCount);
   }
+  
+  
+  //
+  // HBASE-880
+  //
+  
+  public void testPut() throws Exception{
+    byte[] row = "row".getBytes();
+    byte[] fam = "fam".getBytes();
+    byte[] qf1 = "qf1".getBytes();
+    byte[] qf2 = "qf2".getBytes();
+    byte[] qf3 = "qf3".getBytes();
+    byte[] qf4 = "qf4".getBytes();
+    byte[] qf5 = "qf5".getBytes();
+    byte[] qf6 = "qf6".getBytes();
+    byte[] qf7 = "qf7".getBytes();
+    byte[] qf8 = "qf8".getBytes();
+    
+    long ts = System.currentTimeMillis();
+    byte[] val = "val".getBytes();
+    
+    Put put = new Put(row);
+    put.add(fam, qf1, ts, val);
+    put.add(fam, qf2, ts, val);
+    put.add(fam, qf3, ts, val);
+    put.add(fam, qf4, ts, val);
+    put.add(fam, qf5, ts, val);
+    put.add(fam, qf6, ts, val);
+    put.add(fam, qf7, ts, val);
+    put.add(fam, qf8, ts, val);
+    
+    byte[] sb = Writables.getBytes(put);
+    Put desPut = (Put)Writables.getWritable(sb, new Put());
+
+    //Timing test
+//    long start = System.nanoTime();
+//    desPut = (Put)Writables.getWritable(sb, new Put());
+//    long stop = System.nanoTime();
+//    System.out.println("timer " +(stop-start));
+    
+    assertTrue(Bytes.equals(put.getRow(), desPut.getRow()));
+    List<KeyValue> list = null;
+    List<KeyValue> desList = null;
+    for(Map.Entry<byte[], List<KeyValue>> entry : put.getFamilyMap().entrySet()){
+      assertTrue(desPut.getFamilyMap().containsKey(entry.getKey()));
+      list = entry.getValue();
+      desList = desPut.getFamilyMap().get(entry.getKey());
+      for(int i=0; i<list.size(); i++){
+        assertTrue(list.get(i).equals(desList.get(i)));
+      }
+    }
+  }
+
+  
+  public void testPut2() throws Exception{
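+    // Regression case: a Put that looks like a region-historian record,
+    // using Long.MAX_VALUE (HConstants.LATEST_TIMESTAMP) as the stamp.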
+    byte[] row = "testAbort,,1243116656250".getBytes();
+    byte[] fam = "historian".getBytes();
+    byte[] qf1 = "creation".getBytes();
+    
+    long ts = 9223372036854775807L;
+    byte[] val = "dont-care".getBytes();
+    
+    Put put = new Put(row);
+    put.add(fam, qf1, ts, val);
+    
+    byte[] sb = Writables.getBytes(put);
+    Put desPut = (Put)Writables.getWritable(sb, new Put());
+
+    assertTrue(Bytes.equals(put.getRow(), desPut.getRow()));
+    List<KeyValue> list = null;
+    List<KeyValue> desList = null;
+    for(Map.Entry<byte[], List<KeyValue>> entry : put.getFamilyMap().entrySet()){
+      assertTrue(desPut.getFamilyMap().containsKey(entry.getKey()));
+      list = entry.getValue();
+      desList = desPut.getFamilyMap().get(entry.getKey());
+      for(int i=0; i<list.size(); i++){
+        assertTrue(list.get(i).equals(desList.get(i)));
+      }
+    }
+  }
+  
+  
+  public void testDelete() throws Exception{
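+    // A Delete of a single column version must come back from the
+    // Writable round trip with the same row and family map.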
+    byte[] row = "row".getBytes();
+    byte[] fam = "fam".getBytes();
+    byte[] qf1 = "qf1".getBytes();
+    
+    long ts = System.currentTimeMillis();
+    
+    Delete delete = new Delete(row);
+    delete.deleteColumn(fam, qf1, ts);
+    
+    byte[] sb = Writables.getBytes(delete);
+    Delete desDelete = (Delete)Writables.getWritable(sb, new Delete());
+
+    assertTrue(Bytes.equals(delete.getRow(), desDelete.getRow()));
+    List<KeyValue> list = null;
+    List<KeyValue> desList = null;
+    for(Map.Entry<byte[], List<KeyValue>> entry :
+        delete.getFamilyMap().entrySet()){
+      assertTrue(desDelete.getFamilyMap().containsKey(entry.getKey()));
+      list = entry.getValue();
+      desList = desDelete.getFamilyMap().get(entry.getKey());
+      for(int i=0; i<list.size(); i++){
+        assertTrue(list.get(i).equals(desList.get(i)));
+      }
+    }
+  }
+ 
+  public void testGet() throws Exception{
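+    // Get carries more than row and columns: lock id, max versions and
+    // time range must all survive serialization too.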
+    byte[] row = "row".getBytes();
+    byte[] fam = "fam".getBytes();
+    byte[] qf1 = "qf1".getBytes();
+    
+    long ts = System.currentTimeMillis();
+    int maxVersions = 2;
+    long lockid = 5;
+    RowLock rowLock = new RowLock(lockid);
+    
+    Get get = new Get(row, rowLock);
+    get.addColumn(fam, qf1);
+    get.setTimeRange(ts, ts+1);
+    get.setMaxVersions(maxVersions);
+    
+    byte[] sb = Writables.getBytes(get);
+    Get desGet = (Get)Writables.getWritable(sb, new Get());
+
+    assertTrue(Bytes.equals(get.getRow(), desGet.getRow()));
+    Set<byte[]> set = null;
+    Set<byte[]> desSet = null;
+    
+    for(Map.Entry<byte[], NavigableSet<byte[]>> entry :
+        get.getFamilyMap().entrySet()){
+      assertTrue(desGet.getFamilyMap().containsKey(entry.getKey()));
+      set = entry.getValue();
+      desSet = desGet.getFamilyMap().get(entry.getKey());
+      for(byte [] qualifier : set){
+        assertTrue(desSet.contains(qualifier));
+      }
+    }
+    
+    assertEquals(get.getLockId(), desGet.getLockId());
+    assertEquals(get.getMaxVersions(), desGet.getMaxVersions());
+    TimeRange tr = get.getTimeRange();
+    TimeRange desTr = desGet.getTimeRange();
+    assertEquals(tr.getMax(), desTr.getMax());
+    assertEquals(tr.getMin(), desTr.getMin());
+  }
+  
+
+  public void testScan() throws Exception{
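+    // Scan must preserve start/stop rows plus the family map, max
+    // versions and time range across serialization.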
+    byte[] startRow = "startRow".getBytes();
+    byte[] stopRow  = "stopRow".getBytes();
+    byte[] fam = "fam".getBytes();
+    byte[] qf1 = "qf1".getBytes();
+    
+    long ts = System.currentTimeMillis();
+    int maxVersions = 2;
+    
+    Scan scan = new Scan(startRow, stopRow);
+    scan.addColumn(fam, qf1);
+    scan.setTimeRange(ts, ts+1);
+    scan.setMaxVersions(maxVersions);
+    
+    byte[] sb = Writables.getBytes(scan);
+    Scan desScan = (Scan)Writables.getWritable(sb, new Scan());
+
+    assertTrue(Bytes.equals(scan.getStartRow(), desScan.getStartRow()));
+    assertTrue(Bytes.equals(scan.getStopRow(), desScan.getStopRow()));
+    Set<byte[]> set = null;
+    Set<byte[]> desSet = null;
+    
+    for(Map.Entry<byte[], NavigableSet<byte[]>> entry :
+        scan.getFamilyMap().entrySet()){
+      assertTrue(desScan.getFamilyMap().containsKey(entry.getKey()));
+      set = entry.getValue();
+      desSet = desScan.getFamilyMap().get(entry.getKey());
+      for(byte[] column : set){
+        assertTrue(desSet.contains(column));
+      }
+    }
+    
+    assertEquals(scan.getMaxVersions(), desScan.getMaxVersions());
+    TimeRange tr = scan.getTimeRange();
+    TimeRange desTr = desScan.getTimeRange();
+    assertEquals(tr.getMax(), desTr.getMax());
+    assertEquals(tr.getMin(), desTr.getMin());
+  }
+  
+  public void testResultEmpty() throws Exception {
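+    // An empty Result must serialize and deserialize back to empty.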
+    List<KeyValue> keys = new ArrayList<KeyValue>();
+    Result r = new Result(keys);
+    assertTrue(r.isEmpty());
+    byte [] rb = Writables.getBytes(r);
+    Result deserializedR = (Result)Writables.getWritable(rb, new Result());
+    assertTrue(deserializedR.isEmpty());
+  }
+  
+  
+  public void testResult() throws Exception {
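+    // Compare the sorted KeyValue arrays of the original and the
+    // deserialized Result entry by entry.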
+    byte [] rowA = Bytes.toBytes("rowA");
+    byte [] famA = Bytes.toBytes("famA");
+    byte [] qfA = Bytes.toBytes("qfA");
+    byte [] valueA = Bytes.toBytes("valueA");
+    
+    byte [] rowB = Bytes.toBytes("rowB");
+    byte [] famB = Bytes.toBytes("famB");
+    byte [] qfB = Bytes.toBytes("qfB");
+    byte [] valueB = Bytes.toBytes("valueB");
+    
+    KeyValue kvA = new KeyValue(rowA, famA, qfA, valueA);
+    KeyValue kvB = new KeyValue(rowB, famB, qfB, valueB);
+    
+    Result result = new Result(new KeyValue[]{kvA, kvB});
+    
+    byte [] rb = Writables.getBytes(result);
+    Result deResult = (Result)Writables.getWritable(rb, new Result());
+    
+    assertTrue("results are not equivalent, first key mismatch",
+        result.sorted()[0].equals(deResult.sorted()[0]));
+    
+    assertTrue("results are not equivalent, second key mismatch",
+        result.sorted()[1].equals(deResult.sorted()[1]));
+    
+    // Test empty Result
+    Result r = new Result();
+    byte [] b = Writables.getBytes(r);
+    Result deserialized = (Result)Writables.getWritable(b, new Result());
+    assertEquals(r.size(), deserialized.size());
+  }
+  
+  public void testResultArray() throws Exception {
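+    // Result.writeArray/readArray serialize whole arrays of Results;
+    // check a mixed-size array survives the round trip intact.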
+    byte [] rowA = Bytes.toBytes("rowA");
+    byte [] famA = Bytes.toBytes("famA");
+    byte [] qfA = Bytes.toBytes("qfA");
+    byte [] valueA = Bytes.toBytes("valueA");
+    
+    byte [] rowB = Bytes.toBytes("rowB");
+    byte [] famB = Bytes.toBytes("famB");
+    byte [] qfB = Bytes.toBytes("qfB");
+    byte [] valueB = Bytes.toBytes("valueB");
+    
+    KeyValue kvA = new KeyValue(rowA, famA, qfA, valueA);
+    KeyValue kvB = new KeyValue(rowB, famB, qfB, valueB);
+
+    
+    Result result1 = new Result(new KeyValue[]{kvA, kvB});
+    Result result2 = new Result(new KeyValue[]{kvB});
+    Result result3 = new Result(new KeyValue[]{kvB});
+    
+    Result [] results = new Result [] {result1, result2, result3};
+    
+    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(byteStream);
+    Result.writeArray(out, results);
+    
+    byte [] rb = byteStream.toByteArray();
+    
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(rb, 0, rb.length);
+    
+    Result [] deResults = Result.readArray(in);
+    
+    assertEquals(results.length, deResults.length);
+    
+    for(int i = 0; i < results.length; i++) {
+      KeyValue [] keysA = results[i].sorted();
+      KeyValue [] keysB = deResults[i].sorted();
+      assertEquals(keysA.length, keysB.length);
+      for(int j = 0; j < keysA.length; j++) {
+        assertTrue("Expected equivalent keys but found:\n" +
+            "KeyA : " + keysA[j].toString() + "\n" +
+            "KeyB : " + keysB[j].toString() + "\n" +
+            keysA.length + " keys in result " + i + ", mismatch at key " + j,
+            keysA[j].equals(keysB[j]));
+      }
+    }
+    
+  }
+  
+  public void testResultArrayEmpty() throws Exception {
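+    // Array-form edge cases: write an array holding one empty Result,
+    // then verify that null and zero-length arrays both read back as
+    // zero-length arrays.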
+    List<KeyValue> keys = new ArrayList<KeyValue>();
+    Result r = new Result(keys);
+    Result [] results = new Result [] {r};
+
+    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(byteStream);
+    
+    Result.writeArray(out, results);
+    
+    results = null;
+    
+    byteStream = new ByteArrayOutputStream();
+    out = new DataOutputStream(byteStream);
+    Result.writeArray(out, results);
+    
+    byte [] rb = byteStream.toByteArray();
+    
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(rb, 0, rb.length);
+    
+    Result [] deResults = Result.readArray(in);
+    
+    assertEquals(0, deResults.length);
+    
+    results = new Result[0];
+
+    byteStream = new ByteArrayOutputStream();
+    out = new DataOutputStream(byteStream);
+    Result.writeArray(out, results);
+    
+    rb = byteStream.toByteArray();
+    
+    in = new DataInputBuffer();
+    in.reset(rb, 0, rb.length);
+    
+    deResults = Result.readArray(in);
+    
+    assertEquals(0, deResults.length);
+    
+  }
+  
+  public void testTimeRange() throws Exception{
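+    // TimeRange is half-open, [0, 5); min and max must round-trip.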
+    TimeRange tr = new TimeRange(0,5);
+    byte [] mb = Writables.getBytes(tr);
+    TimeRange deserializedTr =
+      (TimeRange)Writables.getWritable(mb, new TimeRange());
+    
+    assertEquals(tr.getMax(), deserializedTr.getMax());
+    assertEquals(tr.getMin(), deserializedTr.getMin());
+    
+  }
+  
+  public void testKeyValue2() throws Exception {
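+    // KeyValue serializes as a raw Writable; check the backing buffer,
+    // offset and length all match after the round trip.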
+    byte[] row = getName().getBytes();
+    byte[] fam = "fam".getBytes();
+    byte[] qf = "qf".getBytes();
+    long ts = System.currentTimeMillis();
+    byte[] val = "val".getBytes();
+    
+    KeyValue kv = new KeyValue(row, fam, qf, ts, val);
+    
+    byte [] mb = Writables.getBytes(kv);
+    KeyValue deserializedKv =
+      (KeyValue)Writables.getWritable(mb, new KeyValue());
+    assertTrue(Bytes.equals(kv.getBuffer(), deserializedKv.getBuffer()));
+    assertEquals(kv.getOffset(), deserializedKv.getOffset());
+    assertEquals(kv.getLength(), deserializedKv.getLength());
+  }
 }
\ No newline at end of file

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestTable.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestTable.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestTable.java Tue Jun 16 04:33:56 2009
@@ -23,6 +23,7 @@
 
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -57,7 +58,7 @@
     // Try doing a duplicate database create.
     msg = null;
     HTableDescriptor desc = new HTableDescriptor(getName());
-    desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY));
+    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
     admin.createTable(desc);
     assertTrue("First table creation completed", admin.listTables().length == 1);
     boolean gotException = false;
@@ -74,7 +75,7 @@
     // Now try and do concurrent creation with a bunch of threads.
     final HTableDescriptor threadDesc =
       new HTableDescriptor("threaded_" + getName());
-    threadDesc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY));
+    threadDesc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
     int count = 10;
     Thread [] threads = new Thread [count];
     final AtomicInteger successes = new AtomicInteger(0);
@@ -109,8 +110,8 @@
     }
     // All threads are now dead.  Count up how many tables were created and
     // how many failed w/ appropriate exception.
-    assertTrue(successes.get() == 1);
-    assertTrue(failures.get() == (count - 1));
+    assertEquals(1, successes.get());
+    assertEquals(count - 1, failures.get());
   }
   
   /**
@@ -140,10 +141,12 @@
     HTable table = new HTable(conf, getName());
     try {
       byte[] value = Bytes.toBytes("somedata");
-      BatchUpdate update = new BatchUpdate();
-      update.put(colName, value);
-      table.commit(update);
-      fail("BatchUpdate on read only table succeeded");  
+      // This used to use an empty row... That must have been a bug
+      Put put = new Put(value);
+      byte [][] famAndQf = KeyValue.parseColumn(colName);
+      put.add(famAndQf[0], famAndQf[1], value);
+      table.put(put);
+      fail("Put on read only table succeeded");  
     } catch (Exception e) {
       // expected
     }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java Tue Jun 16 04:33:56 2009
@@ -25,6 +25,7 @@
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -141,9 +142,9 @@
       admin.createTable(desc);
   
       HTable table = new HTable("test");
-      BatchUpdate batchUpdate = new BatchUpdate("testrow");
-      batchUpdate.put("fam:col", Bytes.toBytes("testdata"));
-      table.commit(batchUpdate);
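+      // The old "fam:col" column name splits into family "fam" and
+      // qualifier "col" under the Put API.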
+      Put put = new Put(Bytes.toBytes("testrow"));
+      put.add(Bytes.toBytes("fam"), Bytes.toBytes("col"), Bytes.toBytes("testdata"));
+      table.put(put);
     } catch (Exception e) {
       e.printStackTrace();
       fail();

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TimestampTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TimestampTestBase.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TimestampTestBase.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TimestampTestBase.java Tue Jun 16 04:33:56 2009
@@ -19,8 +19,13 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
-import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -34,7 +39,7 @@
   private static final long T1 = 100L;
   private static final long T2 = 200L;
   
-  private static final String COLUMN_NAME = "contents:";
+  private static final String COLUMN_NAME = "contents:contents";
   
   private static final byte [] COLUMN = Bytes.toBytes(COLUMN_NAME);
   private static final byte [] ROW = Bytes.toBytes("row");
@@ -55,6 +60,7 @@
     put(incommon);
     // Verify that returned versions match passed timestamps.
     assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
+
     // If I delete w/o specifying a timestamp, this means I'm deleting the
     // latest.
     delete(incommon);
@@ -74,14 +80,23 @@
     // Flush everything out to disk and then retry
     flusher.flushcache();
     assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});
-    
+
     // Now try deleting all from T2 back inclusive (We first need to add T2
     // back into the mix and to make things a little interesting, delete and
     // then readd T1.
     put(incommon, T2);
     delete(incommon, T1);
     put(incommon, T1);
-    incommon.deleteAll(ROW, COLUMN, T2);
+
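+    // An empty qualifier after parseColumn targets the whole family;
+    // otherwise deleteColumns removes every version of the one column
+    // with a timestamp at or before T2.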
+    Delete delete = new Delete(ROW);
+    byte [][] famAndQf = KeyValue.parseColumn(COLUMN);
+    if (famAndQf[1].length == 0){
+      delete.deleteFamily(famAndQf[0], T2);
+    } else {
+      delete.deleteColumns(famAndQf[0], famAndQf[1], T2);
+    }
+    incommon.delete(delete, null, true);
+ 
     // Should only be current value in set.  Assert this is so
     assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP);
     
@@ -93,12 +108,16 @@
   private static void assertOnlyLatest(final Incommon incommon,
     final long currentTime)
   throws IOException {
-    Cell [] cellValues = incommon.get(ROW, COLUMN, 3/*Ask for too much*/);
-    assertEquals(1, cellValues.length);
-    long time = Bytes.toLong(cellValues[0].getValue());
+    Get get = new Get(ROW);
+    byte [][] famAndQf = KeyValue.parseColumn(COLUMN);
+    get.addColumn(famAndQf[0], famAndQf[1]);
+    get.setMaxVersions(3);
+    Result result = incommon.get(get);
+    assertEquals(1, result.size());
+    long time = Bytes.toLong(result.sorted()[0].getValue());
     assertEquals(time, currentTime);
-    assertNull(incommon.get(ROW, COLUMN, T1, 3 /*Too many*/));
-    assertTrue(assertScanContentTimestamp(incommon, T1) == 0);
   }
   
   /*
@@ -112,22 +131,49 @@
   public static void assertVersions(final Incommon incommon, final long [] tss)
   throws IOException {
     // Assert that 'latest' is what we expect.
-    byte [] bytes = incommon.get(ROW, COLUMN).getValue();
-    assertEquals(Bytes.toLong(bytes), tss[0]);
+    Get get = new Get(ROW);
+    byte [][] famAndQf = KeyValue.parseColumn(COLUMN);
+    get.addColumn(famAndQf[0], famAndQf[1]);
+    Result r = incommon.get(get);
+    byte [] bytes = r.getValue(famAndQf[0], famAndQf[1]);
+    long t = Bytes.toLong(bytes);
+    assertEquals(tss[0], t);
+
     // Now assert that if we ask for multiple versions, that they come out in
     // order.
-    Cell[] cellValues = incommon.get(ROW, COLUMN, tss.length);
-    assertEquals(tss.length, cellValues.length);
-    for (int i = 0; i < cellValues.length; i++) {
-      long ts = Bytes.toLong(cellValues[i].getValue());
+    get = new Get(ROW);
+    famAndQf = KeyValue.parseColumn(COLUMN);
+    get.addColumn(famAndQf[0], famAndQf[1]);
+    get.setMaxVersions(tss.length);
+    Result result = incommon.get(get);
+    List<Cell> cells = new ArrayList<Cell>();
+    for(KeyValue kv : result.sorted()) {
+      cells.add(new Cell(kv.getValue(), kv.getTimestamp()));
+    }
+    assertEquals(tss.length, cells.size());
+    for (int i = 0; i < cells.size(); i++) {
+      long ts = Bytes.toLong(cells.get(i).getValue());
       assertEquals(ts, tss[i]);
     }
+    
     // Specify a timestamp get multiple versions.
-    cellValues = incommon.get(ROW, COLUMN, tss[0], cellValues.length - 1);
-    for (int i = 1; i < cellValues.length; i++) {
-      long ts = Bytes.toLong(cellValues[i].getValue());
+    get = new Get(ROW);
+    famAndQf = KeyValue.parseColumn(COLUMN);
+    get.addColumn(famAndQf[0], famAndQf[1]);
+    get.setTimeStamp(tss[0]);
+    get.setMaxVersions(cells.size() - 1);
+    result = incommon.get(get);
+    cells = new ArrayList<Cell>();
+    for(KeyValue kv : result.sorted()) {
+      cells.add(new Cell(kv.getValue(), kv.getTimestamp()));
+    }
+    for (int i = 1; i < cells.size(); i++) {
+      long ts = Bytes.toLong(cells.get(i).getValue());
       assertEquals(ts, tss[i]);
     }
+    
     // Test scanner returns expected version
     assertScanContentTimestamp(incommon, tss[0]);
   }
@@ -211,20 +257,44 @@
   public static void put(final Incommon loader, final byte [] bytes,
     final long ts)
   throws IOException {
-    BatchUpdate batchUpdate = ts == HConstants.LATEST_TIMESTAMP ? 
-      new BatchUpdate(ROW) : new BatchUpdate(ROW, ts);
-    batchUpdate.put(COLUMN, bytes);
-    loader.commit(batchUpdate);
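+    // LATEST_TIMESTAMP means "leave the Put's default timestamp alone";
+    // only explicit stamps are set on the Put.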
+    Put put = new Put(ROW);
+    if(ts != HConstants.LATEST_TIMESTAMP) {
+      put.setTimeStamp(ts);
+    }
+    byte [][] famAndQf = KeyValue.parseColumn(COLUMN);
+    put.add(famAndQf[0], famAndQf[1], bytes);
+    loader.put(put);
   }
   
   public static void delete(final Incommon loader) throws IOException {
-    delete(loader, HConstants.LATEST_TIMESTAMP);
+    delete(loader, null);
+  }
+
+  public static void delete(final Incommon loader, final byte [] column)
+  throws IOException {
+    delete(loader, column, HConstants.LATEST_TIMESTAMP);
+  }
+
+  public static void delete(final Incommon loader, final long ts)
+  throws IOException {
+    delete(loader, null, ts);
+  }
+
+  public static void delete(final Incommon loader, final byte [] column,
+      final long ts)
+  throws IOException {
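+    // A ts of LATEST_TIMESTAMP keeps the Delete's default stamp; a null
+    // column falls back to COLUMN, and an empty qualifier widens the
+    // delete from one column to the whole family.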
+    Delete delete = ts == HConstants.LATEST_TIMESTAMP?
+      new Delete(ROW): new Delete(ROW, ts, null);
+    byte [][] famAndQf = KeyValue.parseColumn(column == null? COLUMN: column);
+    if (famAndQf[1].length == 0) {
+      delete.deleteFamily(famAndQf[0], ts);
+    } else {
+      delete.deleteColumn(famAndQf[0], famAndQf[1], ts);
+    }
+    loader.delete(delete, null, true);
   }
 
-  public static void delete(final Incommon loader, final long ts) throws IOException {
-    BatchUpdate batchUpdate = ts == HConstants.LATEST_TIMESTAMP ? 
-      new BatchUpdate(ROW) : new BatchUpdate(ROW, ts);
-    batchUpdate.delete(COLUMN);
-    loader.commit(batchUpdate);  
+  public static Result get(final Incommon loader) throws IOException {
+    return loader.get(new Get(ROW));
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java Tue Jun 16 04:33:56 2009
@@ -75,85 +75,6 @@
     table = new HTable(conf, desc.getName());
   }
 
-  /**
-   * @throws IOException
-   */
-  public void testBatchUpdate() throws IOException {
-    BatchUpdate bu = new BatchUpdate("row1");
-    bu.put(CONTENTS, value);
-    bu.delete(CONTENTS);
-    table.commit(bu);
-
-    bu = new BatchUpdate("row2");
-    bu.put(CONTENTS, value);
-    byte[][] getColumns = bu.getColumns();
-    assertEquals(getColumns.length, 1);
-    assertTrue(Arrays.equals(getColumns[0], CONTENTS));
-    assertTrue(bu.hasColumn(CONTENTS));
-    assertFalse(bu.hasColumn(new byte[] {}));
-    byte[] getValue = bu.get(getColumns[0]);
-    assertTrue(Arrays.equals(getValue, value));
-    table.commit(bu);
-
-    byte [][] columns = { CONTENTS };
-    Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
-    for (RowResult r : scanner) {
-      for(Map.Entry<byte [], Cell> e: r.entrySet()) {
-        System.out.println(Bytes.toString(r.getRow()) + ": row: " + e.getKey() + " value: " + 
-            new String(e.getValue().getValue(), HConstants.UTF8_ENCODING));
-      }
-    }
-  }
-  
-  public void testBatchUpdateMaxLength() {
-    // Test for a single good value
-    BatchUpdate batchUpdate = new BatchUpdate("row1");
-    batchUpdate.put(SMALLFAM, value);
-    try {
-      table.commit(batchUpdate);
-      fail("Value is too long, should throw exception");
-    } catch (IOException e) {
-      // This is expected
-    }
-    // Try to see if it's still inserted
-    try {
-      Cell cell = table.get("row1", SMALLFAM_STR);
-      assertNull(cell);
-    } catch (IOException e) {
-      e.printStackTrace();
-      fail("This is unexpected");
-    }
-    // Try to put a good value
-    batchUpdate = new BatchUpdate("row1");
-    batchUpdate.put(SMALLFAM, smallValue);
-    try {
-      table.commit(batchUpdate);
-    } catch (IOException e) {
-      fail("Value is long enough, should not throw exception");
-    }
-  }
-  
-  public void testRowsBatchUpdate() {
-    ArrayList<BatchUpdate> rowsUpdate = new ArrayList<BatchUpdate>();
-    for(int i = 0; i < NB_BATCH_ROWS; i++) {
-      BatchUpdate batchUpdate = new BatchUpdate("row"+i);
-      batchUpdate.put(CONTENTS, value);
-      rowsUpdate.add(batchUpdate);
-    }
-    try {
-      table.commit(rowsUpdate);  
-    
-      byte [][] columns = { CONTENTS };
-      Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
-      int nbRows = 0;
-      for(@SuppressWarnings("unused") RowResult row : scanner)
-        nbRows++;
-      assertEquals(NB_BATCH_ROWS, nbRows);
-    } catch (IOException e) {
-      fail("This is unexpected : " + e);
-    }
-  }
-  
   public void testRowsBatchUpdateBufferedOneFlush() {
     table.setAutoFlush(false);
     ArrayList<BatchUpdate> rowsUpdate = new ArrayList<BatchUpdate>();
@@ -168,17 +89,15 @@
       byte [][] columns = { CONTENTS };
       Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
       int nbRows = 0;
-      for(@SuppressWarnings("unused") RowResult row : scanner)
-        nbRows++;
+      for(@SuppressWarnings("unused") RowResult row : scanner) nbRows++;
       assertEquals(0, nbRows);  
       scanner.close();
-      
+
       table.flushCommits();
       
       scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
       nbRows = 0;
-      for(@SuppressWarnings("unused") RowResult row : scanner)
-        nbRows++;
+      for(@SuppressWarnings("unused") RowResult row : scanner) nbRows++;
       assertEquals(NB_BATCH_ROWS*10, nbRows);
     } catch (IOException e) {
       fail("This is unexpected : " + e);
@@ -209,6 +128,55 @@
       fail("This is unexpected : " + e);
     }
   }
+
+  /**
+   * @throws IOException
+   */
+  public void testBatchUpdate() throws IOException {
+    BatchUpdate bu = new BatchUpdate("row1");
+    bu.put(CONTENTS, value);
+    // Can't mix put and delete in one BatchUpdate in 0.20.0 -- bu.delete(CONTENTS);
+    table.commit(bu);
+
+    bu = new BatchUpdate("row2");
+    bu.put(CONTENTS, value);
+    byte[][] getColumns = bu.getColumns();
+    assertEquals(getColumns.length, 1);
+    assertTrue(Arrays.equals(getColumns[0], CONTENTS));
+    assertTrue(bu.hasColumn(CONTENTS));
+    assertFalse(bu.hasColumn(new byte[] {}));
+    byte[] getValue = bu.get(getColumns[0]);
+    assertTrue(Arrays.equals(getValue, value));
+    table.commit(bu);
+
+    byte [][] columns = { CONTENTS };
+    Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
+    for (RowResult r : scanner) {
+      for(Map.Entry<byte [], Cell> e: r.entrySet()) {
+        System.out.println(Bytes.toString(r.getRow()) + ": column: " +
+            Bytes.toString(e.getKey()) + " value: " +
+            new String(e.getValue().getValue(), HConstants.UTF8_ENCODING));
+      }
+    }
+  }
   
-  
+  public void testRowsBatchUpdate() {
+    ArrayList<BatchUpdate> rowsUpdate = new ArrayList<BatchUpdate>();
+    for(int i = 0; i < NB_BATCH_ROWS; i++) {
+      BatchUpdate batchUpdate = new BatchUpdate("row"+i);
+      batchUpdate.put(CONTENTS, value);
+      rowsUpdate.add(batchUpdate);
+    }
+    try {
+      table.commit(rowsUpdate);  
+    
+      byte [][] columns = { CONTENTS };
+      Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
+      int nbRows = 0;
+      for(@SuppressWarnings("unused") RowResult row : scanner)
+        nbRows++;
+      assertEquals(NB_BATCH_ROWS, nbRows);
+    } catch (IOException e) {
+      fail("This is unexpected : " + e);
+    }
+  }
 }


