hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ecl...@apache.org
Subject svn commit: r1449950 [12/35] - in /hbase/trunk: ./ hbase-client/ hbase-client/src/ hbase-client/src/main/ hbase-client/src/main/java/ hbase-client/src/main/java/org/ hbase-client/src/main/java/org/apache/ hbase-client/src/main/java/org/apache/hadoop/ h...
Date Mon, 25 Feb 2013 22:50:29 GMT
Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,214 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.UUID;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class Mutation extends OperationWithAttributes implements Row {
+  // Attribute used in Mutations to indicate the originating cluster.
+  private static final String CLUSTER_ID_ATTR = "_c.id_";
+
+  protected byte [] row = null;
+  protected long ts = HConstants.LATEST_TIMESTAMP;
+  protected boolean writeToWAL = true;
+  protected Map<byte [], List<KeyValue>> familyMap =
+      new TreeMap<byte [], List<KeyValue>>(Bytes.BYTES_COMPARATOR);
+
+  /**
+   * Compile the column family (i.e. schema) information
+   * into a Map. Useful for parsing and aggregation by debugging,
+   * logging, and administration tools.
+   * @return Map
+   */
+  @Override
+  public Map<String, Object> getFingerprint() {
+    Map<String, Object> map = new HashMap<String, Object>();
+    List<String> families = new ArrayList<String>();
+    // ideally, we would also include table information, but that information
+    // is not stored in each Operation instance.
+    map.put("families", families);
+    for (Map.Entry<byte [], List<KeyValue>> entry : this.familyMap.entrySet()) {
+      families.add(Bytes.toStringBinary(entry.getKey()));
+    } 
+    return map;
+  }
+
+  /**
+   * Compile the details beyond the scope of getFingerprint (row, columns,
+   * timestamps, etc.) into a Map along with the fingerprinted information.
+   * Useful for debugging, logging, and administration tools.
+   * @param maxCols a limit on the number of columns output prior to truncation
+   * @return Map
+   */
+  @Override
+  public Map<String, Object> toMap(int maxCols) {
+    // we start with the fingerprint map and build on top of it.
+    Map<String, Object> map = getFingerprint();
+    // replace the fingerprint's simple list of families with a 
+    // map from column families to lists of qualifiers and kv details
+    Map<String, List<Map<String, Object>>> columns =
+      new HashMap<String, List<Map<String, Object>>>();
+    map.put("families", columns);
+    map.put("row", Bytes.toStringBinary(this.row));
+    int colCount = 0;
+    // iterate through all column families affected
+    for (Map.Entry<byte [], List<KeyValue>> entry : this.familyMap.entrySet()) {
+      // map from this family to details for each kv affected within the family
+      List<Map<String, Object>> qualifierDetails =
+        new ArrayList<Map<String, Object>>();
+      columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails);
+      colCount += entry.getValue().size();
+      if (maxCols <= 0) {
+        continue;
+      }
+      // add details for each kv
+      for (KeyValue kv : entry.getValue()) {
+        if (--maxCols <= 0 ) {
+          continue;
+        }
+        Map<String, Object> kvMap = kv.toStringMap();
+        // row and family information are already available in the bigger map
+        kvMap.remove("row");
+        kvMap.remove("family");
+        qualifierDetails.add(kvMap);
+      }
+    }
+    map.put("totalColumns", colCount);
+    // add the id if set
+    if (getId() != null) {
+      map.put("id", getId());
+    }
+    return map;
+  }
+
+  /**
+   * @return true if edits should be applied to WAL, false if not
+   */
+  public boolean getWriteToWAL() {
+    return this.writeToWAL;
+  }
+
+  /**
+   * Set whether this Delete should be written to the WAL or not.
+   * Not writing the WAL means you may lose edits on server crash.
+   * @param write true if edits should be written to WAL, false if not
+   */
+  public void setWriteToWAL(boolean write) {
+    this.writeToWAL = write;
+  }
+
+  /**
+   * Method for retrieving the put's familyMap
+   * @return familyMap
+   */
+  public Map<byte [], List<KeyValue>> getFamilyMap() {
+    return this.familyMap;
+  }
+
+  /**
+   * Method for setting the put's familyMap
+   */
+  public void setFamilyMap(Map<byte [], List<KeyValue>> map) {
+    this.familyMap = map;
+  }
+
+  /**
+   * Method to check if the familyMap is empty
+   * @return true if empty, false otherwise
+   */
+  public boolean isEmpty() {
+    return familyMap.isEmpty();
+  }
+
+  /**
+   * Method for retrieving the delete's row
+   * @return row
+   */
+  @Override
+  public byte [] getRow() {
+    return this.row;
+  }
+
+  public int compareTo(final Row d) {
+    return Bytes.compareTo(this.getRow(), d.getRow());
+  }
+
+  /**
+   * Method for retrieving the timestamp
+   * @return timestamp
+   */
+  public long getTimeStamp() {
+    return this.ts;
+  }
+
+  /**
+   * Set the replication custer id.
+   * @param clusterId
+   */
+  public void setClusterId(UUID clusterId) {
+    if (clusterId == null) return;
+    byte[] val = new byte[2*Bytes.SIZEOF_LONG];
+    Bytes.putLong(val, 0, clusterId.getMostSignificantBits());
+    Bytes.putLong(val, Bytes.SIZEOF_LONG, clusterId.getLeastSignificantBits());
+    setAttribute(CLUSTER_ID_ATTR, val);
+  }
+
+  /**
+   * @return The replication cluster id.
+   */
+  public UUID getClusterId() {
+    byte[] attr = getAttribute(CLUSTER_ID_ATTR);
+    if (attr == null) {
+      return HConstants.DEFAULT_CLUSTER_ID;
+    }
+    return new UUID(Bytes.toLong(attr,0), Bytes.toLong(attr, Bytes.SIZEOF_LONG));
+  }
+
+  /**
+   * @return the total number of KeyValues
+   */
+  public int size() {
+    int size = 0;
+    for(List<KeyValue> kvList : this.familyMap.values()) {
+      size += kvList.size();
+    }
+    return size;
+  }
+
+  /**
+   * @return the number of different families
+   */
+  public int numFamilies() {
+    return familyMap.size();
+  }
+}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,45 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.exceptions.RegionException;
+
+/**
+ * Thrown when no region server can be found for a region
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class NoServerForRegionException extends RegionException {
+  private static final long serialVersionUID = 1L << 11 - 1L;
+
+  /** default constructor */
+  public NoServerForRegionException() {
+    super();
+  }
+
+  /**
+   * Constructor
+   * @param s message
+   */
+  public NoServerForRegionException(String s) {
+    super(s);
+  }
+}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,113 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Superclass for any type that maps to a potentially application-level query.
+ * (e.g. Put, Get, Delete, Scan, Next, etc.)
+ * Contains methods for exposure to logging and debugging tools.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class Operation {
+  // TODO make this configurable
+  private static final int DEFAULT_MAX_COLS = 5;
+
+  /**
+   * Produces a Map containing a fingerprint which identifies the type and 
+   * the static schema components of a query (i.e. column families)
+   * @return a map containing fingerprint information (i.e. column families)
+   */
+  public abstract Map<String, Object> getFingerprint();
+
+  /**
+   * Produces a Map containing a summary of the details of a query 
+   * beyond the scope of the fingerprint (i.e. columns, rows...)
+   * @param maxCols a limit on the number of columns output prior to truncation
+   * @return a map containing parameters of a query (i.e. rows, columns...)
+   */
+  public abstract Map<String, Object> toMap(int maxCols);
+
+  /**
+   * Produces a Map containing a full summary of a query.
+   * @return a map containing parameters of a query (i.e. rows, columns...)
+   */
+  public Map<String, Object> toMap() {
+    return toMap(DEFAULT_MAX_COLS);
+  }
+
+  /**
+   * Produces a JSON object for fingerprint and details exposure in a
+   * parseable format.
+   * @param maxCols a limit on the number of columns to include in the JSON
+   * @return a JSONObject containing this Operation's information, as a string
+   */
+  public String toJSON(int maxCols) throws IOException {
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.writeValueAsString(toMap(maxCols));
+  }
+
+  /**
+   * Produces a JSON object sufficient for description of a query
+   * in a debugging or logging context.
+   * @return the produced JSON object, as a string
+   */
+  public String toJSON() throws IOException {
+    return toJSON(DEFAULT_MAX_COLS);
+  }
+
+  /**
+   * Produces a string representation of this Operation. It defaults to a JSON
+   * representation, but falls back to a string representation of the 
+   * fingerprint and details in the case of a JSON encoding failure.
+   * @param maxCols a limit on the number of columns output in the summary
+   * prior to truncation
+   * @return a JSON-parseable String
+   */
+  public String toString(int maxCols) {
+    /* for now this is merely a wrapper from producing a JSON string, but 
+     * toJSON is kept separate in case this is changed to be a less parsable
+     * pretty printed representation.
+     */
+    try {
+      return toJSON(maxCols);
+    } catch (IOException ioe) {
+      return toMap(maxCols).toString();
+    }
+  }
+
+  /**
+   * Produces a string representation of this Operation. It defaults to a JSON
+   * representation, but falls back to a string representation of the
+   * fingerprint and details in the case of a JSON encoding failure.
+   * @return String
+   */
+  @Override
+  public String toString() {
+    return toString(DEFAULT_MAX_COLS);
+  }
+}
+

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,108 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class OperationWithAttributes extends Operation implements Attributes {
+  // a opaque blob of attributes
+  private Map<String, byte[]> attributes;
+
+  // used for uniquely identifying an operation
+  public static final String ID_ATRIBUTE = "_operation.attributes.id";
+
+  public void setAttribute(String name, byte[] value) {
+    if (attributes == null && value == null) {
+      return;
+    }
+
+    if (attributes == null) {
+      attributes = new HashMap<String, byte[]>();
+    }
+
+    if (value == null) {
+      attributes.remove(name);
+      if (attributes.isEmpty()) {
+        this.attributes = null;
+      }
+    } else {
+      attributes.put(name, value);
+    }
+  }
+
+  public byte[] getAttribute(String name) {
+    if (attributes == null) {
+      return null;
+    }
+
+    return attributes.get(name);
+  }
+
+  public Map<String, byte[]> getAttributesMap() {
+    if (attributes == null) {
+      return Collections.emptyMap();
+    }
+    return Collections.unmodifiableMap(attributes);
+  }
+
+  protected long getAttributeSize() {
+    long size = 0;
+    if (attributes != null) {
+      size += ClassSize.align(this.attributes.size() * ClassSize.MAP_ENTRY);
+      for(Map.Entry<String, byte[]> entry : this.attributes.entrySet()) {
+        size += ClassSize.align(ClassSize.STRING + entry.getKey().length());
+        size += ClassSize.align(ClassSize.ARRAY + entry.getValue().length);
+      }
+    }
+    return size;
+  }
+
+  /**
+   * This method allows you to set an identifier on an operation. The original
+   * motivation for this was to allow the identifier to be used in slow query
+   * logging, but this could obviously be useful in other places. One use of
+   * this could be to put a class.method identifier in here to see where the
+   * slow query is coming from.
+   * @param id
+   *          id to set for the scan
+   */
+  public void setId(String id) {
+    setAttribute(ID_ATRIBUTE, Bytes.toBytes(id));
+  }
+
+  /**
+   * This method allows you to retrieve the identifier for the operation if one
+   * was set.
+   * @return the id or null if not set
+   */
+  public String getId() {
+    byte[] attr = getAttribute(ID_ATRIBUTE);
+    return attr == null? null: Bytes.toString(attr);
+  }
+}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,335 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * Used to perform Put operations for a single row.
+ * <p>
+ * To perform a Put, instantiate a Put object with the row to insert to and
+ * for each column to be inserted, execute {@link #add(byte[], byte[], byte[]) add} or
+ * {@link #add(byte[], byte[], long, byte[]) add} if setting the timestamp.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class Put extends Mutation implements HeapSize, Comparable<Row> {
+  private static final long OVERHEAD = ClassSize.align(
+      ClassSize.OBJECT + 2 * ClassSize.REFERENCE +
+      1 * Bytes.SIZEOF_LONG + Bytes.SIZEOF_BOOLEAN +
+      ClassSize.REFERENCE + ClassSize.TREEMAP);
+
+  /**
+   * Create a Put operation for the specified row.
+   * @param row row key
+   */
+  public Put(byte [] row) {
+    this(row, HConstants.LATEST_TIMESTAMP);
+  }
+
+  /**
+   * Create a Put operation for the specified row, using a given timestamp.
+   *
+   * @param row row key
+   * @param ts timestamp
+   */
+  public Put(byte[] row, long ts) {
+    if(row == null || row.length > HConstants.MAX_ROW_LENGTH) {
+      throw new IllegalArgumentException("Row key is invalid");
+    }
+    this.row = Arrays.copyOf(row, row.length);
+    this.ts = ts;
+  }
+
+  /**
+   * Copy constructor.  Creates a Put operation cloned from the specified Put.
+   * @param putToCopy put to copy
+   */
+  public Put(Put putToCopy) {
+    this(putToCopy.getRow(), putToCopy.ts);
+    this.familyMap =
+      new TreeMap<byte [], List<KeyValue>>(Bytes.BYTES_COMPARATOR);
+    for(Map.Entry<byte [], List<KeyValue>> entry :
+      putToCopy.getFamilyMap().entrySet()) {
+      this.familyMap.put(entry.getKey(), entry.getValue());
+    }
+    this.writeToWAL = putToCopy.writeToWAL;
+  }
+
+  /**
+   * Add the specified column and value to this Put operation.
+   * @param family family name
+   * @param qualifier column qualifier
+   * @param value column value
+   * @return this
+   */
+  public Put add(byte [] family, byte [] qualifier, byte [] value) {
+    return add(family, qualifier, this.ts, value);
+  }
+
+  /**
+   * Add the specified column and value, with the specified timestamp as
+   * its version to this Put operation.
+   * @param family family name
+   * @param qualifier column qualifier
+   * @param ts version timestamp
+   * @param value column value
+   * @return this
+   */
+  public Put add(byte [] family, byte [] qualifier, long ts, byte [] value) {
+    List<KeyValue> list = getKeyValueList(family);
+    KeyValue kv = createPutKeyValue(family, qualifier, ts, value);
+    list.add(kv);
+    familyMap.put(kv.getFamily(), list);
+    return this;
+  }
+
+  /**
+   * Add the specified KeyValue to this Put operation.  Operation assumes that
+   * the passed KeyValue is immutable and its backing array will not be modified
+   * for the duration of this Put.
+   * @param kv individual KeyValue
+   * @return this
+   * @throws java.io.IOException e
+   */
+  public Put add(KeyValue kv) throws IOException{
+    byte [] family = kv.getFamily();
+    List<KeyValue> list = getKeyValueList(family);
+    //Checking that the row of the kv is the same as the put
+    int res = Bytes.compareTo(this.row, 0, row.length,
+        kv.getBuffer(), kv.getRowOffset(), kv.getRowLength());
+    if(res != 0) {
+      throw new IOException("The row in the recently added KeyValue " +
+          Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(),
+        kv.getRowLength()) + " doesn't match the original one " +
+        Bytes.toStringBinary(this.row));
+    }
+    list.add(kv);
+    familyMap.put(family, list);
+    return this;
+  }
+
+  /*
+   * Create a KeyValue with this objects row key and the Put identifier.
+   *
+   * @return a KeyValue with this objects row key and the Put identifier.
+   */
+  private KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts,
+      byte[] value) {
+  return  new KeyValue(this.row, family, qualifier, ts, KeyValue.Type.Put,
+      value);
+  }
+
+  /**
+   * A convenience method to determine if this object's familyMap contains
+   * a value assigned to the given family & qualifier.
+   * Both given arguments must match the KeyValue object to return true.
+   *
+   * @param family column family
+   * @param qualifier column qualifier
+   * @return returns true if the given family and qualifier already has an
+   * existing KeyValue object in the family map.
+   */
+  public boolean has(byte [] family, byte [] qualifier) {
+  return has(family, qualifier, this.ts, new byte[0], true, true);
+  }
+
+  /**
+   * A convenience method to determine if this object's familyMap contains
+   * a value assigned to the given family, qualifier and timestamp.
+   * All 3 given arguments must match the KeyValue object to return true.
+   *
+   * @param family column family
+   * @param qualifier column qualifier
+   * @param ts timestamp
+   * @return returns true if the given family, qualifier and timestamp already has an
+   * existing KeyValue object in the family map.
+   */
+  public boolean has(byte [] family, byte [] qualifier, long ts) {
+  return has(family, qualifier, ts, new byte[0], false, true);
+  }
+
+  /**
+   * A convenience method to determine if this object's familyMap contains
+   * a value assigned to the given family, qualifier and timestamp.
+   * All 3 given arguments must match the KeyValue object to return true.
+   *
+   * @param family column family
+   * @param qualifier column qualifier
+   * @param value value to check
+   * @return returns true if the given family, qualifier and value already has an
+   * existing KeyValue object in the family map.
+   */
+  public boolean has(byte [] family, byte [] qualifier, byte [] value) {
+    return has(family, qualifier, this.ts, value, true, false);
+  }
+
+  /**
+   * A convenience method to determine if this object's familyMap contains
+   * the given value assigned to the given family, qualifier and timestamp.
+   * All 4 given arguments must match the KeyValue object to return true.
+   *
+   * @param family column family
+   * @param qualifier column qualifier
+   * @param ts timestamp
+   * @param value value to check
+   * @return returns true if the given family, qualifier timestamp and value
+   * already has an existing KeyValue object in the family map.
+   */
+  public boolean has(byte [] family, byte [] qualifier, long ts, byte [] value) {
+      return has(family, qualifier, ts, value, false, false);
+  }
+
+  /*
+   * Private method to determine if this object's familyMap contains
+   * the given value assigned to the given family, qualifier and timestamp
+   * respecting the 2 boolean arguments
+   *
+   * @param family
+   * @param qualifier
+   * @param ts
+   * @param value
+   * @param ignoreTS
+   * @param ignoreValue
+   * @return returns true if the given family, qualifier timestamp and value
+   * already has an existing KeyValue object in the family map.
+   */
+  private boolean has(byte[] family, byte[] qualifier, long ts, byte[] value,
+                      boolean ignoreTS, boolean ignoreValue) {
+    List<KeyValue> list = getKeyValueList(family);
+    if (list.size() == 0) {
+      return false;
+    }
+    // Boolean analysis of ignoreTS/ignoreValue.
+    // T T => 2
+    // T F => 3 (first is always true)
+    // F T => 2
+    // F F => 1
+    if (!ignoreTS && !ignoreValue) {
+      for (KeyValue kv : list) {
+        if (Arrays.equals(kv.getFamily(), family) &&
+            Arrays.equals(kv.getQualifier(), qualifier) &&
+            Arrays.equals(kv.getValue(), value) &&
+            kv.getTimestamp() == ts) {
+          return true;
+        }
+      }
+    } else if (ignoreValue && !ignoreTS) {
+      for (KeyValue kv : list) {
+        if (Arrays.equals(kv.getFamily(), family) && Arrays.equals(kv.getQualifier(), qualifier)
+            && kv.getTimestamp() == ts) {
+          return true;
+        }
+      }
+    } else if (!ignoreValue && ignoreTS) {
+      for (KeyValue kv : list) {
+        if (Arrays.equals(kv.getFamily(), family) && Arrays.equals(kv.getQualifier(), qualifier)
+            && Arrays.equals(kv.getValue(), value)) {
+          return true;
+        }
+      }
+    } else {
+      for (KeyValue kv : list) {
+        if (Arrays.equals(kv.getFamily(), family) &&
+            Arrays.equals(kv.getQualifier(), qualifier)) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Returns a list of all KeyValue objects with matching column family and qualifier.
+   *
+   * @param family column family
+   * @param qualifier column qualifier
+   * @return a list of KeyValue objects with the matching family and qualifier,
+   * returns an empty list if one doesnt exist for the given family.
+   */
+  public List<KeyValue> get(byte[] family, byte[] qualifier) {
+    List<KeyValue> filteredList = new ArrayList<KeyValue>();
+    for (KeyValue kv: getKeyValueList(family)) {
+      if (Arrays.equals(kv.getQualifier(), qualifier)) {
+        filteredList.add(kv);
+      }
+    }
+    return filteredList;
+  }
+
+  /**
+   * Creates an empty list if one doesnt exist for the given column family
+   * or else it returns the associated list of KeyValue objects.
+   *
+   * @param family column family
+   * @return a list of KeyValue objects, returns an empty list if one doesnt exist.
+   */
+  private List<KeyValue> getKeyValueList(byte[] family) {
+    List<KeyValue> list = familyMap.get(family);
+    if(list == null) {
+      list = new ArrayList<KeyValue>(0);
+    }
+    return list;
+  }
+
+  //HeapSize
+  public long heapSize() {
+    long heapsize = OVERHEAD;
+    //Adding row
+    heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length);
+
+    //Adding map overhead
+    heapsize +=
+      ClassSize.align(this.familyMap.size() * ClassSize.MAP_ENTRY);
+    for(Map.Entry<byte [], List<KeyValue>> entry : this.familyMap.entrySet()) {
+      //Adding key overhead
+      heapsize +=
+        ClassSize.align(ClassSize.ARRAY + entry.getKey().length);
+
+      //This part is kinds tricky since the JVM can reuse references if you
+      //store the same value, but have a good match with SizeOf at the moment
+      //Adding value overhead
+      heapsize += ClassSize.align(ClassSize.ARRAYLIST);
+      int size = entry.getValue().size();
+      heapsize += ClassSize.align(ClassSize.ARRAY +
+          size * ClassSize.REFERENCE);
+
+      for(KeyValue kv : entry.getValue()) {
+        heapsize += kv.heapSize();
+      }
+    }
+    heapsize += getAttributeSize();
+
+    return ClassSize.align((int)heapsize);
+  }
+}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,39 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.exceptions.RegionException;
+
/** Thrown when an operation targets a region that is currently offline */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class RegionOfflineException extends RegionException {
  // Fixed id so serialized instances stay compatible across versions.
  private static final long serialVersionUID = 466008402L;
  /** default constructor */
  public RegionOfflineException() {
    super();
  }

  /** @param s message */
  public RegionOfflineException(String s) {
    super(s);
  }
}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,709 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.SplitKeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+/**
+ * Single row result of a {@link Get} or {@link Scan} query.<p>
+ *
+ * This class is NOT THREAD SAFE.<p>
+ *
+ * Convenience methods are available that return various {@link Map}
+ * structures and values directly.<p>
+ *
+ * To get a complete mapping of all cells in the Result, which can include
+ * multiple families and multiple versions, use {@link #getMap()}.<p>
+ *
+ * To get a mapping of each family to its columns (qualifiers and values),
+ * including only the latest version of each, use {@link #getNoVersionMap()}.
+ *
+ * To get a mapping of qualifiers to latest values for an individual family use
+ * {@link #getFamilyMap(byte[])}.<p>
+ *
+ * To get the latest value for a specific family and qualifier use {@link #getValue(byte[], byte[])}.
+ *
+ * A Result is backed by an array of {@link KeyValue} objects, each representing
+ * an HBase cell defined by the row, family, qualifier, timestamp, and value.<p>
+ *
+ * The underlying {@link KeyValue} objects can be accessed through the method {@link #list()}.
+ * Each KeyValue can then be accessed through
+ * {@link KeyValue#getRow()}, {@link KeyValue#getFamily()}, {@link KeyValue#getQualifier()},
+ * {@link KeyValue#getTimestamp()}, and {@link KeyValue#getValue()}.<p>
+ * 
+ * If you need to overwrite a Result with another Result instance -- as in the old 'mapred' RecordReader next
+ * invocations -- then create an empty Result with the null constructor and in then use {@link #copyFrom(Result)}
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class Result {
  // Backing array of cells; null for an empty Result created via Result().
  private KeyValue [] kvs;
  // We're not using java serialization.  Transient here is just a marker to say
  // that this is where we cache row if we're ever asked for it.
  private transient byte [] row = null;
  // Ditto for familyMap.  It can be composed on fly from passed in kvs.
  private transient NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> familyMap = null;

  // Scratch buffer for binarySearch probe KeyValues; never use directly.
  // NOTE(review): this is static and written without synchronization, so
  // concurrent searches from different threads can race on it — confirm
  // single-threaded access or consider a ThreadLocal.
  private static byte [] buffer = null;
  private static final int PAD_WIDTH = 128;
+
+  /**
+   * Creates an empty Result w/ no KeyValue payload; returns null if you call {@link #raw()}.
+   * Use this to represent no results if <code>null</code> won't do or in old 'mapred' as oppposed to 'mapreduce' package
+   * MapReduce where you need to overwrite a Result
+   * instance with a {@link #copyFrom(Result)} call.
+   */
+  public Result() {
+    super();
+  }
+
  /**
   * Instantiate a Result with the specified array of KeyValues.
   * <br><strong>Note:</strong> You must ensure that the keyvalues
   * are already sorted
   * @param kvs array of KeyValues
   */
  public Result(KeyValue [] kvs) {
    // Kept by reference (no defensive copy); assumed sorted per
    // KeyValue.COMPARATOR.
    this.kvs = kvs;
  }
+
+  /**
+   * Instantiate a Result with the specified List of KeyValues.
+   * <br><strong>Note:</strong> You must ensure that the keyvalues
+   * are already sorted
+   * @param kvs List of KeyValues
+   */
+  public Result(List<KeyValue> kvs) {
+    this(kvs.toArray(new KeyValue[kvs.size()]));
+  }
+
+  /**
+   * Method for retrieving the row key that corresponds to
+   * the row from which this Result was created.
+   * @return row
+   */
+  public byte [] getRow() {
+    if (this.row == null) {
+      this.row = this.kvs == null || this.kvs.length == 0? null: this.kvs[0].getRow();
+    }
+    return this.row;
+  }
+
  /**
   * Return the array of KeyValues backing this Result instance.
   *
   * The array is sorted from smallest -> largest using the
   * {@link KeyValue#COMPARATOR}.
   *
   * The array only contains what your Get or Scan specifies and no more.
   * For example if you request column "A" 1 version you will have at most 1
   * KeyValue in the array. If you request column "A" with 2 version you will
   * have at most 2 KeyValues, with the first one being the newer timestamp and
   * the second being the older timestamp (this is the sort order defined by
   * {@link KeyValue#COMPARATOR}).  If columns don't exist, they won't be
   * present in the result. Therefore if you ask for 1 version all columns,
   * it is safe to iterate over this array and expect to see 1 KeyValue for
   * each column and no more.
   *
   * This API is faster than using getFamilyMap() and getMap()
   *
   * @return array of KeyValues; can be null if nothing in the result
   */
  public KeyValue[] raw() {
    // Direct reference to the backing array; callers must not modify it.
    return kvs;
  }
+
+  /**
+   * Create a sorted list of the KeyValue's in this result.
+   *
+   * Since HBase 0.20.5 this is equivalent to raw().
+   *
+   * @return The sorted list of KeyValue's.
+   */
+  public List<KeyValue> list() {
+    return isEmpty()? null: Arrays.asList(raw());
+  }
+
+  /**
+   * Return the KeyValues for the specific column.  The KeyValues are sorted in
+   * the {@link KeyValue#COMPARATOR} order.  That implies the first entry in
+   * the list is the most recent column.  If the query (Scan or Get) only
+   * requested 1 version the list will contain at most 1 entry.  If the column
+   * did not exist in the result set (either the column does not exist
+   * or the column was not selected in the query) the list will be empty.
+   *
+   * Also see getColumnLatest which returns just a KeyValue
+   *
+   * @param family the family
+   * @param qualifier
+   * @return a list of KeyValues for this column or empty list if the column
+   * did not exist in the result set
+   */
+  public List<KeyValue> getColumn(byte [] family, byte [] qualifier) {
+    List<KeyValue> result = new ArrayList<KeyValue>();
+
+    KeyValue [] kvs = raw();
+
+    if (kvs == null || kvs.length == 0) {
+      return result;
+    }
+    int pos = binarySearch(kvs, family, qualifier);
+    if (pos == -1) {
+      return result; // cant find it
+    }
+
+    for (int i = pos ; i < kvs.length ; i++ ) {
+      KeyValue kv = kvs[i];
+      if (kv.matchingColumn(family,qualifier)) {
+        result.add(kv);
+      } else {
+        break;
+      }
+    }
+
+    return result;
+  }
+
  /**
   * Binary-search {@code kvs} for the first entry at or after the given
   * family/qualifier on this row.
   *
   * @param kvs sorted array to search; must be non-empty
   * @param family family name
   * @param qualifier column qualifier
   * @return index of the first candidate entry, or -1 if the insertion point
   *         falls past the end of the array
   */
  protected int binarySearch(final KeyValue [] kvs,
                             final byte [] family,
                             final byte [] qualifier) {
    // Probe KeyValue that sorts before any real cell of this column.
    KeyValue searchTerm =
        KeyValue.createFirstOnRow(kvs[0].getRow(),
            family, qualifier);

    // pos === ( -(insertion point) - 1)
    int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR);
    // never will exact match
    if (pos < 0) {
      pos = (pos+1) * -1;
      // pos is now insertion point
    }
    if (pos == kvs.length) {
      return -1; // doesn't exist
    }
    return pos;
  }
+
+  /**
+   * Searches for the latest value for the specified column.
+   *
+   * @param kvs the array to search
+   * @param family family name
+   * @param foffset family offset
+   * @param flength family length
+   * @param qualifier column qualifier
+   * @param qoffset qualifier offset
+   * @param qlength qualifier length
+   *
+   * @return the index where the value was found, or -1 otherwise
+   */
+  protected int binarySearch(final KeyValue [] kvs,
+      final byte [] family, final int foffset, final int flength,
+      final byte [] qualifier, final int qoffset, final int qlength) {
+
+    double keyValueSize = (double)
+        KeyValue.getKeyValueDataStructureSize(kvs[0].getRowLength(), flength, qlength, 0);
+
+    if (buffer == null || keyValueSize > buffer.length) {
+      // pad to the smallest multiple of the pad width
+      buffer = new byte[(int) Math.ceil(keyValueSize / PAD_WIDTH) * PAD_WIDTH];
+    }
+
+    KeyValue searchTerm = KeyValue.createFirstOnRow(buffer, 0,
+        kvs[0].getBuffer(), kvs[0].getRowOffset(), kvs[0].getRowLength(),
+        family, foffset, flength,
+        qualifier, qoffset, qlength);
+
+    // pos === ( -(insertion point) - 1)
+    int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR);
+    // never will exact match
+    if (pos < 0) {
+      pos = (pos+1) * -1;
+      // pos is now insertion point
+    }
+    if (pos == kvs.length) {
+      return -1; // doesn't exist
+    }
+    return pos;
+  }
+
+  /**
+   * The KeyValue for the most recent timestamp for a given column.
+   *
+   * @param family
+   * @param qualifier
+   *
+   * @return the KeyValue for the column, or null if no value exists in the row or none have been
+   * selected in the query (Get/Scan)
+   */
+  public KeyValue getColumnLatest(byte [] family, byte [] qualifier) {
+    KeyValue [] kvs = raw(); // side effect possibly.
+    if (kvs == null || kvs.length == 0) {
+      return null;
+    }
+    int pos = binarySearch(kvs, family, qualifier);
+    if (pos == -1) {
+      return null;
+    }
+    KeyValue kv = kvs[pos];
+    if (kv.matchingColumn(family, qualifier)) {
+      return kv;
+    }
+    return null;
+  }
+
  /**
   * The KeyValue for the most recent timestamp for a given column.
   *
   * @param family family name
   * @param foffset family offset
   * @param flength family length
   * @param qualifier column qualifier
   * @param qoffset qualifier offset
   * @param qlength qualifier length
   *
   * @return the KeyValue for the column, or null if no value exists in the row or none have been
   * selected in the query (Get/Scan)
   */
  public KeyValue getColumnLatest(byte [] family, int foffset, int flength,
      byte [] qualifier, int qoffset, int qlength) {

    KeyValue [] kvs = raw(); // side effect possibly.
    if (kvs == null || kvs.length == 0) {
      return null;
    }
    int pos = binarySearch(kvs, family, foffset, flength, qualifier, qoffset, qlength);
    if (pos == -1) {
      return null;
    }
    KeyValue kv = kvs[pos];
    // The search only returns an insertion point; verify it is actually
    // the requested column before returning it.
    if (kv.matchingColumn(family, foffset, flength, qualifier, qoffset, qlength)) {
      return kv;
    }
    return null;
  }
+
+  /**
+   * Get the latest version of the specified column.
+   * @param family family name
+   * @param qualifier column qualifier
+   * @return value of latest version of column, null if none found
+   */
+  public byte[] getValue(byte [] family, byte [] qualifier) {
+    KeyValue kv = getColumnLatest(family, qualifier);
+    if (kv == null) {
+      return null;
+    }
+    return kv.getValue();
+  }
+
+  /**
+   * Returns the value wrapped in a new <code>ByteBuffer</code>.
+   *
+   * @param family family name
+   * @param qualifier column qualifier
+   *
+   * @return the latest version of the column, or <code>null</code> if none found
+   */
+  public ByteBuffer getValueAsByteBuffer(byte [] family, byte [] qualifier) {
+
+    KeyValue kv = getColumnLatest(family, 0, family.length, qualifier, 0, qualifier.length);
+
+    if (kv == null) {
+      return null;
+    }
+    return kv.getValueAsByteBuffer();
+  }
+
  /**
   * Returns the value wrapped in a new <code>ByteBuffer</code>.
   *
   * @param family family name
   * @param foffset family offset
   * @param flength family length
   * @param qualifier column qualifier
   * @param qoffset qualifier offset
   * @param qlength qualifier length
   *
   * @return the latest version of the column, or <code>null</code> if none found
   */
  public ByteBuffer getValueAsByteBuffer(byte [] family, int foffset, int flength,
      byte [] qualifier, int qoffset, int qlength) {

    KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength);

    if (kv == null) {
      return null;
    }
    // Wrap rather than copy; see KeyValue#getValueAsByteBuffer for details.
    return kv.getValueAsByteBuffer();
  }
+
  /**
   * Loads the latest version of the specified column into the provided <code>ByteBuffer</code>.
   * <p>
   * Does not clear or flip the buffer.
   *
   * @param family family name
   * @param qualifier column qualifier
   * @param dst the buffer where to write the value
   *
   * @return <code>true</code> if a value was found, <code>false</code> otherwise
   *
   * @throws BufferOverflowException there is insufficient space remaining in the buffer
   */
  public boolean loadValue(byte [] family, byte [] qualifier, ByteBuffer dst)
          throws BufferOverflowException {
    // Delegates to the offset/length overload covering the whole arrays.
    return loadValue(family, 0, family.length, qualifier, 0, qualifier.length, dst);
  }
+
  /**
   * Loads the latest version of the specified column into the provided <code>ByteBuffer</code>.
   * <p>
   * Does not clear or flip the buffer.
   *
   * @param family family name
   * @param foffset family offset
   * @param flength family length
   * @param qualifier column qualifier
   * @param qoffset qualifier offset
   * @param qlength qualifier length
   * @param dst the buffer where to write the value
   *
   * @return <code>true</code> if a value was found, <code>false</code> otherwise
   *
   * @throws BufferOverflowException there is insufficient space remaining in the buffer
   */
  public boolean loadValue(byte [] family, int foffset, int flength,
      byte [] qualifier, int qoffset, int qlength, ByteBuffer dst)
          throws BufferOverflowException {
    KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength);

    if (kv == null) {
      return false;
    }
    // Copy the cell's value bytes into the caller-supplied buffer.
    kv.loadValue(dst);
    return true;
  }
+
  /**
   * Checks if the specified column contains a non-empty value (not a zero-length byte array).
   *
   * @param family family name
   * @param qualifier column qualifier
   *
   * @return whether or not a latest value exists and is not empty
   */
  public boolean containsNonEmptyColumn(byte [] family, byte [] qualifier) {
    // Delegates to the offset/length overload covering the whole arrays.
    return containsNonEmptyColumn(family, 0, family.length, qualifier, 0, qualifier.length);
  }
+
+  /**
+   * Checks if the specified column contains a non-empty value (not a zero-length byte array).
+   *
+   * @param family family name
+   * @param foffset family offset
+   * @param flength family length
+   * @param qualifier column qualifier
+   * @param qoffset qualifier offset
+   * @param qlength qualifier length
+   *
+   * @return whether or not a latest value exists and is not empty
+   */
+  public boolean containsNonEmptyColumn(byte [] family, int foffset, int flength,
+      byte [] qualifier, int qoffset, int qlength) {
+
+    KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength);
+
+    return (kv != null) && (kv.getValueLength() > 0);
+  }
+
  /**
   * Checks if the specified column contains an empty value (a zero-length byte array).
   *
   * @param family family name
   * @param qualifier column qualifier
   *
   * @return whether or not a latest value exists and is empty
   */
  public boolean containsEmptyColumn(byte [] family, byte [] qualifier) {
    // Delegates to the offset/length overload covering the whole arrays.
    return containsEmptyColumn(family, 0, family.length, qualifier, 0, qualifier.length);
  }
+
+  /**
+   * Checks if the specified column contains an empty value (a zero-length byte array).
+   *
+   * @param family family name
+   * @param foffset family offset
+   * @param flength family length
+   * @param qualifier column qualifier
+   * @param qoffset qualifier offset
+   * @param qlength qualifier length
+   *
+   * @return whether or not a latest value exists and is empty
+   */
+  public boolean containsEmptyColumn(byte [] family, int foffset, int flength,
+      byte [] qualifier, int qoffset, int qlength) {
+    KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength);
+
+    return (kv != null) && (kv.getValueLength() == 0);
+  }
+
+  /**
+   * Checks for existence of a value for the specified column (empty or not).
+   *
+   * @param family family name
+   * @param qualifier column qualifier
+   *
+   * @return true if at least one value exists in the result, false if not
+   */
+  public boolean containsColumn(byte [] family, byte [] qualifier) {
+    KeyValue kv = getColumnLatest(family, qualifier);
+    return kv != null;
+  }
+
  /**
   * Checks for existence of a value for the specified column (empty or not).
   *
   * @param family family name
   * @param foffset family offset
   * @param flength family length
   * @param qualifier column qualifier
   * @param qoffset qualifier offset
   * @param qlength qualifier length
   *
   * @return true if at least one value exists in the result, false if not
   */
  public boolean containsColumn(byte [] family, int foffset, int flength,
      byte [] qualifier, int qoffset, int qlength) {
    // Any latest KeyValue, even with a zero-length value, counts as present.
    return getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength) != null;
  }
+
  /**
   * Map of families to all versions of its qualifiers and values.
   * <p>
   * Returns a three level Map of the form:
   * <code>Map&lt;family, Map&lt;qualifier, Map&lt;timestamp, value&gt;&gt;&gt;</code>
   * <p>
   * Note: All other map returning methods make use of this map internally.
   * The map is built lazily on first call and cached in the familyMap field.
   * @return map from families to qualifiers to versions, or null if this
   * Result is empty
   */
  public NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> getMap() {
    if (this.familyMap != null) {
      return this.familyMap;
    }
    if(isEmpty()) {
      return null;
    }
    this.familyMap = new TreeMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>(Bytes.BYTES_COMPARATOR);
    for(KeyValue kv : this.kvs) {
      SplitKeyValue splitKV = kv.split();
      byte [] family = splitKV.getFamily();
      NavigableMap<byte[], NavigableMap<Long, byte[]>> columnMap =
        familyMap.get(family);
      if(columnMap == null) {
        columnMap = new TreeMap<byte[], NavigableMap<Long, byte[]>>
          (Bytes.BYTES_COMPARATOR);
        familyMap.put(family, columnMap);
      }
      byte [] qualifier = splitKV.getQualifier();
      NavigableMap<Long, byte[]> versionMap = columnMap.get(qualifier);
      if(versionMap == null) {
        // Reverse ordering on timestamps so the newest version is first.
        versionMap = new TreeMap<Long, byte[]>(new Comparator<Long>() {
          public int compare(Long l1, Long l2) {
            return l2.compareTo(l1);
          }
        });
        columnMap.put(qualifier, versionMap);
      }
      Long timestamp = Bytes.toLong(splitKV.getTimestamp());
      byte [] value = splitKV.getValue();
      versionMap.put(timestamp, value);
    }
    return this.familyMap;
  }
+
  /**
   * Map of families to their most recent qualifiers and values.
   * <p>
   * Returns a two level Map of the form:
   * <code>Map&lt;family, Map&lt;qualifier, value&gt;&gt;</code>
   * <p>
   * The most recent version of each qualifier will be used.
   * @return map from families to qualifiers and value, or null if this
   * Result is empty
   */
  public NavigableMap<byte[], NavigableMap<byte[], byte[]>> getNoVersionMap() {
    if(this.familyMap == null) {
      getMap();
    }
    if(isEmpty()) {
      return null;
    }
    NavigableMap<byte[], NavigableMap<byte[], byte[]>> returnMap =
      new TreeMap<byte[], NavigableMap<byte[], byte[]>>(Bytes.BYTES_COMPARATOR);
    for(Map.Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>
      familyEntry : familyMap.entrySet()) {
      NavigableMap<byte[], byte[]> qualifierMap =
        new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
      for(Map.Entry<byte[], NavigableMap<Long, byte[]>> qualifierEntry :
        familyEntry.getValue().entrySet()) {
        // getMap() orders versions newest-first, so firstKey() is the latest.
        byte [] value =
          qualifierEntry.getValue().get(qualifierEntry.getValue().firstKey());
        qualifierMap.put(qualifierEntry.getKey(), value);
      }
      returnMap.put(familyEntry.getKey(), qualifierMap);
    }
    return returnMap;
  }
+
+  /**
+   * Map of qualifiers to values.
+   * <p>
+   * Returns a Map of the form: <code>Map&lt;qualifier,value></code>
+   * @param family column family to get
+   * @return map of qualifiers to values
+   */
+  public NavigableMap<byte[], byte[]> getFamilyMap(byte [] family) {
+    if(this.familyMap == null) {
+      getMap();
+    }
+    if(isEmpty()) {
+      return null;
+    }
+    NavigableMap<byte[], byte[]> returnMap =
+      new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
+    NavigableMap<byte[], NavigableMap<Long, byte[]>> qualifierMap =
+      familyMap.get(family);
+    if(qualifierMap == null) {
+      return returnMap;
+    }
+    for(Map.Entry<byte[], NavigableMap<Long, byte[]>> entry :
+      qualifierMap.entrySet()) {
+      byte [] value =
+        entry.getValue().get(entry.getValue().firstKey());
+      returnMap.put(entry.getKey(), value);
+    }
+    return returnMap;
+  }
+
+  /**
+   * Returns the value of the first column in the Result.
+   * @return value of the first column
+   */
+  public byte [] value() {
+    if (isEmpty()) {
+      return null;
+    }
+    return kvs[0].getValue();
+  }
+
  /**
   * Check if the underlying KeyValue [] is empty or not
   * @return true if empty
   */
  public boolean isEmpty() {
    // A null backing array (empty Result() constructor) also counts as empty.
    return this.kvs == null || this.kvs.length == 0;
  }
+
+  /**
+   * @return the size of the underlying KeyValue []
+   */
+  public int size() {
+    return this.kvs == null? 0: this.kvs.length;
+  }
+
+  /**
+   * @return String
+   */
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("keyvalues=");
+    if(isEmpty()) {
+      sb.append("NONE");
+      return sb.toString();
+    }
+    sb.append("{");
+    boolean moreThanOne = false;
+    for(KeyValue kv : this.kvs) {
+      if(moreThanOne) {
+        sb.append(", ");
+      } else {
+        moreThanOne = true;
+      }
+      sb.append(kv.toString());
+    }
+    sb.append("}");
+    return sb.toString();
+  }
+
  /**
   * Does a deep comparison of two Results, down to the byte arrays.
   * NOTE(review): throws raw Exception by design here — existing callers
   * catch Exception, so narrowing the type would change the contract.
   * @param res1 first result to compare; assumed non-null
   * @param res2 second result to compare
   * @throws Exception Every difference is throwing an exception
   */
  public static void compareResults(Result res1, Result res2)
      throws Exception {
    if (res2 == null) {  
      throw new Exception("There wasn't enough rows, we stopped at "
          + Bytes.toStringBinary(res1.getRow()));
    }
    if (res1.size() != res2.size()) {
      throw new Exception("This row doesn't have the same number of KVs: "
          + res1.toString() + " compared to " + res2.toString());
    }
    KeyValue[] ourKVs = res1.raw();
    KeyValue[] replicatedKVs = res2.raw();
    for (int i = 0; i < res1.size(); i++) {
      // Compare both the KeyValue identity (row/family/qualifier/ts) and the
      // value payload bytes.
      if (!ourKVs[i].equals(replicatedKVs[i]) ||
          !Bytes.equals(ourKVs[i].getValue(), replicatedKVs[i].getValue())) {
        throw new Exception("This result was different: "
            + res1.toString() + " compared to " + res2.toString());
      }
    }
  }
+
  /**
   * Copy another Result into this one. Needed for the old Mapred framework
   * @param other Result whose backing KeyValue array is adopted (by reference)
   */
  public void copyFrom(Result other) {
    // Reset the cached row and family map so they are re-derived lazily
    // from the newly adopted KeyValue array.
    this.row = null;
    this.familyMap = null;
    this.kvs = other.kvs;
  }
+}
\ No newline at end of file

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,54 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.Closeable;
+import java.io.IOException;
+
/**
 * Interface for client-side scanning.
 * Go to {@link HTable} to obtain instances.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface ResultScanner extends Closeable, Iterable<Result> {

  /**
   * Grab the next row's worth of values. The scanner will return a Result.
   * @return Result object if there is another row, null if the scanner is
   * exhausted.
   * @throws IOException e
   */
  public Result next() throws IOException;

  /**
   * @param nbRows number of rows to return
   * @return Between zero and {@code nbRows} Results
   * @throws IOException e
   */
  public Result [] next(int nbRows) throws IOException;

  /**
   * Closes the scanner and releases any resources it has allocated
   */
  public void close();
}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,111 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+import java.util.Date;
+import java.util.List;
+
/**
 * Exception thrown by HTable methods when an attempt to do something (like
 * commit changes) fails after a bunch of retries.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class RetriesExhaustedException extends IOException {
  private static final long serialVersionUID = 1876775844L;

  /** @param msg detail message describing what was being retried */
  public RetriesExhaustedException(final String msg) {
    super(msg);
  }

  /**
   * @param msg detail message describing what was being retried
   * @param e the underlying cause of the final failure
   */
  public RetriesExhaustedException(final String msg, final IOException e) {
    super(msg, e);
  }

  /**
   * Datastructure that allows adding more info around Throwable incident.
   */
  public static class ThrowableWithExtraContext {
    private final Throwable t;      // the failure itself
    private final long when;        // epoch millis of the failure
    private final String extras;    // caller-supplied context string

    public ThrowableWithExtraContext(final Throwable t, final long when,
        final String extras) {
      this.t = t;
      this.when = when;
      this.extras = extras;
    }
 
    @Override
    public String toString() {
      return new Date(this.when).toString() + ", " + extras + ", " + t.toString();
    }
  }

  /**
   * Create a new RetriesExhaustedException from the list of prior failures.
   * @param callableVitals Details from the {@link ServerCallable} we were using
   * when we got this exception.
   * @param numTries The number of tries we made
   * @param exceptions List of exceptions that failed before giving up
   */
  public RetriesExhaustedException(final String callableVitals, int numTries,
      List<Throwable> exceptions) {
    super(getMessage(callableVitals, numTries, exceptions));
  }

  /**
   * Create a new RetriesExhaustedException from the list of prior failures.
   * The last listed failure becomes this exception's cause.
   * @param numTries The number of tries we made
   * @param exceptions List of exceptions that failed before giving up
   */
  public RetriesExhaustedException(final int numTries,
                                   final List<ThrowableWithExtraContext> exceptions) {
    super(getMessage(numTries, exceptions),
        (exceptions != null && !exceptions.isEmpty() ?
            exceptions.get(exceptions.size() - 1).t : null));
  }

  // Builds the message for the callable-vitals form; reports attempts as
  // numTries + 1 (the initial attempt plus the retries).
  private static String getMessage(String callableVitals, int numTries,
      List<Throwable> exceptions) {
    StringBuilder buffer = new StringBuilder("Failed contacting ");
    buffer.append(callableVitals);
    buffer.append(" after ");
    buffer.append(numTries + 1);
    buffer.append(" attempts.\nExceptions:\n");
    for (Throwable t : exceptions) {
      buffer.append(t.toString());
      buffer.append("\n");
    }
    return buffer.toString();
  }

  // Builds the message for the extra-context form; same attempts accounting.
  private static String getMessage(final int numTries,
      final List<ThrowableWithExtraContext> exceptions) {
    StringBuilder buffer = new StringBuilder("Failed after attempts=");
    buffer.append(numTries + 1);
    buffer.append(", exceptions:\n");
    for (ThrowableWithExtraContext t : exceptions) {
      buffer.append(t.toString());
      buffer.append("\n");
    }
    return buffer.toString();
  }
}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,173 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * This subclass of {@link org.apache.hadoop.hbase.client.RetriesExhaustedException}
+ * is thrown when we have more information about which rows were causing which
+ * exceptions on what servers.  You can call {@link #mayHaveClusterIssues()}
+ * and if the result is false, you have input error problems, otherwise you
+ * may have cluster issues.  You can iterate over the causes, rows and last
+ * known server addresses via {@link #getNumExceptions()} and
+ * {@link #getCause(int)}, {@link #getRow(int)} and {@link #getHostnamePort(int)}.
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class RetriesExhaustedWithDetailsException
+extends RetriesExhaustedException {
+  List<Throwable> exceptions;
+  List<Row> actions;
+  List<String> hostnameAndPort;
+
+  public RetriesExhaustedWithDetailsException(List<Throwable> exceptions,
+                                              List<Row> actions,
+                                              List<String> hostnameAndPort) {
+    super("Failed " + exceptions.size() + " action" +
+        pluralize(exceptions) + ": " +
+        getDesc(exceptions, actions, hostnameAndPort));
+    
+    this.exceptions = exceptions;
+    this.actions = actions;
+    this.hostnameAndPort = hostnameAndPort;
+  }
+
+  public List<Throwable> getCauses() {
+    return exceptions;
+  }
+
+  public int getNumExceptions() {
+    return exceptions.size();
+  }
+
+  public Throwable getCause(int i) {
+    return exceptions.get(i);
+  }
+
+  public Row getRow(int i) {
+    return actions.get(i);
+  }
+
+  public String getHostnamePort(final int i) {
+    return this.hostnameAndPort.get(i);
+  }
+
+  public boolean mayHaveClusterIssues() {
+    boolean res = false;
+
+    // If all of the exceptions are DNRIOE not exception
+    for (Throwable t : exceptions) {
+      if ( !(t instanceof DoNotRetryIOException)) {
+        res = true;
+      }
+    }
+    return res;
+  }
+
+
+  public static String pluralize(Collection<?> c) {
+    return pluralize(c.size());
+  }
+
+  public static String pluralize(int c) {
+    return c > 1 ? "s" : "";
+  }
+
+  public static String getDesc(List<Throwable> exceptions,
+                               List<Row> actions,
+                               List<String> hostnamePort) {
+    String s = getDesc(classifyExs(exceptions));
+    StringBuilder addrs = new StringBuilder(s);
+    addrs.append("servers with issues: ");
+    Set<String> uniqAddr = new HashSet<String>();
+    uniqAddr.addAll(hostnamePort);
+
+    for(String addr : uniqAddr) {
+      addrs.append(addr).append(", ");
+    }
+    return s;
+  }
+
+  public String getExhaustiveDescription() {
+    StringWriter errorWriter = new StringWriter();
+    for (int i = 0; i < this.exceptions.size(); ++i) {
+      Throwable t = this.exceptions.get(i);
+      Row action = this.actions.get(i);
+      String server = this.hostnameAndPort.get(i);
+      errorWriter.append("Error #" + i + " from [" + server + "] for ["
+        + ((action == null) ? "unknown key" : Bytes.toStringBinary(action.getRow())) + "]");
+      if (t != null) {
+        PrintWriter pw = new PrintWriter(errorWriter);
+        t.printStackTrace(pw);
+        pw.flush();
+      }
+    }
+    return errorWriter.toString();
+  }
+
+
+  public static Map<String, Integer> classifyExs(List<Throwable> ths) {
+    Map<String, Integer> cls = new HashMap<String, Integer>();
+    for (Throwable t : ths) {
+      if (t == null) continue;
+      String name = "";
+      if (t instanceof DoNotRetryIOException) {
+        name = t.getMessage();
+      } else {
+        name = t.getClass().getSimpleName();
+      }
+      Integer i = cls.get(name);
+      if (i == null) {
+        i = 0;
+      }
+      i += 1;
+      cls.put(name, i);
+    }
+    return cls;
+  }
+
+  public static String getDesc(Map<String,Integer> classificaton) {
+    StringBuilder classificatons =new StringBuilder(11);
+    for (Map.Entry<String, Integer> e : classificaton.entrySet()) {
+      classificatons.append(e.getKey());
+      classificatons.append(": ");
+      classificatons.append(e.getValue());
+      classificatons.append(" time");
+      classificatons.append(pluralize(e.getValue()));
+      classificatons.append(", ");
+    }
+    return classificatons.toString();
+  }
+
+}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,34 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
/**
 * Implemented by client operations that are addressed by a row key (e.g.
 * mutations and row-level operations).  Extends {@link Comparable} so
 * operations can be ordered by row.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Row extends Comparable<Row> {
  /**
   * @return The row key of this operation.
   */
  public byte [] getRow();
}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Performs multiple mutations atomically on a single row.
+ * Currently {@link Put} and {@link Delete} are supported.
+ *
+ * The mutations are performed in the order in which they
+ * were added.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class RowMutations implements Row {
+  private List<Mutation> mutations = new ArrayList<Mutation>();
+  private byte [] row;
+  private static final byte VERSION = (byte)0;
+
+  /** Constructor for Writable. DO NOT USE */
+  public RowMutations() {}
+
+  /**
+   * Create an atomic mutation for the specified row.
+   * @param row row key
+   */
+  public RowMutations(byte [] row) {
+    if(row == null || row.length > HConstants.MAX_ROW_LENGTH) {
+      throw new IllegalArgumentException("Row key is invalid");
+    }
+    this.row = Arrays.copyOf(row, row.length);
+  }
+
+  /**
+   * Add a {@link Put} operation to the list of mutations
+   * @param p The {@link Put} to add
+   * @throws IOException
+   */
+  public void add(Put p) throws IOException {
+    internalAdd(p);
+  }
+
+  /**
+   * Add a {@link Delete} operation to the list of mutations
+   * @param d The {@link Delete} to add
+   * @throws IOException
+   */
+  public void add(Delete d) throws IOException {
+    internalAdd(d);
+  }
+
+  private void internalAdd(Mutation m) throws IOException {
+    int res = Bytes.compareTo(this.row, m.getRow());
+    if(res != 0) {
+      throw new IOException("The row in the recently added Put/Delete " +
+          Bytes.toStringBinary(m.getRow()) + " doesn't match the original one " +
+          Bytes.toStringBinary(this.row));
+    }
+    mutations.add(m);
+  }
+
+  @Override
+  public int compareTo(Row i) {
+    return Bytes.compareTo(this.getRow(), i.getRow());
+  }
+
+  @Override
+  public byte[] getRow() {
+    return row;
+  }
+
+  /**
+   * @return An unmodifiable list of the current mutations.
+   */
+  public List<Mutation> getMutations() {
+    return Collections.unmodifiableList(mutations);
+  }
+}



Mime
View raw message