hbase-commits mailing list archives

From ecl...@apache.org
Subject svn commit: r1449950 [13/35] - in /hbase/trunk: ./ hbase-client/ hbase-client/src/ hbase-client/src/main/ hbase-client/src/main/java/ hbase-client/src/main/java/org/ hbase-client/src/main/java/org/apache/ hbase-client/src/main/java/org/apache/hadoop/ h...
Date Mon, 25 Feb 2013 22:50:29 GMT
Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,701 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
+import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+/**
+ * Used to perform Scan operations.
+ * <p>
+ * All operations are identical to {@link Get} with the exception of
+ * instantiation.  Rather than specifying a single row, an optional startRow
+ * and stopRow may be defined.  If rows are not specified, the Scanner will
+ * iterate over all rows.
+ * <p>
+ * To scan everything for each row, instantiate a Scan object.
+ * <p>
+ * To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}.
+ * If caching is NOT set, we will use the caching value of the hosting {@link HTable}.  See
+ * {@link HTable#setScannerCaching(int)}. In addition to row caching, it is possible to specify a
+ * maximum result size, using {@link #setMaxResultSize(long)}. When both are used,
+ * single server requests are limited by either number of rows or maximum result size, whichever
+ * limit comes first.
+ * <p>
+ * To further define the scope of what to get when scanning, perform additional
+ * methods as outlined below.
+ * <p>
+ * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily}
+ * for each family to retrieve.
+ * <p>
+ * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn}
+ * for each column to retrieve.
+ * <p>
+ * To only retrieve columns within a specific range of version timestamps,
+ * execute {@link #setTimeRange(long, long) setTimeRange}.
+ * <p>
+ * To only retrieve columns with a specific timestamp, execute
+ * {@link #setTimeStamp(long) setTimestamp}.
+ * <p>
+ * To limit the number of versions of each column to be returned, execute
+ * {@link #setMaxVersions(int) setMaxVersions}.
+ * <p>
+ * To limit the maximum number of values returned for each call to next(),
+ * execute {@link #setBatch(int) setBatch}.
+ * <p>
+ * To add a filter, execute {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
+ * <p>
+ * Expert: To explicitly disable server-side block caching for this scan,
+ * execute {@link #setCacheBlocks(boolean)}.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class Scan extends OperationWithAttributes {
+  private static final String RAW_ATTR = "_raw_";
+  private static final String ISOLATION_LEVEL = "_isolationlevel_";
+
+  private byte [] startRow = HConstants.EMPTY_START_ROW;
+  private byte [] stopRow  = HConstants.EMPTY_END_ROW;
+  private int maxVersions = 1;
+  private int batch = -1;
+
+  private int storeLimit = -1;
+  private int storeOffset = 0;
+  private boolean getScan;
+
+  // If application wants to collect scan metrics, it needs to
+  // call scan.setAttribute(SCAN_ATTRIBUTES_ENABLE, Bytes.toBytes(Boolean.TRUE))
+  static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
+  static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";
+  
+  // If an application wants to use multiple scans over different tables each scan must
+  // define this attribute with the appropriate table name by calling
+  // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
+  static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";
+  
+  /*
+   * -1 means no caching
+   */
+  private int caching = -1;
+  private long maxResultSize = -1;
+  private boolean cacheBlocks = true;
+  private Filter filter = null;
+  private TimeRange tr = new TimeRange();
+  private Map<byte [], NavigableSet<byte []>> familyMap =
+    new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
+  private Boolean loadColumnFamiliesOnDemand = null;
+
+  /**
+   * Create a Scan operation across all rows.
+   */
+  public Scan() {}
+
+  public Scan(byte [] startRow, Filter filter) {
+    this(startRow);
+    this.filter = filter;
+  }
+
+  /**
+   * Create a Scan operation starting at the specified row.
+   * <p>
+   * If the specified row does not exist, the Scanner will start from the
+   * next closest row after the specified row.
+   * @param startRow row to start scanner at or after
+   */
+  public Scan(byte [] startRow) {
+    this.startRow = startRow;
+  }
+
+  /**
+   * Create a Scan operation for the range of rows specified.
+   * @param startRow row to start scanner at or after (inclusive)
+   * @param stopRow row to stop scanner before (exclusive)
+   */
+  public Scan(byte [] startRow, byte [] stopRow) {
+    this.startRow = startRow;
+    this.stopRow = stopRow;
+    // if the (non-empty) startRow equals the stopRow, this scan is effectively a Get
+    this.getScan = isStartRowAndEqualsStopRow();
+  }
+
+  /**
+   * Creates a new instance of this class while copying all values.
+   *
+   * @param scan  The scan instance to copy from.
+   * @throws IOException When copying the values fails.
+   */
+  public Scan(Scan scan) throws IOException {
+    startRow = scan.getStartRow();
+    stopRow  = scan.getStopRow();
+    maxVersions = scan.getMaxVersions();
+    batch = scan.getBatch();
+    storeLimit = scan.getMaxResultsPerColumnFamily();
+    storeOffset = scan.getRowOffsetPerColumnFamily();
+    caching = scan.getCaching();
+    maxResultSize = scan.getMaxResultSize();
+    cacheBlocks = scan.getCacheBlocks();
+    getScan = scan.isGetScan();
+    filter = scan.getFilter(); // clone?
+    loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
+    TimeRange ctr = scan.getTimeRange();
+    tr = new TimeRange(ctr.getMin(), ctr.getMax());
+    Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
+    for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
+      byte [] fam = entry.getKey();
+      NavigableSet<byte[]> cols = entry.getValue();
+      if (cols != null && cols.size() > 0) {
+        for (byte[] col : cols) {
+          addColumn(fam, col);
+        }
+      } else {
+        addFamily(fam);
+      }
+    }
+    for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
+      setAttribute(attr.getKey(), attr.getValue());
+    }
+  }
+
+  /**
+   * Builds a scan object with the same specs as get.
+   * @param get get to model scan after
+   */
+  public Scan(Get get) {
+    this.startRow = get.getRow();
+    this.stopRow = get.getRow();
+    this.filter = get.getFilter();
+    this.cacheBlocks = get.getCacheBlocks();
+    this.maxVersions = get.getMaxVersions();
+    this.storeLimit = get.getMaxResultsPerColumnFamily();
+    this.storeOffset = get.getRowOffsetPerColumnFamily();
+    this.tr = get.getTimeRange();
+    this.familyMap = get.getFamilyMap();
+    this.getScan = true;
+  }
+
+  public boolean isGetScan() {
+    return this.getScan || isStartRowAndEqualsStopRow();
+  }
+
+  private boolean isStartRowAndEqualsStopRow() {
+    return this.startRow != null && this.startRow.length > 0 &&
+        Bytes.equals(this.startRow, this.stopRow);
+  }
+  /**
+   * Get all columns from the specified family.
+   * <p>
+   * Overrides previous calls to addColumn for this family.
+   * @param family family name
+   * @return this
+   */
+  public Scan addFamily(byte [] family) {
+    familyMap.remove(family);
+    familyMap.put(family, null);
+    return this;
+  }
+
+  /**
+   * Get the column from the specified family with the specified qualifier.
+   * <p>
+   * Overrides previous calls to addFamily for this family.
+   * @param family family name
+   * @param qualifier column qualifier
+   * @return this
+   */
+  public Scan addColumn(byte [] family, byte [] qualifier) {
+    NavigableSet<byte []> set = familyMap.get(family);
+    if(set == null) {
+      set = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
+    }
+    if (qualifier == null) {
+      qualifier = HConstants.EMPTY_BYTE_ARRAY;
+    }
+    set.add(qualifier);
+    familyMap.put(family, set);
+    return this;
+  }
+
+  /**
+   * Get versions of columns only within the specified timestamp range,
+   * [minStamp, maxStamp).  Note, the default maximum number of versions to return is 1.  If
+   * your time range spans more than one version and you want all versions
+   * returned, raise the number of versions beyond the default.
+   * @param minStamp minimum timestamp value, inclusive
+   * @param maxStamp maximum timestamp value, exclusive
+   * @throws IOException if invalid time range
+   * @see #setMaxVersions()
+   * @see #setMaxVersions(int)
+   * @return this
+   */
+  public Scan setTimeRange(long minStamp, long maxStamp)
+  throws IOException {
+    tr = new TimeRange(minStamp, maxStamp);
+    return this;
+  }
+
+  /**
+   * Get versions of columns with the specified timestamp. Note, the default maximum
+   * number of versions to return is 1.  If your time range spans more than one version
+   * and you want all versions returned, raise the number of versions beyond the
+   * default.
+   * @param timestamp version timestamp
+   * @see #setMaxVersions()
+   * @see #setMaxVersions(int)
+   * @return this
+   */
+  public Scan setTimeStamp(long timestamp) {
+    try {
+      tr = new TimeRange(timestamp, timestamp+1);
+    } catch(IOException e) {
+      // Will never happen
+    }
+    return this;
+  }
+
+  /**
+   * Set the start row of the scan.
+   * @param startRow row to start scan on (inclusive)
+   * Note: to make startRow exclusive, add a trailing 0 byte
+   * @return this
+   */
+  public Scan setStartRow(byte [] startRow) {
+    this.startRow = startRow;
+    return this;
+  }
+
+  /**
+   * Set the stop row.
+   * @param stopRow row to end at (exclusive)
+   * Note: to make stopRow inclusive, add a trailing 0 byte
+   * @return this
+   */
+  public Scan setStopRow(byte [] stopRow) {
+    this.stopRow = stopRow;
+    return this;
+  }
+
+  /**
+   * Get all available versions.
+   * @return this
+   */
+  public Scan setMaxVersions() {
+    this.maxVersions = Integer.MAX_VALUE;
+    return this;
+  }
+
+  /**
+   * Get up to the specified number of versions of each column.
+   * @param maxVersions maximum versions for each column
+   * @return this
+   */
+  public Scan setMaxVersions(int maxVersions) {
+    this.maxVersions = maxVersions;
+    return this;
+  }
+
+  /**
+   * Set the maximum number of values to return for each call to next()
+   * @param batch the maximum number of values
+   */
+  public void setBatch(int batch) {
+    if (this.hasFilter() && this.filter.hasFilterRow()) {
+      throw new IncompatibleFilterException(
+        "Cannot set batch on a scan using a filter" +
+        " that returns true for filter.hasFilterRow");
+    }
+    this.batch = batch;
+  }
+
+  /**
+   * Set the maximum number of values to return per row per Column Family
+   * @param limit the maximum number of values returned / row / CF
+   */
+  public void setMaxResultsPerColumnFamily(int limit) {
+    this.storeLimit = limit;
+  }
+
+  /**
+   * Set offset for the row per Column Family.
+   * @param offset the number of KeyValues that will be skipped
+   */
+  public void setRowOffsetPerColumnFamily(int offset) {
+    this.storeOffset = offset;
+  }
+
+  /**
+   * Set the number of rows for caching that will be passed to scanners.
+   * If not set, the default setting from {@link HTable#getScannerCaching()} will apply.
+   * Higher caching values will enable faster scanners but will use more memory.
+   * @param caching the number of rows for caching
+   */
+  public void setCaching(int caching) {
+    this.caching = caching;
+  }
+
+  /**
+   * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)}
+   */
+  public long getMaxResultSize() {
+    return maxResultSize;
+  }
+
+  /**
+   * Set the maximum result size. The default is -1; this means that no specific
+   * maximum result size will be set for this scan, and the global configured
+   * value will be used instead. (Defaults to unlimited).
+   *
+   * @param maxResultSize The maximum result size in bytes.
+   */
+  public void setMaxResultSize(long maxResultSize) {
+    this.maxResultSize = maxResultSize;
+  }
+
+  /**
+   * Apply the specified server-side filter when performing the Scan.
+   * @param filter filter to run on the server
+   * @return this
+   */
+  public Scan setFilter(Filter filter) {
+    this.filter = filter;
+    return this;
+  }
+
+  /**
+   * Set the familyMap.
+   * @param familyMap map of family to qualifier
+   * @return this
+   */
+  public Scan setFamilyMap(Map<byte [], NavigableSet<byte []>> familyMap) {
+    this.familyMap = familyMap;
+    return this;
+  }
+
+  /**
+   * Get the familyMap.
+   * @return familyMap
+   */
+  public Map<byte [], NavigableSet<byte []>> getFamilyMap() {
+    return this.familyMap;
+  }
+
+  /**
+   * @return the number of families in familyMap
+   */
+  public int numFamilies() {
+    if(hasFamilies()) {
+      return this.familyMap.size();
+    }
+    return 0;
+  }
+
+  /**
+   * @return true if familyMap is non empty, false otherwise
+   */
+  public boolean hasFamilies() {
+    return !this.familyMap.isEmpty();
+  }
+
+  /**
+   * @return the keys of the familyMap
+   */
+  public byte[][] getFamilies() {
+    if(hasFamilies()) {
+      return this.familyMap.keySet().toArray(new byte[0][0]);
+    }
+    return null;
+  }
+
+  /**
+   * @return the startrow
+   */
+  public byte [] getStartRow() {
+    return this.startRow;
+  }
+
+  /**
+   * @return the stoprow
+   */
+  public byte [] getStopRow() {
+    return this.stopRow;
+  }
+
+  /**
+   * @return the max number of versions to fetch
+   */
+  public int getMaxVersions() {
+    return this.maxVersions;
+  }
+
+  /**
+   * @return maximum number of values to return for a single call to next()
+   */
+  public int getBatch() {
+    return this.batch;
+  }
+
+  /**
+   * @return maximum number of values to return per row per CF
+   */
+  public int getMaxResultsPerColumnFamily() {
+    return this.storeLimit;
+  }
+
+  /**
+   * Method for retrieving the scan's offset per row per column
+   * family (#kvs to be skipped)
+   * @return row offset
+   */
+  public int getRowOffsetPerColumnFamily() {
+    return this.storeOffset;
+  }
+
+  /**
+   * @return the caching value: the number of rows fetched when calling next on a scanner
+   */
+  public int getCaching() {
+    return this.caching;
+  }
+
+  /**
+   * @return TimeRange
+   */
+  public TimeRange getTimeRange() {
+    return this.tr;
+  }
+
+  /**
+   * @return the filter, or null if no filter has been set
+   */
+  public Filter getFilter() {
+    return filter;
+  }
+
+  /**
+   * @return true if a filter has been specified, false if not
+   */
+  public boolean hasFilter() {
+    return filter != null;
+  }
+
+  /**
+   * Set whether blocks should be cached for this Scan.
+   * <p>
+   * This is true by default.  When true, default settings of the table and
+   * family are used (this will never override caching blocks if the block
+   * cache is disabled for that family or entirely).
+   *
+   * @param cacheBlocks if false, default settings are overridden and blocks
+   * will not be cached
+   */
+  public void setCacheBlocks(boolean cacheBlocks) {
+    this.cacheBlocks = cacheBlocks;
+  }
+
+  /**
+   * Get whether blocks should be cached for this Scan.
+   * @return true if default caching should be used, false if blocks should not
+   * be cached
+   */
+  public boolean getCacheBlocks() {
+    return cacheBlocks;
+  }
+
+  /**
+   * Set the value indicating whether loading CFs on demand should be allowed (cluster
+   * default is false). With on-demand CF loading, a column family is not loaded until it is
+   * needed; e.g. if you filter on one column, data from the other column families is loaded
+   * only for the rows that make it into the result, not for every row as in the normal case.
+   * With column-specific filters, like SingleColumnValueFilter w/filterIfMissing == true,
+   * this can deliver huge perf gains when there's a cf with lots of data; however, it can
+   * also lead to some inconsistent results, as follows:
+   * - if someone does a concurrent update to both column families in question you may get a row
+   *   that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } }
+   *   someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan
+   *   filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 },
+   *   { video => "my dog" } }.
+   * - if there's a concurrent split and you have more than 2 column families, some rows may be
+   *   missing some column families.
+   */
+  public void setLoadColumnFamiliesOnDemand(boolean value) {
+    this.loadColumnFamiliesOnDemand = value;
+  }
+
+  /**
+   * Get the raw loadColumnFamiliesOnDemand setting; may be null if it has not been set.
+   */
+  public Boolean getLoadColumnFamiliesOnDemandValue() {
+    return this.loadColumnFamiliesOnDemand;
+  }
+
+  /**
+   * Get the logical value indicating whether on-demand CF loading should be allowed.
+   */
+  public boolean doLoadColumnFamiliesOnDemand() {
+    return (this.loadColumnFamiliesOnDemand != null)
+      && this.loadColumnFamiliesOnDemand.booleanValue();
+  }
+
+  /**
+   * Compile the table and column family (i.e. schema) information
+   * into a String. Useful for parsing and aggregation by debugging,
+   * logging, and administration tools.
+   * @return Map
+   */
+  @Override
+  public Map<String, Object> getFingerprint() {
+    Map<String, Object> map = new HashMap<String, Object>();
+    List<String> families = new ArrayList<String>();
+    if(this.familyMap.size() == 0) {
+      map.put("families", "ALL");
+      return map;
+    } else {
+      map.put("families", families);
+    }
+    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
+        this.familyMap.entrySet()) {
+      families.add(Bytes.toStringBinary(entry.getKey()));
+    }
+    return map;
+  }
+
+  /**
+   * Compile the details beyond the scope of getFingerprint (row, columns,
+   * timestamps, etc.) into a Map along with the fingerprinted information.
+   * Useful for debugging, logging, and administration tools.
+   * @param maxCols a limit on the number of columns output prior to truncation
+   * @return Map
+   */
+  @Override
+  public Map<String, Object> toMap(int maxCols) {
+    // start with the fingerprint map and build on top of it
+    Map<String, Object> map = getFingerprint();
+    // map from families to column list replaces fingerprint's list of families
+    Map<String, List<String>> familyColumns =
+      new HashMap<String, List<String>>();
+    map.put("families", familyColumns);
+    // add scalar information first
+    map.put("startRow", Bytes.toStringBinary(this.startRow));
+    map.put("stopRow", Bytes.toStringBinary(this.stopRow));
+    map.put("maxVersions", this.maxVersions);
+    map.put("batch", this.batch);
+    map.put("caching", this.caching);
+    map.put("maxResultSize", this.maxResultSize);
+    map.put("cacheBlocks", this.cacheBlocks);
+    map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
+    List<Long> timeRange = new ArrayList<Long>();
+    timeRange.add(this.tr.getMin());
+    timeRange.add(this.tr.getMax());
+    map.put("timeRange", timeRange);
+    int colCount = 0;
+    // iterate through affected families and list out up to maxCols columns
+    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
+      this.familyMap.entrySet()) {
+      List<String> columns = new ArrayList<String>();
+      familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
+      if(entry.getValue() == null) {
+        colCount++;
+        --maxCols;
+        columns.add("ALL");
+      } else {
+        colCount += entry.getValue().size();
+        if (maxCols <= 0) {
+          continue;
+        } 
+        for (byte [] column : entry.getValue()) {
+          if (--maxCols <= 0) {
+            continue;
+          }
+          columns.add(Bytes.toStringBinary(column));
+        }
+      } 
+    }       
+    map.put("totalColumns", colCount);
+    if (this.filter != null) {
+      map.put("filter", this.filter.toString());
+    }
+    // add the id if set
+    if (getId() != null) {
+      map.put("id", getId());
+    }
+    return map;
+  }
+
+  /**
+   * Enable/disable "raw" mode for this scan.
+   * If "raw" is enabled the scan will return all
+   * delete markers and deleted rows that have not
+   * yet been collected.
+   * This is mostly useful for Scans on column families
+   * that have KEEP_DELETED_CELLS enabled.
+   * It is an error to specify any column when "raw" is set.
+   * @param raw True/False to enable/disable "raw" mode.
+   */
+  public void setRaw(boolean raw) {
+    setAttribute(RAW_ATTR, Bytes.toBytes(raw));
+  }
+
+  /**
+   * @return True if this Scan is in "raw" mode.
+   */
+  public boolean isRaw() {
+    byte[] attr = getAttribute(RAW_ATTR);
+    return attr == null ? false : Bytes.toBoolean(attr);
+  }
+
+  /**
+   * Set the isolation level for this scan. If the
+   * isolation level is set to READ_UNCOMMITTED, then
+   * this scan will return data from committed and
+   * uncommitted transactions. If the isolation level
+   * is set to READ_COMMITTED, then this scan will return
+   * data from committed transactions only. If an isolation
+   * level is not explicitly set on a Scan, then it
+   * is assumed to be READ_COMMITTED.
+   * @param level IsolationLevel for this scan
+   */
+  public void setIsolationLevel(IsolationLevel level) {
+    setAttribute(ISOLATION_LEVEL, level.toBytes());
+  }
+  /**
+   * Get the isolation level of this scan.
+   * If no isolation level was set for this scan object,
+   * READ_COMMITTED is assumed.
+   * @return the IsolationLevel for this scan
+   */
+  public IsolationLevel getIsolationLevel() {
+    byte[] attr = getAttribute(ISOLATION_LEVEL);
+    return attr == null ? IsolationLevel.READ_COMMITTED :
+                          IsolationLevel.fromBytes(attr);
+  }
+}
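
A minimal usage sketch of the Scan API added above (illustrative, not part of the patch). It assumes an already-configured HTable named "table"; the column family "cf", the qualifier "q" and the row keys are placeholders.

    Scan scan = new Scan(Bytes.toBytes("row-0000"), Bytes.toBytes("row-9999"));
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    scan.setMaxVersions(3);        // return up to 3 versions of each column
    scan.setCaching(100);          // rows fetched per RPC, instead of the HTable default
    scan.setCacheBlocks(false);    // keep a one-off scan out of the server block cache
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result result : scanner) {
        // process each row's Result here
      }
    } finally {
      scanner.close();
    }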

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,315 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import com.google.protobuf.ServiceException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
+import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
+import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException;
+import org.apache.hadoop.hbase.exceptions.UnknownScannerException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.DNS;
+
+import java.io.IOException;
+import java.net.UnknownHostException;
+
+/**
+ * Retries scanner operations such as create, next, etc.
+ * Used by {@link ResultScanner}s made by {@link HTable}.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class ScannerCallable extends ServerCallable<Result[]> {
+  public static final String LOG_SCANNER_LATENCY_CUTOFF
+    = "hbase.client.log.scanner.latency.cutoff";
+  public static final String LOG_SCANNER_ACTIVITY = "hbase.client.log.scanner.activity";
+
+  private static final Log LOG = LogFactory.getLog(ScannerCallable.class);
+  private long scannerId = -1L;
+  private boolean instantiated = false;
+  private boolean closed = false;
+  private Scan scan;
+  private int caching = 1;
+  private ScanMetrics scanMetrics;
+  private boolean logScannerActivity = false;
+  private int logCutOffLatency = 1000;
+
+  // indicate if it is a remote server call
+  private boolean isRegionServerRemote = true;
+  private long nextCallSeq = 0;
+  
+  /**
+   * @param connection which connection
+   * @param tableName table callable is on
+   * @param scan the scan to execute
+   * @param scanMetrics the ScanMetrics to use; if it is null, ScannerCallable
+   * won't collect metrics
+   */
+  public ScannerCallable (HConnection connection, byte [] tableName, Scan scan,
+    ScanMetrics scanMetrics) {
+    super(connection, tableName, scan.getStartRow());
+    this.scan = scan;
+    this.scanMetrics = scanMetrics;
+    Configuration conf = connection.getConfiguration();
+    logScannerActivity = conf.getBoolean(LOG_SCANNER_ACTIVITY, false);
+    logCutOffLatency = conf.getInt(LOG_SCANNER_LATENCY_CUTOFF, 1000);
+  }
+
+  /**
+   * @param reload force reload of server location
+   * @throws IOException
+   */
+  @Override
+  public void connect(boolean reload) throws IOException {
+    if (!instantiated || reload) {
+      super.connect(reload);
+      checkIfRegionServerIsRemote();
+      instantiated = true;
+    }
+
+    // check how often we retry.
+    // HConnectionManager will call instantiateServer with reload==true
+    // if and only if it is retrying.
+    if (reload && this.scanMetrics != null) {
+      this.scanMetrics.countOfRPCRetries.incrementAndGet();
+      if (isRegionServerRemote) {
+        this.scanMetrics.countOfRemoteRPCRetries.incrementAndGet();
+      }
+    }
+  }
+
+  /**
+   * Compare the local machine's hostname with the region server's hostname
+   * to decide whether the HBase client is connecting to a remote region server.
+   * @throws UnknownHostException if the local hostname cannot be resolved
+   */
+  private void checkIfRegionServerIsRemote() throws UnknownHostException {
+    String myAddress = DNS.getDefaultHost("default", "default");
+    if (this.location.getHostname().equalsIgnoreCase(myAddress)) {
+      isRegionServerRemote = false;
+    } else {
+      isRegionServerRemote = true;
+    }
+  }
+
+  /**
+   * @see java.util.concurrent.Callable#call()
+   */
+  public Result [] call() throws IOException {
+    if (closed) {
+      if (scannerId != -1) {
+        close();
+      }
+    } else {
+      if (scannerId == -1L) {
+        this.scannerId = openScanner();
+      } else {
+        Result [] rrs = null;
+        try {
+          incRPCcallsMetrics();
+          ScanRequest request =
+            RequestConverter.buildScanRequest(scannerId, caching, false, nextCallSeq);
+          ScanResponse response = null;
+          try {
+            response = server.scan(null, request);
+            // Client and RS maintain a nextCallSeq number during the scan. Every next() call
+            // from client to server increments this number on both sides. The client passes the
+            // number along with the request, and on the RS side the incoming nextCallSeq is
+            // matched against the server's own nextCallSeq. In case of a timeout this client-side
+            // increment should not happen; if the server had already finished fetching the next
+            // batch of data, the nextCallSeq numbers will no longer match. The server then throws
+            // OutOfOrderScannerNextException and the client reopens the scanner with startrow
+            // set to the last successfully retrieved row.
+            // See HBASE-5974
+            nextCallSeq++;
+            long timestamp = System.currentTimeMillis();
+            rrs = ResponseConverter.getResults(response);
+            if (logScannerActivity) {
+              long now = System.currentTimeMillis();
+              if (now - timestamp > logCutOffLatency) {
+                int rows = rrs == null ? 0 : rrs.length;
+                LOG.info("Took " + (now-timestamp) + "ms to fetch "
+                  + rows + " rows from scanner=" + scannerId);
+              }
+            }
+            if (response.hasMoreResults()
+                && !response.getMoreResults()) {
+              scannerId = -1L;
+              closed = true;
+              return null;
+            }
+          } catch (ServiceException se) {
+            throw ProtobufUtil.getRemoteException(se);
+          }
+          updateResultsMetrics(response);
+        } catch (IOException e) {
+          if (logScannerActivity) {
+            LOG.info("Got exception in fetching from scanner="
+              + scannerId, e);
+          }
+          IOException ioe = e;
+          if (e instanceof RemoteException) {
+            ioe = RemoteExceptionHandler.decodeRemoteException((RemoteException)e);
+          }
+          if (logScannerActivity && (ioe instanceof UnknownScannerException)) {
+            try {
+              HRegionLocation location =
+                connection.relocateRegion(tableName, scan.getStartRow());
+              LOG.info("Scanner=" + scannerId
+                + " expired, current region location is " + location.toString()
+                + " ip:" + location.getHostnamePort());
+            } catch (Throwable t) {
+              LOG.info("Failed to relocate region", t);
+            }
+          }
+          if (ioe instanceof NotServingRegionException) {
+            // Throw a DNRE so that we break out of cycle of calling NSRE
+            // when what we need is to open scanner against new location.
+            // Attach NSRE to signal client that it needs to resetup scanner.
+            if (this.scanMetrics != null) {
+              this.scanMetrics.countOfNSRE.incrementAndGet();
+            }
+            throw new DoNotRetryIOException("Reset scanner", ioe);
+          } else if (ioe instanceof RegionServerStoppedException) {
+            // Throw a DNRE so that we break out of cycle of calling RSSE
+            // when what we need is to open scanner against new location.
+            // Attach RSSE to signal client that it needs to resetup scanner.
+            throw new DoNotRetryIOException("Reset scanner", ioe);
+          } else {
+            // The outer layers will retry
+            throw ioe;
+          }
+        }
+        return rrs;
+      }
+    }
+    return null;
+  }
+
+  private void incRPCcallsMetrics() {
+    if (this.scanMetrics == null) {
+      return;
+    }
+    this.scanMetrics.countOfRPCcalls.incrementAndGet();
+    if (isRegionServerRemote) {
+      this.scanMetrics.countOfRemoteRPCcalls.incrementAndGet();
+    }
+  }
+
+  private void updateResultsMetrics(ScanResponse response) {
+    if (this.scanMetrics == null || !response.hasResultSizeBytes()) {
+      return;
+    }
+    long value = response.getResultSizeBytes();
+    this.scanMetrics.countOfBytesInResults.addAndGet(value);
+    if (isRegionServerRemote) {
+      this.scanMetrics.countOfBytesInRemoteResults.addAndGet(value);
+    }
+  }
+
+  private void close() {
+    if (this.scannerId == -1L) {
+      return;
+    }
+    try {
+      incRPCcallsMetrics();
+      ScanRequest request =
+        RequestConverter.buildScanRequest(this.scannerId, 0, true);
+      try {
+        server.scan(null, request);
+      } catch (ServiceException se) {
+        throw ProtobufUtil.getRemoteException(se);
+      }
+    } catch (IOException e) {
+      LOG.warn("Ignore, probably already closed", e);
+    }
+    this.scannerId = -1L;
+  }
+
+  protected long openScanner() throws IOException {
+    incRPCcallsMetrics();
+    ScanRequest request =
+      RequestConverter.buildScanRequest(
+        this.location.getRegionInfo().getRegionName(),
+        this.scan, 0, false);
+    try {
+      ScanResponse response = server.scan(null, request);
+      long id = response.getScannerId();
+      if (logScannerActivity) {
+        LOG.info("Open scanner=" + id + " for scan=" + scan.toString()
+          + " on region " + this.location.toString() + " ip:"
+          + this.location.getHostnamePort());
+      }
+      return id;
+    } catch (ServiceException se) {
+      throw ProtobufUtil.getRemoteException(se);
+    }
+  }
+
+  protected Scan getScan() {
+    return scan;
+  }
+
+  /**
+   * Call this when the next invocation of call should close the scanner
+   */
+  public void setClose() {
+    this.closed = true;
+  }
+
+  /**
+   * @return the HRegionInfo for the current region
+   */
+  public HRegionInfo getHRegionInfo() {
+    if (!instantiated) {
+      return null;
+    }
+    return location.getRegionInfo();
+  }
+
+  /**
+   * Get the number of rows that will be fetched on next
+   * @return the number of rows for caching
+   */
+  public int getCaching() {
+    return caching;
+  }
+
+  /**
+   * Set the number of rows that will be fetched on next
+   * @param caching the number of rows for caching
+   */
+  public void setCaching(int caching) {
+    this.caching = caching;
+  }
+}
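
A hedged sketch of the call() lifecycle described above (illustrative, not part of the patch). ScannerCallable is internal plumbing normally driven by HTable's result scanner; here 'connection', 'tableName' and 'scan' are assumed to already exist, and the enclosing method is assumed to declare IOException.

    ScannerCallable callable = new ScannerCallable(connection, tableName, scan, null);
    callable.setCaching(100);              // rows fetched per next() RPC
    callable.withRetries();                // first invocation only opens the scanner (returns null)
    Result[] batch;
    do {
      batch = callable.withRetries();      // each later invocation fetches up to 'caching' rows
      // process 'batch' here
    } while (batch != null && batch.length > 0);
    callable.setClose();
    callable.withRetries();                // final invocation closes the scanner on the region server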

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,44 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
+
+/**
+ * Thrown when a scanner has timed out.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class ScannerTimeoutException extends DoNotRetryIOException {
+
+  private static final long serialVersionUID = 8788838690290688313L;
+
+  /** default constructor */
+  ScannerTimeoutException() {
+    super();
+  }
+
+  /** @param s the detail message */
+  ScannerTimeoutException(String s) {
+    super(s);
+  }
+}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,248 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
+import org.apache.hadoop.hbase.ipc.HBaseClientRPC;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.ipc.RemoteException;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.ConnectException;
+import java.net.SocketTimeoutException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+/**
+ * Abstract class that implements {@link Callable}.  Implementation stipulates
+ * return type and method we actually invoke on remote Server.  Usually
+ * used inside a try/catch that handles the usual connection failures, all wrapped
+ * up in a retry loop.
+ * <p>Call {@link #connect(boolean)} to connect to server hosting region
+ * that contains the passed row in the passed table before invoking
+ * {@link #call()}.
+ * @see HConnection#getRegionServerWithoutRetries(ServerCallable)
+ * @param <T> the class that the ServerCallable handles
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public abstract class ServerCallable<T> implements Callable<T> {
+  protected final HConnection connection;
+  protected final byte [] tableName;
+  protected final byte [] row;
+  protected HRegionLocation location;
+  protected ClientProtocol server;
+  protected int callTimeout;
+  protected long globalStartTime;
+  protected long startTime, endTime;
+  protected final static int MIN_RPC_TIMEOUT = 2000;
+
+  /**
+   * @param connection Connection to use.
+   * @param tableName Table name to which <code>row</code> belongs.
+   * @param row The row we want in <code>tableName</code>.
+   */
+  public ServerCallable(HConnection connection, byte [] tableName, byte [] row) {
+    this(connection, tableName, row, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+  }
+
+  public ServerCallable(HConnection connection, byte [] tableName, byte [] row, int callTimeout) {
+    this.connection = connection;
+    this.tableName = tableName;
+    this.row = row;
+    this.callTimeout = callTimeout;
+  }
+
+  /**
+   * Connect to the server hosting region with row from tablename.
+   * @param reload Set this to true if connection should re-find the region
+   * @throws IOException e
+   */
+  public void connect(final boolean reload) throws IOException {
+    this.location = connection.getRegionLocation(tableName, row, reload);
+    this.server = connection.getClient(location.getServerName());
+  }
+
+  /** @return the server name
+   * @deprecated Just use {@link #toString()} instead.
+   */
+  public String getServerName() {
+    if (location == null) return null;
+    return location.getHostnamePort();
+  }
+
+  /** @return the region name
+   * @deprecated Just use {@link #toString()} instead.
+   */
+  public byte[] getRegionName() {
+    if (location == null) return null;
+    return location.getRegionInfo().getRegionName();
+  }
+
+  /** @return the row
+   * @deprecated Just use {@link #toString()} instead.
+   */
+  public byte [] getRow() {
+    return row;
+  }
+
+  public void beforeCall() {
+    this.startTime = EnvironmentEdgeManager.currentTimeMillis();
+    int remaining = (int)(callTimeout - (this.startTime - this.globalStartTime));
+    if (remaining < MIN_RPC_TIMEOUT) {
+      // If there is no time left, we're trying anyway. It's too late.
+      // 0 means no timeout, and it's not the intent here. So we secure both cases by
+      // resetting to the minimum.
+      remaining = MIN_RPC_TIMEOUT;
+    }
+    HBaseClientRPC.setRpcTimeout(remaining);
+  }
+
+  public void afterCall() {
+    HBaseClientRPC.resetRpcTimeout();
+    this.endTime = EnvironmentEdgeManager.currentTimeMillis();
+  }
+
+  /**
+   * @return {@link HConnection} instance used by this Callable.
+   */
+  HConnection getConnection() {
+    return this.connection;
+  }
+
+  /**
+   * Run this instance with retries, timed waits,
+   * and refinds of missing regions.
+   *
+   * @return an object of type T
+   * @throws IOException if a remote or network exception occurs
+   * @throws RuntimeException other unspecified error
+   */
+  public T withRetries()
+  throws IOException, RuntimeException {
+    Configuration c = getConnection().getConfiguration();
+    final long pause = c.getLong(HConstants.HBASE_CLIENT_PAUSE,
+      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
+    final int numRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+    List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions =
+      new ArrayList<RetriesExhaustedException.ThrowableWithExtraContext>();
+    this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis();
+    for (int tries = 0; tries < numRetries; tries++) {
+      try {
+        beforeCall();
+        connect(tries != 0);
+        return call();
+      } catch (Throwable t) {
+        t = translateException(t);
+        if (t instanceof SocketTimeoutException ||
+            t instanceof ConnectException ||
+            t instanceof RetriesExhaustedException) {
+          // if one of these exceptions is thrown, we clear all the cache entries that
+          // map to that slow/dead server; otherwise, we let the cache miss and ask
+          // .META. again to find the new location
+          HRegionLocation hrl = location;
+          if (hrl != null) {
+            getConnection().clearCaches(hrl.getHostnamePort());
+          }
+        }
+        RetriesExhaustedException.ThrowableWithExtraContext qt =
+          new RetriesExhaustedException.ThrowableWithExtraContext(t,
+              EnvironmentEdgeManager.currentTimeMillis(), toString());
+        exceptions.add(qt);
+        if (tries == numRetries - 1) {
+          throw new RetriesExhaustedException(tries, exceptions);
+        }
+        long expectedSleep = ConnectionUtils.getPauseTime(pause, tries);
+        // If, after the planned sleep, there won't be enough time left, we stop now.
+        if (((this.endTime - this.globalStartTime) + MIN_RPC_TIMEOUT + expectedSleep) >
+            this.callTimeout) {
+          throw (SocketTimeoutException) new SocketTimeoutException(
+              "Call to access row '" + Bytes.toString(row) + "' on table '"
+                  + Bytes.toString(tableName)
+                  + "' failed on timeout. " + " callTimeout=" + this.callTimeout +
+                  ", time=" + (this.endTime - this.startTime)).initCause(t);
+        }
+      } finally {
+        afterCall();
+      }
+      try {
+        Thread.sleep(ConnectionUtils.getPauseTime(pause, tries));
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        throw new IOException("Interrupted after tries=" + tries, e);
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Run this instance against the server once.
+   * @return an object of type T
+   * @throws IOException if a remote or network exception occurs
+   * @throws RuntimeException other unspecified error
+   */
+  public T withoutRetries()
+  throws IOException, RuntimeException {
+    this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis();
+    try {
+      beforeCall();
+      connect(false);
+      return call();
+    } catch (Throwable t) {
+      Throwable t2 = translateException(t);
+      if (t2 instanceof IOException) {
+        throw (IOException)t2;
+      } else {
+        throw new RuntimeException(t2);
+      }
+    } finally {
+      afterCall();
+    }
+  }
+
+  protected static Throwable translateException(Throwable t) throws IOException {
+    if (t instanceof UndeclaredThrowableException) {
+      t = t.getCause();
+    }
+    if (t instanceof RemoteException) {
+      t = ((RemoteException)t).unwrapRemoteException();
+    }
+    if (t instanceof ServiceException) {
+      ServiceException se = (ServiceException)t;
+      Throwable cause = se.getCause();
+      if (cause != null && cause instanceof DoNotRetryIOException) {
+        throw (DoNotRetryIOException)cause;
+      }
+    } else if (t instanceof DoNotRetryIOException) {
+      throw (DoNotRetryIOException)t;
+    }
+    return t;
+  }
+}
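
A hedged sketch of how a subclass plugs into the withRetries() loop above (illustrative, not part of the patch). The actual protobuf RPC issued inside call() is elided, and 'connection', 'tableName' and 'row' are assumed to exist.

    ServerCallable<Result> callable = new ServerCallable<Result>(connection, tableName, row) {
      @Override
      public Result call() throws IOException {
        // connect() has already resolved 'location' and set 'server' for the region
        // holding 'row'; issue the real RPC against 'server' here and convert the response.
        return null; // placeholder result
      }
    };
    Result result = callable.withRetries();   // connect(), pause/backoff and retries handled here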

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,96 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.io.compress.Compression;
+
+/**
+ * Immutable HColumnDescriptor
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
+
+  /**
+   * @param desc wrapped
+   */
+  public UnmodifyableHColumnDescriptor (final HColumnDescriptor desc) {
+    super(desc);
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(byte[], byte[])
+   */
+  @Override
+  public HColumnDescriptor setValue(byte[] key, byte[] value) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(java.lang.String, java.lang.String)
+   */
+  @Override
+  public HColumnDescriptor setValue(String key, String value) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HColumnDescriptor#setMaxVersions(int)
+   */
+  @Override
+  public HColumnDescriptor setMaxVersions(int maxVersions) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)
+   */
+  @Override
+  public HColumnDescriptor setInMemory(boolean inMemory) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HColumnDescriptor#setBlockCacheEnabled(boolean)
+   */
+  @Override
+  public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HColumnDescriptor#setTimeToLive(int)
+   */
+  @Override
+  public HColumnDescriptor setTimeToLive(int timeToLive) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HColumnDescriptor#setCompressionType(org.apache.hadoop.hbase.io.compress.Compression.Algorithm)
+   */
+  @Override
+  public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+}
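
The read-only contract is easiest to see directly; a small sketch (illustrative, not part of the patch; the family name "cf" is a placeholder):

    HColumnDescriptor readOnly =
        new UnmodifyableHColumnDescriptor(new HColumnDescriptor("cf"));
    try {
      readOnly.setMaxVersions(5);                  // any mutator on the wrapper throws
    } catch (UnsupportedOperationException expected) {
      // "HColumnDescriptor is read-only"; the wrapped descriptor is left untouched
    }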

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,53 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HRegionInfo;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+class UnmodifyableHRegionInfo extends HRegionInfo {
+  /*
+   * Creates an unmodifyable copy of an HRegionInfo
+   *
+   * @param info
+   */
+  UnmodifyableHRegionInfo(HRegionInfo info) {
+    super(info);
+  }
+
+  /**
+   * @param split set split status
+   */
+  @Override
+  public void setSplit(boolean split) {
+    throw new UnsupportedOperationException("HRegionInfo is read-only");
+  }
+
+  /**
+   * @param offLine the online/offline status to set
+   */
+  @Override
+  public void setOffline(boolean offLine) {
+    throw new UnsupportedOperationException("HRegionInfo is read-only");
+  }
+}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,127 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+
+/**
+ * Read-only table descriptor.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class UnmodifyableHTableDescriptor extends HTableDescriptor {
+  /** Default constructor */
+  public UnmodifyableHTableDescriptor() {
+	  super();
+  }
+
+  /*
+   * Create an unmodifyable copy of an HTableDescriptor
+   * @param desc
+   */
+  UnmodifyableHTableDescriptor(final HTableDescriptor desc) {
+    super(desc.getName(), getUnmodifyableFamilies(desc), desc.getValues());
+  }
+
+
+  /*
+   * @param desc
+   * @return Families as unmodifiable array.
+   */
+  private static HColumnDescriptor[] getUnmodifyableFamilies(
+      final HTableDescriptor desc) {
+    HColumnDescriptor [] f = new HColumnDescriptor[desc.getFamilies().size()];
+    int i = 0;
+    for (HColumnDescriptor c: desc.getFamilies()) {
+      f[i++] = c;
+    }
+    return f;
+  }
+
+  /**
+   * Does NOT add a column family. This object is immutable
+   * @param family HColumnDescriptor of the family to add.
+   */
+  @Override
+  public void addFamily(final HColumnDescriptor family) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  /**
+   * Does NOT remove a column family. This object is immutable.
+   * @param column family name
+   * @return never returns normally; always throws
+   */
+  @Override
+  public HColumnDescriptor removeFamily(final byte [] column) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean)
+   */
+  @Override
+  public void setReadOnly(boolean readOnly) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HTableDescriptor#setValue(byte[], byte[])
+   */
+  @Override
+  public void setValue(byte[] key, byte[] value) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HTableDescriptor#setValue(java.lang.String, java.lang.String)
+   */
+  @Override
+  public void setValue(String key, String value) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HTableDescriptor#setMaxFileSize(long)
+   */
+  @Override
+  public void setMaxFileSize(long maxFileSize) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  /**
+   * @see org.apache.hadoop.hbase.HTableDescriptor#setMemStoreFlushSize(long)
+   */
+  @Override
+  public void setMemStoreFlushSize(long memstoreFlushSize) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+//  /**
+//   * @see org.apache.hadoop.hbase.HTableDescriptor#addIndex(org.apache.hadoop.hbase.client.tableindexed.IndexSpecification)
+//   */
+//  @Override
+//  public void addIndex(IndexSpecification index) {
+//    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+//  }
+}

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,54 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+
+import java.io.IOException;
+
+/**
+ * We subclass the current ZooKeeperWatcher implementation to change the semantics
+ *  of close(): the new close won't immediately close the connection but
+ *  will keep it alive. See {@link HConnection}.
+ * This makes the watcher available behind a consistent interface. The
+ *  ZooKeeperWatcher usage in HConnection will later be changed to remove the
+ *  watcher part.
+ *
+ * This class is intended to be used internally by HBase classes, not by
+ * end-user code; hence it is package protected.
+ */
+class ZooKeeperKeepAliveConnection extends ZooKeeperWatcher {
+  ZooKeeperKeepAliveConnection(
+    Configuration conf, String descriptor,
+    HConnectionManager.HConnectionImplementation conn) throws IOException {
+    super(conf, descriptor, conn);
+  }
+
+  @Override
+  public void close() {
+    ((HConnectionManager.HConnectionImplementation)abortable).releaseZooKeeperWatcher(this);
+  }
+
+  void internalClose(){
+    super.close();
+  }
+}
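
The keep-alive close semantics can be pictured as follows (illustrative only; getKeepAliveZooKeeperWatcher() is a hypothetical accessor used for the sketch, not an API introduced by this commit):

    ZooKeeperKeepAliveConnection zkw = connectionImpl.getKeepAliveZooKeeperWatcher(); // hypothetical
    try {
      // ... use zkw like any other ZooKeeperWatcher ...
    } finally {
      zkw.close();   // hands the watcher back to the connection; the ZK session stays open
    }
    // The connection calls internalClose() later, once the keep-alive window lapses.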


