hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From chia7...@apache.org
Subject [3/3] hbase git commit: HBASE-18008 Any HColumnDescriptor we give out should be immutable
Date Thu, 08 Jun 2017 15:29:14 GMT
HBASE-18008 Any HColumnDescriptor we give out should be immutable


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e780463
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e780463
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e780463

Branch: refs/heads/branch-2
Commit: 1e7804634c043fca295f97ab70b7fc1da16f274c
Parents: c830a0f
Author: Chia-Ping Tsai <chia7712@gmail.com>
Authored: Thu Jun 8 15:07:48 2017 +0800
Committer: Chia-Ping Tsai <chia7712@gmail.com>
Committed: Thu Jun 8 23:28:33 2017 +0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/HColumnDescriptor.java  | 1029 ++++---------
 .../apache/hadoop/hbase/HTableDescriptor.java   |  186 ++-
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |    5 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java    |    8 +-
 .../hbase/client/ColumnFamilyDescriptor.java    |  216 +++
 .../client/ColumnFamilyDescriptorBuilder.java   | 1347 ++++++++++++++++++
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |    3 +-
 .../client/ImmutableHColumnDescriptor.java      |   46 +
 .../hbase/client/ImmutableHTableDescriptor.java |   59 +-
 .../hadoop/hbase/client/TableDescriptor.java    |   73 +-
 .../hbase/client/TableDescriptorBuilder.java    |  410 ++----
 .../hbase/shaded/protobuf/ProtobufUtil.java     |   27 +-
 .../hbase/shaded/protobuf/RequestConverter.java |    6 +-
 .../hadoop/hbase/TestHColumnDescriptor.java     |   10 +-
 .../hadoop/hbase/TestHTableDescriptor.java      |    1 +
 .../TestColumnFamilyDescriptorBuilder.java      |  185 +++
 .../client/TestImmutableHColumnDescriptor.java  |   97 ++
 .../client/TestImmutableHTableDescriptor.java   |   27 +-
 .../client/TestTableDescriptorBuilder.java      |   57 +-
 .../hadoop/hbase/io/hfile/CacheConfig.java      |   19 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |    1 +
 .../assignment/SplitTableRegionProcedure.java   |    3 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   |   10 +-
 .../apache/hadoop/hbase/TestAcidGuarantees.java |    2 +-
 .../apache/hadoop/hbase/client/TestAdmin1.java  |    2 +-
 .../hbase/client/TestAsyncTableAdminApi.java    |    2 +-
 .../TestReplicationAdminWithClusters.java       |    2 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java |    2 +-
 28 files changed, 2582 insertions(+), 1253 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/1e780463/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 46e97c3..fb0b0ee 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -18,29 +18,21 @@
  */
 package org.apache.hadoop.hbase;
 
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Locale;
 import java.util.Map;
-import java.util.Set;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.PrettyPrinter;
 import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
 
-import com.google.common.base.Preconditions;
-
 /**
  * An HColumnDescriptor contains information about a column family such as the
  * number of versions, compression settings, etc.
@@ -48,261 +40,64 @@ import com.google.common.base.Preconditions;
  * It is used as input when creating a table or adding a column.
  */
 @InterfaceAudience.Public
-public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
-  // For future backward compatibility
-
-  // Version  3 was when column names become byte arrays and when we picked up
-  // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
-  // Version  5 was when bloom filter descriptors were removed.
-  // Version  6 adds metadata as a map where keys and values are byte[].
-  // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
-  // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
-  // Version  9 -- add data block encoding
-  // Version 10 -- change metadata to standard type.
-  // Version 11 -- add column family level configuration.
-  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
-
-  public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION";
-
-  // These constants are used as FileInfo keys
-  public static final String COMPRESSION = "COMPRESSION";
-  public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
-  public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
-      "ENCODE_ON_DISK";
-  public static final String DATA_BLOCK_ENCODING =
-      "DATA_BLOCK_ENCODING";
-  /**
-   * Key for the BLOCKCACHE attribute.
-   * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we
-   * cache DATA blocks.  We always cache INDEX and BLOOM blocks; caching these blocks cannot be
-   * disabled.
-   */
-  public static final String BLOCKCACHE = "BLOCKCACHE";
-  public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
-  public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
-  public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
-  public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
-  /**
-   * Key for cache data into L1 if cache is set up with more than one tier.
-   * To set in the shell, do something like this:
-   * <code>hbase(main):003:0&gt; create 't',
-   *    {NAME =&gt; 't', CONFIGURATION =&gt; {CACHE_DATA_IN_L1 =&gt; 'true'}}</code>
-   */
-  public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
-
-  /**
-   * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
-   * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
-   * family will be loaded into the cache as soon as the file is opened. These
-   * loads will not count as cache misses.
-   */
-  public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";
-
-  /**
-   * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
-   * Use smaller block sizes for faster random-access at expense of larger
-   * indices (more memory consumption). Note that this is a soft limit and that
-   * blocks have overhead (metadata, CRCs) so blocks will tend to be the size
-   * specified here and then some; i.e. don't expect that setting BLOCKSIZE=4k
-   * means hbase data will align with an SSDs 4k page accesses (TODO).
-   */
-  public static final String BLOCKSIZE = "BLOCKSIZE";
-
+@Deprecated // remove it in 3.0
+public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
+  public static final String IN_MEMORY_COMPACTION = ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION;
+  public static final String COMPRESSION = ColumnFamilyDescriptorBuilder.COMPRESSION;
+  public static final String COMPRESSION_COMPACT = ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT;
+  public static final String ENCODE_ON_DISK = "ENCODE_ON_DISK";
+  public static final String DATA_BLOCK_ENCODING = ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING;
+  public static final String BLOCKCACHE = ColumnFamilyDescriptorBuilder.BLOCKCACHE;
+  public static final String CACHE_DATA_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_DATA_ON_WRITE;
+  public static final String CACHE_INDEX_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_INDEX_ON_WRITE;
+  public static final String CACHE_BLOOMS_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_BLOOMS_ON_WRITE;
+  public static final String EVICT_BLOCKS_ON_CLOSE = ColumnFamilyDescriptorBuilder.EVICT_BLOCKS_ON_CLOSE;
+  public static final String CACHE_DATA_IN_L1 = ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1;
+  public static final String PREFETCH_BLOCKS_ON_OPEN = ColumnFamilyDescriptorBuilder.PREFETCH_BLOCKS_ON_OPEN;
+  public static final String BLOCKSIZE = ColumnFamilyDescriptorBuilder.BLOCKSIZE;
   public static final String LENGTH = "LENGTH";
-  public static final String TTL = "TTL";
-  public static final String BLOOMFILTER = "BLOOMFILTER";
+  public static final String TTL = ColumnFamilyDescriptorBuilder.TTL;
+  public static final String BLOOMFILTER = ColumnFamilyDescriptorBuilder.BLOOMFILTER;
   public static final String FOREVER = "FOREVER";
-  public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
+  public static final String REPLICATION_SCOPE = ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE;
   public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
-  public static final String MIN_VERSIONS = "MIN_VERSIONS";
-  /**
-   * Retain all cells across flushes and compactions even if they fall behind
-   * a delete tombstone. To see all retained cells, do a 'raw' scan; see
-   * Scan#setRaw or pass RAW =&gt; true attribute in the shell.
-   */
-  public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
-  public static final String COMPRESS_TAGS = "COMPRESS_TAGS";
-
-  public static final String ENCRYPTION = "ENCRYPTION";
-  public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";
-
-  public static final String IS_MOB = "IS_MOB";
+  public static final String MIN_VERSIONS = ColumnFamilyDescriptorBuilder.MIN_VERSIONS;
+  public static final String KEEP_DELETED_CELLS = ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS;
+  public static final String COMPRESS_TAGS = ColumnFamilyDescriptorBuilder.COMPRESS_TAGS;
+  public static final String ENCRYPTION = ColumnFamilyDescriptorBuilder.ENCRYPTION;
+  public static final String ENCRYPTION_KEY = ColumnFamilyDescriptorBuilder.ENCRYPTION_KEY;
+  public static final String IS_MOB = ColumnFamilyDescriptorBuilder.IS_MOB;
   public static final byte[] IS_MOB_BYTES = Bytes.toBytes(IS_MOB);
-  public static final String MOB_THRESHOLD = "MOB_THRESHOLD";
+  public static final String MOB_THRESHOLD = ColumnFamilyDescriptorBuilder.MOB_THRESHOLD;
   public static final byte[] MOB_THRESHOLD_BYTES = Bytes.toBytes(MOB_THRESHOLD);
-  public static final long DEFAULT_MOB_THRESHOLD = 100 * 1024; // 100k
-  public static final String MOB_COMPACT_PARTITION_POLICY = "MOB_COMPACT_PARTITION_POLICY";
-  public static final byte[] MOB_COMPACT_PARTITION_POLICY_BYTES =
-      Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY);
-  public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY =
-      MobCompactPartitionPolicy.DAILY;
-
-  public static final String DFS_REPLICATION = "DFS_REPLICATION";
-  public static final short DEFAULT_DFS_REPLICATION = 0;
-
-  public static final String STORAGE_POLICY = "STORAGE_POLICY";
-
-  /**
-   * Default compression type.
-   */
-  public static final String DEFAULT_COMPRESSION =
-    Compression.Algorithm.NONE.getName();
-
-  /**
-   * Default value of the flag that enables data block encoding on disk, as
-   * opposed to encoding in cache only. We encode blocks everywhere by default,
-   * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
-   */
+  public static final long DEFAULT_MOB_THRESHOLD = ColumnFamilyDescriptorBuilder.DEFAULT_MOB_THRESHOLD;
+  public static final String MOB_COMPACT_PARTITION_POLICY = ColumnFamilyDescriptorBuilder.MOB_COMPACT_PARTITION_POLICY;
+  public static final byte[] MOB_COMPACT_PARTITION_POLICY_BYTES = Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY);
+  public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY
+        = ColumnFamilyDescriptorBuilder.DEFAULT_MOB_COMPACT_PARTITION_POLICY;
+  public static final String DFS_REPLICATION = ColumnFamilyDescriptorBuilder.DFS_REPLICATION;
+  public static final short DEFAULT_DFS_REPLICATION = ColumnFamilyDescriptorBuilder.DEFAULT_DFS_REPLICATION;
+  public static final String STORAGE_POLICY = ColumnFamilyDescriptorBuilder.STORAGE_POLICY;
+  public static final String DEFAULT_COMPRESSION = ColumnFamilyDescriptorBuilder.DEFAULT_COMPRESSION.name();
   public static final boolean DEFAULT_ENCODE_ON_DISK = true;
-
-  /** Default data block encoding algorithm. */
-  public static final String DEFAULT_DATA_BLOCK_ENCODING =
-      DataBlockEncoding.NONE.toString();
-
-  /**
-   * Default number of versions of a record to keep.
-   */
-  public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
-      "hbase.column.max.version", 1);
-
-  /**
-   * Default is not to keep a minimum of versions.
-   */
-  public static final int DEFAULT_MIN_VERSIONS = 0;
-
-  /*
-   * Cache here the HCD value.
-   * Question: its OK to cache since when we're reenable, we create a new HCD?
-   */
-  private volatile Integer blocksize = null;
-
-  /**
-   * Default setting for whether to try and serve this column family from memory or not.
-   */
-  public static final boolean DEFAULT_IN_MEMORY = false;
-
-  /**
-   * Default setting for preventing deleted from being collected immediately.
-   */
-  public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE;
-
-  /**
-   * Default setting for whether to use a block cache or not.
-   */
-  public static final boolean DEFAULT_BLOCKCACHE = true;
-
-  /**
-   * Default setting for whether to cache data blocks on write if block caching
-   * is enabled.
-   */
-  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
-
-  /**
-   * Default setting for whether to cache data blocks in L1 tier.  Only makes sense if more than
-   * one tier in operations: i.e. if we have an L1 and a L2.  This will be the cases if we are
-   * using BucketCache.
-   */
-  public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;
-
-  /**
-   * Default setting for whether to cache index blocks on write if block
-   * caching is enabled.
-   */
-  public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;
-
-  /**
-   * Default size of blocks in files stored to the filesytem (hfiles).
-   */
-  public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;
-
-  /**
-   * Default setting for whether or not to use bloomfilters.
-   */
-  public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();
-
-  /**
-   * Default setting for whether to cache bloom filter blocks on write if block
-   * caching is enabled.
-   */
-  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;
-
-  /**
-   * Default time to live of cell contents.
-   */
-  public static final int DEFAULT_TTL = HConstants.FOREVER;
-
-  /**
-   * Default scope.
-   */
-  public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;
-
-  /**
-   * Default setting for whether to evict cached blocks from the blockcache on
-   * close.
-   */
-  public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;
-
-  /**
-   * Default compress tags along with any type of DataBlockEncoding.
-   */
-  public static final boolean DEFAULT_COMPRESS_TAGS = true;
-
-  /*
-   * Default setting for whether to prefetch blocks into the blockcache on open.
-   */
-  public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;
-
-  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
-  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();
-
-  static {
-    DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
-    DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
-    DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
-    DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
-    DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
-    DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
-    DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
-    DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
-    DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
-    DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
-    DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
-    DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
-    DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
-    DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
-    DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
-    DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
-    DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
-    for (String s : DEFAULT_VALUES.keySet()) {
-      RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
-    }
-    RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION)));
-    RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY)));
-    RESERVED_KEYWORDS.add(new Bytes(IS_MOB_BYTES));
-    RESERVED_KEYWORDS.add(new Bytes(MOB_THRESHOLD_BYTES));
-    RESERVED_KEYWORDS.add(new Bytes(MOB_COMPACT_PARTITION_POLICY_BYTES));
-  }
-
-  private static final int UNINITIALIZED = -1;
-
-  // Column family name
-  private byte [] name;
-
-  // Column metadata
-  private final Map<Bytes, Bytes> values = new HashMap<>();
-
-  /**
-   * A map which holds the configuration specific to the column family.
-   * The keys of the map have the same names as config keys and override the defaults with
-   * cf-specific settings. Example usage may be for compactions, etc.
-   */
-  private final Map<String, String> configuration = new HashMap<>();
-
-  /*
-   * Cache the max versions rather than calculate it every time.
-   */
-  private int cachedMaxVersions = UNINITIALIZED;
-
+  public static final String DEFAULT_DATA_BLOCK_ENCODING = ColumnFamilyDescriptorBuilder.DEFAULT_DATA_BLOCK_ENCODING.name();
+  public static final int DEFAULT_VERSIONS = ColumnFamilyDescriptorBuilder.DEFAULT_MAX_VERSIONS;
+  public static final int DEFAULT_MIN_VERSIONS = ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS;
+  public static final boolean DEFAULT_IN_MEMORY = ColumnFamilyDescriptorBuilder.DEFAULT_IN_MEMORY;
+  public static final KeepDeletedCells DEFAULT_KEEP_DELETED = ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED;
+  public static final boolean DEFAULT_BLOCKCACHE = ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKCACHE;
+  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_ON_WRITE;
+  public static final boolean DEFAULT_CACHE_DATA_IN_L1 = ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_IN_L1;
+  public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_INDEX_ON_WRITE;
+  public static final int DEFAULT_BLOCKSIZE = ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE;
+  public static final String DEFAULT_BLOOMFILTER =  ColumnFamilyDescriptorBuilder.DEFAULT_BLOOMFILTER.name();
+  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_BLOOMS_ON_WRITE;
+  public static final int DEFAULT_TTL = ColumnFamilyDescriptorBuilder.DEFAULT_TTL;
+  public static final int DEFAULT_REPLICATION_SCOPE = ColumnFamilyDescriptorBuilder.DEFAULT_REPLICATION_SCOPE;
+  public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = ColumnFamilyDescriptorBuilder.DEFAULT_EVICT_BLOCKS_ON_CLOSE;
+  public static final boolean DEFAULT_COMPRESS_TAGS = ColumnFamilyDescriptorBuilder.DEFAULT_COMPRESS_TAGS;
+  public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = ColumnFamilyDescriptorBuilder.DEFAULT_PREFETCH_BLOCKS_ON_OPEN;
+  protected final ModifyableColumnFamilyDescriptor delegatee;
   /**
    * Construct a column descriptor specifying only the family name
    * The other attributes are defaulted.
@@ -322,20 +117,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * letter -- and may not contain a <code>:</code>
    */
   public HColumnDescriptor(final byte [] familyName) {
-    isLegalFamilyName(familyName);
-    this.name = familyName;
-
-    setMaxVersions(DEFAULT_VERSIONS);
-    setMinVersions(DEFAULT_MIN_VERSIONS);
-    setKeepDeletedCells(DEFAULT_KEEP_DELETED);
-    setInMemory(DEFAULT_IN_MEMORY);
-    setBlockCacheEnabled(DEFAULT_BLOCKCACHE);
-    setTimeToLive(DEFAULT_TTL);
-    setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase(Locale.ROOT)));
-    setDataBlockEncoding(DataBlockEncoding.valueOf(DEFAULT_DATA_BLOCK_ENCODING.toUpperCase(Locale.ROOT)));
-    setBloomFilterType(BloomType.valueOf(DEFAULT_BLOOMFILTER.toUpperCase(Locale.ROOT)));
-    setBlocksize(DEFAULT_BLOCKSIZE);
-    setScope(DEFAULT_REPLICATION_SCOPE);
+    this(new ModifyableColumnFamilyDescriptor(familyName));
   }
 
   /**
@@ -345,16 +127,16 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @param desc The descriptor.
    */
   public HColumnDescriptor(HColumnDescriptor desc) {
-    super();
-    this.name = desc.name.clone();
-    for (Map.Entry<Bytes, Bytes> e :
-        desc.values.entrySet()) {
-      this.values.put(e.getKey(), e.getValue());
-    }
-    for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
-      this.configuration.put(e.getKey(), e.getValue());
-    }
-    setMaxVersions(desc.getMaxVersions());
+    this(desc, true);
+  }
+
+  protected HColumnDescriptor(HColumnDescriptor desc, boolean deepClone) {
+    this(deepClone ? new ModifyableColumnFamilyDescriptor(desc)
+            : desc.delegatee);
+  }
+
+  protected HColumnDescriptor(ModifyableColumnFamilyDescriptor delegate) {
+    this.delegatee = delegate;
   }
 
   /**
@@ -364,54 +146,36 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
    * <code>b</code> can be null when deserializing).  Cannot start with a '.'
    * either. Also Family can not be an empty value or equal "recovered.edits".
+   * @deprecated Use {@link ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])}.
    */
+  @Deprecated
   public static byte [] isLegalFamilyName(final byte [] b) {
-    if (b == null) {
-      return b;
-    }
-    Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
-    if (b[0] == '.') {
-      throw new IllegalArgumentException("Family names cannot start with a " +
-        "period: " + Bytes.toString(b));
-    }
-    for (int i = 0; i < b.length; i++) {
-      if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
-        throw new IllegalArgumentException("Illegal character <" + b[i] +
-          ">. Family names cannot contain control characters or colons: " +
-          Bytes.toString(b));
-      }
-    }
-    byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
-    if (Bytes.equals(recoveredEdit, b)) {
-      throw new IllegalArgumentException("Family name cannot be: " +
-          HConstants.RECOVERED_EDITS_DIR);
-    }
-    return b;
+    return ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(b);
   }
 
   /**
    * @return Name of this column family
    */
+  @Override
   public byte [] getName() {
-    return name;
+    return delegatee.getName();
   }
 
   /**
-   * @return Name of this column family
+   * @return The name string of this column family
    */
+  @Override
   public String getNameAsString() {
-    return Bytes.toString(this.name);
+    return delegatee.getNameAsString();
   }
 
   /**
    * @param key The key.
    * @return The value.
    */
+  @Override
   public byte[] getValue(byte[] key) {
-    Bytes ibw = values.get(new Bytes(key));
-    if (ibw == null)
-      return null;
-    return ibw.get();
+    return delegatee.getValue(key);
   }
 
   /**
@@ -420,17 +184,12 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    */
   public String getValue(String key) {
     byte[] value = getValue(Bytes.toBytes(key));
-    if (value == null)
-      return null;
-    return Bytes.toString(value);
+    return value == null ? null : Bytes.toString(value);
   }
 
-  /**
-   * @return All values.
-   */
+  @Override
   public Map<Bytes, Bytes> getValues() {
-    // shallow pointer copy
-    return Collections.unmodifiableMap(values);
+    return delegatee.getValues();
   }
 
   /**
@@ -439,18 +198,16 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setValue(byte[] key, byte[] value) {
-    if (Bytes.compareTo(Bytes.toBytes(HConstants.VERSIONS), key) == 0) {
-      cachedMaxVersions = UNINITIALIZED;
-    }
-    values.put(new Bytes(key), new Bytes(value));
+    getDelegateeForModification().setValue(key, value);
     return this;
   }
 
   /**
    * @param key Key whose key and value we're to remove from HCD parameters.
    */
-  public void remove(final byte [] key) {
-    values.remove(new Bytes(key));
+  public HColumnDescriptor remove(final byte [] key) {
+    getDelegateeForModification().removeValue(new Bytes(key));
+    return this;
   }
 
   /**
@@ -459,11 +216,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setValue(String key, String value) {
-    if (value == null) {
-      remove(Bytes.toBytes(key));
-    } else {
-      setValue(Bytes.toBytes(key), Bytes.toBytes(value));
-    }
+    getDelegateeForModification().setValue(key, value);
     return this;
   }
 
@@ -489,32 +242,17 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return getCompactionCompressionType();
   }
 
-  /** @return maximum number of versions */
+  @Override
   public int getMaxVersions() {
-    if (this.cachedMaxVersions == UNINITIALIZED) {
-      String v = getValue(HConstants.VERSIONS);
-      this.cachedMaxVersions = Integer.parseInt(v);
-    }
-    return this.cachedMaxVersions;
+    return delegatee.getMaxVersions();
   }
 
   /**
-   * @param maxVersions maximum number of versions
+   * @param value maximum number of versions
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setMaxVersions(int maxVersions) {
-    if (maxVersions <= 0) {
-      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
-      // Until there is support, consider 0 or < 0 -- a configuration error.
-      throw new IllegalArgumentException("Maximum versions must be positive");
-    }
-    if (maxVersions < this.getMinVersions()) {
-        throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
-            + " while minVersion is " + this.getMinVersions()
-            + ". Maximum versions must be >= minimum versions ");
-    }
-    setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
-    cachedMaxVersions = maxVersions;
+  public HColumnDescriptor setMaxVersions(int value) {
+    getDelegateeForModification().setMaxVersions(value);
     return this;
   }
 
@@ -542,39 +280,24 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return this;
   }
 
-  /**
-   * @return The storefile/hfile blocksize for this column family.
-   */
-  public synchronized int getBlocksize() {
-    if (this.blocksize == null) {
-      String value = getValue(BLOCKSIZE);
-      this.blocksize = (value != null)?
-        Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
-    }
-    return this.blocksize.intValue();
-
+  @Override
+  public int getBlocksize() {
+    return delegatee.getBlocksize();
   }
 
   /**
-   * @param s Blocksize to use when writing out storefiles/hfiles on this
+   * @param value Blocksize to use when writing out storefiles/hfiles on this
    * column family.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setBlocksize(int s) {
-    setValue(BLOCKSIZE, Integer.toString(s));
-    this.blocksize = null;
+  public HColumnDescriptor setBlocksize(int value) {
+    getDelegateeForModification().setBlocksize(value);
     return this;
   }
 
-  /**
-   * @return Compression type setting.
-   */
+  @Override
   public Compression.Algorithm getCompressionType() {
-    String n = getValue(COMPRESSION);
-    if (n == null) {
-      return Compression.Algorithm.NONE;
-    }
-    return Compression.Algorithm.valueOf(n.toUpperCase(Locale.ROOT));
+    return delegatee.getCompressionType();
   }
 
   /**
@@ -582,73 +305,49 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * LZO is not bundled as part of the hbase distribution.
    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
    * for how to enable it.
-   * @param type Compression type setting.
+   * @param value Compression type setting.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
-    return setValue(COMPRESSION, type.getName().toUpperCase(Locale.ROOT));
+  public HColumnDescriptor setCompressionType(Compression.Algorithm value) {
+    getDelegateeForModification().setCompressionType(value);
+    return this;
   }
 
-  /**
-   * @return the data block encoding algorithm used in block cache and
-   *         optionally on disk
-   */
+  @Override
   public DataBlockEncoding getDataBlockEncoding() {
-    String type = getValue(DATA_BLOCK_ENCODING);
-    if (type == null) {
-      type = DEFAULT_DATA_BLOCK_ENCODING;
-    }
-    return DataBlockEncoding.valueOf(type);
+    return delegatee.getDataBlockEncoding();
   }
 
   /**
    * Set data block encoding algorithm used in block cache.
-   * @param type What kind of data block encoding will be used.
+   * @param value What kind of data block encoding will be used.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
-    String name;
-    if (type != null) {
-      name = type.toString();
-    } else {
-      name = DataBlockEncoding.NONE.toString();
-    }
-    return setValue(DATA_BLOCK_ENCODING, name);
+  public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding value) {
+    getDelegateeForModification().setDataBlockEncoding(value);
+    return this;
   }
 
   /**
    * Set whether the tags should be compressed along with DataBlockEncoding. When no
    * DataBlockEncoding is been used, this is having no effect.
    *
-   * @param compressTags
+   * @param value
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setCompressTags(boolean compressTags) {
-    return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
+  public HColumnDescriptor setCompressTags(boolean value) {
+    getDelegateeForModification().setCompressTags(value);
+    return this;
   }
 
-  /**
-   * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
-   *         DataBlockEncoding is been used, this is having no effect.
-   */
+  @Override
   public boolean isCompressTags() {
-    String compressTagsStr = getValue(COMPRESS_TAGS);
-    boolean compressTags = DEFAULT_COMPRESS_TAGS;
-    if (compressTagsStr != null) {
-      compressTags = Boolean.parseBoolean(compressTagsStr);
-    }
-    return compressTags;
+    return delegatee.isCompressTags();
   }
 
-  /**
-   * @return Compression type setting.
-   */
+  @Override
   public Compression.Algorithm getCompactionCompressionType() {
-    String n = getValue(COMPRESSION_COMPACT);
-    if (n == null) {
-      return getCompressionType();
-    }
-    return Compression.Algorithm.valueOf(n.toUpperCase(Locale.ROOT));
+    return delegatee.getCompactionCompressionType();
   }
 
   /**
@@ -656,180 +355,144 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * LZO is not bundled as part of the hbase distribution.
    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
    * for how to enable it.
-   * @param type Compression type setting.
+   * @param value Compression type setting.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setCompactionCompressionType(
-      Compression.Algorithm type) {
-    return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase(Locale.ROOT));
+  public HColumnDescriptor setCompactionCompressionType(Compression.Algorithm value) {
+    getDelegateeForModification().setCompactionCompressionType(value);
+    return this;
   }
 
-  /**
-   * @return True if we are to favor keeping all values for this column family in the
-   * HRegionServer cache.
-   */
+  @Override
   public boolean isInMemory() {
-    String value = getValue(HConstants.IN_MEMORY);
-    if (value != null) {
-      return Boolean.parseBoolean(value);
-    }
-    return DEFAULT_IN_MEMORY;
+    return delegatee.isInMemory();
   }
 
   /**
-   * @param inMemory True if we are to favor keeping all values for this column family in the
+   * @param value True if we are to favor keeping all values for this column family in the
    * HRegionServer cache
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setInMemory(boolean inMemory) {
-    return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
+  public HColumnDescriptor setInMemory(boolean value) {
+    getDelegateeForModification().setInMemory(value);
+    return this;
   }
 
-  /**
-   * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for
-   *          for this column family
-   */
+  @Override
   public MemoryCompactionPolicy getInMemoryCompaction() {
-    String value = getValue(IN_MEMORY_COMPACTION);
-    if (value != null) {
-      return MemoryCompactionPolicy.valueOf(value);
-    }
-    return null;
+    return delegatee.getInMemoryCompaction();
   }
 
   /**
-   * @param inMemoryCompaction the prefered in-memory compaction policy
+   * @param value the preferred in-memory compaction policy
    *                  for this column family
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) {
-    return setValue(IN_MEMORY_COMPACTION, inMemoryCompaction.toString());
+  public HColumnDescriptor setInMemoryCompaction(MemoryCompactionPolicy value) {
+    getDelegateeForModification().setInMemoryCompaction(value);
+    return this;
   }
 
+  @Override
   public KeepDeletedCells getKeepDeletedCells() {
-    String value = getValue(KEEP_DELETED_CELLS);
-    if (value != null) {
-      // toUpperCase for backwards compatibility
-      return KeepDeletedCells.valueOf(value.toUpperCase(Locale.ROOT));
-    }
-    return DEFAULT_KEEP_DELETED;
+    return delegatee.getKeepDeletedCells();
   }
 
   /**
-   * @param keepDeletedCells True if deleted rows should not be collected
+   * @param value True if deleted rows should not be collected
    * immediately.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) {
-    return setValue(KEEP_DELETED_CELLS, keepDeletedCells.toString());
+  public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells value) {
+    getDelegateeForModification().setKeepDeletedCells(value);
+    return this;
   }
 
-  /**
-   * @return Time-to-live of cell contents, in seconds.
-   */
+  @Override
   public int getTimeToLive() {
-    String value = getValue(TTL);
-    return (value != null)? Integer.parseInt(value) : DEFAULT_TTL;
+    return delegatee.getTimeToLive();
   }
 
   /**
-   * @param timeToLive Time-to-live of cell contents, in seconds.
+   * @param value Time-to-live of cell contents, in seconds.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setTimeToLive(int timeToLive) {
-    return setValue(TTL, Integer.toString(timeToLive));
+  public HColumnDescriptor setTimeToLive(int value) {
+    getDelegateeForModification().setTimeToLive(value);
+    return this;
   }
 
   /**
-   * @param timeToLive Time to live of cell contents, in human readable format
+   * @param value Time to live of cell contents, in human readable format
    *                   @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setTimeToLive(String timeToLive) throws HBaseException {
-    return setValue(TTL, PrettyPrinter.valueOf(timeToLive, Unit.TIME_INTERVAL));
+  public HColumnDescriptor setTimeToLive(String value) throws HBaseException {
+    getDelegateeForModification().setTimeToLive(value);
+    return this;
   }
 
-  /**
-   * @return The minimum number of versions to keep.
-   */
+  @Override
   public int getMinVersions() {
-    String value = getValue(MIN_VERSIONS);
-    return (value != null)? Integer.parseInt(value) : 0;
+    return delegatee.getMinVersions();
   }
 
   /**
-   * @param minVersions The minimum number of versions to keep.
+   * @param value The minimum number of versions to keep.
    * (used when timeToLive is set)
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setMinVersions(int minVersions) {
-    return setValue(MIN_VERSIONS, Integer.toString(minVersions));
+  public HColumnDescriptor setMinVersions(int value) {
+    getDelegateeForModification().setMinVersions(value);
+    return this;
   }
 
-  /**
-   * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX
-   * and BLOOM type blocks).
-   */
+  @Override
   public boolean isBlockCacheEnabled() {
-    String value = getValue(BLOCKCACHE);
-    if (value != null) {
-      return Boolean.parseBoolean(value);
-    }
-    return DEFAULT_BLOCKCACHE;
+    return delegatee.isBlockCacheEnabled();
   }
 
   /**
-   * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache
+   * @param value True if hfile DATA type blocks should be cached (We always cache
    * INDEX and BLOOM blocks; you cannot turn this off).
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
-    return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
+  public HColumnDescriptor setBlockCacheEnabled(boolean value) {
+    getDelegateeForModification().setBlockCacheEnabled(value);
+    return this;
   }
 
-  /**
-   * @return bloom filter type used for new StoreFiles in ColumnFamily
-   */
+  @Override
   public BloomType getBloomFilterType() {
-    String n = getValue(BLOOMFILTER);
-    if (n == null) {
-      n = DEFAULT_BLOOMFILTER;
-    }
-    return BloomType.valueOf(n.toUpperCase(Locale.ROOT));
+    return delegatee.getBloomFilterType();
   }
 
   /**
-   * @param bt bloom filter type
+   * @param value bloom filter type
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setBloomFilterType(final BloomType bt) {
-    return setValue(BLOOMFILTER, bt.toString());
+  public HColumnDescriptor setBloomFilterType(final BloomType value) {
+    getDelegateeForModification().setBloomFilterType(value);
+    return this;
   }
 
-   /**
-    * @return the scope tag
-    */
+  @Override
   public int getScope() {
-    byte[] value = getValue(REPLICATION_SCOPE_BYTES);
-    if (value != null) {
-      return Integer.parseInt(Bytes.toString(value));
-    }
-    return DEFAULT_REPLICATION_SCOPE;
+    return delegatee.getScope();
   }
 
  /**
-  * @param scope the scope tag
+  * @param value the scope tag
   * @return this (for chained invocation)
   */
-  public HColumnDescriptor setScope(int scope) {
-    return setValue(REPLICATION_SCOPE, Integer.toString(scope));
+  public HColumnDescriptor setScope(int value) {
+    getDelegateeForModification().setScope(value);
+    return this;
   }
 
-  /**
-   * @return true if we should cache data blocks on write
-   */
+  @Override
   public boolean isCacheDataOnWrite() {
-    return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
+    return delegatee.isCacheDataOnWrite();
   }
 
   /**
@@ -837,15 +500,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
-    return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
+    getDelegateeForModification().setCacheDataOnWrite(value);
+    return this;
   }
 
-  /**
-   * @return true if we should cache data blocks in the L1 cache (if block cache deploy has more
-   *         than one tier; e.g. we are using CombinedBlockCache).
-   */
+  @Override
   public boolean isCacheDataInL1() {
-    return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
+    return delegatee.isCacheDataInL1();
   }
 
   /**
@@ -854,22 +515,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setCacheDataInL1(boolean value) {
-    return setValue(CACHE_DATA_IN_L1, Boolean.toString(value));
-  }
-
-  private boolean setAndGetBoolean(final String key, final boolean defaultSetting) {
-    String value = getValue(key);
-    if (value != null) {
-      return Boolean.parseBoolean(value);
-    }
-    return defaultSetting;
+    getDelegateeForModification().setCacheDataInL1(value);
+    return this;
   }
 
-  /**
-   * @return true if we should cache index blocks on write
-   */
+  @Override
   public boolean isCacheIndexesOnWrite() {
-    return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
+    return delegatee.isCacheIndexesOnWrite();
   }
 
   /**
@@ -877,14 +529,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
-    return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
+    getDelegateeForModification().setCacheIndexesOnWrite(value);
+    return this;
   }
 
-  /**
-   * @return true if we should cache bloomfilter blocks on write
-   */
+  @Override
   public boolean isCacheBloomsOnWrite() {
-    return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
+    return delegatee.isCacheBloomsOnWrite();
   }
 
   /**
@@ -892,14 +543,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
-    return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
+    getDelegateeForModification().setCacheBloomsOnWrite(value);
+    return this;
   }
 
-  /**
-   * @return true if we should evict cached blocks from the blockcache on close
-   */
+  @Override
   public boolean isEvictBlocksOnClose() {
-    return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
+    return delegatee.isEvictBlocksOnClose();
   }
 
   /**
@@ -908,14 +558,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
-    return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
+    getDelegateeForModification().setEvictBlocksOnClose(value);
+    return this;
   }
 
-  /**
-   * @return true if we should prefetch blocks into the blockcache on open
-   */
+  @Override
   public boolean isPrefetchBlocksOnOpen() {
-    return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
+    return delegatee.isPrefetchBlocksOnOpen();
   }
 
   /**
@@ -923,7 +572,8 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
-    return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
+    getDelegateeForModification().setPrefetchBlocksOnOpen(value);
+    return this;
   }
 
   /**
@@ -931,113 +581,23 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    */
   @Override
   public String toString() {
-    StringBuilder s = new StringBuilder();
-
-    s.append('{');
-    s.append(HConstants.NAME);
-    s.append(" => '");
-    s.append(Bytes.toString(name));
-    s.append("'");
-    s.append(getValues(true));
-    s.append('}');
-    return s.toString();
+    return delegatee.toString();
   }
 
   /**
    * @return Column family descriptor with only the customized attributes.
    */
+  @Override
   public String toStringCustomizedValues() {
-    StringBuilder s = new StringBuilder();
-    s.append('{');
-    s.append(HConstants.NAME);
-    s.append(" => '");
-    s.append(Bytes.toString(name));
-    s.append("'");
-    s.append(getValues(false));
-    s.append('}');
-    return s.toString();
-  }
-
-  private StringBuilder getValues(boolean printDefaults) {
-    StringBuilder s = new StringBuilder();
-
-    boolean hasConfigKeys = false;
-
-    // print all reserved keys first
-    for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
-      if (!RESERVED_KEYWORDS.contains(entry.getKey())) {
-        hasConfigKeys = true;
-        continue;
-      }
-      String key = Bytes.toString(entry.getKey().get());
-      String value = Bytes.toStringBinary(entry.getValue().get());
-      if (printDefaults
-          || !DEFAULT_VALUES.containsKey(key)
-          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
-        s.append(", ");
-        s.append(key);
-        s.append(" => ");
-        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
-      }
-    }
-
-    // print all non-reserved, advanced config keys as a separate subset
-    if (hasConfigKeys) {
-      s.append(", ");
-      s.append(HConstants.METADATA).append(" => ");
-      s.append('{');
-      boolean printComma = false;
-      for (Bytes k : values.keySet()) {
-        if (RESERVED_KEYWORDS.contains(k)) {
-          continue;
-        }
-        String key = Bytes.toString(k.get());
-        String value = Bytes.toStringBinary(values.get(k).get());
-        if (printComma) {
-          s.append(", ");
-        }
-        printComma = true;
-        s.append('\'').append(key).append('\'');
-        s.append(" => ");
-        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
-      }
-      s.append('}');
-    }
-
-    if (!configuration.isEmpty()) {
-      s.append(", ");
-      s.append(HConstants.CONFIGURATION).append(" => ");
-      s.append('{');
-      boolean printCommaForConfiguration = false;
-      for (Map.Entry<String, String> e : configuration.entrySet()) {
-        if (printCommaForConfiguration) s.append(", ");
-        printCommaForConfiguration = true;
-        s.append('\'').append(e.getKey()).append('\'');
-        s.append(" => ");
-        s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
-      }
-      s.append("}");
-    }
-    return s;
+    return delegatee.toStringCustomizedValues();
   }
 
   public static Unit getUnit(String key) {
-    Unit unit;
-      /* TTL for now, we can add more as we need */
-    if (key.equals(HColumnDescriptor.TTL)) {
-      unit = Unit.TIME_INTERVAL;
-    } else if (key.equals(HColumnDescriptor.MOB_THRESHOLD)) {
-      unit = Unit.LONG;
-    } else if (key.equals(HColumnDescriptor.IS_MOB)) {
-      unit = Unit.BOOLEAN;
-    } else {
-      unit = Unit.NONE;
-    }
-    return unit;
+    return ColumnFamilyDescriptorBuilder.getUnit(key);
   }
 
   public static Map<String, String> getDefaultValues() {
-    return Collections.unmodifiableMap(DEFAULT_VALUES);
+    return ColumnFamilyDescriptorBuilder.getDefaultValues();
   }
 
   /**
@@ -1062,33 +622,12 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    */
   @Override
   public int hashCode() {
-    int result = Bytes.hashCode(this.name);
-    result ^= (int) COLUMN_DESCRIPTOR_VERSION;
-    result ^= values.hashCode();
-    result ^= configuration.hashCode();
-    return result;
+    return delegatee.hashCode();
   }
 
-  // Comparable
   @Override
-  public int compareTo(HColumnDescriptor o) {
-    int result = Bytes.compareTo(this.name, o.getName());
-    if (result == 0) {
-      // punt on comparison for ordering, just calculate difference.
-      result = this.values.hashCode() - o.values.hashCode();
-      if (result < 0)
-        result = -1;
-      else if (result > 0)
-        result = 1;
-    }
-    if (result == 0) {
-      result = this.configuration.hashCode() - o.configuration.hashCode();
-      if (result < 0)
-        result = -1;
-      else if (result > 0)
-        result = 1;
-    }
-    return result;
+  public int compareTo(HColumnDescriptor other) {
+    return delegatee.compareTo(other.delegatee);
   }
 
   /**
@@ -1096,8 +635,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @see #parseFrom(byte[])
    */
   public byte[] toByteArray() {
-    return ProtobufUtil
-        .prependPBMagic(ProtobufUtil.convertToColumnFamilySchema(this).toByteArray());
+    return ColumnFamilyDescriptorBuilder.toByteArray(delegatee);
   }
 
   /**
@@ -1107,191 +645,150 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @see #toByteArray()
    */
   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
-    if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
-    int pblen = ProtobufUtil.lengthOfPBMagic();
-    ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
-    ColumnFamilySchema cfs = null;
-    try {
-      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
-      cfs = builder.build();
-    } catch (IOException e) {
-      throw new DeserializationException(e);
+    ColumnFamilyDescriptor desc = ColumnFamilyDescriptorBuilder.parseFrom(bytes);
+    if (desc instanceof ModifyableColumnFamilyDescriptor) {
+      return new HColumnDescriptor((ModifyableColumnFamilyDescriptor) desc);
+    } else {
+      return new HColumnDescriptor(new ModifyableColumnFamilyDescriptor(desc));
     }
-    return ProtobufUtil.convertToHColumnDesc(cfs);
   }
 
-  /**
-   * Getter for accessing the configuration value by key.
-   */
+  @Override
   public String getConfigurationValue(String key) {
-    return configuration.get(key);
+    return delegatee.getConfigurationValue(key);
   }
 
-  /**
-   * Getter for fetching an unmodifiable {@link #configuration} map.
-   */
+  @Override
   public Map<String, String> getConfiguration() {
-    // shallow pointer copy
-    return Collections.unmodifiableMap(configuration);
+    return delegatee.getConfiguration();
   }
 
   /**
-   * Setter for storing a configuration setting in {@link #configuration} map.
+   * Setter for storing a configuration setting.
    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
    * @param value String value. If null, removes the configuration.
    */
   public HColumnDescriptor setConfiguration(String key, String value) {
-    if (value == null) {
-      removeConfiguration(key);
-    } else {
-      configuration.put(key, value);
-    }
+    getDelegateeForModification().setConfiguration(key, value);
     return this;
   }
 
   /**
-   * Remove a configuration setting represented by the key from the {@link #configuration} map.
+   * Remove a configuration setting represented by the key.
    */
-  public void removeConfiguration(final String key) {
-    configuration.remove(key);
+  public HColumnDescriptor removeConfiguration(final String key) {
+    getDelegateeForModification().removeConfiguration(key);
+    return this;
   }
 
-  /**
-   * Return the encryption algorithm in use by this family
-   */
+  @Override
   public String getEncryptionType() {
-    return getValue(ENCRYPTION);
+    return delegatee.getEncryptionType();
   }
 
   /**
    * Set the encryption algorithm for use with this family
-   * @param algorithm
+   * @param value the encryption algorithm to use with this family
    */
-  public HColumnDescriptor setEncryptionType(String algorithm) {
-    setValue(ENCRYPTION, algorithm);
+  public HColumnDescriptor setEncryptionType(String value) {
+    getDelegateeForModification().setEncryptionType(value);
     return this;
   }
 
-  /** Return the raw crypto key attribute for the family, or null if not set  */
+  @Override
   public byte[] getEncryptionKey() {
-    return getValue(Bytes.toBytes(ENCRYPTION_KEY));
+    return delegatee.getEncryptionKey();
   }
 
   /** Set the raw crypto key attribute for the family */
-  public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
-    setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
+  public HColumnDescriptor setEncryptionKey(byte[] value) {
+    getDelegateeForModification().setEncryptionKey(value);
     return this;
   }
 
-  /**
-   * Gets the mob threshold of the family.
-   * If the size of a cell value is larger than this threshold, it's regarded as a mob.
-   * The default threshold is 1024*100(100K)B.
-   * @return The mob threshold.
-   */
+  @Override
   public long getMobThreshold() {
-    byte[] threshold = getValue(MOB_THRESHOLD_BYTES);
-    return threshold != null && threshold.length == Bytes.SIZEOF_LONG ? Bytes.toLong(threshold)
-        : DEFAULT_MOB_THRESHOLD;
+    return delegatee.getMobThreshold();
   }
 
   /**
    * Sets the mob threshold of the family.
-   * @param threshold The mob threshold.
+   * @param value The mob threshold.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setMobThreshold(long threshold) {
-    setValue(MOB_THRESHOLD_BYTES, Bytes.toBytes(threshold));
+  public HColumnDescriptor setMobThreshold(long value) {
+    getDelegateeForModification().setMobThreshold(value);
     return this;
   }
 
-  /**
-   * Gets whether the mob is enabled for the family.
-   * @return True if the mob is enabled for the family.
-   */
+  @Override
   public boolean isMobEnabled() {
-    byte[] isMobEnabled = getValue(IS_MOB_BYTES);
-    return isMobEnabled != null && isMobEnabled.length == Bytes.SIZEOF_BOOLEAN
-        && Bytes.toBoolean(isMobEnabled);
+    return delegatee.isMobEnabled();
   }
 
   /**
    * Enables the mob for the family.
-   * @param isMobEnabled Whether to enable the mob for the family.
+   * @param value Whether to enable the mob for the family.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setMobEnabled(boolean isMobEnabled) {
-    setValue(IS_MOB_BYTES, Bytes.toBytes(isMobEnabled));
+  public HColumnDescriptor setMobEnabled(boolean value) {
+    getDelegateeForModification().setMobEnabled(value);
     return this;
   }
 
-  /**
-   * Get the mob compact partition policy for this family
-   * @return MobCompactPartitionPolicy
-   */
+  @Override
   public MobCompactPartitionPolicy getMobCompactPartitionPolicy() {
-    String policy = getValue(MOB_COMPACT_PARTITION_POLICY);
-    if (policy == null) {
-      return DEFAULT_MOB_COMPACT_PARTITION_POLICY;
-    }
-
-    return MobCompactPartitionPolicy.valueOf(policy.toUpperCase(Locale.ROOT));
+    return delegatee.getMobCompactPartitionPolicy();
   }
 
   /**
    * Set the mob compact partition policy for the family.
-   * @param policy policy type
+   * @param value policy type
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) {
-    return setValue(MOB_COMPACT_PARTITION_POLICY, policy.toString().toUpperCase(Locale.ROOT));
+  public HColumnDescriptor setMobCompactPartitionPolicy(MobCompactPartitionPolicy value) {
+    getDelegateeForModification().setMobCompactPartitionPolicy(value);
+    return this;
   }
 
-  /**
-   * @return replication factor set for this CF or {@link #DEFAULT_DFS_REPLICATION} if not set.
-   *         <p>
-   *         {@link #DEFAULT_DFS_REPLICATION} value indicates that user has explicitly not set any
-   *         block replication factor for this CF, hence use the default replication factor set in
-   *         the file system.
-   */
+  @Override
   public short getDFSReplication() {
-    String rf = getValue(DFS_REPLICATION);
-    return rf == null ? DEFAULT_DFS_REPLICATION : Short.valueOf(rf);
+    return delegatee.getDFSReplication();
   }
 
   /**
    * Set the replication factor to hfile(s) belonging to this family
-   * @param replication number of replicas the blocks(s) belonging to this CF should have, or
+   * @param value number of replicas the blocks(s) belonging to this CF should have, or
    *          {@link #DEFAULT_DFS_REPLICATION} for the default replication factor set in the
    *          filesystem
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setDFSReplication(short replication) {
-    if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) {
-      throw new IllegalArgumentException(
-          "DFS replication factor cannot be less than 1 if explicitly set.");
-    }
-    setValue(DFS_REPLICATION, Short.toString(replication));
+  public HColumnDescriptor setDFSReplication(short value) {
+    getDelegateeForModification().setDFSReplication(value);
     return this;
   }
 
-  /**
-   * Return the storage policy in use by this family
-   * <p/>
-   * Not using {@code enum} here because HDFS is not using {@code enum} for storage policy, see
-   * org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite for more details
-   */
+  @Override
   public String getStoragePolicy() {
-    return getValue(STORAGE_POLICY);
+    return delegatee.getStoragePolicy();
   }
 
   /**
    * Set the storage policy for use with this family
-   * @param policy the policy to set, valid setting includes: <i>"LAZY_PERSIST"</i>,
+   * @param value the policy to set, valid setting includes: <i>"LAZY_PERSIST"</i>,
    *          <i>"ALL_SSD"</i>, <i>"ONE_SSD"</i>, <i>"HOT"</i>, <i>"WARM"</i>, <i>"COLD"</i>
    */
-  public HColumnDescriptor setStoragePolicy(String policy) {
-    setValue(STORAGE_POLICY, policy);
+  public HColumnDescriptor setStoragePolicy(String value) {
+    getDelegateeForModification().setStoragePolicy(value);
     return this;
   }
+
+  @Override
+  public Bytes getValue(Bytes key) {
+    return delegatee.getValue(key);
+  }
+
+  protected ModifyableColumnFamilyDescriptor getDelegateeForModification() {
+    return delegatee;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e780463/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index bf58d73..5eb737b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -20,10 +20,11 @@ package org.apache.hadoop.hbase;
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -34,6 +35,8 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDesc
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor;
 
 /**
  * HTableDescriptor contains the details about an HBase table  such as the descriptors of
@@ -67,7 +70,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE;
   public static final int DEFAULT_REGION_REPLICATION = TableDescriptorBuilder.DEFAULT_REGION_REPLICATION;
   public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = TableDescriptorBuilder.DEFAULT_REGION_MEMSTORE_REPLICATION;
-  private final ModifyableTableDescriptor delegatee;
+  protected final ModifyableTableDescriptor delegatee;
 
   /**
    * Construct a table descriptor specifying a TableName object
@@ -75,7 +78,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug</a>
    */
   public HTableDescriptor(final TableName name) {
-    this(name, Collections.EMPTY_LIST, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
+    this(new ModifyableTableDescriptor(name));
   }
 
   /**
@@ -86,7 +89,16 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param desc The descriptor.
    */
   public HTableDescriptor(final HTableDescriptor desc) {
-    this(desc.getTableName(), desc.getFamilies(), desc.getValues(), desc.getConfiguration());
+    this(desc, true);
+  }
+
+  protected HTableDescriptor(final HTableDescriptor desc, boolean deepClone) {
+    this(deepClone ? new ModifyableTableDescriptor(desc.getTableName(), desc)
+      : desc.delegatee);
+  }
+
+  public HTableDescriptor(final TableDescriptor desc) {
+    this(new ModifyableTableDescriptor(desc.getTableName(), desc));
   }
 
   /**
@@ -99,16 +111,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param desc The descriptor.
    */
   public HTableDescriptor(final TableName name, final HTableDescriptor desc) {
-    this(name, desc.getFamilies(), desc.getValues(), desc.getConfiguration());
-  }
-
-  public HTableDescriptor(final TableDescriptor desc) {
-    this(desc.getTableName(), desc.getFamilies(), desc.getValues(), desc.getConfiguration());
-  }
-
-  private HTableDescriptor(final TableName name, final Collection<HColumnDescriptor> families,
-      Map<Bytes, Bytes> values, Map<String, String> configuration) {
-    this(new ModifyableTableDescriptor(name, families, values, configuration));
+    this(new ModifyableTableDescriptor(name, desc));
   }
 
   protected HTableDescriptor(ModifyableTableDescriptor delegatee) {
@@ -152,19 +155,9 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param key The key.
    * @return The value.
    */
-  @Override
-  public byte[] getValue(byte[] key) {
-    return delegatee.getValue(key);
-  }
-
-  /**
-   * Getter for accessing the metadata associated with the key
-   *
-   * @param key The key.
-   * @return The value.
-   */
   public String getValue(String key) {
-    return delegatee.getValue(key);
+    byte[] value = getValue(Bytes.toBytes(key));
+    return value == null ? null : Bytes.toString(value);
   }
 
   /**
@@ -182,7 +175,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param value The value. If null, removes the setting.
    */
   public HTableDescriptor setValue(byte[] key, byte[] value) {
-    delegatee.setValue(key, value);
+    getDelegateeForModification().setValue(key, value);
     return this;
   }
 
@@ -193,7 +186,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param value The value. If null, removes the setting.
    */
   public HTableDescriptor setValue(final Bytes key, final Bytes value) {
-    delegatee.setValue(key, value);
+    getDelegateeForModification().setValue(key, value);
     return this;
   }
 
@@ -204,7 +197,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param value The value. If null, removes the setting.
    */
   public HTableDescriptor setValue(String key, String value) {
-    delegatee.setValue(key, value);
+    getDelegateeForModification().setValue(Bytes.toBytes(key), Bytes.toBytes(value));
     return this;
   }
 
@@ -215,7 +208,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * parameters.
    */
   public void remove(final String key) {
-    delegatee.remove(key);
+    getDelegateeForModification().removeValue(Bytes.toBytes(key));
   }
 
   /**
@@ -225,7 +218,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * parameters.
    */
   public void remove(Bytes key) {
-    delegatee.remove(key);
+    getDelegateeForModification().removeValue(key);
   }
 
   /**
@@ -235,7 +228,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * parameters.
    */
   public void remove(final byte [] key) {
-    delegatee.remove(key);
+    getDelegateeForModification().removeValue(key);
   }
 
   /**
@@ -258,7 +251,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * only.
    */
   public HTableDescriptor setReadOnly(final boolean readOnly) {
-    delegatee.setReadOnly(readOnly);
+    getDelegateeForModification().setReadOnly(readOnly);
     return this;
   }
 
@@ -279,7 +272,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param isEnable True if enable compaction.
    */
   public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
-    delegatee.setCompactionEnabled(isEnable);
+    getDelegateeForModification().setCompactionEnabled(isEnable);
     return this;
   }
 
@@ -300,7 +293,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param isEnable True if enable normalization.
    */
   public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
-    delegatee.setNormalizationEnabled(isEnable);
+    getDelegateeForModification().setNormalizationEnabled(isEnable);
     return this;
   }
 
@@ -309,7 +302,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param durability enum value
    */
   public HTableDescriptor setDurability(Durability durability) {
-    delegatee.setDurability(durability);
+    getDelegateeForModification().setDurability(durability);
     return this;
   }
 
@@ -348,7 +341,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param clazz the class name
    */
   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
-    delegatee.setRegionSplitPolicyClassName(clazz);
+    getDelegateeForModification().setRegionSplitPolicyClassName(clazz);
     return this;
   }
 
@@ -395,7 +388,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * before a split is triggered.
    */
   public HTableDescriptor setMaxFileSize(long maxFileSize) {
-    delegatee.setMaxFileSize(maxFileSize);
+    getDelegateeForModification().setMaxFileSize(maxFileSize);
     return this;
   }
 
@@ -418,7 +411,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param memstoreFlushSize memory cache flush size for each hregion
    */
   public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
-    delegatee.setMemStoreFlushSize(memstoreFlushSize);
+    getDelegateeForModification().setMemStoreFlushSize(memstoreFlushSize);
     return this;
   }
 
@@ -429,7 +422,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param clazz the class name
    */
   public HTableDescriptor setFlushPolicyClassName(String clazz) {
-    delegatee.setFlushPolicyClassName(clazz);
+    getDelegateeForModification().setFlushPolicyClassName(clazz);
     return this;
   }
 
@@ -451,7 +444,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param family HColumnDescriptor of family to add.
    */
   public HTableDescriptor addFamily(final HColumnDescriptor family) {
-    delegatee.addFamily(family);
+    getDelegateeForModification().addColumnFamily(family);
     return this;
   }
 
@@ -461,7 +454,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @return this (for chained invocation)
    */
   public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
-    delegatee.modifyFamily(family);
+    getDelegateeForModification().modifyColumnFamily(family);
     return this;
   }
 
@@ -470,9 +463,8 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param familyName Family name or column name.
    * @return true if the table contains the specified family name
    */
-  @Override
   public boolean hasFamily(final byte [] familyName) {
-    return delegatee.hasFamily(familyName);
+    return delegatee.hasColumnFamily(familyName);
   }
 
   /**
@@ -548,13 +540,15 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
   /**
    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
    * of all the column families of the table.
-   *
+   * @deprecated Use {@link #getColumnFamilies}.
    * @return Immutable collection of {@link HColumnDescriptor} of all the
    * column families.
    */
-  @Override
+  @Deprecated
   public Collection<HColumnDescriptor> getFamilies() {
-    return delegatee.getFamilies();
+    return Stream.of(delegatee.getColumnFamilies())
+            .map(this::toHColumnDescriptor)
+            .collect(Collectors.toList());
   }
 
   /**
@@ -578,7 +572,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param regionReplication the replication factor per region
    */
   public HTableDescriptor setRegionReplication(int regionReplication) {
-    delegatee.setRegionReplication(regionReplication);
+    getDelegateeForModification().setRegionReplication(regionReplication);
     return this;
   }
 
@@ -600,12 +594,12 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    *                                  data only when the primary flushes the memstore.
    */
   public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
-    delegatee.setRegionMemstoreReplication(memstoreReplication);
+    getDelegateeForModification().setRegionMemstoreReplication(memstoreReplication);
     return this;
   }
 
   public HTableDescriptor setPriority(int priority) {
-    delegatee.setPriority(priority);
+    getDelegateeForModification().setPriority(priority);
     return this;
   }
 
@@ -619,12 +613,11 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
    * This returns all the keys of the family map which represents the column
    * family names of the table.
-   *
    * @return Immutable sorted set of the keys of the families.
+   * @deprecated Use {@link #getColumnFamilyNames()}.
    */
-  @Override
   public Set<byte[]> getFamiliesKeys() {
-    return delegatee.getFamiliesKeys();
+    return delegatee.getColumnFamilyNames();
   }
 
   /**
@@ -645,23 +638,25 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    *
    * @see #getFamilies()
    */
+  @Deprecated
   @Override
   public HColumnDescriptor[] getColumnFamilies() {
-    return delegatee.getColumnFamilies();
+    return Stream.of(delegatee.getColumnFamilies())
+            .map(this::toHColumnDescriptor)
+            .toArray(size -> new HColumnDescriptor[size]);
   }
 
-
   /**
    * Returns the HColumnDescriptor for a specific column family with name as
    * specified by the parameter column.
-   *
    * @param column Column family name
    * @return Column descriptor for the passed family name or the family on
    * passed in column.
+   * @deprecated Use {@link #getColumnFamily(byte[])}.
    */
-  @Override
-  public HColumnDescriptor getFamily(final byte [] column) {
-    return delegatee.getFamily(column);
+  @Deprecated
+  public HColumnDescriptor getFamily(final byte[] column) {
+    return toHColumnDescriptor(delegatee.getColumnFamily(column));
   }
 
 
@@ -674,7 +669,24 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * passed in column.
    */
   public HColumnDescriptor removeFamily(final byte [] column) {
-    return delegatee.removeFamily(column);
+    return toHColumnDescriptor(getDelegateeForModification().removeColumnFamily(column));
+  }
+
+  /**
+   * Return a HColumnDescriptor for user to keep the compatibility as much as possible.
+   * @param desc read-only ColumnFamilyDescriptor
+   * @return The older implementation of ColumnFamilyDescriptor
+   */
+  protected HColumnDescriptor toHColumnDescriptor(ColumnFamilyDescriptor desc) {
+    if (desc == null) {
+      return null;
+    } else if (desc instanceof ModifyableColumnFamilyDescriptor) {
+      return new HColumnDescriptor((ModifyableColumnFamilyDescriptor) desc);
+    } else if (desc instanceof HColumnDescriptor) {
+      return (HColumnDescriptor) desc;
+    } else {
+      return new HColumnDescriptor(new ModifyableColumnFamilyDescriptor(desc));
+    }
   }
 
   /**
@@ -688,7 +700,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @throws IOException
    */
   public HTableDescriptor addCoprocessor(String className) throws IOException {
-    delegatee.addCoprocessor(className);
+    getDelegateeForModification().addCoprocessor(className);
     return this;
   }
 
@@ -709,7 +721,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
   public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
                              int priority, final Map<String, String> kvs)
   throws IOException {
-    delegatee.addCoprocessor(className, jarFilePath, priority, kvs);
+    getDelegateeForModification().addCoprocessor(className, jarFilePath, priority, kvs);
     return this;
   }
 
@@ -725,7 +737,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @throws IOException
    */
   public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
-    delegatee.addCoprocessorWithSpec(specStr);
+    getDelegateeForModification().addCoprocessorWithSpec(specStr);
     return this;
   }
 
@@ -755,7 +767,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param className Class name of the co-processor
    */
   public void removeCoprocessor(String className) {
-    delegatee.removeCoprocessor(className);
+    getDelegateeForModification().removeCoprocessor(className);
   }
 
   public final static String NAMESPACE_FAMILY_INFO = TableDescriptorBuilder.NAMESPACE_FAMILY_INFO;
@@ -768,14 +780,14 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
 
   @Deprecated
   public HTableDescriptor setOwner(User owner) {
-    delegatee.setOwner(owner);
+    getDelegateeForModification().setOwner(owner);
     return this;
   }
 
   // used by admin.rb:alter(table_name,*args) to update owner.
   @Deprecated
   public HTableDescriptor setOwnerString(String ownerString) {
-    delegatee.setOwnerString(ownerString);
+    getDelegateeForModification().setOwnerString(ownerString);
     return this;
   }
 
@@ -790,7 +802,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @see #parseFrom(byte[])
    */
   public byte[] toByteArray() {
-    return delegatee.toByteArray();
+    return TableDescriptorBuilder.toByteArray(delegatee);
   }
 
   /**
@@ -802,7 +814,12 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    */
   public static HTableDescriptor parseFrom(final byte [] bytes)
   throws DeserializationException, IOException {
-    return new HTableDescriptor(ModifyableTableDescriptor.parseFrom(bytes));
+    TableDescriptor desc = TableDescriptorBuilder.parseFrom(bytes);
+    if (desc instanceof ModifyableTableDescriptor) {
+      return new HTableDescriptor((ModifyableTableDescriptor) desc);
+    } else {
+      return new HTableDescriptor(desc);
+    }
   }
 
   /**
@@ -827,7 +844,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * @param value String value. If null, removes the setting.
    */
   public HTableDescriptor setConfiguration(String key, String value) {
-    delegatee.setConfiguration(key, value);
+    getDelegateeForModification().setConfiguration(key, value);
     return this;
   }
 
@@ -835,6 +852,35 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * Remove a config setting represented by the key from the map
    */
   public void removeConfiguration(final String key) {
-    delegatee.removeConfiguration(key);
+    getDelegateeForModification().removeConfiguration(key);
+  }
+
+  @Override
+  public Bytes getValue(Bytes key) {
+    return delegatee.getValue(key);
+  }
+
+  @Override
+  public byte[] getValue(byte[] key) {
+    return delegatee.getValue(key);
+  }
+
+  @Override
+  public Set<byte[]> getColumnFamilyNames() {
+    return delegatee.getColumnFamilyNames();
+  }
+
+  @Override
+  public boolean hasColumnFamily(byte[] name) {
+    return delegatee.hasColumnFamily(name);
+  }
+
+  @Override
+  public ColumnFamilyDescriptor getColumnFamily(byte[] name) {
+    return delegatee.getColumnFamily(name);
+  }
+
+  protected ModifyableTableDescriptor getDelegateeForModification() {
+    return delegatee;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e780463/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 3d63705..1a3cae2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -23,7 +23,6 @@ import java.util.Map;
 import java.util.concurrent.CompletableFuture;
 import java.util.regex.Pattern;
 
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerName;
@@ -270,7 +269,7 @@ public interface AsyncAdmin {
    * @param columnFamily column family descriptor of column family to be added
    */
   CompletableFuture<Void> addColumnFamily(final TableName tableName,
-      final HColumnDescriptor columnFamily);
+      final ColumnFamilyDescriptor columnFamily);
 
   /**
    * Delete a column family from a table.
@@ -285,7 +284,7 @@ public interface AsyncAdmin {
    * @param columnFamily new column family descriptor to use
    */
   CompletableFuture<Void> modifyColumnFamily(final TableName tableName,
-      final HColumnDescriptor columnFamily);
+      final ColumnFamilyDescriptor columnFamily);
 
   /**
    * Create a new namespace.

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e780463/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index 987080f..c972b4c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -40,10 +40,10 @@ import com.google.common.annotations.VisibleForTesting;
 
 import io.netty.util.Timeout;
 import io.netty.util.TimerTask;
+import java.util.stream.Stream;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -645,7 +645,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   }
 
   @Override
-  public CompletableFuture<Void> addColumnFamily(TableName tableName, HColumnDescriptor columnFamily) {
+  public CompletableFuture<Void> addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) {
     return this.<AddColumnRequest, AddColumnResponse> procedureCall(
       RequestConverter.buildAddColumnRequest(tableName, columnFamily, ng.getNonceGroup(),
         ng.newNonce()), (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(),
@@ -662,7 +662,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
 
   @Override
   public CompletableFuture<Void> modifyColumnFamily(TableName tableName,
-      HColumnDescriptor columnFamily) {
+      ColumnFamilyDescriptor columnFamily) {
     return this.<ModifyColumnRequest, ModifyColumnResponse> procedureCall(
       RequestConverter.buildModifyColumnRequest(tableName, columnFamily, ng.getNonceGroup(),
         ng.newNonce()), (s, c, req, done) -> s.modifyColumn(c, req, done),
@@ -1679,7 +1679,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
           Arrays.asList(tables).forEach(
             table -> {
               Map<String, Integer> cfs = new HashMap<>();
-              Arrays.asList(table.getColumnFamilies()).stream()
+              Stream.of(table.getColumnFamilies())
                   .filter(column -> column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL)
                   .forEach(column -> {
                     cfs.put(column.getNameAsString(), column.getScope());


Mime
View raw message