hbase-commits mailing list archives

From: e...@apache.org
Subject: [1/2] hbase git commit: HBASE-15467 Remove 1.x/2.0 TableDescriptor incompatibility
Date: Sat, 18 Jun 2016 00:37:47 GMT
Repository: hbase
Updated Branches:
  refs/heads/master 65a8d7743 -> bdb0cc880

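The patch removes the 2.0-only TableDescriptor wrapper, so TableDescriptors implementations hand back HTableDescriptor directly and call sites no longer unwrap it. A minimal sketch of the resulting caller pattern, assuming the Configuration names a reachable hbase root dir; the class and table names are illustrative, not from the patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class DescriptorReadSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Reads descriptors out of the hbase root dir named in the Configuration.
        FSTableDescriptors fstd = new FSTableDescriptors(conf);

        // Before this patch: fstd.getDescriptor(name).getHTableDescriptor()
        // After it: get() returns the HTableDescriptor itself, or null if absent.
        HTableDescriptor htd = fstd.get(TableName.valueOf("example_table"));
        if (htd != null) {
          System.out.println("Found " + htd.getTableName().getNameAsString());
        }
      }
    }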

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 18156cb..d0aeb6c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -40,15 +40,16 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableInfoMissingException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 
 /**
  * Implementation of {@link TableDescriptors} that reads descriptors from the
@@ -88,13 +89,13 @@ public class FSTableDescriptors implements TableDescriptors {
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
-  private final Map<TableName, TableDescriptor> cache =
-    new ConcurrentHashMap<TableName, TableDescriptor>();
+  private final Map<TableName, HTableDescriptor> cache =
+    new ConcurrentHashMap<TableName, HTableDescriptor>();
 
   /**
    * Table descriptor for <code>hbase:meta</code> catalog table
    */
-  private final HTableDescriptor metaTableDescritor;
+  private final HTableDescriptor metaTableDescriptor;
 
   /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
@@ -122,7 +123,44 @@ public class FSTableDescriptors implements TableDescriptors {
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
 
-    this.metaTableDescritor = TableDescriptor.metaTableDescriptor(conf);
+    this.metaTableDescriptor = createMetaTableDescriptor(conf);
+  }
+
+  @VisibleForTesting
+  public static HTableDescriptor createMetaTableDescriptor(final Configuration conf)
+      throws IOException {
+    HTableDescriptor metaDescriptor = new HTableDescriptor(
+        TableName.META_TABLE_NAME,
+        new HColumnDescriptor[] {
+            new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                    HConstants.DEFAULT_HBASE_META_VERSIONS))
+                .setInMemory(true)
+                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+                    // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+                .setBloomFilterType(BloomType.NONE)
+                    // Enable cache of data blocks in L1 if more than one caching tier deployed:
+                    // e.g. if using CombinedBlockCache (BucketCache).
+                .setCacheDataInL1(true),
+            new HColumnDescriptor(HConstants.TABLE_FAMILY)
+                // Ten is arbitrary number.  Keep versions to help debugging.
+                .setMaxVersions(10)
+                .setInMemory(true)
+                .setBlocksize(8 * 1024)
+                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+                    // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+                .setBloomFilterType(BloomType.NONE)
+                    // Enable cache of data blocks in L1 if more than one caching tier deployed:
+                    // e.g. if using CombinedBlockCache (BucketCache).
+                .setCacheDataInL1(true)
+        }) {
+    };
+    metaDescriptor.addCoprocessor(
+        "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
+        null, Coprocessor.PRIORITY_SYSTEM, null);
+    return metaDescriptor;
   }
 
   @Override
@@ -150,12 +188,12 @@ public class FSTableDescriptors implements TableDescriptors {
    */
   @Override
   @Nullable
-  public TableDescriptor getDescriptor(final TableName tablename)
+  public HTableDescriptor get(final TableName tablename)
   throws IOException {
     invocations++;
     if (TableName.META_TABLE_NAME.equals(tablename)) {
       cachehits++;
-      return new TableDescriptor(metaTableDescritor);
+      return metaTableDescriptor;
     }
     // hbase:meta is already handled. If some one tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
@@ -165,15 +203,15 @@ public class FSTableDescriptors implements TableDescriptors {
 
     if (usecache) {
       // Look in cache of descriptors.
-      TableDescriptor cachedtdm = this.cache.get(tablename);
+      HTableDescriptor cachedtdm = this.cache.get(tablename);
       if (cachedtdm != null) {
         cachehits++;
         return cachedtdm;
       }
     }
-    TableDescriptor tdmt = null;
+    HTableDescriptor tdmt = null;
     try {
-      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly);
+      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
     } catch (NullPointerException e) {
       LOG.debug("Exception during readTableDecriptor. Current table name = "
           + tablename, e);
@@ -192,43 +230,26 @@ public class FSTableDescriptors implements TableDescriptors {
   }
 
   /**
-   * Get the current table descriptor for the given table, or null if none exists.
-   *
-   * Uses a local cache of the descriptor but still checks the filesystem on each call
-   * to see if a newer file has been created since the cached one was read.
-   */
-  @Override
-  public HTableDescriptor get(TableName tableName) throws IOException {
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      cachehits++;
-      return metaTableDescritor;
-    }
-    TableDescriptor descriptor = getDescriptor(tableName);
-    return descriptor == null ? null : descriptor.getHTableDescriptor();
-  }
-
-  /**
    * Returns a map from table name to table descriptor for all tables.
    */
   @Override
-  public Map<String, TableDescriptor> getAllDescriptors()
+  public Map<String, HTableDescriptor> getAllDescriptors()
   throws IOException {
-    Map<String, TableDescriptor> tds = new TreeMap<String, TableDescriptor>();
+    Map<String, HTableDescriptor> tds = new TreeMap<String, HTableDescriptor>();
 
     if (fsvisited && usecache) {
-      for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
+      for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
         tds.put(entry.getKey().toString(), entry.getValue());
       }
       // add hbase:meta to the response
-      tds.put(this.metaTableDescritor.getNameAsString(),
-          new TableDescriptor(metaTableDescritor));
+      tds.put(this.metaTableDescriptor.getNameAsString(), metaTableDescriptor);
     } else {
       LOG.debug("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
       for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
-        TableDescriptor htd = null;
+        HTableDescriptor htd = null;
         try {
-          htd = getDescriptor(FSUtils.getTableName(d));
+          htd = get(FSUtils.getTableName(d));
         } catch (FileNotFoundException fnfe) {
           // inability of retrieving one HTD shouldn't stop getting the remaining
           LOG.warn("Trouble retrieving htd", fnfe);
@@ -237,7 +258,7 @@ public class FSTableDescriptors implements TableDescriptors {
           allvisited = false;
           continue;
         } else {
-          tds.put(htd.getHTableDescriptor().getTableName().getNameAsString(), htd);
+          tds.put(htd.getTableName().getNameAsString(), htd);
         }
         fsvisited = allvisited;
       }
@@ -251,10 +272,10 @@ public class FSTableDescriptors implements TableDescriptors {
   @Override
   public Map<String, HTableDescriptor> getAll() throws IOException {
     Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
-    Map<String, TableDescriptor> allDescriptors = getAllDescriptors();
-    for (Map.Entry<String, TableDescriptor> entry : allDescriptors
+    Map<String, HTableDescriptor> allDescriptors = getAllDescriptors();
+    for (Map.Entry<String, HTableDescriptor> entry : allDescriptors
         .entrySet()) {
-      htds.put(entry.getKey(), entry.getValue().getHTableDescriptor());
+      htds.put(entry.getKey(), entry.getValue());
     }
     return htds;
   }
@@ -288,27 +309,6 @@ public class FSTableDescriptors implements TableDescriptors {
    * and updates the local cache with it.
    */
   @Override
-  public void add(TableDescriptor htd) throws IOException {
-    if (fsreadonly) {
-      throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
-    }
-    TableName tableName = htd.getHTableDescriptor().getTableName();
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      throw new NotImplementedException();
-    }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
-      throw new NotImplementedException(
-        "Cannot add a table descriptor for a reserved subdirectory name: "
-            + htd.getHTableDescriptor().getNameAsString());
-    }
-    updateTableDescriptor(htd);
-  }
-
-  /**
-   * Adds (or updates) the table descriptor to the FileSystem
-   * and updates the local cache with it.
-   */
-  @Override
   public void add(HTableDescriptor htd) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
@@ -322,12 +322,7 @@ public class FSTableDescriptors implements TableDescriptors {
           "Cannot add a table descriptor for a reserved subdirectory name: "
               + htd.getNameAsString());
     }
-    TableDescriptor descriptor = getDescriptor(htd.getTableName());
-    if (descriptor == null)
-      descriptor = new TableDescriptor(htd);
-    else
-      descriptor.setHTableDescriptor(htd);
-    updateTableDescriptor(descriptor);
+    updateTableDescriptor(htd);
   }
 
   /**
@@ -347,12 +342,8 @@ public class FSTableDescriptors implements TableDescriptors {
         throw new IOException("Failed delete of " + tabledir.toString());
       }
     }
-    TableDescriptor descriptor = this.cache.remove(tablename);
-    if (descriptor == null) {
-      return null;
-    } else {
-      return descriptor.getHTableDescriptor();
-    }
+    HTableDescriptor descriptor = this.cache.remove(tablename);
+    return descriptor;
   }
 
   /**
@@ -535,49 +526,28 @@ public class FSTableDescriptors implements TableDescriptors {
    * if it exists, bypassing the local cache.
    * Returns null if it's not found.
    */
-  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
       Path hbaseRootDir, TableName tableName) throws IOException {
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     return getTableDescriptorFromFs(fs, tableDir);
   }
 
   /**
-   * Returns the latest table descriptor for the given table directly from the file system
-   * if it exists, bypassing the local cache.
-   * Returns null if it's not found.
-   */
-  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
-   Path hbaseRootDir, TableName tableName, boolean rewritePb) throws IOException {
-    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
-    return getTableDescriptorFromFs(fs, tableDir, rewritePb);
-  }
-  /**
-   * Returns the latest table descriptor for the table located at the given directory
-   * directly from the file system if it exists.
-   * @throws TableInfoMissingException if there is no descriptor
-   */
-  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
-    throws IOException {
-    return getTableDescriptorFromFs(fs, tableDir, false);
-  }
-
-  /**
    * Returns the latest table descriptor for the table located at the given directory
    * directly from the file system if it exists.
    * @throws TableInfoMissingException if there is no descriptor
    */
-  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
-    boolean rewritePb)
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
   throws IOException {
     FileStatus status = getTableInfoPath(fs, tableDir, false);
     if (status == null) {
       throw new TableInfoMissingException("No table descriptor file under " + tableDir);
     }
-    return readTableDescriptor(fs, status, rewritePb);
+    return readTableDescriptor(fs, status);
   }
 
-  private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
-      boolean rewritePb) throws IOException {
+  private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
+      throws IOException {
     int len = Ints.checkedCast(status.getLen());
     byte [] content = new byte[len];
     FSDataInputStream fsDataInputStream = fs.open(status.getPath());
@@ -586,34 +556,13 @@ public class FSTableDescriptors implements TableDescriptors {
     } finally {
       fsDataInputStream.close();
     }
-    TableDescriptor td = null;
+    HTableDescriptor htd = null;
     try {
-      td = TableDescriptor.parseFrom(content);
+      htd = HTableDescriptor.parseFrom(content);
     } catch (DeserializationException e) {
-      // we have old HTableDescriptor here
-      try {
-        HTableDescriptor htd = HTableDescriptor.parseFrom(content);
-        LOG.warn("Found old table descriptor, converting to new format for table " +
-            htd.getTableName() + "; NOTE table will be in ENABLED state!");
-        td = new TableDescriptor(htd);
-        if (rewritePb) rewriteTableDescriptor(fs, status, td);
-      } catch (DeserializationException e1) {
-        throw new IOException("content=" + Bytes.toShort(content), e);
-      }
+      throw new IOException("content=" + Bytes.toShort(content), e);
     }
-    if (rewritePb && !ProtobufUtil.isPBMagicPrefix(content)) {
-      // Convert the file over to be pb before leaving here.
-      rewriteTableDescriptor(fs, status, td);
-    }
-    return td;
-  }
-
-  private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus status,
-      final TableDescriptor td)
-  throws IOException {
-    Path tableInfoDir = status.getPath().getParent();
-    Path tableDir = tableInfoDir.getParent();
-    writeTableDescriptor(fs, td, tableDir, status);
+    return htd;
   }
 
   /**
@@ -621,18 +570,18 @@ public class FSTableDescriptors implements TableDescriptors {
    * @throws IOException Thrown if failed update.
    * @throws NotImplementedException if in read only mode
    */
-  @VisibleForTesting Path updateTableDescriptor(TableDescriptor td)
+  @VisibleForTesting Path updateTableDescriptor(HTableDescriptor td)
   throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
     }
-    TableName tableName = td.getHTableDescriptor().getTableName();
+    TableName tableName = td.getTableName();
     Path tableDir = getTableDir(tableName);
     Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
     if (p == null) throw new IOException("Failed update");
     LOG.info("Updated tableinfo=" + p);
     if (usecache) {
-      this.cache.put(td.getHTableDescriptor().getTableName(), td);
+      this.cache.put(td.getTableName(), td);
     }
     return p;
   }
@@ -683,7 +632,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * @return Descriptor file or null if we failed write.
    */
   private static Path writeTableDescriptor(final FileSystem fs,
-    final TableDescriptor htd, final Path tableDir,
+    final HTableDescriptor htd, final Path tableDir,
     final FileStatus currentDescriptorFile)
   throws IOException {
     // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
@@ -738,7 +687,7 @@ public class FSTableDescriptors implements TableDescriptors {
     return tableInfoDirPath;
   }
 
-  private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
+  private static void writeTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
   throws IOException {
     FSDataOutputStream out = fs.create(p, false);
     try {
@@ -755,17 +704,8 @@ public class FSTableDescriptors implements TableDescriptors {
    * Used by tests.
    * @return True if we successfully created file.
    */
-  public boolean createTableDescriptor(TableDescriptor htd) throws IOException {
-    return createTableDescriptor(htd, false);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
-   * Used by tests.
-   * @return True if we successfully created file.
-   */
   public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
-    return createTableDescriptor(new TableDescriptor(htd), false);
+    return createTableDescriptor(htd, false);
   }
 
   /**
@@ -775,22 +715,13 @@ public class FSTableDescriptors implements TableDescriptors {
    *
    * @return True if we successfully created file.
    */
-  public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
+  public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
   throws IOException {
-    Path tableDir = getTableDir(htd.getHTableDescriptor().getTableName());
+    Path tableDir = getTableDir(htd.getTableName());
     return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
   }
 
   /**
-   * Create tables descriptor for given HTableDescriptor. Default TableDescriptor state
-   * will be used (typically ENABLED).
-   */
-  public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
-      throws IOException {
-    return createTableDescriptor(new TableDescriptor(htd), forceCreation);
-  }
-
-  /**
    * Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
    * a new table or snapshot a table.
    * @param tableDir table directory under which we should write the file
@@ -802,7 +733,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * @throws IOException if a filesystem error occurs
    */
   public boolean createTableDescriptorForTableDirectory(Path tableDir,
-      TableDescriptor htd, boolean forceCreation) throws IOException {
+      HTableDescriptor htd, boolean forceCreation) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
     }
@@ -811,7 +742,7 @@ public class FSTableDescriptors implements TableDescriptors {
       LOG.debug("Current tableInfoPath = " + status.getPath());
       if (!forceCreation) {
         if (fs.exists(status.getPath()) && status.getLen() > 0) {
-          if (readTableDescriptor(fs, status, false).equals(htd)) {
+          if (readTableDescriptor(fs, status).equals(htd)) {
             LOG.debug("TableInfo already exists.. Skipping creation");
             return false;
           }

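The hbase:meta descriptor that TableDescriptor.metaTableDescriptor() used to build now comes from the static factory added above. A short sketch of obtaining it, with max versions and block size falling back to the HConstants defaults; the class name is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class MetaDescriptorSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Builds the hbase:meta descriptor: catalog and table families kept
        // in-memory, blooms disabled, local replication scope, and the
        // MultiRowMutationEndpoint coprocessor attached at system priority.
        HTableDescriptor meta = FSTableDescriptors.createMetaTableDescriptor(conf);
        System.out.println(meta.getTableName() + " families=" + meta.getFamilies().size());
      }
    }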
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index e3ebe64..ea91d90 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -94,7 +94,6 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -1201,9 +1200,9 @@ public class HBaseFsck extends Configured implements Closeable {
         modTInfo = new TableInfo(tableName);
         tablesInfo.put(tableName, modTInfo);
         try {
-          TableDescriptor htd =
+          HTableDescriptor htd =
               FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName);
-          modTInfo.htds.add(htd.getHTableDescriptor());
+          modTInfo.htds.add(htd);
         } catch (IOException ioe) {
           if (!orphanTableDirs.containsKey(tableName)) {
             LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe);
@@ -1258,7 +1257,7 @@ public class HBaseFsck extends Configured implements Closeable {
     for (String columnfamimly : columns) {
       htd.addFamily(new HColumnDescriptor(columnfamimly));
     }
-    fstd.createTableDescriptor(new TableDescriptor(htd), true);
+    fstd.createTableDescriptor(htd, true);
     return true;
   }
 
@@ -1306,7 +1305,7 @@ public class HBaseFsck extends Configured implements Closeable {
           if (tableName.equals(htds[j].getTableName())) {
             HTableDescriptor htd = htds[j];
             LOG.info("fixing orphan table: " + tableName + " from cache");
-            fstd.createTableDescriptor(new TableDescriptor(htd), true);
+            fstd.createTableDescriptor(htd, true);
             j++;
             iter.remove();
           }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index e86d32a..307568c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -153,8 +153,7 @@ class HMerge {
 
       this.rootDir = FSUtils.getRootDir(conf);
       Path tabledir = FSUtils.getTableDir(this.rootDir, tableName);
-      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir)
-          .getHTableDescriptor();
+      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
       String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
 
       final Configuration walConf = new Configuration(conf);

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
index 44b3138..d708edc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -139,9 +138,9 @@ public class Merge extends Configured implements Tool {
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta);
     }
-    TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
+    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
       this.rootdir, this.tableName);
-    HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2);
+    HRegion merged = merge(htd, meta, info1, info2);
 
     LOG.info("Adding " + merged.getRegionInfo() + " to " +
         meta.getRegionInfo());

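Both merge tools above now read the descriptor through the static getTableDescriptorFromFs(), which bypasses the cache and no longer offers a rewritePb overload, since there are no pre-protobuf descriptors left to convert. A sketch of that read path, assuming a root dir holding an existing table; names are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class FsReadSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = FSUtils.getRootDir(conf);
        // Reads the newest .tableinfo file for the table, bypassing the cache;
        // throws TableInfoMissingException when no descriptor file exists.
        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(
            fs, rootDir, TableName.valueOf("example_table"));
        System.out.println(htd.getNameAsString());
      }
    }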
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
index 8d0557f..f129f9d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
@@ -133,7 +133,7 @@ public class TestHColumnDescriptorDefaultVersions {
       admin.deleteTable(TABLE_NAME);
     }
   }
-  
+
   @Test
   public void testHColumnDescriptorCachedMaxVersions() throws Exception {
     HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
@@ -159,8 +159,8 @@ public class TestHColumnDescriptorDefaultVersions {
     // Verify descriptor from HDFS
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
-    hcds = td.getHTableDescriptor().getColumnFamilies();
+    HTableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
+    hcds = td.getColumnFamilies();
     verifyHColumnDescriptor(expected, hcds, tableName, families);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java
deleted file mode 100644
index ebc4713..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test setting values in the descriptor
- */
-@Category(SmallTests.class)
-public class TestTableDescriptor {
-   private static final Log LOG = LogFactory.getLog(TestTableDescriptor.class);
-
-  @Test
-  public void testPb() throws DeserializationException, IOException {
-    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
-    final int v = 123;
-    htd.setMaxFileSize(v);
-    htd.setDurability(Durability.ASYNC_WAL);
-    htd.setReadOnly(true);
-    htd.setRegionReplication(2);
-    TableDescriptor td = new TableDescriptor(htd);
-    byte[] bytes = td.toByteArray();
-    TableDescriptor deserializedTd = TableDescriptor.parseFrom(bytes);
-    assertEquals(td, deserializedTd);
-    assertEquals(td.getHTableDescriptor(), deserializedTd.getHTableDescriptor());
-  }
-}

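The deleted test exercised the wrapper's protobuf round-trip; the equivalent coverage now lives on HTableDescriptor itself via toByteArray()/parseFrom(). A sketch of that round-trip, with an illustrative table name standing in for the test's hbase:meta:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.exceptions.DeserializationException;

    public class PbRoundTripSketch {
      public static void main(String[] args) throws IOException, DeserializationException {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
        htd.setMaxFileSize(123);
        htd.setDurability(Durability.ASYNC_WAL);
        htd.setReadOnly(true);
        htd.setRegionReplication(2);

        // Serialize to the pb form written into .tableinfo files, then parse back.
        byte[] bytes = htd.toByteArray();
        HTableDescriptor roundTripped = HTableDescriptor.parseFrom(bytes);
        System.out.println("equal after round-trip: " + htd.equals(roundTripped));
      }
    }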
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 0ad74c2..35a3a79 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -26,7 +26,6 @@ import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
-import java.util.List;
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
@@ -47,10 +46,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaMockingUtil;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
@@ -59,12 +56,8 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
 import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
-import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
-import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
@@ -75,9 +68,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
-import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -93,7 +84,6 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.protobuf.RpcController;
-import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 
 @Category({MasterTests.class, SmallTests.class})
@@ -264,21 +254,15 @@ public class TestCatalogJanitor {
           return null;
         }
 
-        @Override public Map<String, TableDescriptor> getAllDescriptors() throws IOException {
+        @Override public Map<String, HTableDescriptor> getAllDescriptors() throws IOException {
           // noop
           return null;
         }
 
         @Override
         public HTableDescriptor get(TableName tablename)
-        throws IOException {
-          return createHTableDescriptor();
-        }
-
-        @Override
-        public TableDescriptor getDescriptor(TableName tablename)
             throws IOException {
-          return createTableDescriptor();
+          return createHTableDescriptor();
         }
 
         @Override
@@ -292,11 +276,6 @@ public class TestCatalogJanitor {
         }
 
         @Override
-        public void add(TableDescriptor htd) throws IOException {
-          // noop
-        }
-
-        @Override
         public void setCacheOn() throws IOException {
         }
 
@@ -867,10 +846,6 @@ public class TestCatalogJanitor {
     return htd;
   }
 
-  private TableDescriptor createTableDescriptor() {
-    return new TableDescriptor(createHTableDescriptor());
-  }
-
   private MultiResponse buildMultiResponse(MultiRequest req) {
     MultiResponse.Builder builder = MultiResponse.newBuilder();
     RegionActionResult.Builder regionActionResultBuilder =

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 68a22a8..2d054d8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Durability;
@@ -119,9 +118,7 @@ public class MasterProcedureTestingUtility {
     assertEquals(regions.length, countMetaRegions(master, tableName));
 
     // check htd
-    TableDescriptor tableDesc = master.getTableDescriptors().getDescriptor(tableName);
-    assertTrue("table descriptor not found", tableDesc != null);
-    HTableDescriptor htd = tableDesc.getHTableDescriptor();
+    HTableDescriptor htd = master.getTableDescriptors().get(tableName);
     assertTrue("table descriptor not found", htd != null);
     for (int i = 0; i < family.length; ++i) {
       assertTrue("family not found " + family[i], htd.getFamily(Bytes.toBytes(family[i])) != null);
@@ -142,7 +139,7 @@ public class MasterProcedureTestingUtility {
 
     // check htd
     assertTrue("found htd of deleted table",
-      master.getTableDescriptors().getDescriptor(tableName) == null);
+      master.getTableDescriptors().get(tableName) == null);
   }
 
   private static int countMetaRegions(final HMaster master, final TableName tableName)
@@ -368,18 +365,18 @@ public class MasterProcedureTestingUtility {
 
   public static void validateColumnFamilyAddition(final HMaster master, final TableName tableName,
       final String family) throws IOException {
-    TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName);
+    HTableDescriptor htd = master.getTableDescriptors().get(tableName);
     assertTrue(htd != null);
 
-    assertTrue(htd.getHTableDescriptor().hasFamily(family.getBytes()));
+    assertTrue(htd.hasFamily(family.getBytes()));
   }
 
   public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName,
       final String family) throws IOException {
     // verify htd
-    TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName);
+    HTableDescriptor htd = master.getTableDescriptors().get(tableName);
     assertTrue(htd != null);
-    assertFalse(htd.getHTableDescriptor().hasFamily(family.getBytes()));
+    assertFalse(htd.hasFamily(family.getBytes()));
 
     // verify fs
     final FileSystem fs = master.getMasterFileSystem().getFileSystem();
@@ -393,10 +390,10 @@ public class MasterProcedureTestingUtility {
   public static void validateColumnFamilyModification(final HMaster master,
       final TableName tableName, final String family, HColumnDescriptor columnDescriptor)
       throws IOException {
-    TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName);
+    HTableDescriptor htd = master.getTableDescriptors().get(tableName);
     assertTrue(htd != null);
 
-    HColumnDescriptor hcfd = htd.getHTableDescriptor().getFamily(family.getBytes());
+    HColumnDescriptor hcfd = htd.getFamily(family.getBytes());
     assertTrue(hcfd.equals(columnDescriptor));
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
index 129ef4f..417987b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -275,9 +274,9 @@ public class TestTableDescriptorModificationFromClient {
     // Verify descriptor from HDFS
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    TableDescriptor td =
+    HTableDescriptor td =
         FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
-    verifyTableDescriptor(td.getHTableDescriptor(), tableName, families);
+    verifyTableDescriptor(td, tableName, families);
   }
 
   private void verifyTableDescriptor(final HTableDescriptor htd,

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 0c4029d..1614462 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueTestUtil;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -47,6 +46,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.junit.Before;
 import org.junit.Rule;
@@ -431,6 +431,7 @@ public class TestDefaultMemStore {
       this.startSeqNum = startSeqNum;
     }
 
+    @Override
     public void run() {
       try {
         internalRun();
@@ -961,7 +962,7 @@ public class TestDefaultMemStore {
     edge.setCurrentTimeMillis(1234);
     WALFactory wFactory = new WALFactory(conf, null, "1234");
     HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
-        conf, TableDescriptor.metaTableDescriptor(conf),
+        conf, FSTableDescriptors.createMetaTableDescriptor(conf),
         wFactory.getMetaWAL(HRegionInfo.FIRST_META_REGIONINFO.
             getEncodedNameAsBytes()));
     HRegionInfo hri = new HRegionInfo(TableName.valueOf("testShouldFlushMeta"),

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index f7182ba..3307b73 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TestMobSnapshotCloneIndependence;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -64,7 +63,6 @@ import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index b816200..38afc3b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -498,8 +497,7 @@ public final class SnapshotTestingUtils {
         this.tableRegions = tableRegions;
         this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
         new FSTableDescriptors(conf)
-          .createTableDescriptorForTableDirectory(snapshotDir,
-              new TableDescriptor(htd), false);
+          .createTableDescriptorForTableDirectory(snapshotDir, htd, false);
       }
 
       public HTableDescriptor getTableDescriptor() {
@@ -679,8 +677,7 @@ public final class SnapshotTestingUtils {
     private RegionData[] createTable(final HTableDescriptor htd, final int nregions)
         throws IOException {
       Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
-      new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir,
-          new TableDescriptor(htd), false);
+      new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false);
 
       assertTrue(nregions % 2 == 0);
       RegionData[] regions = new RegionData[nregions];

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index 09bd895..bdc09fe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -75,15 +74,14 @@ public class TestFSTableDescriptors {
   public void testCreateAndUpdate() throws IOException {
     Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate"));
-    TableDescriptor td = new TableDescriptor(htd);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
-    assertTrue(fstd.createTableDescriptor(td));
-    assertFalse(fstd.createTableDescriptor(td));
+    assertTrue(fstd.createTableDescriptor(htd));
+    assertFalse(fstd.createTableDescriptor(htd));
     FileStatus [] statuses = fs.listStatus(testdir);
     assertTrue("statuses.length="+statuses.length, statuses.length == 1);
     for (int i = 0; i < 10; i++) {
-      fstd.updateTableDescriptor(td);
+      fstd.updateTableDescriptor(htd);
     }
     statuses = fs.listStatus(testdir);
     assertTrue(statuses.length == 1);
@@ -97,29 +95,27 @@ public class TestFSTableDescriptors {
     Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
     HTableDescriptor htd = new HTableDescriptor(
         TableName.valueOf("testSequenceidAdvancesOnTableInfo"));
-    TableDescriptor td = new TableDescriptor(htd);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
-    Path p0 = fstd.updateTableDescriptor(td);
+    Path p0 = fstd.updateTableDescriptor(htd);
     int i0 = FSTableDescriptors.getTableInfoSequenceId(p0);
-    Path p1 = fstd.updateTableDescriptor(td);
+    Path p1 = fstd.updateTableDescriptor(htd);
     // Assert we cleaned up the old file.
     assertTrue(!fs.exists(p0));
     int i1 = FSTableDescriptors.getTableInfoSequenceId(p1);
     assertTrue(i1 == i0 + 1);
-    Path p2 = fstd.updateTableDescriptor(td);
+    Path p2 = fstd.updateTableDescriptor(htd);
     // Assert we cleaned up the old file.
     assertTrue(!fs.exists(p1));
     int i2 = FSTableDescriptors.getTableInfoSequenceId(p2);
     assertTrue(i2 == i1 + 1);
-    td = new TableDescriptor(htd);
-    Path p3 = fstd.updateTableDescriptor(td);
+    Path p3 = fstd.updateTableDescriptor(htd);
     // Assert we cleaned up the old file.
     assertTrue(!fs.exists(p2));
     int i3 = FSTableDescriptors.getTableInfoSequenceId(p3);
     assertTrue(i3 == i2 + 1);
-    TableDescriptor descriptor = fstd.getDescriptor(htd.getTableName());
-    assertEquals(descriptor, td);
+    HTableDescriptor descriptor = fstd.get(htd.getTableName());
+    assertEquals(descriptor, htd);
   }
 
   @Test
@@ -171,13 +167,12 @@ public class TestFSTableDescriptors {
     final String name = "testReadingHTDFromFS";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
-    TableDescriptor td = new TableDescriptor(htd);
     Path rootdir = UTIL.getDataTestDir(name);
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
-    fstd.createTableDescriptor(td);
-    TableDescriptor td2 =
+    fstd.createTableDescriptor(htd);
+    HTableDescriptor td2 =
       FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
-    assertTrue(td.equals(td2));
+    assertTrue(htd.equals(td2));
   }
 
   @Test public void testReadingOldHTDFromFS() throws IOException, DeserializationException {
@@ -186,22 +181,21 @@ public class TestFSTableDescriptors {
     Path rootdir = UTIL.getDataTestDir(name);
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
-    TableDescriptor td = new TableDescriptor(htd);
-    Path descriptorFile = fstd.updateTableDescriptor(td);
+    Path descriptorFile = fstd.updateTableDescriptor(htd);
     try (FSDataOutputStream out = fs.create(descriptorFile, true)) {
       out.write(htd.toByteArray());
     }
     FSTableDescriptors fstd2 = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
-    TableDescriptor td2 = fstd2.getDescriptor(htd.getTableName());
-    assertEquals(td, td2);
+    HTableDescriptor td2 = fstd2.get(htd.getTableName());
+    assertEquals(htd, td2);
     FileStatus descriptorFile2 =
         FSTableDescriptors.getTableInfoPath(fs, fstd2.getTableDir(htd.getTableName()));
-    byte[] buffer = td.toByteArray();
+    byte[] buffer = htd.toByteArray();
     try (FSDataInputStream in = fs.open(descriptorFile2.getPath())) {
       in.readFully(buffer);
     }
-    TableDescriptor td3 = TableDescriptor.parseFrom(buffer);
-    assertEquals(td, td3);
+    HTableDescriptor td3 = HTableDescriptor.parseFrom(buffer);
+    assertEquals(htd, td3);
   }
 
   @Test public void testHTableDescriptors()
@@ -221,7 +215,7 @@ public class TestFSTableDescriptors {
     final int count = 10;
     // Write out table infos.
     for (int i = 0; i < count; i++) {
-      TableDescriptor htd = new TableDescriptor(
+      HTableDescriptor htd = new HTableDescriptor(
           new HTableDescriptor(TableName.valueOf(name + i)));
       htds.createTableDescriptor(htd);
     }
@@ -236,7 +230,7 @@ public class TestFSTableDescriptors {
     for (int i = 0; i < count; i++) {
       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
       htd.addFamily(new HColumnDescriptor("" + i));
-      htds.updateTableDescriptor(new TableDescriptor(htd));
+      htds.updateTableDescriptor(htd);
     }
     // Wait a while so mod time we write is for sure different.
     Thread.sleep(100);
@@ -274,7 +268,7 @@ public class TestFSTableDescriptors {
     for (int i = 0; i < count; i++) {
       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
       htd.addFamily(new HColumnDescriptor("" + i));
-      htds.updateTableDescriptor(new TableDescriptor(htd));
+      htds.updateTableDescriptor(htd);
     }
     for (int i = 0; i < count; i++) {
       assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i)));
@@ -300,7 +294,7 @@ public class TestFSTableDescriptors {
       htds.createTableDescriptor(htd);
     }
     // add hbase:meta
-    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
+    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
     htds.createTableDescriptor(htd);
 
     assertEquals("getAll() didn't return all TableDescriptors, expected: " +
@@ -335,7 +329,7 @@ public class TestFSTableDescriptors {
     assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
 
     // add a new entry for hbase:meta
-    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
+    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
     nonchtds.createTableDescriptor(htd);
 
     // hbase:meta will only increase the cachehit by 1
@@ -419,19 +413,18 @@ public class TestFSTableDescriptors {
     Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(
         "testCreateTableDescriptorUpdatesIfThereExistsAlready"));
-    TableDescriptor td = new TableDescriptor(htd);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
-    assertTrue(fstd.createTableDescriptor(td));
-    assertFalse(fstd.createTableDescriptor(td));
+    assertTrue(fstd.createTableDescriptor(htd));
+    assertFalse(fstd.createTableDescriptor(htd));
     htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
-    assertTrue(fstd.createTableDescriptor(td)); //this will re-create
+    assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
     Path tableDir = fstd.getTableDir(htd.getTableName());
     Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
     FileStatus[] statuses = fs.listStatus(tmpTableDir);
     assertTrue(statuses.length == 0);
 
-    assertEquals(td, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
+    assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
   }
 
   private static class FSTableDescriptorsTest extends FSTableDescriptors {

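These tests drive the write path with HTableDescriptor directly. A sketch of the create-then-update cycle they cover, placed in the org.apache.hadoop.hbase.util package so the package-private updateTableDescriptor() is visible; the scratch directory and class name are illustrative:

    package org.apache.hadoop.hbase.util;

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;

    public class WritePathSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = new Path("/tmp/fstd-sketch");  // scratch root dir
        FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootDir);

        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
        // First create writes a .tableinfo file; a second create is refused.
        fstd.createTableDescriptor(htd);
        // Each update writes a new .tableinfo with the sequence id advanced
        // by one and removes the file it replaces.
        fstd.updateTableDescriptor(htd);
      }
    }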
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
index cb51fb2..1924c9e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -67,6 +66,7 @@ public class TestMergeTool extends HBaseTestCase {
   private MiniDFSCluster dfsCluster = null;
   private WALFactory wals;
 
+  @Override
   @Before
   public void setUp() throws Exception {
     // Set the timeout down else this test will take a while to complete.
@@ -149,8 +149,7 @@ public class TestMergeTool extends HBaseTestCase {
     try {
       // Create meta region
       createMetaRegion();
-      new FSTableDescriptors(this.conf, this.fs, testDir).createTableDescriptor(
-          new TableDescriptor(this.desc));
+      new FSTableDescriptors(this.conf, this.fs, testDir).createTableDescriptor(this.desc);
       /*
        * Create the regions we will merge
        */
@@ -178,6 +177,7 @@ public class TestMergeTool extends HBaseTestCase {
     }
   }
 
+  @Override
   @After
   public void tearDown() throws Exception {
     super.tearDown();

