hive-commits mailing list archives

From ga...@apache.org
Subject [12/44] hive git commit: HIVE-18755 Modifications to the metastore for catalogs (Alan Gates, reviewed by Thejas Nair)
Date Sat, 31 Mar 2018 01:25:31 GMT
http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
index cf92eda..89b4006 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore.cache;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -30,11 +31,15 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.StatObjectConverter;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.Catalog;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -50,29 +55,38 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+
 public class SharedCache {
   private static ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock(true);
+  private boolean isCatalogCachePrewarmed = false;
+  private Map<String, Catalog> catalogCache = new TreeMap<>();
+  private HashSet<String> catalogsDeletedDuringPrewarm = new HashSet<>();
+  private AtomicBoolean isCatalogCacheDirty = new AtomicBoolean(false);
+
   // For caching Database objects. Key is database name
-  private Map<String, Database> databaseCache = new ConcurrentHashMap<String, Database>();
+  private Map<String, Database> databaseCache = new TreeMap<>();
   private boolean isDatabaseCachePrewarmed = false;
-  private HashSet<String> databasesDeletedDuringPrewarm = new HashSet<String>();
+  private HashSet<String> databasesDeletedDuringPrewarm = new HashSet<>();
   private AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false);
+
   // For caching TableWrapper objects. Key is aggregate of database name and table name
-  private Map<String, TableWrapper> tableCache = new ConcurrentHashMap<String, TableWrapper>();
+  private Map<String, TableWrapper> tableCache = new TreeMap<>();
   private boolean isTableCachePrewarmed = false;
-  private HashSet<String> tablesDeletedDuringPrewarm = new HashSet<String>();
+  private HashSet<String> tablesDeletedDuringPrewarm = new HashSet<>();
   private AtomicBoolean isTableCacheDirty = new AtomicBoolean(false);
-  private Map<ByteArrayWrapper, StorageDescriptorWrapper> sdCache = new ConcurrentHashMap<>();
+  private Map<ByteArrayWrapper, StorageDescriptorWrapper> sdCache = new HashMap<>();
+  private Configuration conf;
   private static MessageDigest md;
   static final private Logger LOG = LoggerFactory.getLogger(SharedCache.class.getName());
   private AtomicLong cacheUpdateCount = new AtomicLong(0);
 
-  static enum StatsType {
+  enum StatsType {
     ALL(0), ALLBUTDEFAULT(1);
 
     private final int position;
 
-    private StatsType(int position) {
+    StatsType(int position) {
       this.position = position;
     }
 
@@ -155,6 +169,10 @@ public class SharedCache {
       this.parameters = parameters;
     }
 
+    boolean sameDatabase(String catName, String dbName) {
+      return catName.equals(t.getCatName()) && dbName.equals(t.getDbName());
+    }
+
     void cachePartition(Partition part, SharedCache sharedCache) {
       try {
         tableLock.writeLock().lock();
@@ -669,12 +687,102 @@ public class SharedCache {
     }
   }
 
-  public Database getDatabaseFromCache(String name) {
+  public void populateCatalogsInCache(Collection<Catalog> catalogs) {
+    for (Catalog cat : catalogs) {
+      Catalog catCopy = cat.deepCopy();
+      // ObjectStore also stores the catalog name in lowercase
+      catCopy.setName(catCopy.getName().toLowerCase());
+      try {
+        cacheLock.writeLock().lock();
+        // Since we allow write operations on cache while prewarm is happening:
+        // 1. Don't add catalogs that were deleted while we were preparing the list for prewarm
+        // 2. Skip overwriting an existing catalog object
+        // (which is present because it was added after prewarm started)
+        if (catalogsDeletedDuringPrewarm.contains(catCopy.getName())) {
+          continue;
+        }
+        catalogCache.putIfAbsent(catCopy.getName(), catCopy);
+        catalogsDeletedDuringPrewarm.clear();
+        isCatalogCachePrewarmed = true;
+      } finally {
+        cacheLock.writeLock().unlock();
+      }
+    }
+  }
+
+  public Catalog getCatalogFromCache(String name) {
+    Catalog cat = null;
+    try {
+      cacheLock.readLock().lock();
+      if (catalogCache.get(name) != null) {
+        cat = catalogCache.get(name).deepCopy();
+      }
+    } finally {
+      cacheLock.readLock().unlock();
+    }
+    return cat;
+  }
+
+  public void addCatalogToCache(Catalog cat) {
+    try {
+      cacheLock.writeLock().lock();
+      Catalog catCopy = cat.deepCopy();
+      // ObjectStore also stores the catalog name in lowercase
+      catCopy.setName(catCopy.getName().toLowerCase());
+      catalogCache.put(catCopy.getName(), catCopy);
+      isCatalogCacheDirty.set(true);
+    } finally {
+      cacheLock.writeLock().unlock();
+    }
+  }
+
+  public void alterCatalogInCache(String catName, Catalog newCat) {
+    try {
+      cacheLock.writeLock().lock();
+      removeCatalogFromCache(catName);
+      addCatalogToCache(newCat.deepCopy());
+    } finally {
+      cacheLock.writeLock().unlock();
+    }
+  }
+
+  public void removeCatalogFromCache(String name) {
+    name = normalizeIdentifier(name);
+    try {
+      cacheLock.writeLock().lock();
+      // If the catalog cache is not yet prewarmed, add this to a set which the prewarm thread
+      // can check so that the prewarm thread does not add it back
+      if (!isCatalogCachePrewarmed) {
+        catalogsDeletedDuringPrewarm.add(name);
+      }
+      if (catalogCache.remove(name) != null) {
+        isCatalogCacheDirty.set(true);
+      }
+    } finally {
+      cacheLock.writeLock().unlock();
+    }
+  }
+
+  public List<String> listCachedCatalogs() {
+    try {
+      cacheLock.readLock().lock();
+      return new ArrayList<>(catalogCache.keySet());
+    } finally {
+      cacheLock.readLock().unlock();
+    }
+  }
+
+  public boolean isCatalogCachePrewarmed() {
+    return isCatalogCachePrewarmed;
+  }
+
+  public Database getDatabaseFromCache(String catName, String name) {
     Database db = null;
     try {
       cacheLock.readLock().lock();
-      if (databaseCache.get(name) != null) {
-        db = databaseCache.get(name).deepCopy();
+      String key = CacheUtils.buildDbKey(catName, name);
+      if (databaseCache.get(key) != null) {
+        db = databaseCache.get(key).deepCopy();
       }
     } finally {
       cacheLock.readLock().unlock();
@@ -693,10 +801,11 @@ public class SharedCache {
         // 1. Don't add databases that were deleted while we were preparing list for prewarm
        // 2. Skip overwriting existing db object
         // (which is present because it was added after prewarm started)
-        if (databasesDeletedDuringPrewarm.contains(dbCopy.getName().toLowerCase())) {
+        String key = CacheUtils.buildDbKey(dbCopy.getCatalogName().toLowerCase(), dbCopy.getName().toLowerCase());
+        if (databasesDeletedDuringPrewarm.contains(key)) {
           continue;
         }
-        databaseCache.putIfAbsent(StringUtils.normalizeIdentifier(dbCopy.getName()), dbCopy);
+        databaseCache.putIfAbsent(key, dbCopy);
         databasesDeletedDuringPrewarm.clear();
         isDatabaseCachePrewarmed = true;
       } finally {
@@ -715,22 +824,24 @@ public class SharedCache {
       Database dbCopy = db.deepCopy();
       // ObjectStore also stores db name in lowercase
       dbCopy.setName(dbCopy.getName().toLowerCase());
-      databaseCache.put(StringUtils.normalizeIdentifier(dbCopy.getName()), dbCopy);
+      dbCopy.setCatalogName(dbCopy.getCatalogName().toLowerCase());
+      databaseCache.put(CacheUtils.buildDbKey(dbCopy.getCatalogName(), dbCopy.getName()), dbCopy);
       isDatabaseCacheDirty.set(true);
     } finally {
       cacheLock.writeLock().unlock();
     }
   }
 
-  public void removeDatabaseFromCache(String dbName) {
+  public void removeDatabaseFromCache(String catName, String dbName) {
     try {
       cacheLock.writeLock().lock();
       // If db cache is not yet prewarmed, add this to a set which the prewarm thread can check
       // so that the prewarm thread does not add it back
+      String key = CacheUtils.buildDbKey(catName, dbName);
       if (!isDatabaseCachePrewarmed) {
-        databasesDeletedDuringPrewarm.add(dbName.toLowerCase());
+        databasesDeletedDuringPrewarm.add(key);
       }
-      if (databaseCache.remove(dbName) != null) {
+      if (databaseCache.remove(key) != null) {
         isDatabaseCacheDirty.set(true);
       }
     } finally {
@@ -738,25 +849,31 @@ public class SharedCache {
     }
   }
 
-  public List<String> listCachedDatabases() {
+  public List<String> listCachedDatabases(String catName) {
     List<String> results = new ArrayList<>();
     try {
       cacheLock.readLock().lock();
-      results.addAll(databaseCache.keySet());
+      for (String pair : databaseCache.keySet()) {
+        String[] n = CacheUtils.splitDbName(pair);
+        if (catName.equals(n[0])) results.add(n[1]);
+      }
     } finally {
       cacheLock.readLock().unlock();
     }
     return results;
   }
 
-  public List<String> listCachedDatabases(String pattern) {
+  public List<String> listCachedDatabases(String catName, String pattern) {
     List<String> results = new ArrayList<>();
     try {
       cacheLock.readLock().lock();
-      for (String dbName : databaseCache.keySet()) {
-        dbName = StringUtils.normalizeIdentifier(dbName);
-        if (CacheUtils.matches(dbName, pattern)) {
-          results.add(dbName);
+      for (String pair : databaseCache.keySet()) {
+        String[] n = CacheUtils.splitDbName(pair);
+        if (catName.equals(n[0])) {
+          n[1] = StringUtils.normalizeIdentifier(n[1]);
+          if (CacheUtils.matches(n[1], pattern)) {
+            results.add(n[1]);
+          }
         }
       }
     } finally {
@@ -768,13 +885,11 @@ public class SharedCache {
   /**
    * Replaces the old db object with the new one.
    * This will add the new database to cache if it does not exist.
-   * @param dbName
-   * @param newDb
    */
-  public void alterDatabaseInCache(String dbName, Database newDb) {
+  public void alterDatabaseInCache(String catName, String dbName, Database newDb) {
     try {
       cacheLock.writeLock().lock();
-      removeDatabaseFromCache(dbName);
+      removeDatabaseFromCache(catName, dbName);
       addDatabaseToCache(newDb.deepCopy());
       isDatabaseCacheDirty.set(true);
     } finally {
@@ -810,14 +925,15 @@ public class SharedCache {
   public void populateTableInCache(Table table, ColumnStatistics tableColStats,
       List<Partition> partitions, List<ColumnStatistics> partitionColStats,
       AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) {
+    String catName = StringUtils.normalizeIdentifier(table.getCatName());
     String dbName = StringUtils.normalizeIdentifier(table.getDbName());
     String tableName = StringUtils.normalizeIdentifier(table.getTableName());
     // Since we allow write operations on cache while prewarm is happening:
     // 1. Don't add tables that were deleted while we were preparing list for prewarm
-    if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableCacheKey(dbName, tableName))) {
+    if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableKey(catName, dbName, tableName))) {
       return;
     }
-    TableWrapper tblWrapper = createTableWrapper(dbName, tableName, table);
+    TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table);
     if (!table.isSetPartitionKeys() && (tableColStats != null)) {
       tblWrapper.updateTableColStats(tableColStats.getStatsObj());
     } else {
@@ -843,12 +959,14 @@ public class SharedCache {
       cacheLock.writeLock().lock();
      // 2. Skip overwriting existing table object
       // (which is present because it was added after prewarm started)
-      tableCache.putIfAbsent(CacheUtils.buildTableCacheKey(dbName, tableName), tblWrapper);
+      tableCache.putIfAbsent(CacheUtils.buildTableKey(catName, dbName, tableName), tblWrapper);
     } finally {
       cacheLock.writeLock().unlock();
     }
   }
 
+
+
   public void completeTableCachePrewarm() {
     try {
       cacheLock.writeLock().lock();
@@ -859,11 +977,11 @@ public class SharedCache {
     }
   }
 
-  public Table getTableFromCache(String dbName, String tableName) {
+  public Table getTableFromCache(String catName, String dbName, String tableName) {
     Table t = null;
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName));
       if (tblWrapper != null) {
         t = CacheUtils.assemble(tblWrapper, this);
       }
@@ -873,11 +991,11 @@ public class SharedCache {
     return t;
   }
 
-  public TableWrapper addTableToCache(String dbName, String tblName, Table tbl) {
+  public TableWrapper addTableToCache(String catName, String dbName, String tblName, Table tbl) {
     try {
       cacheLock.writeLock().lock();
-      TableWrapper wrapper = createTableWrapper(dbName, tblName, tbl);
-      tableCache.put(CacheUtils.buildTableCacheKey(dbName, tblName), wrapper);
+      TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl);
+      tableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), wrapper);
       isTableCacheDirty.set(true);
       return wrapper;
     } finally {
@@ -885,14 +1003,15 @@ public class SharedCache {
     }
   }
 
-  private TableWrapper createTableWrapper(String dbName, String tblName, Table tbl) {
+  private TableWrapper createTableWrapper(String catName, String dbName, String tblName, Table tbl) {
     TableWrapper wrapper;
     Table tblCopy = tbl.deepCopy();
-    tblCopy.setDbName(StringUtils.normalizeIdentifier(dbName));
-    tblCopy.setTableName(StringUtils.normalizeIdentifier(tblName));
+    tblCopy.setCatName(normalizeIdentifier(catName));
+    tblCopy.setDbName(normalizeIdentifier(dbName));
+    tblCopy.setTableName(normalizeIdentifier(tblName));
     if (tblCopy.getPartitionKeys() != null) {
       for (FieldSchema fs : tblCopy.getPartitionKeys()) {
-        fs.setName(StringUtils.normalizeIdentifier(fs.getName()));
+        fs.setName(normalizeIdentifier(fs.getName()));
       }
     }
     if (tbl.getSd() != null) {
@@ -907,15 +1026,16 @@ public class SharedCache {
     return wrapper;
   }
 
-  public void removeTableFromCache(String dbName, String tblName) {
+
+  public void removeTableFromCache(String catName, String dbName, String tblName) {
     try {
       cacheLock.writeLock().lock();
       // If table cache is not yet prewarmed, add this to a set which the prewarm thread can check
       // so that the prewarm thread does not add it back
       if (!isTableCachePrewarmed) {
-        tablesDeletedDuringPrewarm.add(CacheUtils.buildTableCacheKey(dbName, tblName));
+        tablesDeletedDuringPrewarm.add(CacheUtils.buildTableKey(catName, dbName, tblName));
       }
-      TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName));
       byte[] sdHash = tblWrapper.getSdHash();
       if (sdHash != null) {
         decrSd(sdHash);
@@ -926,15 +1046,15 @@ public class SharedCache {
     }
   }
 
-  public void alterTableInCache(String dbName, String tblName, Table newTable) {
+  public void alterTableInCache(String catName, String dbName, String tblName, Table newTable) {
     try {
       cacheLock.writeLock().lock();
-      TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         tblWrapper.updateTableObj(newTable, this);
         String newDbName = StringUtils.normalizeIdentifier(newTable.getDbName());
         String newTblName = StringUtils.normalizeIdentifier(newTable.getTableName());
-        tableCache.put(CacheUtils.buildTableCacheKey(newDbName, newTblName), tblWrapper);
+        tableCache.put(CacheUtils.buildTableKey(catName, newDbName, newTblName), tblWrapper);
         isTableCacheDirty.set(true);
       }
     } finally {
@@ -942,12 +1062,12 @@ public class SharedCache {
     }
   }
 
-  public List<Table> listCachedTables(String dbName) {
+  public List<Table> listCachedTables(String catName, String dbName) {
     List<Table> tables = new ArrayList<>();
     try {
       cacheLock.readLock().lock();
       for (TableWrapper wrapper : tableCache.values()) {
-        if (wrapper.getTable().getDbName().equals(dbName)) {
+        if (wrapper.sameDatabase(catName, dbName)) {
           tables.add(CacheUtils.assemble(wrapper, this));
         }
       }
@@ -957,12 +1077,12 @@ public class SharedCache {
     return tables;
   }
 
-  public List<String> listCachedTableNames(String dbName) {
+  public List<String> listCachedTableNames(String catName, String dbName) {
     List<String> tableNames = new ArrayList<>();
     try {
       cacheLock.readLock().lock();
       for (TableWrapper wrapper : tableCache.values()) {
-        if (wrapper.getTable().getDbName().equals(dbName)) {
+        if (wrapper.sameDatabase(catName, dbName)) {
           tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName()));
         }
       }
@@ -972,13 +1092,13 @@ public class SharedCache {
     return tableNames;
   }
 
-  public List<String> listCachedTableNames(String dbName, String pattern, short maxTables) {
-    List<String> tableNames = new ArrayList<String>();
+  public List<String> listCachedTableNames(String catName, String dbName, String pattern, short maxTables) {
+    List<String> tableNames = new ArrayList<>();
     try {
       cacheLock.readLock().lock();
       int count = 0;
       for (TableWrapper wrapper : tableCache.values()) {
-        if ((wrapper.getTable().getDbName().equals(dbName))
+        if (wrapper.sameDatabase(catName, dbName)
             && CacheUtils.matches(wrapper.getTable().getTableName(), pattern)
             && (maxTables == -1 || count < maxTables)) {
           tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName()));
@@ -991,12 +1111,12 @@ public class SharedCache {
     return tableNames;
   }
 
-  public List<String> listCachedTableNames(String dbName, String pattern, TableType tableType) {
-    List<String> tableNames = new ArrayList<String>();
+  public List<String> listCachedTableNames(String catName, String dbName, String pattern, TableType tableType) {
+    List<String> tableNames = new ArrayList<>();
     try {
       cacheLock.readLock().lock();
       for (TableWrapper wrapper : tableCache.values()) {
-        if ((wrapper.getTable().getDbName().equals(dbName))
+        if (wrapper.sameDatabase(catName, dbName)
             && CacheUtils.matches(wrapper.getTable().getTableName(), pattern)
             && wrapper.getTable().getTableType().equals(tableType.toString())) {
           tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName()));
@@ -1008,23 +1128,23 @@ public class SharedCache {
     return tableNames;
   }
 
-  public void refreshTablesInCache(String dbName, List<Table> tables) {
+  public void refreshTablesInCache(String catName, String dbName, List<Table> tables) {
     try {
       cacheLock.writeLock().lock();
       if (isTableCacheDirty.compareAndSet(true, false)) {
         LOG.debug("Skipping table cache update; the table list we have is dirty.");
         return;
       }
-      Map<String, TableWrapper> newTableCache = new HashMap<String, TableWrapper>();
+      Map<String, TableWrapper> newTableCache = new HashMap<>();
       for (Table tbl : tables) {
         String tblName = StringUtils.normalizeIdentifier(tbl.getTableName());
-        TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+        TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
         if (tblWrapper != null) {
           tblWrapper.updateTableObj(tbl, this);
         } else {
-          tblWrapper = createTableWrapper(dbName, tblName, tbl);
+          tblWrapper = createTableWrapper(catName, dbName, tblName, tbl);
         }
-        newTableCache.put(CacheUtils.buildTableCacheKey(dbName, tblName), tblWrapper);
+        newTableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper);
       }
       tableCache.clear();
       tableCache = newTableCache;
@@ -1033,12 +1153,12 @@ public class SharedCache {
     }
   }
 
-  public List<ColumnStatisticsObj> getTableColStatsFromCache(String dbName, String tblName,
-      List<String> colNames) {
-    List<ColumnStatisticsObj> colStatObjs = new ArrayList<ColumnStatisticsObj>();
+  public List<ColumnStatisticsObj> getTableColStatsFromCache(
+      String catName, String dbName, String tblName, List<String> colNames) {
+    List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         colStatObjs = tblWrapper.getCachedTableColStats(colNames);
       }
@@ -1048,10 +1168,10 @@ public class SharedCache {
     return colStatObjs;
   }
 
-  public void removeTableColStatsFromCache(String dbName, String tblName, String colName) {
+  public void removeTableColStatsFromCache(String catName, String dbName, String tblName, String colName) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         tblWrapper.removeTableColStats(colName);
       }
@@ -1060,11 +1180,11 @@ public class SharedCache {
     }
   }
 
-  public void updateTableColStatsInCache(String dbName, String tableName,
-      List<ColumnStatisticsObj> colStatsForTable) {
+  public void updateTableColStatsInCache(String catName, String dbName, String tableName,
+                                         List<ColumnStatisticsObj> colStatsForTable) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName));
       if (tblWrapper != null) {
         tblWrapper.updateTableColStats(colStatsForTable);
       }
@@ -1073,11 +1193,11 @@ public class SharedCache {
     }
   }
 
-  public void refreshTableColStatsInCache(String dbName, String tableName,
-      List<ColumnStatisticsObj> colStatsForTable) {
+  public void refreshTableColStatsInCache(String catName, String dbName, String tableName,
+                                          List<ColumnStatisticsObj> colStatsForTable) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName));
       if (tblWrapper != null) {
         tblWrapper.refreshTableColStats(colStatsForTable);
       }
@@ -1095,18 +1215,19 @@ public class SharedCache {
     }
   }
 
-  public List<TableMeta> getTableMeta(String dbNames, String tableNames,
-      List<String> tableTypes) {
+  public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
+                                      List<String> tableTypes) {
     List<TableMeta> tableMetas = new ArrayList<>();
     try {
       cacheLock.readLock().lock();
-      for (String dbName : listCachedDatabases()) {
+      for (String dbName : listCachedDatabases(catName)) {
         if (CacheUtils.matches(dbName, dbNames)) {
-          for (Table table : listCachedTables(dbName)) {
+          for (Table table : listCachedTables(catName, dbName)) {
             if (CacheUtils.matches(table.getTableName(), tableNames)) {
               if (tableTypes == null || tableTypes.contains(table.getTableType())) {
                 TableMeta metaData =
                     new TableMeta(dbName, table.getTableName(), table.getTableType());
+                metaData.setCatName(catName);
                 metaData.setComments(table.getParameters().get("comment"));
                 tableMetas.add(metaData);
               }
@@ -1120,10 +1241,10 @@ public class SharedCache {
     return tableMetas;
   }
 
-  public void addPartitionToCache(String dbName, String tblName, Partition part) {
+  public void addPartitionToCache(String catName, String dbName, String tblName, Partition part) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         tblWrapper.cachePartition(part, this);
       }
@@ -1132,10 +1253,10 @@ public class SharedCache {
     }
   }
 
-  public void addPartitionsToCache(String dbName, String tblName, List<Partition> parts) {
+  public void addPartitionsToCache(String catName, String dbName, String tblName, List<Partition> parts) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         tblWrapper.cachePartitions(parts, this);
       }
@@ -1144,12 +1265,12 @@ public class SharedCache {
     }
   }
 
-  public Partition getPartitionFromCache(String dbName, String tblName,
-      List<String> partVals) {
+  public Partition getPartitionFromCache(String catName, String dbName, String tblName,
+                                         List<String> partVals) {
     Partition part = null;
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         part = tblWrapper.getPartition(partVals, this);
       }
@@ -1159,11 +1280,11 @@ public class SharedCache {
     return part;
   }
 
-  public boolean existPartitionFromCache(String dbName, String tblName, List<String> partVals) {
+  public boolean existPartitionFromCache(String catName, String dbName, String tblName, List<String> partVals) {
     boolean existsPart = false;
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         existsPart = tblWrapper.containsPartition(partVals);
       }
@@ -1173,12 +1294,12 @@ public class SharedCache {
     return existsPart;
   }
 
-  public Partition removePartitionFromCache(String dbName, String tblName,
-      List<String> partVals) {
+  public Partition removePartitionFromCache(String catName, String dbName, String tblName,
+                                            List<String> partVals) {
     Partition part = null;
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         part = tblWrapper.removePartition(partVals, this);
       }
@@ -1188,11 +1309,11 @@ public class SharedCache {
     return part;
   }
 
-  public void removePartitionsFromCache(String dbName, String tblName,
-      List<List<String>> partVals) {
+  public void removePartitionsFromCache(String catName, String dbName, String tblName,
+                                        List<List<String>> partVals) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         tblWrapper.removePartitions(partVals, this);
       }
@@ -1201,11 +1322,11 @@ public class SharedCache {
     }
   }
 
-  public List<Partition> listCachedPartitions(String dbName, String tblName, int max) {
+  public List<Partition> listCachedPartitions(String catName, String dbName, String tblName, int max) {
     List<Partition> parts = new ArrayList<Partition>();
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         parts = tblWrapper.listPartitions(max, this);
       }
@@ -1215,11 +1336,11 @@ public class SharedCache {
     return parts;
   }
 
-  public void alterPartitionInCache(String dbName, String tblName, List<String> partVals,
-      Partition newPart) {
+  public void alterPartitionInCache(String catName, String dbName, String tblName, List<String> partVals,
+                                    Partition newPart) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         tblWrapper.alterPartition(partVals, newPart, this);
       }
@@ -1228,11 +1349,11 @@ public class SharedCache {
     }
   }
 
-  public void alterPartitionsInCache(String dbName, String tblName, List<List<String>> partValsList,
-      List<Partition> newParts) {
+  public void alterPartitionsInCache(String catName, String dbName, String tblName, List<List<String>> partValsList,
+                                     List<Partition> newParts) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         tblWrapper.alterPartitions(partValsList, newParts, this);
       }
@@ -1241,10 +1362,10 @@ public class SharedCache {
     }
   }
 
-  public void refreshPartitionsInCache(String dbName, String tblName, List<Partition> partitions) {
+  public void refreshPartitionsInCache(String catName, String dbName, String tblName, List<Partition> partitions) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         tblWrapper.refreshPartitions(partitions, this);
       }
@@ -1253,11 +1374,11 @@ public class SharedCache {
     }
   }
 
-  public void removePartitionColStatsFromCache(String dbName, String tblName,
-      List<String> partVals, String colName) {
+  public void removePartitionColStatsFromCache(String catName, String dbName, String tblName,
+                                               List<String> partVals, String colName) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         tblWrapper.removePartitionColStats(partVals, colName);
       }
@@ -1266,11 +1387,11 @@ public class SharedCache {
     }
   }
 
-  public void updatePartitionColStatsInCache(String dbName, String tableName,
-      List<String> partVals, List<ColumnStatisticsObj> colStatsObjs) {
+  public void updatePartitionColStatsInCache(String catName, String dbName, String tableName,
+                                             List<String> partVals, List<ColumnStatisticsObj> colStatsObjs) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName));
       if (tblWrapper != null) {
         tblWrapper.updatePartitionColStats(partVals, colStatsObjs);
       }
@@ -1279,12 +1400,12 @@ public class SharedCache {
     }
   }
 
-  public ColumnStatisticsObj getPartitionColStatsFromCache(String dbName, String tblName,
-      List<String> partVal, String colName) {
+  public ColumnStatisticsObj getPartitionColStatsFromCache(String catName, String dbName, String tblName,
+                                                           List<String> partVal, String colName) {
     ColumnStatisticsObj colStatObj = null;
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null){
         colStatObj = tblWrapper.getPartitionColStats(partVal, colName);
       }
@@ -1294,11 +1415,11 @@ public class SharedCache {
     return colStatObj;
   }
 
-  public void refreshPartitionColStatsInCache(String dbName, String tblName,
-      List<ColumnStatistics> partitionColStats) {
+  public void refreshPartitionColStatsInCache(String catName, String dbName, String tblName,
+                                              List<ColumnStatistics> partitionColStats) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         tblWrapper.refreshPartitionColStats(partitionColStats);
       }
@@ -1307,11 +1428,11 @@ public class SharedCache {
     }
   }
 
-  public List<ColumnStatisticsObj> getAggrStatsFromCache(String dbName, String tblName,
-      List<String> colNames, StatsType statsType) {
+  public List<ColumnStatisticsObj> getAggrStatsFromCache(String catName, String dbName, String tblName,
+                                                         List<String> colNames, StatsType statsType) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         return tblWrapper.getAggrPartitionColStats(colNames, statsType);
       }
@@ -1321,11 +1442,11 @@ public class SharedCache {
     return null;
   }
 
-  public void addAggregateStatsToCache(String dbName, String tblName,
-      AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) {
+  public void addAggregateStatsToCache(String catName, String dbName, String tblName,
+                                       AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null){
         tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions,
             aggrStatsAllButDefaultPartition);
@@ -1335,11 +1456,11 @@ public class SharedCache {
     }
   }
 
-  public void refreshAggregateStatsInCache(String dbName, String tblName,
-      AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) {
+  public void refreshAggregateStatsInCache(String catName, String dbName, String tblName,
+                                           AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) {
     try {
       cacheLock.readLock().lock();
-      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName));
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
       if (tblWrapper != null) {
         tblWrapper.refreshAggrPartitionColStats(aggrStatsAllPartitions,
             aggrStatsAllButDefaultPartition);
@@ -1390,6 +1511,16 @@ public class SharedCache {
     return sdCache;
   }
 
+  /**
+   * This resets the contents of the catalog cache so that we can re-fill it in another test.
+   */
+  void resetCatalogCache() {
+    isCatalogCachePrewarmed = false;
+    catalogCache.clear();
+    catalogsDeletedDuringPrewarm.clear();
+    isCatalogCacheDirty.set(false);
+  }
+
   public long getUpdateCount() {
     return cacheUpdateCount.get();
   }
@@ -1398,3 +1529,8 @@ public class SharedCache {
     cacheUpdateCount.incrementAndGet();
   }
 }
+
+
+
+
+
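
As a quick orientation to the new API surface, here is a minimal sketch (not part of the patch) exercising the catalog-aware cache methods added above. It assumes SharedCache's no-argument constructor and the addDatabaseToCache method visible in the surrounding hunks; the Catalog and Database constructors are the ones used elsewhere in this commit.

    import org.apache.hadoop.hive.metastore.api.Catalog;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.metastore.cache.SharedCache;

    public class SharedCacheCatalogSketch {
      public static void main(String[] args) {
        SharedCache cache = new SharedCache();

        // Catalog names are lowercased before being used as cache keys.
        Catalog cat = new Catalog("MyCat", "/warehouse/mycat");
        cache.addCatalogToCache(cat);

        // Reads hand back a deep copy, so callers cannot mutate cached state.
        Catalog cached = cache.getCatalogFromCache("mycat");

        // Databases are now keyed by (catalog, database), so every lookup
        // must supply the catalog name as well.
        Database db = new Database("mydb", "demo db", "/warehouse/mycat/mydb", null);
        db.setCatalogName("mycat");
        cache.addDatabaseToCache(db);
        Database cachedDb = cache.getDatabaseFromCache("mycat", "mydb");
        System.out.println(cached.getName() + " / " + cachedDb.getName());
      }
    }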

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java
new file mode 100644
index 0000000..be76d93
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Catalog;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.thrift.TException;
+
+public class CatalogBuilder {
+  private String name, description, location;
+
+  public CatalogBuilder setName(String name) {
+    this.name = name;
+    return this;
+  }
+
+  public CatalogBuilder setDescription(String description) {
+    this.description = description;
+    return this;
+  }
+
+  public CatalogBuilder setLocation(String location) {
+    this.location = location;
+    return this;
+  }
+
+  public Catalog build() throws MetaException {
+    if (name == null) throw new MetaException("You must name the catalog");
+    if (location == null) throw new MetaException("You must give the catalog a location");
+    Catalog catalog = new Catalog(name, location);
+    if (description != null) catalog.setDescription(description);
+    return catalog;
+  }
+
+  /**
+   * Build the catalog object and create it in the metastore.
+   * @param client metastore client
+   * @return new catalog object
+   * @throws TException thrown from the client
+   */
+  public Catalog create(IMetaStoreClient client) throws TException {
+    Catalog cat = build();
+    client.createCatalog(cat);
+    return cat;
+  }
+}
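
A short usage sketch of the new builder; every method called below is defined in the diff above, and create() is the convenience that wraps build() plus IMetaStoreClient.createCatalog(). Name and location are the two required fields.

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Catalog;
    import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
    import org.apache.thrift.TException;

    public class CatalogBuilderSketch {
      // Builds the catalog and registers it with the metastore in one call.
      static Catalog createSalesCatalog(IMetaStoreClient client) throws TException {
        return new CatalogBuilder()
            .setName("sales")
            .setDescription("catalog for the sales org")  // optional
            .setLocation("/warehouse/sales")              // required, like the name
            .create(client);   // build() then client.createCatalog(cat)
      }
    }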

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
index 50e779a..2e32cbf 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
@@ -17,8 +17,15 @@
  */
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+
+import java.util.ArrayList;
+import java.util.List;
 
 /**
  * Base builder for all types of constraints.  Database name, table name, and column name
@@ -26,28 +33,42 @@ import org.apache.hadoop.hive.metastore.api.Table;
  * @param <T> Type of builder extending this.
  */
 abstract class ConstraintBuilder<T> {
-  protected String dbName, tableName, columnName, constraintName;
-  protected int keySeq;
+  protected String catName, dbName, tableName, constraintName;
+  List<String> columns;
   protected boolean enable, validate, rely;
+  private int nextSeq;
   private T child;
 
   protected ConstraintBuilder() {
-    keySeq = 1;
+    nextSeq = 1;
     enable = true;
     validate = rely = false;
+    dbName = Warehouse.DEFAULT_DATABASE_NAME;
+    columns = new ArrayList<>();
   }
 
   protected void setChild(T child) {
     this.child = child;
   }
 
-  protected void checkBuildable(String defaultConstraintName) throws MetaException {
-    if (dbName == null || tableName == null || columnName == null) {
-      throw new MetaException("You must provide database name, table name, and column name");
+  protected void checkBuildable(String defaultConstraintName, Configuration conf)
+      throws MetaException {
+    if (tableName == null || columns.isEmpty()) {
+      throw new MetaException("You must provide table name and columns");
     }
     if (constraintName == null) {
-      constraintName = dbName + "_" + tableName + "_" + columnName + "_" + defaultConstraintName;
+      constraintName = tableName + "_" + defaultConstraintName;
     }
+    if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf);
+  }
+
+  protected int getNextSeq() {
+    return nextSeq++;
+  }
+
+  public T setCatName(String catName) {
+    this.catName = catName;
+    return child;
   }
 
   public T setDbName(String dbName) {
@@ -60,14 +81,15 @@ abstract class ConstraintBuilder<T> {
     return child;
   }
 
-  public T setDbAndTableName(Table table) {
+  public T onTable(Table table) {
+    this.catName = table.getCatName();
     this.dbName = table.getDbName();
     this.tableName = table.getTableName();
     return child;
   }
 
-  public T setColumnName(String columnName) {
-    this.columnName = columnName;
+  public T addColumn(String columnName) {
+    this.columns.add(columnName);
     return child;
   }
 
@@ -76,11 +98,6 @@ abstract class ConstraintBuilder<T> {
     return child;
   }
 
-  public T setKeySeq(int keySeq) {
-    this.keySeq = keySeq;
-    return child;
-  }
-
   public T setEnable(boolean enable) {
     this.enable = enable;
     return child;
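
Since the builder is abstract, usage goes through a concrete subclass. The sketch below assumes SQLPrimaryKeyBuilder as one such subclass and that its build(Configuration) mirrors the checkBuildable(name, conf) signature above; onTable and addColumn are the methods introduced in this diff.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.client.builder.SQLPrimaryKeyBuilder;

    import java.util.List;

    public class ConstraintBuilderSketch {
      // onTable() copies catalog, db, and table names from the Table object;
      // addColumn() may be called repeatedly now that constraints span columns.
      static List<SQLPrimaryKey> primaryKey(Table table, Configuration conf)
          throws MetaException {
        return new SQLPrimaryKeyBuilder()
            .onTable(table)
            .addColumn("id")
            .addColumn("region")
            .build(conf);   // defaults the catalog from conf when unset (assumed)
      }
    }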

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
index 01693ec..f3d2182 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.Catalog;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
@@ -33,11 +37,24 @@ import java.util.Map;
  * selects reasonable defaults.
  */
 public class DatabaseBuilder {
-  private String name, description, location;
+  private String name, description, location, catalogName;
   private Map<String, String> params = new HashMap<>();
   private String ownerName;
   private PrincipalType ownerType;
 
+  public DatabaseBuilder() {
+  }
+
+  public DatabaseBuilder setCatalogName(String catalogName) {
+    this.catalogName = catalogName;
+    return this;
+  }
+
+  public DatabaseBuilder setCatalogName(Catalog catalog) {
+    this.catalogName = catalog.getName();
+    return this;
+  }
+
   public DatabaseBuilder setName(String name) {
     this.name = name;
     return this;
@@ -73,11 +90,13 @@ public class DatabaseBuilder {
     return this;
   }
 
-  public Database build() throws MetaException {
+  public Database build(Configuration conf) throws MetaException {
     if (name == null) throw new MetaException("You must name the database");
+    if (catalogName == null) catalogName = MetaStoreUtils.getDefaultCatalog(conf);
     Database db = new Database(name, description, location, params);
+    db.setCatalogName(catalogName);
     try {
-      if (ownerName != null) ownerName = SecurityUtils.getUser();
+      if (ownerName == null) ownerName = SecurityUtils.getUser();
       db.setOwnerName(ownerName);
       if (ownerType == null) ownerType = PrincipalType.USER;
       db.setOwnerType(ownerType);
@@ -86,4 +105,18 @@ public class DatabaseBuilder {
       throw MetaStoreUtils.newMetaException(e);
     }
   }
+
+  /**
+   * Build the database, create it in the metastore, and then return the db object.
+   * @param client metastore client
+   * @param conf configuration file
+   * @return new database object
+   * @throws TException comes from {@link #build(Configuration)} or
+   * {@link IMetaStoreClient#createDatabase(Database)}.
+   */
+  public Database create(IMetaStoreClient client, Configuration conf) throws TException {
+    Database db = build(conf);
+    client.createDatabase(db);
+    return db;
+  }
 }
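
A minimal sketch of the new catalog-aware flow; all calls below appear in the diff above. If setCatalogName() were omitted, build(conf) would fall back to the default catalog taken from the configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
    import org.apache.thrift.TException;

    public class DatabaseBuilderSketch {
      // Creates a database inside an explicit catalog.
      static Database createDb(IMetaStoreClient client, Configuration conf) throws TException {
        return new DatabaseBuilder()
            .setCatalogName("sales")
            .setName("q1_reports")
            .create(client, conf);   // build(conf) then client.createDatabase(db)
      }
    }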

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java
index aa9b9f5..c4c09dc 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.FunctionType;
@@ -26,6 +29,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.ResourceUri;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+import org.apache.thrift.TException;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -35,7 +39,7 @@ import java.util.List;
  * Class for creating Thrift Function objects for tests, and API usage.
  */
 public class FunctionBuilder {
-  private String dbName = "default";
+  private String catName, dbName;
   private String funcName = null;
   private String className = null;
   private String owner = null;
@@ -49,7 +53,13 @@ public class FunctionBuilder {
     ownerType = PrincipalType.USER;
     createTime = (int) (System.currentTimeMillis() / 1000);
     funcType = FunctionType.JAVA;
-    resourceUris = new ArrayList<ResourceUri>();
+    resourceUris = new ArrayList<>();
+    dbName = Warehouse.DEFAULT_DATABASE_NAME;
+  }
+
+  public FunctionBuilder setCatName(String catName) {
+    this.catName = catName;
+    return this;
   }
 
   public FunctionBuilder setDbName(String dbName) {
@@ -57,8 +67,9 @@ public class FunctionBuilder {
     return this;
   }
 
-  public FunctionBuilder setDbName(Database db) {
+  public FunctionBuilder inDb(Database db) {
     this.dbName = db.getName();
+    this.catName = db.getCatalogName();
     return this;
   }
 
@@ -102,7 +113,7 @@ public class FunctionBuilder {
     return this;
   }
 
-  public Function build() throws MetaException {
+  public Function build(Configuration conf) throws MetaException {
     try {
       if (owner != null) {
         owner = SecurityUtils.getUser();
@@ -110,7 +121,23 @@ public class FunctionBuilder {
     } catch (IOException e) {
       throw MetaStoreUtils.newMetaException(e);
     }
-    return new Function(funcName, dbName, className, owner, ownerType, createTime, funcType,
+    if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf);
+    Function f = new Function(funcName, dbName, className, owner, ownerType, createTime, funcType,
         resourceUris);
+    f.setCatName(catName);
+    return f;
+  }
+
+  /**
+   * Create the function object in the metastore and return it.
+   * @param client metastore client
+   * @param conf configuration
+   * @return new function object
+   * @throws TException if thrown by build or the client.
+   */
+  public Function create(IMetaStoreClient client, Configuration conf) throws TException {
+    Function f = build(conf);
+    client.createFunction(f);
+    return f;
   }
 }
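
A hedged sketch of the reworked builder: inDb(db) now copies both the database name and its catalog, so the function lands in the same catalog as its parent database. The setName and setClass setters are assumed from the builder's pre-existing API; the rest is in the diff above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.metastore.api.Function;
    import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder;
    import org.apache.thrift.TException;

    public class FunctionBuilderSketch {
      static Function createUdf(IMetaStoreClient client, Configuration conf, Database db)
          throws TException {
        return new FunctionBuilder()
            .inDb(db)                                // copies db name and catalog
            .setName("my_upper")                     // setter for funcName (assumed)
            .setClass("org.example.udf.MyUpper")     // setter for className (assumed)
            .create(client, conf);                   // build(conf) then createFunction
      }
    }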

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java
index 32a84ac..f61a62c 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.metastore.client.builder;
 
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.ISchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
@@ -27,7 +28,7 @@ import org.apache.hadoop.hive.metastore.api.SchemaValidation;
 public class ISchemaBuilder {
   private SchemaType schemaType; // required
   private String name; // required
-  private String dbName; // required
+  private String dbName, catName; // required
   private SchemaCompatibility compatibility; // required
   private SchemaValidation validationLevel; // required
   private boolean canEvolve; // required
@@ -39,6 +40,7 @@ public class ISchemaBuilder {
     validationLevel = SchemaValidation.ALL;
     canEvolve = true;
     dbName = Warehouse.DEFAULT_DATABASE_NAME;
+    catName = Warehouse.DEFAULT_CATALOG_NAME;
   }
 
   public ISchemaBuilder setSchemaType(SchemaType schemaType) {
@@ -56,6 +58,12 @@ public class ISchemaBuilder {
     return this;
   }
 
+  public ISchemaBuilder inDb(Database db) {
+    this.catName = db.getCatalogName();
+    this.dbName = db.getName();
+    return this;
+  }
+
   public ISchemaBuilder setCompatibility(SchemaCompatibility compatibility) {
     this.compatibility = compatibility;
     return this;
@@ -86,7 +94,7 @@ public class ISchemaBuilder {
       throw new MetaException("You must provide a schemaType and name");
     }
     ISchema iSchema =
-        new ISchema(schemaType, name, dbName, compatibility, validationLevel, canEvolve);
+        new ISchema(schemaType, name, catName, dbName, compatibility, validationLevel, canEvolve);
     if (schemaGroup != null) iSchema.setSchemaGroup(schemaGroup);
     if (description != null) iSchema.setDescription(description);
     return iSchema;
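
A brief sketch of the catalog-aware builder: inDb(db) overrides the default-catalog/default-database initial values with the names taken from the Database object. The setName setter and the SchemaType.AVRO enum value are assumptions from the pre-existing API; schemaType and name are the two required fields per the build() check above.

    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.metastore.api.ISchema;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.SchemaType;
    import org.apache.hadoop.hive.metastore.client.builder.ISchemaBuilder;

    public class ISchemaBuilderSketch {
      static ISchema customerSchema(Database db) throws MetaException {
        return new ISchemaBuilder()
            .setSchemaType(SchemaType.AVRO)   // AVRO assumed as an enum value
            .setName("customer_schema")       // setName assumed; name is required
            .inDb(db)                         // catalog and db taken from the Database
            .build();
      }
    }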

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
index 38e5a8f..d6ee673 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
@@ -17,9 +17,14 @@
  */
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.thrift.TException;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -31,7 +36,7 @@ import java.util.Map;
  * reference; 2. partition values; 3. whatever {@link StorageDescriptorBuilder} requires.
  */
 public class PartitionBuilder extends StorageDescriptorBuilder<PartitionBuilder> {
-  private String dbName, tableName;
+  private String catName, dbName, tableName;
   private int createTime, lastAccessTime;
   private Map<String, String> partParams;
   private List<String> values;
@@ -40,6 +45,7 @@ public class PartitionBuilder extends StorageDescriptorBuilder<PartitionBuilder>
     // Set some reasonable defaults
     partParams = new HashMap<>();
     createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000);
+    dbName = Warehouse.DEFAULT_DATABASE_NAME;
     super.setChild(this);
   }
 
@@ -53,9 +59,10 @@ public class PartitionBuilder extends StorageDescriptorBuilder<PartitionBuilder>
     return this;
   }
 
-  public PartitionBuilder fromTable(Table table) {
+  public PartitionBuilder inTable(Table table) {
     this.dbName = table.getDbName();
     this.tableName = table.getTableName();
+    this.catName = table.getCatName();
     setCols(table.getSd().getCols());
     return this;
   }
@@ -92,12 +99,21 @@ public class PartitionBuilder extends StorageDescriptorBuilder<PartitionBuilder>
     return this;
   }
 
-  public Partition build() throws MetaException {
-    if (dbName == null || tableName == null) {
-      throw new MetaException("database name and table name must be provided");
+  public Partition build(Configuration conf) throws MetaException {
+    if (tableName == null) {
+      throw new MetaException("table name must be provided");
     }
     if (values == null) throw new MetaException("You must provide partition values");
-    return new Partition(values, dbName, tableName, createTime, lastAccessTime, buildSd(),
+    if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf);
+    Partition p = new Partition(values, dbName, tableName, createTime, lastAccessTime, buildSd(),
         partParams);
+    p.setCatName(catName);
+    return p;
+  }
+
+  public Partition addToTable(IMetaStoreClient client, Configuration conf) throws TException {
+    Partition p = build(conf);
+    client.add_partition(p);
+    return p;
   }
 }
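
A sketch of the renamed flow (addValue() is assumed from the existing builder API; inTable(), build(conf) and addToTable() appear in this diff):

    Partition p = new PartitionBuilder()
        .inTable(table)                  // copies catName, dbName, tableName and cols
        .addValue("2018-03-31")          // hypothetical partition value
        .addToTable(client, conf);       // build(conf), then client.add_partition(p)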

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLCheckConstraintBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLCheckConstraintBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLCheckConstraintBuilder.java
new file mode 100644
index 0000000..ec99729
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLCheckConstraintBuilder.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class SQLCheckConstraintBuilder extends ConstraintBuilder<SQLCheckConstraintBuilder> {
+  private String checkExpression;
+
+  public SQLCheckConstraintBuilder() {
+    super.setChild(this);
+  }
+
+  public SQLCheckConstraintBuilder setCheckExpression(String checkExpression) {
+    this.checkExpression = checkExpression;
+    return this;
+  }
+
+  public List<SQLCheckConstraint> build(Configuration conf) throws MetaException {
+    if (checkExpression == null) {
+      throw new MetaException("check expression must be set");
+    }
+    checkBuildable("check_constraint", conf);
+    List<SQLCheckConstraint> cc = new ArrayList<>(columns.size());
+    for (String column : columns) {
+      cc.add(new SQLCheckConstraint(catName, dbName, tableName, column, checkExpression,
+          constraintName, enable, validate, rely));
+    }
+    return cc;
+  }
+}
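
A sketch of the new list-returning contract (onTable() and addColumn() are assumed from ConstraintBuilder, which is not shown in this mail):

    List<SQLCheckConstraint> cc = new SQLCheckConstraintBuilder()
        .onTable(table)                  // assumed ConstraintBuilder helper
        .addColumn("age")                // hypothetical column
        .setCheckExpression("age > 0")
        .build(conf);                    // one SQLCheckConstraint per column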

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLDefaultConstraintBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLDefaultConstraintBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLDefaultConstraintBuilder.java
new file mode 100644
index 0000000..b24663d
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLDefaultConstraintBuilder.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class SQLDefaultConstraintBuilder extends ConstraintBuilder<SQLDefaultConstraintBuilder> {
+  private Object defaultVal;
+
+  public SQLDefaultConstraintBuilder() {
+    super.setChild(this);
+  }
+
+  public SQLDefaultConstraintBuilder setDefaultVal(Object defaultVal) {
+    this.defaultVal = defaultVal;
+    return this;
+  }
+
+  public List<SQLDefaultConstraint> build(Configuration conf) throws MetaException {
+    if (defaultVal == null) {
+      throw new MetaException("default value must be set");
+    }
+    checkBuildable("default_value", conf);
+    List<SQLDefaultConstraint> dv = new ArrayList<>(columns.size());
+    for (String column : columns) {
+      dv.add(new SQLDefaultConstraint(catName, dbName, tableName, column,
+          defaultVal.toString(), constraintName, enable, validate, rely));
+    }
+    return dv;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java
index a39319a..f5adda1 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java
@@ -17,21 +17,30 @@
  */
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * Builder for {@link SQLForeignKey}.  Requires what {@link ConstraintBuilder} requires, plus
  * primary key
  * database, table, column and name.
  */
 public class SQLForeignKeyBuilder extends ConstraintBuilder<SQLForeignKeyBuilder> {
-  private String pkDb, pkTable, pkColumn, pkName;
+  private String pkDb, pkTable, pkName;
+  private List<String> pkColumns;
   private int updateRule, deleteRule;
 
   public SQLForeignKeyBuilder() {
+    super.setChild(this);
     updateRule = deleteRule = 0;
+    pkColumns = new ArrayList<>();
+    pkDb = Warehouse.DEFAULT_DATABASE_NAME;
   }
 
   public SQLForeignKeyBuilder setPkDb(String pkDb) {
@@ -44,8 +53,8 @@ public class SQLForeignKeyBuilder extends ConstraintBuilder<SQLForeignKeyBuilder
     return this;
   }
 
-  public SQLForeignKeyBuilder setPkColumn(String pkColumn) {
-    this.pkColumn = pkColumn;
+  public SQLForeignKeyBuilder addPkColumn(String pkColumn) {
+    pkColumns.add(pkColumn);
     return this;
   }
 
@@ -54,11 +63,11 @@ public class SQLForeignKeyBuilder extends ConstraintBuilder<SQLForeignKeyBuilder
     return this;
   }
 
-  public SQLForeignKeyBuilder setPrimaryKey(SQLPrimaryKey pk) {
-    pkDb = pk.getTable_db();
-    pkTable = pk.getTable_name();
-    pkColumn = pk.getColumn_name();
-    pkName = pk.getPk_name();
+  public SQLForeignKeyBuilder fromPrimaryKey(List<SQLPrimaryKey> pk) {
+    pkDb = pk.get(0).getTable_db();
+    pkTable = pk.get(0).getTable_name();
+    for (SQLPrimaryKey pkcol : pk) pkColumns.add(pkcol.getColumn_name());
+    pkName = pk.get(0).getPk_name();
     return this;
   }
 
@@ -72,12 +81,23 @@ public class SQLForeignKeyBuilder extends ConstraintBuilder<SQLForeignKeyBuilder
     return this;
   }
 
-  public SQLForeignKey build() throws MetaException {
-    checkBuildable("foreign_key");
-    if (pkDb == null || pkTable == null || pkColumn == null || pkName == null) {
-      throw new MetaException("You must provide the primary key database, table, column, and name");
+  public List<SQLForeignKey> build(Configuration conf) throws MetaException {
+    if (pkTable == null || pkColumns.isEmpty() || pkName == null) {
+      throw new MetaException("You must provide the primary key table, columns, and name");
+    }
+    // Validate first so a missing pkTable cannot leak into the generated
+    // "to_<pkTable>_foreign_key" default constraint name.
+    checkBuildable("to_" + pkTable + "_foreign_key", conf);
+    if (columns.size() != pkColumns.size()) {
+      throw new MetaException("The number of foreign key columns must match the number of" +
+          " primary key columns");
+    }
+    List<SQLForeignKey> fk = new ArrayList<>(columns.size());
+    for (int i = 0; i < columns.size(); i++) {
+      SQLForeignKey keyCol = new SQLForeignKey(pkDb, pkTable, pkColumns.get(i), dbName, tableName,
+          columns.get(i), getNextSeq(), updateRule, deleteRule, constraintName, pkName, enable,
+          validate, rely);
+      keyCol.setCatName(catName);
+      fk.add(keyCol);
     }
-    return new SQLForeignKey(pkDb, pkTable, pkColumn, dbName, tableName, columnName, keySeq,
-        updateRule, deleteRule, constraintName, pkName, enable, validate, rely);
+    return fk;
   }
 }
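
A sketch of building a multi-column foreign key from an existing primary key (onTable() and addColumn() are assumed from ConstraintBuilder; fromPrimaryKey() and the list-based build(conf) are shown above):

    List<SQLPrimaryKey> pk = new SQLPrimaryKeyBuilder()
        .onTable(parentTable)            // assumed ConstraintBuilder helper
        .addColumn("id")
        .setConstraintName("parent_pk")
        .build(conf);
    List<SQLForeignKey> fk = new SQLForeignKeyBuilder()
        .fromPrimaryKey(pk)              // copies pkDb, pkTable, pkName and pk columns
        .onTable(childTable)
        .addColumn("parent_id")          // count must match the pk columns
        .build(conf);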

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java
index 77d1e49..497032e 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java
@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * Builder for {@link SQLNotNullConstraint}.  Only requires what {@link ConstraintBuilder} requires.
  */
@@ -29,9 +33,20 @@ public class SQLNotNullConstraintBuilder extends ConstraintBuilder<SQLNotNullCon
     super.setChild(this);
   }
 
-  public SQLNotNullConstraint build() throws MetaException {
-    checkBuildable("not_null_constraint");
-    return new SQLNotNullConstraint(dbName, tableName, columnName, constraintName, enable,
-        validate, rely);
+  public SQLNotNullConstraintBuilder setColName(String colName) {
+    assert columns.isEmpty();
+    columns.add(colName);
+    return this;
+  }
+
+  public List<SQLNotNullConstraint> build(Configuration conf) throws MetaException {
+    checkBuildable("not_null_constraint", conf);
+    List<SQLNotNullConstraint> nn = new ArrayList<>(columns.size());
+    for (String column : columns) {
+      // Use the loop variable rather than columns.get(0), so each column gets
+      // its own constraint instead of repeating the first one.
+      SQLNotNullConstraint c = new SQLNotNullConstraint(catName, dbName, tableName, column,
+          constraintName, enable, validate, rely);
+      nn.add(c);
+    }
+    return nn;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java
index 9000f86..40f74bd 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java
@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * Builder for {@link SQLPrimaryKey}.  Only requires what {@link ConstraintBuilder} requires.
  */
@@ -34,9 +38,15 @@ public class SQLPrimaryKeyBuilder extends ConstraintBuilder<SQLPrimaryKeyBuilder
     return setConstraintName(name);
   }
 
-  public SQLPrimaryKey build() throws MetaException {
-    checkBuildable("primary_key");
-    return new SQLPrimaryKey(dbName, tableName, columnName, keySeq, constraintName, enable,
-        validate, rely);
+  public List<SQLPrimaryKey> build(Configuration conf) throws MetaException {
+    checkBuildable("primary_key", conf);
+    List<SQLPrimaryKey> pk = new ArrayList<>(columns.size());
+    for (String colName : columns) {
+      SQLPrimaryKey keyCol = new SQLPrimaryKey(dbName, tableName, colName, getNextSeq(),
+          constraintName, enable, validate, rely);
+      keyCol.setCatName(catName);
+      pk.add(keyCol);
+    }
+    return pk;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java
index 640e9d1..138ee15 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java
@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * Builder for {@link SQLUniqueConstraint}.  Only requires what {@link ConstraintBuilder} requires.
  */
@@ -29,9 +33,14 @@ public class SQLUniqueConstraintBuilder extends ConstraintBuilder<SQLUniqueConst
     super.setChild(this);
   }
 
-  public SQLUniqueConstraint build() throws MetaException {
-    checkBuildable("unique_constraint");
-    return new SQLUniqueConstraint(dbName, tableName, columnName, keySeq, constraintName, enable,
-        validate, rely);
+  public List<SQLUniqueConstraint> build(Configuration conf) throws MetaException {
+    checkBuildable("unique_constraint", conf);
+    List<SQLUniqueConstraint> uc = new ArrayList<>(columns.size());
+    for (String column : columns) {
+      SQLUniqueConstraint c = new SQLUniqueConstraint(catName, dbName, tableName, column, getNextSeq(),
+          constraintName, enable, validate, rely);
+      uc.add(c);
+    }
+    return uc;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java
index ceb0f49..521be3e 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java
@@ -23,8 +23,11 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
 
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+
 public class SchemaVersionBuilder extends SerdeAndColsBuilder<SchemaVersionBuilder> {
-  private String schemaName, dbName; // required
+  private String schemaName, dbName, catName; // required
   private int version; // required
   private long createdAt; // required
   private SchemaVersionState state; // optional
@@ -34,6 +37,8 @@ public class SchemaVersionBuilder extends SerdeAndColsBuilder<SchemaVersionBuild
   private String name; // optional
 
   public SchemaVersionBuilder() {
+    catName = DEFAULT_CATALOG_NAME;
+    dbName = DEFAULT_DATABASE_NAME;
     createdAt = System.currentTimeMillis() / 1000;
     version = -1;
     super.setChild(this);
@@ -50,6 +55,7 @@ public class SchemaVersionBuilder extends SerdeAndColsBuilder<SchemaVersionBuild
   }
 
   public SchemaVersionBuilder versionOf(ISchema schema) {
+    this.catName = schema.getCatName();
     this.dbName = schema.getDbName();
     this.schemaName = schema.getName();
     return this;
@@ -92,11 +98,11 @@ public class SchemaVersionBuilder extends SerdeAndColsBuilder<SchemaVersionBuild
   }
 
   public SchemaVersion build() throws MetaException {
-    if (schemaName == null || dbName == null || version < 0) {
-      throw new MetaException("You must provide the database name, schema name, and schema version");
+    if (schemaName == null || version < 0) {
+      throw new MetaException("You must provide the schema name and schema version");
     }
     SchemaVersion schemaVersion =
-        new SchemaVersion(new ISchemaName(dbName, schemaName), version, createdAt, getCols());
+        new SchemaVersion(new ISchemaName(catName, dbName, schemaName), version, createdAt, getCols());
     if (state != null) schemaVersion.setState(state);
     if (description != null) schemaVersion.setDescription(description);
     if (schemaText != null) schemaVersion.setSchemaText(schemaText);
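
A usage sketch (setVersion() is assumed as the setter for the still-required version field; versionOf() and the three-part ISchemaName are shown above):

    SchemaVersion v = new SchemaVersionBuilder()
        .versionOf(schema)               // copies catName, dbName and schema name
        .setVersion(1)                   // assumed setter; version is required
        .build();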

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
index 2b9f816..79ef7de 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
@@ -17,48 +17,69 @@
  */
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+import org.apache.thrift.TException;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 /**
  * Build a {@link Table}.  The database name and table name must be provided, plus whatever is
  * needed by the underlying {@link StorageDescriptorBuilder}.
  */
 public class TableBuilder extends StorageDescriptorBuilder<TableBuilder> {
-  private String dbName, tableName, owner, viewOriginalText, viewExpandedText, type;
+  private String catName, dbName, tableName, owner, viewOriginalText, viewExpandedText, type,
+      mvValidTxnList;
   private List<FieldSchema> partCols;
   private int createTime, lastAccessTime, retention;
   private Map<String, String> tableParams;
   private boolean rewriteEnabled, temporary;
+  private Set<String> mvReferencedTables;
 
   public TableBuilder() {
     // Set some reasonable defaults
+    dbName = Warehouse.DEFAULT_DATABASE_NAME;
     tableParams = new HashMap<>();
     createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000);
     retention = 0;
     partCols = new ArrayList<>();
     type = TableType.MANAGED_TABLE.name();
+    mvReferencedTables = new HashSet<>();
+    temporary = false;
     super.setChild(this);
   }
 
+  public TableBuilder setCatName(String catName) {
+    this.catName = catName;
+    return this;
+  }
+
   public TableBuilder setDbName(String dbName) {
     this.dbName = dbName;
     return this;
   }
 
-  public TableBuilder setDbName(Database db) {
+  public TableBuilder inDb(Database db) {
     this.dbName = db.getName();
+    this.catName = db.getCatalogName();
     return this;
   }
 
@@ -139,9 +160,19 @@ public class TableBuilder extends StorageDescriptorBuilder<TableBuilder> {
     return this;
   }
 
-  public Table build() throws MetaException {
-    if (dbName == null || tableName == null) {
-      throw new MetaException("You must set the database and table name");
+  public TableBuilder addMaterializedViewReferencedTable(String tableName) {
+    mvReferencedTables.add(tableName);
+    return this;
+  }
+
+  public TableBuilder setMaterializedViewValidTxnList(ValidTxnList validTxnList) {
+    mvValidTxnList = validTxnList.writeToString();
+    return this;
+  }
+
+  public Table build(Configuration conf) throws MetaException {
+    if (tableName == null) {
+      throw new MetaException("You must set the table name");
     }
     if (owner == null) {
       try {
@@ -150,15 +181,24 @@ public class TableBuilder extends StorageDescriptorBuilder<TableBuilder> {
         throw MetaStoreUtils.newMetaException(e);
       }
     }
+    if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf);
     Table t = new Table(tableName, dbName, owner, createTime, lastAccessTime, retention, buildSd(),
         partCols, tableParams, viewOriginalText, viewExpandedText, type);
-    if (rewriteEnabled) {
-      t.setRewriteEnabled(true);
-    }
-    if (temporary) {
-      t.setTemporary(temporary);
+    if (rewriteEnabled) t.setRewriteEnabled(true);
+    if (temporary) t.setTemporary(temporary);
+    t.setCatName(catName);
+    if (!mvReferencedTables.isEmpty()) {
+      CreationMetadata cm = new CreationMetadata(catName, dbName, tableName, mvReferencedTables);
+      if (mvValidTxnList != null) cm.setValidTxnList(mvValidTxnList);
+      t.setCreationMetadata(cm);
     }
     return t;
   }
 
+  public Table create(IMetaStoreClient client, Configuration conf) throws TException {
+    Table t = build(conf);
+    client.createTable(t);
+    return t;
+  }
+
 }
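
A sketch of the conf-aware flow (setTableName() and addCol() are assumed from the existing builder API; build(conf), the catalog fallback and create() are shown above):

    Table t = new TableBuilder()
        .setTableName("web_logs")        // hypothetical table; dbName now has a default
        .addCol("ip", "string")          // assumed StorageDescriptorBuilder helper
        .create(client, conf);           // build(conf), then client.createTable(t)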

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index b8976ed..995137f 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -306,6 +306,12 @@ public class MetastoreConf {
     CAPABILITY_CHECK("metastore.client.capability.check",
         "hive.metastore.client.capability.check", true,
         "Whether to check client capabilities for potentially breaking API usage."),
+    CATALOG_DEFAULT("metastore.catalog.default", "metastore.catalog.default", "hive",
+        "The catalog to use when a request does not specify one.  Defaults to 'hive', the " +
+            "built-in catalog that pre-existing metastore objects belong to."),
+    CATALOGS_TO_CACHE("metastore.cached.rawstore.catalogs", "metastore.cached.rawstore.catalogs",
+        "hive", "Comma-separated list of catalogs to cache in the CachedStore.  Default is " +
+        "'hive', the built-in catalog.  An empty string means all catalogs will be cached."),
     CLIENT_CONNECT_RETRY_DELAY("metastore.client.connect.retry.delay",
         "hive.metastore.client.connect.retry.delay", 1, TimeUnit.SECONDS,
         "Number of seconds for the client to wait between consecutive connection attempts"),

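A sketch of how the new knob might be read (newMetastoreConf() and getVarname() are standard MetastoreConf accessors; that MetaStoreUtils.getDefaultCatalog(conf), used by the builders above, resolves to this value is an assumption):

    Configuration conf = MetastoreConf.newMetastoreConf();
    conf.set(MetastoreConf.ConfVars.CATALOG_DEFAULT.getVarname(), "spark_cat");
    // "spark_cat" is a hypothetical catalog name; expected back from the lookup
    String cat = MetaStoreUtils.getDefaultCatalog(conf);
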
http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateCatalogEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateCatalogEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateCatalogEvent.java
new file mode 100644
index 0000000..e667277
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateCatalogEvent.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.Catalog;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class CreateCatalogEvent extends ListenerEvent {
+
+  private final Catalog cat;
+
+  public CreateCatalogEvent(boolean status, IHMSHandler handler, Catalog cat) {
+    super(status, handler);
+    this.cat = cat;
+  }
+
+  public Catalog getCatalog() {
+    return cat;
+  }
+}
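
A sketch of a listener consuming the new event type (the onCreateCatalog() hook on MetaStoreEventListener is an assumption based on the existing per-event pattern; only the event class itself is shown in this mail, and the slf4j Logger/LoggerFactory imports are implied):

    public class CatalogAuditListener extends MetaStoreEventListener {
      private static final Logger LOG = LoggerFactory.getLogger(CatalogAuditListener.class);

      public CatalogAuditListener(Configuration conf) {
        super(conf);
      }

      @Override  // assumed hook
      public void onCreateCatalog(CreateCatalogEvent event) throws MetaException {
        // Catalog.getName() is the thrift-generated accessor
        LOG.info("catalog created: {}", event.getCatalog().getName());
      }
    }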

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropCatalogEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropCatalogEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropCatalogEvent.java
new file mode 100644
index 0000000..67c6d51
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropCatalogEvent.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.Catalog;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class DropCatalogEvent extends ListenerEvent {
+
+  private final Catalog cat;
+
+  public DropCatalogEvent(boolean status, IHMSHandler handler, Catalog cat) {
+    super(status, handler);
+    this.cat = cat;
+  }
+
+  public Catalog getCatalog() {
+    return cat;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java
index 4c5918f..ccd968b 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java
@@ -26,17 +26,23 @@ import org.apache.hadoop.hive.metastore.IHMSHandler;
 @InterfaceStability.Stable
 public class DropConstraintEvent extends ListenerEvent {
 
+  private final String catName;
   private final String dbName;
   private final String tableName;
   private final String constraintName;
-  public DropConstraintEvent(String dbName, String tableName, String constraintName,
+  public DropConstraintEvent(String catName, String dbName, String tableName, String constraintName,
       boolean status, IHMSHandler handler) {
     super(status, handler);
+    this.catName = catName;
     this.dbName = dbName;
     this.tableName = tableName;
     this.constraintName = constraintName;
   }
 
+  public String getCatName() {
+    return catName;
+  }
+
   public String getDbName() {
     return dbName;
   }

