hive-commits mailing list archives

From ga...@apache.org
Subject svn commit: r1657394 [3/5] - in /hive/branches/hbase-metastore: bin/ext/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ itests/hive-unit/ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase...
Date Wed, 04 Feb 2015 20:00:51 GMT
Added: hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java?rev=1657394&view=auto
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java (added)
+++ hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java Wed Feb  4 20:00:49 2015
@@ -0,0 +1,967 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.RawStore;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
+import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
+import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
+import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
+import org.apache.hadoop.hive.metastore.model.MRoleMap;
+import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
+import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.thrift.TException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Implementation of RawStore that stores data in HBase
+ */
+public class HBaseStore implements RawStore {
+  static final private Log LOG = LogFactory.getLog(HBaseStore.class.getName());
+
+  // Do not access this directly, call getHBase to make sure it is initialized.
+  private HBaseReadWrite hbase = null;
+  private Configuration conf;
+  private int txnNestLevel = 0;
+
+  public HBaseStore() {
+  }
+
+  @Override
+  public void shutdown() {
+    try {
+      if (txnNestLevel != 0) rollbackTransaction();
+      getHBase().close();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public boolean openTransaction() {
+    if (txnNestLevel++ == 0) getHBase().begin();
+    return true;
+  }
+
+  @Override
+  public boolean commitTransaction() {
+    if (--txnNestLevel == 0) getHBase().commit();
+    return true;
+  }
+
+  @Override
+  public void rollbackTransaction() {
+    txnNestLevel = 0;
+    getHBase().rollback();
+  }
+
+  @Override
+  public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+    try {
+      // HiveMetaStore already checks for existence of the database, don't recheck
+      getHBase().putDb(db);
+    } catch (IOException e) {
+      // TODO Not sure what I should throw here
+      LOG.error("Unable to create database ", e);
+      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
+    }
+
+  }
+
+  @Override
+  public Database getDatabase(String name) throws NoSuchObjectException {
+    try {
+      Database db = getHBase().getDb(name);
+      if (db == null) {
+        throw new NoSuchObjectException("Unable to find db " + name);
+      }
+      return db;
+    } catch (IOException e) {
+      LOG.error("Unable to get db", e);
+      throw new NoSuchObjectException("Error reading db " + e.getMessage());
+    }
+  }
+
+  @Override
+  public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException {
+    try {
+      getHBase().deleteDb(dbname);
+      return true;
+    } catch (IOException e) {
+      LOG.error("Unable to delete db", e);
+      throw new MetaException("Unable to drop database " + dbname);
+    }
+  }
+
+  @Override
+  public boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException,
+      MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<String> getDatabases(String pattern) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<String> getAllDatabases() throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean createType(Type type) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Type getType(String typeName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean dropType(String typeName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+    // HiveMetaStore above us checks if the table already exists, so we can blindly store it here.
+    try {
+      getHBase().putTable(tbl);
+    } catch (IOException e) {
+      // TODO Not sure what I should throw here
+      LOG.error("Unable to create table ", e);
+      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
+    }
+  }
+
+  @Override
+  public boolean dropTable(String dbName, String tableName) throws MetaException,
+      NoSuchObjectException, InvalidObjectException, InvalidInputException {
+    try {
+      getHBase().deleteTable(dbName, tableName);
+      return true;
+    } catch (IOException e) {
+      LOG.error("Unable to delete table", e);
+      throw new MetaException("Unable to drop table " + tableName(dbName, tableName));
+    }
+  }
+
+  @Override
+  public Table getTable(String dbName, String tableName) throws MetaException {
+    try {
+      Table table = getHBase().getTable(dbName, tableName);
+      if (table == null) {
+        LOG.debug("Unable to find table " + tableName(dbName, tableName));
+      }
+      return table;
+    } catch (IOException e) {
+      LOG.error("Unable to get table", e);
+      throw new MetaException("Error reading table " + e.getMessage());
+    }
+  }
+
+  @Override
+  public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
+    try {
+      getHBase().putPartition(part);
+      return true;
+    } catch (IOException e) {
+      // TODO Not sure what I should throw here
+      LOG.error("Unable to add partition", e);
+      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
+    }
+  }
+
+  @Override
+  public boolean addPartitions(String dbName, String tblName, List<Partition> parts) throws
+      InvalidObjectException, MetaException {
+    try {
+      for (Partition part : parts) {
+        getHBase().putPartition(part);
+      }
+      return true;
+    } catch (IOException e) {
+      // TODO Not sure what I should throw here
+      LOG.error("Unable to add partitions", e);
+      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
+    }
+  }
+
+  @Override
+  public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec,
+                               boolean ifNotExists) throws InvalidObjectException, MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Partition getPartition(String dbName, String tableName, List<String> part_vals) throws
+      MetaException, NoSuchObjectException {
+    try {
+      Partition part = getHBase().getPartition(dbName, tableName, part_vals);
+      if (part == null) {
+        throw new NoSuchObjectException("Unable to find partition " +
+            partName(dbName, tableName, part_vals));
+      }
+      return part;
+    } catch (IOException e) {
+      LOG.error("Unable to get partition", e);
+      throw new MetaException("Error reading partition " + e.getMessage());
+    }
+  }
+
+  @Override
+  public boolean doesPartitionExist(String dbName, String tableName, List<String> part_vals) throws
+      MetaException, NoSuchObjectException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean dropPartition(String dbName, String tableName, List<String> part_vals) throws
+      MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
+    try {
+      getHBase().deletePartition(dbName, tableName, part_vals);
+      return true;
+    } catch (IOException e) {
+      LOG.error("Unable to delete partition", e);
+      throw new MetaException("Unable to drop partition " + partName(dbName, tableName, part_vals));
+    }
+  }
+
+  @Override
+  public List<Partition> getPartitions(String dbName, String tableName, int max) throws
+      MetaException, NoSuchObjectException {
+    try {
+      return getHBase().scanPartitionsInTable(dbName, tableName, max);
+    } catch (IOException e) {
+      LOG.error("Unable to get partitions", e);
+      throw new MetaException("Error scanning partitions");
+    }
+  }
+
+  @Override
+  public void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException,
+      MetaException {
+    // HiveMetaStore above us has already confirmed the table exists, so don't recheck here.
+    try {
+      getHBase().putTable(newTable);
+    } catch (IOException e) {
+      LOG.error("Unable to alter table " + tableName(dbname, name), e);
+      throw new MetaException("Unable to alter table " + tableName(dbname, name));
+    }
+  }
+
+  @Override
+  public List<String> getTables(String dbName, String pattern) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<Table> getTableObjectsByName(String dbname, List<String> tableNames) throws
+      MetaException, UnknownDBException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<String> getAllTables(String dbName) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<String> listTableNamesByFilter(String dbName, String filter, short max_tables) throws
+      MetaException, UnknownDBException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<String> listPartitionNames(String db_name, String tbl_name, short max_parts) throws
+      MetaException {
+    try {
+      List<Partition> parts = getHBase().scanPartitionsInTable(db_name, tbl_name, max_parts);
+      if (parts == null) return null;
+      List<String> names = new ArrayList<String>(parts.size());
+      Table table = getHBase().getTable(db_name, tbl_name);
+      for (Partition p : parts) {
+        names.add(partName(table, p));
+      }
+      return names;
+    } catch (IOException e) {
+      LOG.error("Unable to get partitions", e);
+      throw new MetaException("Error scanning partitions");
+    }
+  }
+
+  @Override
+  public List<String> listPartitionNamesByFilter(String db_name, String tbl_name, String filter,
+                                                 short max_parts) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void alterPartition(String db_name, String tbl_name, List<String> part_vals,
+                             Partition new_part) throws InvalidObjectException, MetaException {
+
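+    // TODO: not yet implemented; the requested partition change is silently dropped.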
+  }
+
+  @Override
+  public void alterPartitions(String db_name, String tbl_name, List<List<String>> part_vals_list,
+                              List<Partition> new_parts) throws InvalidObjectException,
+      MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean addIndex(Index index) throws InvalidObjectException, MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Index getIndex(String dbName, String origTableName, String indexName) throws
+      MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean dropIndex(String dbName, String origTableName, String indexName) throws
+      MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<Index> getIndexes(String dbName, String origTableName, int max) throws MetaException {
+    // TODO - Indexes are not currently supported, but we need to return an empty list here or
+    // else drop table crashes.
+    return new ArrayList<Index>();
+  }
+
+  @Override
+  public List<String> listIndexNames(String dbName, String origTableName, short max) throws
+      MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void alterIndex(String dbname, String baseTblName, String name, Index newIndex) throws
+      InvalidObjectException, MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<Partition> getPartitionsByFilter(String dbName, String tblName, String filter,
+                                               short maxParts) throws MetaException,
+      NoSuchObjectException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
+                                     String defaultPartitionName, short maxParts,
+                                     List<Partition> result) throws TException {
+    // TODO for now just return all partitions, need to add real expression parsing later.
+    result.addAll(getPartitions(dbName, tblName, maxParts));
+    return true;
+  }
+
+  @Override
+  public List<Partition> getPartitionsByNames(String dbName, String tblName,
+                                              List<String> partNames) throws MetaException,
+      NoSuchObjectException {
+    List<Partition> parts = new ArrayList<Partition>();
+    for (String partName : partNames) {
+      parts.add(getPartition(dbName, tblName, partNameToVals(partName)));
+    }
+    return parts;
+  }
+
+  @Override
+  public Table markPartitionForEvent(String dbName, String tblName, Map<String, String> partVals,
+                                     PartitionEventType evtType) throws MetaException,
+      UnknownTableException, InvalidPartitionException, UnknownPartitionException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean isPartitionMarkedForEvent(String dbName, String tblName,
+                                           Map<String, String> partName,
+                                           PartitionEventType evtType) throws MetaException,
+      UnknownTableException, InvalidPartitionException, UnknownPartitionException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean addRole(String roleName, String ownerName) throws InvalidObjectException,
+      MetaException, NoSuchObjectException {
+    int now = (int)(System.currentTimeMillis()/1000);
+    Role role = new Role(roleName, now, ownerName);
+    try {
+      if (getHBase().getRole(roleName) != null) {
+        throw new InvalidObjectException("Role " + roleName + " already exists");
+      }
+      getHBase().putRole(role);
+      return true;
+    } catch (IOException e) {
+      // TODO Not sure what I should throw here
+      LOG.error("Unable to create role ", e);
+      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
+    }
+  }
+
+  @Override
+  public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException {
+    try {
+      getHBase().deleteRole(roleName);
+      return true;
+    } catch (IOException e) {
+      LOG.error("Unable to delete role", e);
+      throw new MetaException("Unable to drop role " + roleName);
+    }
+  }
+
+  @Override
+  public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor,
+                           PrincipalType grantorType, boolean grantOption) throws MetaException,
+      NoSuchObjectException, InvalidObjectException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean revokeRole(Role role, String userName, PrincipalType principalType,
+                            boolean grantOption) throws MetaException, NoSuchObjectException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List<String> groupNames) throws
+      InvalidObjectException, MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
+                                                 List<String> groupNames) throws
+      InvalidObjectException, MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName,
+                                                    String userName, List<String> groupNames) throws
+      InvalidObjectException, MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName,
+                                                        String partition, String userName,
+                                                        List<String> groupNames) throws
+      InvalidObjectException, MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName,
+                                                     String partitionName, String columnName,
+                                                     String userName,
+                                                     List<String> groupNames) throws
+      InvalidObjectException, MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<MGlobalPrivilege> listPrincipalGlobalGrants(String principalName,
+                                                          PrincipalType principalType) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<MDBPrivilege> listPrincipalDBGrants(String principalName, PrincipalType principalType,
+                                                  String dbName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<MTablePrivilege> listAllTableGrants(String principalName, PrincipalType principalType,
+                                                  String dbName, String tableName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<MPartitionPrivilege> listPrincipalPartitionGrants(String principalName,
+                                                                PrincipalType principalType,
+                                                                String dbName, String tableName,
+                                                                String partName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<MTableColumnPrivilege> listPrincipalTableColumnGrants(String principalName,
+                                                                    PrincipalType principalType,
+                                                                    String dbName, String tableName,
+                                                                    String columnName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<MPartitionColumnPrivilege> listPrincipalPartitionColumnGrants(String principalName,
+                                                                            PrincipalType principalType,
+                                                                            String dbName,
+                                                                            String tableName,
+                                                                            String partName,
+                                                                            String columnName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
+      MetaException, NoSuchObjectException {
+    // TODO
+    return true;
+  }
+
+  @Override
+  public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) throws
+      InvalidObjectException, MetaException, NoSuchObjectException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Role getRole(String roleName) throws NoSuchObjectException {
+    try {
+      Role role = getHBase().getRole(roleName);
+      if (role == null) {
+        throw new NoSuchObjectException("Unable to find role " + roleName);
+      }
+      return role;
+    } catch (IOException e) {
+      LOG.error("Unable to get role", e);
+      throw new NoSuchObjectException("Error reading role " + e.getMessage());
+    }
+  }
+
+  @Override
+  public List<String> listRoleNames() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<MRoleMap> listRoles(String principalName, PrincipalType principalType) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<MRoleMap> listRoleMembers(String roleName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Partition getPartitionWithAuth(String dbName, String tblName, List<String> partVals,
+                                        String user_name, List<String> group_names) throws
+      MetaException, NoSuchObjectException, InvalidObjectException {
+    Partition p = getPartition(dbName, tblName, partVals);
+    // TODO check that user is authorized to see these partitions
+    return p;
+  }
+
+  @Override
+  public List<Partition> getPartitionsWithAuth(String dbName, String tblName, short maxParts,
+                                               String userName, List<String> groupNames) throws
+      MetaException, NoSuchObjectException, InvalidObjectException {
+    List<Partition> parts = getPartitions(dbName, tblName, maxParts);
+    // TODO check that the user is authorized
+    return parts;
+  }
+
+  @Override
+  public List<String> listPartitionNamesPs(String db_name, String tbl_name, List<String> part_vals,
+                                           short max_parts) throws MetaException,
+      NoSuchObjectException {
+    throw new UnsupportedOperationException();
+  }
+
+
+  @Override
+  public List<Partition> listPartitionsPsWithAuth(String db_name, String tbl_name,
+                                                  List<String> part_vals, short max_parts,
+                                                  String userName, List<String> groupNames) throws
+      MetaException, InvalidObjectException, NoSuchObjectException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean updateTableColumnStatistics(ColumnStatistics colStats) throws
+      NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+    try {
+      getHBase().updateStatistics(colStats.getStatsDesc().getDbName(),
+          colStats.getStatsDesc().getTableName(), null, null, colStats);
+      return true;
+    } catch (IOException e) {
+      LOG.error("Unable to update column statistics", e);
+      throw new MetaException("Failed to update column statistics, " + e.getMessage());
+    }
+  }
+
+  @Override
+  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
+                                                 List<String> partVals) throws
+      NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+    try {
+      getHBase().updateStatistics(statsObj.getStatsDesc().getDbName(),
+          statsObj.getStatsDesc().getTableName(), statsObj.getStatsDesc().getPartName(),
+          partVals, statsObj);
+      return true;
+    } catch (IOException e) {
+      LOG.error("Unable to update column statistics", e);
+      throw new MetaException("Failed to update column statistics, " + e.getMessage());
+    }
+  }
+
+  @Override
+  public ColumnStatistics getTableColumnStatistics(String dbName, String tableName,
+                                                   List<String> colName) throws MetaException,
+      NoSuchObjectException {
+    try {
+      return getHBase().getTableStatistics(dbName, tableName, colName);
+    } catch (IOException e) {
+      LOG.error("Unable to fetch column statistics", e);
+      throw new MetaException("Failed to fetch column statistics, " + e.getMessage());
+    }
+  }
+
+  @Override
+  public List<ColumnStatistics> getPartitionColumnStatistics(String dbName, String tblName,
+                                                             List<String> partNames,
+                                                             List<String> colNames) throws
+      MetaException, NoSuchObjectException {
+    List<List<String>> partVals = new ArrayList<List<String>>(partNames.size());
+    for (String partName : partNames) partVals.add(partNameToVals(partName));
+    try {
+      return getHBase().getPartitionStatistics(dbName, tblName, partNames, partVals, colNames);
+    } catch (IOException e) {
+      LOG.error("Unable to fetch column statistics", e);
+      throw new MetaException("Failed fetching column statistics, " + e.getMessage());
+    }
+  }
+
+  @Override
+  public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
+                                                 List<String> partVals, String colName) throws
+      NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+    // NOP, stats will be deleted along with the partition when it is dropped.
+    return true;
+  }
+
+  @Override
+  public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws
+      NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+    // NOP, stats will be deleted along with the table when it is dropped.
+    return true;
+  }
+
+  @Override
+  public long cleanupEvents() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean addToken(String tokenIdentifier, String delegationToken) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean removeToken(String tokenIdentifier) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public String getToken(String tokenIdentifier) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<String> getAllTokenIdentifiers() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public int addMasterKey(String key) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void updateMasterKey(Integer seqNo, String key) throws NoSuchObjectException,
+      MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean removeMasterKey(Integer keySeq) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public String[] getMasterKeys() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void verifySchema() throws MetaException {
+    try {
+      getHBase().createTablesIfNotExist();
+    } catch (IOException e) {
+      LOG.fatal("Unable to verify schema ", e);
+      throw new MetaException("Unable to verify schema");
+    }
+  }
+
+  @Override
+  public String getMetaStoreSchemaVersion() throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void dropPartitions(String dbName, String tblName, List<String> partNames) throws
+      MetaException, NoSuchObjectException {
+    try {
+      for (String partName : partNames) {
+        dropPartition(dbName, tblName, partNameToVals(partName));
+      }
+    } catch (Exception e) {
+      LOG.error("Unable to drop partitions", e);
+      throw new NoSuchObjectException("Failure dropping partitions, " + e.getMessage());
+    }
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(String principalName,
+                                                            PrincipalType principalType) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(String principalName,
+                                                               PrincipalType principalType) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(String principalName,
+                                                                   PrincipalType principalType) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(String principalName,
+                                                                     PrincipalType principalType) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(String principalName,
+                                                                         PrincipalType principalType) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> listGlobalGrantsAll() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName, String tableName,
+                                                                String partitionName,
+                                                                String columnName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> listTableGrantsAll(String dbName, String tableName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName, String tableName,
+                                                          String partitionName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName, String tableName,
+                                                            String columnName) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void createFunction(Function func) throws InvalidObjectException, MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void alterFunction(String dbName, String funcName, Function newFunction) throws
+      InvalidObjectException, MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void dropFunction(String dbName, String funcName) throws MetaException,
+      NoSuchObjectException, InvalidObjectException, InvalidInputException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Function getFunction(String dbName, String funcName) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<String> getFunctions(String dbName, String pattern) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public AggrStats get_aggr_stats_for(String dbName, String tblName, List<String> partNames,
+                                      List<String> colNames) throws MetaException,
+      NoSuchObjectException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void addNotificationEvent(NotificationEvent event) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void cleanNotificationEvents(int olderThan) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public CurrentNotificationEventId getCurrentNotificationEventId() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void flushCache() {
+    getHBase().flushCatalogCache();
+  }
+
+  @Override
+  public void setConf(Configuration configuration) {
+    conf = configuration;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+
+  }
+
+  private HBaseReadWrite getHBase() {
+    if (hbase == null) hbase = HBaseReadWrite.getInstance(conf);
+    return hbase;
+  }
+
+  private String tableName(String dbName, String tableName) {
+    return dbName + "." + tableName;
+  }
+
+  private String partName(String dbName, String tableName, List<String> partVals) {
+    return tableName(dbName, tableName) + " " + StringUtils.join(partVals, ':');
+  }
+
+  private String partName(Table table, Partition part) {
+    return partName(table, part.getValues());
+  }
+
+  static String partName(Table table, List<String> partVals) {
+    List<FieldSchema> partCols = table.getPartitionKeys();
+    StringBuilder builder = new StringBuilder();
+    if (partCols.size() != partVals.size()) {
+      throw new RuntimeException("Whoa, bad: different number of partition columns and values!");
+    }
+    for (int i = 0; i < partCols.size(); i++) {
+      if (i != 0) builder.append('/');
+      builder.append(partCols.get(i).getName());
+      builder.append('=');
+      builder.append(partVals.get(i));
+    }
+    return builder.toString();
+  }
+
+  private List<String> partNameToVals(String name) {
+    if (name == null) return null;
+    List<String> vals = new ArrayList<String>();
+    String[] kvp = name.split("/");
+    for (String kv : kvp) {
+      vals.add(kv.substring(kv.indexOf('=') + 1));
+    }
+    return vals;
+  }
+}
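
A minimal caller-side sketch of how the transaction nesting above is intended to behave (illustrative only, not part of this commit; it assumes a plain Configuration that points at a reachable HBase cluster, with HBaseReadWrite doing the actual begin/commit work):

    Configuration conf = new Configuration();
    HBaseStore store = new HBaseStore();
    store.setConf(conf);
    store.verifySchema();        // creates the metastore tables in HBase if they do not exist

    store.openTransaction();     // nest level 0 -> 1, begins work in HBaseReadWrite
    store.openTransaction();     // nest level 1 -> 2, no new begin
    store.commitTransaction();   // nest level 2 -> 1, nothing committed yet
    store.commitTransaction();   // nest level 1 -> 0, the commit happens here
    store.shutdown();            // rolls back any still-open transaction, then closes the connection

Partition names round trip through the static partName and partNameToVals helpers above in the same key=value form used elsewhere in the metastore (again an illustrative sketch; the table is assumed to be partitioned on ds and hr):

    // For a table partitioned on (ds, hr) and values ("2015-02-04", "12"):
    //   partName(table, partVals)              -> "ds=2015-02-04/hr=12"
    //   partNameToVals("ds=2015-02-04/hr=12")  -> ["2015-02-04", "12"]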

Added: hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java?rev=1657394&view=auto
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java (added)
+++ hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java Wed Feb  4 20:00:49 2015
@@ -0,0 +1,612 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.Decimal;
+import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+import org.apache.hadoop.io.Writable;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Utility functions
+ */
+class HBaseUtils {
+
+  final static Charset ENCODING = StandardCharsets.UTF_8;
+  final static char KEY_SEPARATOR = ':';
+
+  static final private Log LOG = LogFactory.getLog(HBaseUtils.class.getName());
+
+  /**
+   * Build a key for an object in HBase.
+   * @param components strings joined, in order, with KEY_SEPARATOR to form the key
+   * @return the key as a UTF-8 encoded byte array
+   */
+  static byte[] buildKey(String... components) {
+    return buildKey(false, components);
+  }
+
+  static byte[] buildKeyWithTrailingSeparator(String... components) {
+    return buildKey(true, components);
+  }
+
+  private static byte[] buildKey(boolean trailingSeparator, String... components) {
+    String protoKey = StringUtils.join(components, KEY_SEPARATOR);
+    if (trailingSeparator) protoKey += KEY_SEPARATOR;
+    return protoKey.getBytes(ENCODING);
+  }
+
+  static byte[] serialize(Writable writable) throws IOException {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    DataOutputStream dos = new DataOutputStream(baos);
+    writable.write(dos);
+    return baos.toByteArray();
+  }
+
+  static <T extends Writable> void deserialize(T instance, byte[] bytes) throws IOException {
+    DataInput in = new DataInputStream(new ByteArrayInputStream(bytes));
+    instance.readFields(in);
+  }
+
+  static void writeStr(DataOutput out, String str) throws IOException {
+    if (str == null || str.length() == 0) {
+      out.writeInt(0);
+    } else {
+      // Write the byte length rather than the char length so multi-byte UTF-8 characters round trip.
+      byte[] b = str.getBytes(ENCODING);
+      out.writeInt(b.length);
+      out.write(b, 0, b.length);
+    }
+  }
+
+  static String readStr(DataInput in) throws IOException {
+    int len = in.readInt();
+    if (len == 0) {
+      return "";
+    } else {
+      byte[] b = new byte[len];
+      in.readFully(b, 0, len);
+      return new String(b, ENCODING);
+    }
+  }
+
+  static void writeByteArray(DataOutput out, byte[] b) throws IOException {
+    if (b == null || b.length == 0) {
+      out.writeInt(0);
+    } else {
+      out.writeInt(b.length);
+      out.write(b, 0, b.length);
+    }
+  }
+
+  static byte[] readByteArray(DataInput in) throws IOException {
+    int len = in.readInt();
+    if (len == 0) {
+      return new byte[0];
+    } else {
+      byte[] b = new byte[len];
+      in.readFully(b, 0, len);
+      return b;
+    }
+  }
+
+  static void writeDecimal(DataOutput out, Decimal val) throws IOException {
+    HBaseUtils.writeByteArray(out, val.getUnscaled());
+    out.writeShort(val.getScale());
+  }
+
+  static Decimal readDecimal(DataInput in) throws IOException {
+    Decimal d = new Decimal();
+    d.setUnscaled(HBaseUtils.readByteArray(in));
+    d.setScale(in.readShort());
+    return d;
+  }
+
+  static Map<String, String> readStrStrMap(DataInput in) throws IOException {
+    int sz = in.readInt();
+    if (sz == 0) {
+      return new HashMap<String, String>();
+    } else {
+      Map<String, String> m = new HashMap<String, String>(sz);
+      for (int i = 0; i < sz; i++) {
+        m.put(readStr(in), readStr(in));
+      }
+      return m;
+    }
+  }
+
+
+  static void writeStrStrMap(DataOutput out, Map<String, String> map) throws IOException {
+    if (map == null || map.size() == 0) {
+      out.writeInt(0);
+    } else {
+      out.writeInt(map.size());
+      for (Map.Entry<String, String> e : map.entrySet()) {
+        writeStr(out, e.getKey());
+        writeStr(out, e.getValue());
+      }
+    }
+  }
+
+  static Map<List<String>, String> readStrListStrMap(DataInput in) throws IOException {
+    int sz = in.readInt();
+    if (sz == 0) {
+      return new HashMap<List<String>, String>();
+    } else {
+      Map<List<String>, String> m = new HashMap<List<String>, String>(sz);
+      for (int i = 0; i < sz; i++) {
+        m.put(readStrList(in), readStr(in));
+      }
+      return m;
+    }
+  }
+
+
+  static void writeStrListStrMap(DataOutput out, Map<List<String>, String> map) throws IOException {
+    if (map == null || map.size() == 0) {
+      out.writeInt(0);
+    } else {
+      out.writeInt(map.size());
+      for (Map.Entry<List<String>, String> e : map.entrySet()) {
+        writeStrList(out, e.getKey());
+        writeStr(out, e.getValue());
+      }
+    }
+  }
+
+  static void writeStrList(DataOutput out, List<String> list) throws IOException {
+    if (list == null || list.size() == 0) {
+      out.writeInt(0);
+    } else {
+      out.writeInt(list.size());
+      for (String val : list) {
+        writeStr(out, val);
+      }
+    }
+  }
+
+  static List<String> readStrList(DataInput in) throws IOException {
+    int sz = in.readInt();
+    if (sz == 0) {
+      return new ArrayList<String>();
+    } else {
+      List<String> list = new ArrayList<String>(sz);
+      for (int i = 0; i < sz; i++) {
+        list.add(readStr(in));
+      }
+      return list;
+    }
+  }
+
+  static void writeWritableList(DataOutput out, List<? extends Writable> list) throws IOException {
+    if (list == null || list.size() == 0) {
+      out.writeInt(0);
+    } else {
+      out.writeInt(list.size());
+      for (Writable val : list) {
+        val.write(out);
+      }
+    }
+  }
+
+  static <T extends Writable> List<T> readWritableList(DataInput in, Class<T> clazz)
+      throws IOException {
+    int sz = in.readInt();
+    if (sz == 0) {
+      return new ArrayList<T>();
+    } else {
+      List<T> list = new ArrayList<T>(sz);
+      for (int i = 0; i < sz; i++) {
+        try {
+          T instance = clazz.newInstance();
+          instance.readFields(in);
+          list.add(instance);
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+      return list;
+    }
+  }
+
+  static void writeStrListList(DataOutput out, List<List<String>> list) throws IOException {
+    if (list == null || list.size() == 0) {
+      out.writeInt(0);
+    } else {
+      out.writeInt(list.size());
+      for (List<String> vals : list) {
+        writeStrList(out, vals);
+      }
+    }
+  }
+
+  static List<List<String>> readStrListList(DataInput in) throws IOException {
+    int sz = in.readInt();
+    if (sz == 0) {
+      return new ArrayList<List<String>>();
+    } else {
+      List<List<String>> list = new ArrayList<List<String>>(sz);
+      for (int i = 0; i < sz; i++) {
+        list.add(readStrList(in));
+      }
+      return list;
+    }
+  }
+
+  static List<FieldSchema> readFieldSchemaList(DataInput in) throws IOException {
+    int sz = in.readInt();
+    if (sz == 0) {
+      return new ArrayList<FieldSchema>();
+    } else {
+      List<FieldSchema> schemas = new ArrayList<FieldSchema>(sz);
+      for (int i = 0; i < sz; i++) {
+        schemas.add(new FieldSchema(readStr(in), readStr(in), readStr(in)));
+      }
+      return schemas;
+    }
+  }
+
+  static void writeFieldSchemaList(DataOutput out, List<FieldSchema> fields) throws IOException {
+    if (fields == null || fields.size() == 0) {
+      out.writeInt(0);
+    } else {
+      out.writeInt(fields.size());
+      for (FieldSchema field : fields) {
+        writeStr(out, field.getName());
+        writeStr(out, field.getType());
+        writeStr(out, field.getComment());
+      }
+    }
+  }
+
+  static List<Order> readOrderList(DataInput in) throws IOException {
+    int sz = in.readInt();
+    if (sz == 0) {
+      return new ArrayList<Order>();
+    } else {
+      List<Order> orderList = new ArrayList<Order>(sz);
+      for (int i = 0; i < sz; i++) {
+        orderList.add(new Order(readStr(in), in.readInt()));
+      }
+      return orderList;
+    }
+  }
+
+  static void writeOrderList(DataOutput out, List<Order> orderList) throws IOException {
+    if (orderList == null || orderList.size() == 0) {
+      out.writeInt(0);
+    } else {
+      out.writeInt(orderList.size());
+      for (Order order : orderList) {
+        writeStr(out, order.getCol());
+        out.writeInt(order.getOrder());
+      }
+    }
+  }
+
+  static PrincipalPrivilegeSet readPrivileges(DataInput in) throws IOException {
+    if (in.readBoolean()) {
+      PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet();
+      pps.setUserPrivileges(readPrivilege(in));
+      pps.setGroupPrivileges(readPrivilege(in));
+      pps.setRolePrivileges(readPrivilege(in));
+      return pps;
+    } else {
+      return new PrincipalPrivilegeSet();
+    }
+
+  }
+
+  private static Map<String, List<PrivilegeGrantInfo>> readPrivilege(DataInput in)
+      throws IOException {
+    int sz = in.readInt();
+    if (sz == 0) {
+      return new HashMap<String, List<PrivilegeGrantInfo>>();
+    } else {
+      Map<String, List<PrivilegeGrantInfo>> priv =
+          new HashMap<String, List<PrivilegeGrantInfo>>(sz);
+      for (int i = 0; i < sz; i++) {
+        String key = readStr(in);
+        int numGrants = in.readInt();
+        List<PrivilegeGrantInfo> grants = new ArrayList<PrivilegeGrantInfo>(numGrants);
+        for (int j = 0; j < numGrants; j++) {
+          PrivilegeGrantInfo pgi = new PrivilegeGrantInfo();
+          pgi.setPrivilege(readStr(in));
+          pgi.setCreateTime(in.readInt());
+          pgi.setGrantor(readStr(in));
+          pgi.setGrantorType(PrincipalType.findByValue(in.readInt()));
+          pgi.setGrantOption(in.readBoolean());
+          grants.add(pgi);
+        }
+        // Map the key to its grants even when the list is empty.
+        priv.put(key, grants);
+      }
+      return priv;
+    }
+  }
+
+  static void writePrivileges(DataOutput out, PrincipalPrivilegeSet privs) throws IOException {
+    if (privs == null) {
+      out.writeBoolean(false);
+    } else {
+      out.writeBoolean(true);
+      writePrivilege(out, privs.getUserPrivileges());
+      writePrivilege(out, privs.getGroupPrivileges());
+      writePrivilege(out, privs.getRolePrivileges());
+    }
+  }
+
+  private static void writePrivilege(DataOutput out, Map<String,List<PrivilegeGrantInfo>> priv)
+      throws IOException {
+    if (priv == null || priv.size() == 0) {
+      out.writeInt(0);
+    } else {
+      out.writeInt(priv.size());
+      for (Map.Entry<String, List<PrivilegeGrantInfo>> e : priv.entrySet()) {
+        writeStr(out, e.getKey());
+        List<PrivilegeGrantInfo> grants = e.getValue();
+        if (grants == null || grants.size() == 0) {
+          out.writeInt(0);
+        } else {
+          out.writeInt(grants.size());
+          for (PrivilegeGrantInfo grant : grants) {
+            writeStr(out, grant.getPrivilege());
+            out.writeInt(grant.getCreateTime());
+            writeStr(out, grant.getGrantor());
+            out.writeInt(grant.getGrantorType().getValue());
+            out.writeBoolean(grant.isGrantOption());
+          }
+        }
+      }
+    }
+  }
+
+  static void writePrincipalType(DataOutput out, PrincipalType pt) throws IOException {
+    if (pt == null) {
+      out.writeBoolean(false);
+    } else {
+      out.writeBoolean(true);
+      out.writeInt(pt.getValue());
+    }
+  }
+
+  static PrincipalType readPrincipalType(DataInput in) throws IOException {
+    return (in.readBoolean()) ? PrincipalType.findByValue(in.readInt()) : null;
+  }
+
+  static void writeSkewedInfo(DataOutput out, SkewedInfo skew) throws IOException {
+    if (skew == null) {
+      out.writeBoolean(false);
+    } else {
+      out.writeBoolean(true);
+      writeStrList(out, skew.getSkewedColNames());
+      writeStrListList(out, skew.getSkewedColValues());
+      writeStrListStrMap(out, skew.getSkewedColValueLocationMaps());
+    }
+  }
+
+  static SkewedInfo readSkewedInfo(DataInput in) throws IOException {
+    if (in.readBoolean()) {
+      SkewedInfo skew = new SkewedInfo();
+      skew.setSkewedColNames(readStrList(in));
+      skew.setSkewedColValues(readStrListList(in));
+      skew.setSkewedColValueLocationMaps(readStrListStrMap(in));
+      return skew;
+    } else {
+      return new SkewedInfo(new ArrayList<String>(), new ArrayList<List<String>>(),
+          new HashMap<List<String>, String>());
+    }
+  }
+
+  static byte[] serializeStorageDescriptor(StorageDescriptor sd) throws IOException {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    DataOutputStream dos = new DataOutputStream(baos);
+    writeFieldSchemaList(dos, sd.getCols());
+    writeStr(dos, sd.getInputFormat());
+    writeStr(dos, sd.getOutputFormat());
+    dos.writeBoolean(sd.isCompressed());
+    dos.writeInt(sd.getNumBuckets());
+    writeStr(dos, sd.getSerdeInfo().getName());
+    writeStr(dos, sd.getSerdeInfo().getSerializationLib());
+    writeStrStrMap(dos, sd.getSerdeInfo().getParameters());
+    writeStrList(dos, sd.getBucketCols());
+    writeOrderList(dos, sd.getSortCols());
+    writeSkewedInfo(dos, sd.getSkewedInfo());
+    dos.writeBoolean(sd.isStoredAsSubDirectories());
+    return baos.toByteArray();
+  }
+
+  static void deserializeStorageDescriptor(StorageDescriptor sd, byte[] bytes)
+      throws IOException {
+    DataInput in = new DataInputStream(new ByteArrayInputStream(bytes));
+    sd.setCols(readFieldSchemaList(in));
+    sd.setInputFormat(readStr(in));
+    sd.setOutputFormat(readStr(in));
+    sd.setCompressed(in.readBoolean());
+    sd.setNumBuckets(in.readInt());
+    SerDeInfo serde = new SerDeInfo(readStr(in), readStr(in), readStrStrMap(in));
+    sd.setSerdeInfo(serde);
+    sd.setBucketCols(readStrList(in));
+    sd.setSortCols(readOrderList(in));
+    sd.setSkewedInfo(readSkewedInfo(in));
+    sd.setStoredAsSubDirectories(in.readBoolean());
+  }
+
+  static byte[] serializeStatsForOneColumn(ColumnStatistics stats, ColumnStatisticsObj obj)
+      throws IOException {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    DataOutputStream dos = new DataOutputStream(baos);
+    dos.writeLong(stats.getStatsDesc().getLastAnalyzed());
+    HBaseUtils.writeStr(dos, obj.getColType());
+    ColumnStatisticsData colData = obj.getStatsData();
+    HBaseUtils.writeStr(dos, colData.getSetField().toString());
+    switch (colData.getSetField()) {
+      case BOOLEAN_STATS:
+        BooleanColumnStatsData boolData = colData.getBooleanStats();
+        dos.writeLong(boolData.getNumTrues());
+        dos.writeLong(boolData.getNumFalses());
+        dos.writeLong(boolData.getNumNulls());
+        break;
+
+      case LONG_STATS:
+        LongColumnStatsData longData = colData.getLongStats();
+        dos.writeLong(longData.getLowValue());
+        dos.writeLong(longData.getHighValue());
+        dos.writeLong(longData.getNumNulls());
+        dos.writeLong(longData.getNumDVs());
+        break;
+
+      case DOUBLE_STATS:
+        DoubleColumnStatsData doubleData = colData.getDoubleStats();
+        dos.writeDouble(doubleData.getLowValue());
+        dos.writeDouble(doubleData.getHighValue());
+        dos.writeLong(doubleData.getNumNulls());
+        dos.writeLong(doubleData.getNumDVs());
+        break;
+
+      case STRING_STATS:
+        StringColumnStatsData stringData = colData.getStringStats();
+        dos.writeLong(stringData.getMaxColLen());
+        dos.writeDouble(stringData.getAvgColLen());
+        dos.writeLong(stringData.getNumNulls());
+        dos.writeLong(stringData.getNumDVs());
+        break;
+
+      case BINARY_STATS:
+        BinaryColumnStatsData binaryData = colData.getBinaryStats();
+        dos.writeLong(binaryData.getMaxColLen());
+        dos.writeDouble(binaryData.getAvgColLen());
+        dos.writeLong(binaryData.getNumNulls());
+        break;
+
+      case DECIMAL_STATS:
+        DecimalColumnStatsData decimalData = colData.getDecimalStats();
+        writeDecimal(dos, decimalData.getHighValue());
+        writeDecimal(dos, decimalData.getLowValue());
+        dos.writeLong(decimalData.getNumNulls());
+        dos.writeLong(decimalData.getNumDVs());
+        break;
+
+      default:
+        throw new RuntimeException("Whoa, bad. Unknown stats type!");
+    }
+    return baos.toByteArray();
+  }
+
+  static ColumnStatisticsObj deserializeStatsForOneColumn(ColumnStatistics stats,
+                                                          byte[] bytes) throws IOException {
+    DataInput in = new DataInputStream(new ByteArrayInputStream(bytes));
+    ColumnStatisticsObj obj = new ColumnStatisticsObj();
+    long lastAnalyzed = in.readLong();
+    stats.getStatsDesc().setLastAnalyzed(
+        Math.max(lastAnalyzed, stats.getStatsDesc().getLastAnalyzed()));
+    obj.setColType(HBaseUtils.readStr(in));
+
+    ColumnStatisticsData._Fields type = ColumnStatisticsData._Fields.valueOf(HBaseUtils.readStr(in));
+    ColumnStatisticsData colData = new ColumnStatisticsData();
+    switch (type) {
+      case BOOLEAN_STATS:
+        BooleanColumnStatsData boolData = new BooleanColumnStatsData();
+        boolData.setNumTrues(in.readLong());
+        boolData.setNumFalses(in.readLong());
+        boolData.setNumNulls(in.readLong());
+        colData.setBooleanStats(boolData);
+        break;
+
+      case LONG_STATS:
+        LongColumnStatsData longData = new LongColumnStatsData();
+        longData.setLowValue(in.readLong());
+        longData.setHighValue(in.readLong());
+        longData.setNumNulls(in.readLong());
+        longData.setNumDVs(in.readLong());
+        colData.setLongStats(longData);
+        break;
+
+      case DOUBLE_STATS:
+        DoubleColumnStatsData doubleData = new DoubleColumnStatsData();
+        doubleData.setLowValue(in.readDouble());
+        doubleData.setHighValue(in.readDouble());
+        doubleData.setNumNulls(in.readLong());
+        doubleData.setNumDVs(in.readLong());
+        colData.setDoubleStats(doubleData);
+        break;
+
+      case STRING_STATS:
+        StringColumnStatsData stringData = new StringColumnStatsData();
+        stringData.setMaxColLen(in.readLong());
+        stringData.setAvgColLen(in.readDouble());
+        stringData.setNumNulls(in.readLong());
+        stringData.setNumDVs(in.readLong());
+        colData.setStringStats(stringData);
+        break;
+
+      case BINARY_STATS:
+        BinaryColumnStatsData binaryData = new BinaryColumnStatsData();
+        binaryData.setMaxColLen(in.readLong());
+        binaryData.setAvgColLen(in.readDouble());
+        binaryData.setNumNulls(in.readLong());
+        colData.setBinaryStats(binaryData);
+        break;
+
+      case DECIMAL_STATS:
+        DecimalColumnStatsData decimalData = new DecimalColumnStatsData();
+        decimalData.setHighValue(readDecimal(in));
+        decimalData.setLowValue(readDecimal(in));
+        decimalData.setNumNulls(in.readLong());
+        decimalData.setNumDVs(in.readLong());
+        colData.setDecimalStats(decimalData);
+        break;
+
+      default:
+        throw new RuntimeException("Unknown column stats type: " + type);
+    }
+    obj.setStatsData(colData);
+    return obj;
+  }
+}
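
The byte layout produced above is: lastAnalyzed, the column type string, the union
discriminator as a string, and then the type-specific counters.  A minimal round-trip sketch
for a boolean column follows; it is illustration only, not part of this patch.  The class that
declares these two static methods is not visible in this excerpt, so the StatsCodec qualifier
below is a stand-in for it; only standard metastore Thrift stats classes are otherwise assumed.

// Illustration only -- "StatsCodec" stands in for whichever class in this patch declares
// serializeStatsForOneColumn / deserializeStatsForOneColumn.
import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

import java.io.IOException;

class StatsRoundTripSketch {
  static void roundTripBooleanColumn() throws IOException {
    // Describe where the stats come from and when they were gathered.
    ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, "default", "t");
    desc.setLastAnalyzed(1423080000L);
    ColumnStatistics stats = new ColumnStatistics();
    stats.setStatsDesc(desc);

    // Stats for one boolean column: 10 trues, 5 falses, 1 null.
    ColumnStatisticsData data = new ColumnStatisticsData();
    data.setBooleanStats(new BooleanColumnStatsData(10, 5, 1));
    ColumnStatisticsObj obj = new ColumnStatisticsObj("flag", "boolean", data);

    // Encode to the byte[] that would be stored, then decode it back.
    byte[] encoded = StatsCodec.serializeStatsForOneColumn(stats, obj);
    ColumnStatisticsObj decoded = StatsCodec.deserializeStatsForOneColumn(stats, encoded);
    assert decoded.getStatsData().getBooleanStats().getNumTrues() == 10;
  }
}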

Added: hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/ObjectCache.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/ObjectCache.java?rev=1657394&view=auto
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/ObjectCache.java (added)
+++ hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/ObjectCache.java Wed Feb  4 20:00:49 2015
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A generic class for caching objects obtained from HBase.  Currently a set of
+ * convenience methods around a {@link java.util.HashMap} with a max size but built
+ * as a separate class in case we want to switch out the implementation to something more
+ * efficient.  The cache has a max size; when this is exceeded any additional entries are dropped
+ * on the floor.
+ *
+ * This cache is local to a particular thread and thus is not synchronized.  It is intended to be
+ * flushed before a query begins to make sure it doesn't carry old versions of objects between
+ * queries (that is, an object may have changed between two queries, and we want to get the
+ * newest version).
+ */
+class ObjectCache<K, V> {
+  private Map<K, V> cache;
+  private final int maxSize;
+  private Counter hits;
+  private Counter misses;
+  private Counter overflows;
+
+  /**
+   *
+   * @param max maximum number of objects to store in the cache.  Once max is reached, new
+   *            entries are not cached; they are counted as overflows instead.
+   * @param hits counter to increment when we find an element in the cache
+   * @param misses counter to increment when we do not find an element in the cache
+   * @param overflows counter to increment when we do not have room for an element in the cache
+   */
+  ObjectCache(int max, Counter hits, Counter misses, Counter overflows) {
+    maxSize = max;
+    cache = new HashMap<K, V>();
+    this.hits = hits;
+    this.misses = misses;
+    this.overflows = overflows;
+  }
+
+  void put(K key, V value) {
+    if (cache.size() < maxSize) {
+      cache.put(key, value);
+    } else {
+      overflows.incr();
+    }
+  }
+
+  V get(K key) {
+    V val = cache.get(key);
+    if (val == null) misses.incr();
+    else hits.incr();
+    return val;
+  }
+
+  void remove(K key) {
+    cache.remove(key);
+  }
+
+  void flush() {
+    cache.clear();
+  }
+}
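
Since ObjectCache is per-thread and unsynchronized, the intended pattern is: consult the cache,
fall back to the HBase read path on a miss, remember the result, and flush() when a new query
starts.  A minimal sketch, not from this patch; it assumes same-package access (ObjectCache is
package-private) and abstracts the HBase fetch behind a hypothetical callback because Counter
construction and the real read path are not shown in this excerpt.

import org.apache.hadoop.hive.metastore.api.Table;

class ObjectCacheSketch {
  // Hypothetical fetch callback standing in for the real HBase read path.
  interface TableFetcher {
    Table fetch(String tableName) throws Exception;
  }

  static Table cachedFetch(ObjectCache<String, Table> tableCache, String tableName,
                           TableFetcher fetcher) throws Exception {
    // Check the per-thread cache first; on a miss, fetch and remember the result.
    Table table = tableCache.get(tableName);
    if (table == null) {
      table = fetcher.fetch(tableName);
      tableCache.put(tableName, table);
    }
    return table;
  }
}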

Added: hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionCache.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionCache.java?rev=1657394&view=auto
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionCache.java (added)
+++ hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionCache.java Wed Feb  4 20:00:49 2015
@@ -0,0 +1,168 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import org.apache.hadoop.hive.common.ObjectPair;
+import org.apache.hadoop.hive.metastore.api.Partition;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A cache for partition objects.  This is separate from
+ * {@link org.apache.hadoop.hive.metastore.hbase.ObjectCache} because we need to access it
+ * differently (always by table) and because we need to be able to track whether we are caching
+ * all of the partitions for a table or not.  Like ObjectCache it is local to a particular thread
+ * and thus not synchronized.  Also like ObjectCache it is intended to be flushed before each query.
+ */
+class PartitionCache {
+  // This is a trie.  The key to the first map is (dbname, tablename), since partitions are
+  // always accessed within the context of the table they belong to.  The second map maps
+  // partition values (not names) to partitions.
+  private Map<ObjectPair<String, String>, TrieValue> cache;
+  private final int maxSize;
+  private int cacheSize;
+  private Counter misses;
+  private Counter hits;
+  private Counter overflows;
+
+  /**
+   *
+   * @param max maximum number of partitions to store in the cache.  Once max is reached, new
+   *            entries are not cached; they are counted as overflows instead.
+   * @param hits counter to increment when we find an element in the cache
+   * @param misses counter to increment when we do not find an element in the cache
+   * @param overflows counter to increment when we do not have room for an element in the cache
+   */
+  PartitionCache(int max, Counter hits, Counter misses, Counter overflows) {
+    maxSize = max;
+    cache = new HashMap<ObjectPair<String, String>, TrieValue>();
+    cacheSize = 0;
+    this.hits = hits;
+    this.misses = misses;
+    this.overflows = overflows;
+  }
+
+  /**
+   * Put a single partition into the cache.
+   * @param dbName database the partition's table belongs to
+   * @param tableName table the partition belongs to
+   * @param part partition to cache
+   */
+  void put(String dbName, String tableName, Partition part) {
+    if (cacheSize < maxSize) {
+      ObjectPair<String, String> key = new ObjectPair<String, String>(dbName, tableName);
+      TrieValue entry = cache.get(key);
+      if (entry == null) {
+        entry = new TrieValue(false);
+        cache.put(key, entry);
+      }
+      entry.map.put(part.getValues(), part);
+      cacheSize++;
+    } else {
+      overflows.incr();
+    }
+  }
+
+  /**
+   * Put a list of partitions for a table into the cache.
+   * @param dbName database the partitions' table belongs to
+   * @param tableName table the partitions belong to
+   * @param parts partitions to cache
+   * @param allForTable if true, indicates that all partitions for this table are present
+   */
+  void put(String dbName, String tableName, List<Partition> parts, boolean allForTable) {
+    if (cacheSize + parts.size() < maxSize) {
+      ObjectPair<String, String> key = new ObjectPair<String, String>(dbName, tableName);
+      TrieValue entry = cache.get(key);
+      if (entry == null) {
+        entry = new TrieValue(allForTable);
+        cache.put(key, entry);
+      }
+      for (Partition part : parts) entry.map.put(part.getValues(), part);
+      cacheSize += parts.size();
+    } else {
+      overflows.incr();
+    }
+  }
+
+  /**
+   * Will only return a value if all partitions for this table are in the cache.  Otherwise you
+   * should call {@link #get} for each partition individually.
+   * @param dbName database the table belongs to
+   * @param tableName table to fetch partitions for
+   * @return all cached partitions for the table, or null if not all partitions are cached
+   */
+  Collection<Partition> getAllForTable(String dbName, String tableName) {
+    TrieValue entry = cache.get(new ObjectPair<String, String>(dbName, tableName));
+    if (entry != null && entry.hasAllPartitionsForTable) {
+      hits.incr();
+      return entry.map.values();
+    } else {
+      misses.incr();
+      return null;
+    }
+  }
+
+  Partition get(String dbName, String tableName, List<String> partVals) {
+    TrieValue entry = cache.get(new ObjectPair<String, String>(dbName, tableName));
+    if (entry != null) {
+      hits.incr();
+      return entry.map.get(partVals);
+    } else {
+      misses.incr();
+      return null;
+    }
+  }
+
+  void remove(String dbName, String tableName) {
+    ObjectPair<String, String> key = new ObjectPair<String, String>(dbName, tableName);
+    TrieValue entry = cache.get(key);
+    if (entry != null) {
+      cacheSize -= entry.map.size();
+      cache.remove(key);
+    }
+  }
+
+  void remove(String dbName, String tableName, List<String> partVals) {
+    ObjectPair<String, String> key = new ObjectPair<String, String>(dbName, tableName);
+    TrieValue entry = cache.get(key);
+    if (entry != null && entry.map.remove(partVals) != null) {
+      cacheSize--;
+      entry.hasAllPartitionsForTable = false;
+    }
+  }
+
+  void flush() {
+    cache.clear();
+    cacheSize = 0;
+  }
+
+  static class TrieValue {
+    boolean hasAllPartitionsForTable;
+    Map<List<String>, Partition> map;
+
+    TrieValue(boolean hasAll) {
+      hasAllPartitionsForTable = hasAll;
+      map = new HashMap<List<String>, Partition>();
+    }
+  }
+}
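
A short sketch of the contract above: caching a complete partition list answers whole-table
lookups, and removing any single partition clears the "all partitions present" flag so later
whole-table lookups fall through to HBase.  Illustration only, assuming same-package access;
the cache and partition list are passed in because Counter construction is not shown here.

import org.apache.hadoop.hive.metastore.api.Partition;

import java.util.Arrays;
import java.util.Collection;
import java.util.List;

class PartitionCacheSketch {
  // Assumes allPartsOfT includes a partition whose values are ["2015-02-04"].
  static void demo(PartitionCache cache, List<Partition> allPartsOfT) {
    // Cache every partition of default.t and mark the entry as complete.
    cache.put("default", "t", allPartsOfT, true);

    // Because the entry is complete, a whole-table lookup is answered from the cache.
    Collection<Partition> cached = cache.getAllForTable("default", "t");
    assert cached != null;

    // Removing one partition invalidates the "all partitions present" flag...
    cache.remove("default", "t", Arrays.asList("2015-02-04"));
    // ...so whole-table lookups now miss, even though individual gets still work.
    assert cache.getAllForTable("default", "t") == null;
  }
}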

Added: hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionWritable.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionWritable.java?rev=1657394&view=auto
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionWritable.java (added)
+++ hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionWritable.java Wed Feb  4 20:00:49 2015
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.io.Writable;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Wrapper for {@link org.apache.hadoop.hive.metastore.api.Partition} that makes it writable
+ */
+class PartitionWritable implements Writable {
+  final Partition part;
+
+  PartitionWritable() {
+    this.part = new Partition();
+  }
+
+  PartitionWritable(Partition part) {
+    this.part = part;
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    HBaseUtils.writeStrList(out, part.getValues());
+    // TODO should be able to avoid dbname and tablename since they're in the key
+    HBaseUtils.writeStr(out, part.getDbName());
+    HBaseUtils.writeStr(out, part.getTableName());
+    out.writeInt(part.getCreateTime());
+    out.writeInt(part.getLastAccessTime());
+    new StorageDescriptorWritable(part.getSd()).write(out);
+    HBaseUtils.writeStrStrMap(out, part.getParameters());
+    HBaseUtils.writePrivileges(out, part.getPrivileges());
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    part.setValues(HBaseUtils.readStrList(in));
+    part.setDbName(HBaseUtils.readStr(in));
+    part.setTableName(HBaseUtils.readStr(in));
+    part.setCreateTime(in.readInt());
+    part.setLastAccessTime(in.readInt());
+    StorageDescriptorWritable sdw = new StorageDescriptorWritable();
+    sdw.readFields(in);
+    part.setSd(sdw.sd);
+    part.setParameters(HBaseUtils.readStrStrMap(in));
+    part.setPrivileges(HBaseUtils.readPrivileges(in));
+  }
+}
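
These Writable wrappers exist so the Thrift metadata objects can be rendered as the byte
arrays an HBase cell holds.  A generic conversion sketch under that assumption (the patch's
actual storage path is not shown in this excerpt and may differ):

import org.apache.hadoop.io.Writable;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class WritableBytesSketch {
  static byte[] toBytes(Writable w) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    w.write(out);   // e.g. a PartitionWritable wrapping a fully populated Partition
    out.flush();
    return baos.toByteArray();
  }
}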

Added: hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/RoleWritable.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/RoleWritable.java?rev=1657394&view=auto
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/RoleWritable.java (added)
+++ hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/RoleWritable.java Wed Feb  4 20:00:49 2015
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.io.Writable;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Wrapper for {@link org.apache.hadoop.hive.metastore.api.Role} that makes it writable
+ */
+class RoleWritable implements Writable {
+  final Role role;
+
+  RoleWritable() {
+    this.role = new Role();
+  }
+
+  RoleWritable(Role role) {
+    this.role = role;
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    HBaseUtils.writeStr(out, role.getRoleName());
+    out.writeInt(role.getCreateTime());
+    HBaseUtils.writeStr(out, role.getOwnerName());
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    role.setRoleName(HBaseUtils.readStr(in));
+    role.setCreateTime(in.readInt());
+    role.setOwnerName(HBaseUtils.readStr(in));
+  }
+}
\ No newline at end of file
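
A round-trip sketch for RoleWritable, illustration only and assuming same-package access (the
wrapper and its role field are package-private).  Role's three-argument Thrift constructor
sets exactly the fields the wrapper serializes.

import org.apache.hadoop.hive.metastore.api.Role;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class RoleWritableSketch {
  static Role roundTrip(Role original) throws IOException {
    // Serialize through the Writable wrapper...
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    new RoleWritable(original).write(new DataOutputStream(baos));

    // ...and read it back into a fresh Role.
    RoleWritable copy = new RoleWritable();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
    return copy.role;
  }

  public static void main(String[] args) throws IOException {
    Role r = roundTrip(new Role("analyst", 1423080000, "hive"));
    assert "analyst".equals(r.getRoleName());
  }
}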

Added: hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedStorageDescriptor.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedStorageDescriptor.java?rev=1657394&view=auto
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedStorageDescriptor.java (added)
+++ hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedStorageDescriptor.java Wed Feb  4 20:00:49 2015
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A {@link org.apache.hadoop.hive.metastore.api.StorageDescriptor} with most of its content
+ * shared.  Location and parameters are left alone; everything else is redirected to a shared
+ * reference in the cache.
+ */
+public class SharedStorageDescriptor extends StorageDescriptor {
+  private static final Log LOG = LogFactory.getLog(SharedStorageDescriptor.class.getName());
+  StorageDescriptor shared;
+
+  SharedStorageDescriptor() {
+  }
+
+  public SharedStorageDescriptor(SharedStorageDescriptor that) {
+    this.setLocation(that.getLocation());
+    this.setParameters(that.getParameters());
+    this.shared = that.shared;
+  }
+
+  void readShared(byte[] hash) throws IOException {
+    shared = HBaseReadWrite.getInstance().getStorageDescriptor(hash);
+  }
+
+  @Override
+  public List<FieldSchema> getCols() {
+    return shared.getCols();
+  }
+
+  @Override
+  public int getColsSize() {
+    return shared.getColsSize();
+  }
+
+  @Override
+  public Iterator<FieldSchema> getColsIterator() {
+    return shared.getColsIterator();
+  }
+
+  @Override
+  public String getInputFormat() {
+    return shared.getInputFormat();
+  }
+
+  @Override
+  public String getOutputFormat() {
+    return shared.getOutputFormat();
+  }
+
+  @Override
+  public boolean isCompressed() {
+    return shared.isCompressed();
+  }
+
+  @Override
+  public int getNumBuckets() {
+    return shared.getNumBuckets();
+  }
+
+  @Override
+  public SerDeInfo getSerdeInfo() {
+    return shared.getSerdeInfo();
+  }
+
+  @Override
+  public List<String> getBucketCols() {
+    return shared.getBucketCols();
+  }
+
+  @Override
+  public int getBucketColsSize() {
+    return shared.getBucketColsSize();
+  }
+
+  @Override
+  public Iterator<String> getBucketColsIterator() {
+    return shared.getBucketColsIterator();
+  }
+
+  @Override
+  public List<Order> getSortCols() {
+    return shared.getSortCols();
+  }
+
+  @Override
+  public int getSortColsSize() {
+    return shared.getSortColsSize();
+  }
+
+  @Override
+  public Iterator<Order> getSortColsIterator() {
+    return shared.getSortColsIterator();
+  }
+
+  @Override
+  public SkewedInfo getSkewedInfo() {
+    return shared.getSkewedInfo();
+  }
+
+  @Override
+  public boolean isStoredAsSubDirectories() {
+    return shared.isStoredAsSubDirectories();
+  }
+}
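
A sketch of the sharing this class provides: two partitions keep their own location while the
column list and other metadata resolve to a single shared StorageDescriptor.  Illustration
only; it assigns the package-private shared field directly, whereas the real code populates it
via readShared() from HBaseReadWrite, and it assumes same-package access.

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

import java.util.Arrays;

class SharedStorageDescriptorSketch {
  static void demo() {
    // One copy of the column metadata...
    StorageDescriptor common = new StorageDescriptor();
    common.setCols(Arrays.asList(new FieldSchema("id", "bigint", "")));

    // ...shared by two partitions that differ only in location.
    SharedStorageDescriptor part1Sd = new SharedStorageDescriptor();
    part1Sd.shared = common;              // real code: part1Sd.readShared(hash)
    part1Sd.setLocation("/warehouse/t/p=1");

    SharedStorageDescriptor part2Sd = new SharedStorageDescriptor();
    part2Sd.shared = common;
    part2Sd.setLocation("/warehouse/t/p=2");

    // Column metadata comes from the single shared object; locations stay per-partition.
    assert part1Sd.getCols() == part2Sd.getCols();
    assert !part1Sd.getLocation().equals(part2Sd.getLocation());
  }
}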


