hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ga...@apache.org
Subject [01/12] hive git commit: HIVE-17812 Move remaining classes that HiveMetaStore depends on. This closes #261. (Alan Gates, reviewed by Vihang Karajgaonkar)
Date Thu, 02 Nov 2017 16:23:03 GMT
Repository: hive
Updated Branches:
  refs/heads/master 10aa33072 -> c5a9673a0


http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
index 5cae281..50e4244 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
@@ -21,20 +21,32 @@ import com.google.common.base.Predicates;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.collections.ListUtils;
 import org.apache.commons.lang.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Decimal;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator;
 import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,9 +60,11 @@ import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.SortedMap;
 import java.util.SortedSet;
 import java.util.TreeMap;
@@ -60,6 +74,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 public class MetaStoreUtils {
@@ -73,9 +88,20 @@ public class MetaStoreUtils {
       return val;
     }
   };
+  // Indicates a type was derived from the deserializer rather than Hive's metadata.
+  public static final String TYPE_FROM_DESERIALIZER = "<derived from deserializer>";
+
   private static final Charset ENCODING = StandardCharsets.UTF_8;
   private static final Logger LOG = LoggerFactory.getLogger(MetaStoreUtils.class);
 
+  // Right now we only support one special character '/'.
+  // More special characters can be added accordingly in the future.
+  // NOTE:
+  // If the following array is updated, please also be sure to update the
+  // configuration parameter documentation
+  // HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES in HiveConf as well.
+  private static final char[] specialCharactersInTableNames = new char[] { '/' };
+
   /**
    * Catches exceptions that can't be handled and bundles them to MetaException
    *
@@ -139,8 +165,9 @@ public class MetaStoreUtils {
       return org.apache.commons.lang.StringUtils.defaultString(string);
     }
   };
+
   /**
-   * We have aneed to sanity-check the map before conversion from persisted objects to
+   * We have a need to sanity-check the map before conversion from persisted objects to
    * metadata thrift objects because null values in maps will cause a NPE if we send
    * across thrift. Pruning is appropriate for most cases except for databases such as
    * Oracle where Empty strings are stored as nulls, in which case we need to handle that.
@@ -359,4 +386,365 @@ public class MetaStoreUtils {
     }
     return colNames;
   }
+
+  /**
+   * validateName
+   *
+   * Checks the name conforms to our standars which are: "[a-zA-z_0-9]+". checks
+   * this is just characters and numbers and _
+   *
+   * @param name
+   *          the name to validate
+   * @param conf
+   *          hive configuration
+   * @return true or false depending on conformance
+   *              if it doesn't match the pattern.
+   */
+  public static boolean validateName(String name, Configuration conf) {
+    Pattern tpat = null;
+    String allowedCharacters = "\\w_";
+    if (conf != null
+        && MetastoreConf.getBoolVar(conf,
+        MetastoreConf.ConfVars.SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES)) {
+      for (Character c : specialCharactersInTableNames) {
+        allowedCharacters += c;
+      }
+    }
+    tpat = Pattern.compile("[" + allowedCharacters + "]+");
+    Matcher m = tpat.matcher(name);
+    return m.matches();
+  }
+
+  /*
+   * At the Metadata level there are no restrictions on Column Names.
+   */
+  public static boolean validateColumnName(String name) {
+    return true;
+  }
+
+  static public String validateTblColumns(List<FieldSchema> cols) {
+    for (FieldSchema fieldSchema : cols) {
+      // skip this, as validateColumnName always returns true
+      /*
+      if (!validateColumnName(fieldSchema.getName())) {
+        return "name: " + fieldSchema.getName();
+      }
+      */
+      String typeError = validateColumnType(fieldSchema.getType());
+      if (typeError != null) {
+        return typeError;
+      }
+    }
+    return null;
+  }
+
  /**
   * Validates a column type string against the known type names.
   *
   * @param type the column type string, e.g. "int" or "map&lt;string,string&gt;"
   * @return null when the type is acceptable, otherwise "type: " + type
   */
  private static String validateColumnType(String type) {
    // Types derived from the deserializer are accepted as-is.
    if (type.equals(TYPE_FROM_DESERIALIZER)) return null;
    int last = 0;
    boolean lastAlphaDigit = isValidTypeChar(type.charAt(last));
    // Scan to the end of the first token (a maximal run of letters, digits
    // and '_'), then check that token against ColumnType.AllTypes. Note the
    // break: only the LEADING token is validated, so for a parameterized type
    // such as "map<string,string>" just "map" is checked and the delimiter
    // characters ('<', ',', '>') are never themselves tested.
    for (int i = 1; i <= type.length(); i++) {
      if (i == type.length()
          || isValidTypeChar(type.charAt(i)) != lastAlphaDigit) {
        String token = type.substring(last, i);
        last = i;
        if (!ColumnType.AllTypes.contains(token)) {
          return "type: " + type;
        }
        break;
      }
    }
    return null;
  }

  // A character that may appear in a type name: letter, digit or underscore.
  private static boolean isValidTypeChar(char c) {
    return Character.isLetterOrDigit(c) || c == '_';
  }
+
+  /**
+   * Determines whether a table is an external table.
+   *
+   * @param table table of interest
+   *
+   * @return true if external
+   */
+  public static boolean isExternalTable(Table table) {
+    if (table == null) {
+      return false;
+    }
+    Map<String, String> params = table.getParameters();
+    if (params == null) {
+      return false;
+    }
+
+    return "TRUE".equalsIgnoreCase(params.get("EXTERNAL"));
+  }
+
  // check if stats need to be (re)calculated
  public static boolean requireCalStats(Configuration hiveConf, Partition oldPart,
    Partition newPart, Table tbl, EnvironmentContext environmentContext) {

    // An explicit DO_NOT_UPDATE_STATS request in the context always wins.
    if (environmentContext != null
        && environmentContext.isSetProperties()
        && StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(
            StatsSetupConst.DO_NOT_UPDATE_STATS))) {
      return false;
    }

    // Views carry no physical data, so there is nothing to compute.
    if (MetaStoreUtils.isView(tbl)) {
      return false;
    }

    // Both sides absent: presumably a table-level (non-partition) change;
    // stats must be recomputed. TODO confirm against callers.
    if  (oldPart == null && newPart == null) {
      return true;
    }

    // requires to calculate stats if new partition doesn't have it
    if ((newPart == null) || (newPart.getParameters() == null)
        || !containsAllFastStats(newPart.getParameters())) {
      return true;
    }

    if (environmentContext != null && environmentContext.isSetProperties()) {
      String statsType = environmentContext.getProperties().get(StatsSetupConst.STATS_GENERATED);
      // no matter STATS_GENERATED is USER or TASK, all need to re-calculate the stats:
      // USER: alter table .. update statistics
      // TASK: from some sql operation which could collect and compute stats
      if (StatsSetupConst.TASK.equals(statsType) || StatsSetupConst.USER.equals(statsType)) {
        return true;
      }
    }

    // requires to calculate stats if new and old have different fast stats
    return !isFastStatsSame(oldPart, newPart);
  }
+
+  public static boolean isView(Table table) {
+    if (table == null) {
+      return false;
+    }
+    return TableType.VIRTUAL_VIEW.toString().equals(table.getTableType());
+  }
+
+  /**
+   * @param partParams
+   * @return True if the passed Parameters Map contains values for all "Fast Stats".
+   */
+  private static boolean containsAllFastStats(Map<String, String> partParams) {
+    for (String stat : StatsSetupConst.fastStats) {
+      if (!partParams.containsKey(stat)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  public static boolean isFastStatsSame(Partition oldPart, Partition newPart) {
+    // requires to calculate stats if new and old have different fast stats
+    if ((oldPart != null) && (oldPart.getParameters() != null)) {
+      for (String stat : StatsSetupConst.fastStats) {
+        if (oldPart.getParameters().containsKey(stat)) {
+          Long oldStat = Long.parseLong(oldPart.getParameters().get(stat));
+          Long newStat = Long.parseLong(newPart.getParameters().get(stat));
+          if (!oldStat.equals(newStat)) {
+            return false;
+          }
+        } else {
+          return false;
+        }
+      }
+      return true;
+    }
+    return false;
+  }
+
  /** Convenience overload of {@link #updateTableStatsFast(Database, Table, Warehouse,
   *  boolean, boolean, EnvironmentContext)} that never forces a recompute. */
  public static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
                                             boolean madeDir, EnvironmentContext environmentContext)
      throws MetaException {
    return updateTableStatsFast(db, tbl, wh, madeDir, false, environmentContext);
  }

  /**
   * Updates numFiles/totalSize stats for an unpartitioned table by listing its
   * files through the warehouse; partitioned tables are left untouched.
   *
   * @return true if the stats were updated, false otherwise
   */
  public static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
                                             boolean madeDir, boolean forceRecompute,
                                             EnvironmentContext environmentContext) throws MetaException {
    if (tbl.getPartitionKeysSize() == 0) {
      // Update stats only when unpartitioned
      FileStatus[] fileStatuses = wh.getFileStatusesForUnpartitionedTable(db, tbl);
      return updateTableStatsFast(tbl, fileStatuses, madeDir, forceRecompute, environmentContext);
    } else {
      return false;
    }
  }
+
+  /**
+   * Updates the numFiles and totalSize parameters for the passed Table by querying
+   * the warehouse if the passed Table does not already have values for these parameters.
+   * @param tbl
+   * @param fileStatus
+   * @param newDir if true, the directory was just created and can be assumed to be empty
+   * @param forceRecompute Recompute stats even if the passed Table already has
+   * these parameters set
+   * @return true if the stats were updated, false otherwise
+   */
+  public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean
newDir,
+                                             boolean forceRecompute, EnvironmentContext environmentContext)
throws MetaException {
+
+    Map<String,String> params = tbl.getParameters();
+
+    if ((params!=null) && params.containsKey(StatsSetupConst.DO_NOT_UPDATE_STATS)){
+      boolean doNotUpdateStats = Boolean.valueOf(params.get(StatsSetupConst.DO_NOT_UPDATE_STATS));
+      params.remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
+      tbl.setParameters(params); // to make sure we remove this marker property
+      if (doNotUpdateStats){
+        return false;
+      }
+    }
+
+    boolean updated = false;
+    if (forceRecompute ||
+        params == null ||
+        !containsAllFastStats(params)) {
+      if (params == null) {
+        params = new HashMap<String,String>();
+      }
+      if (!newDir) {
+        // The table location already exists and may contain data.
+        // Let's try to populate those stats that don't require full scan.
+        LOG.info("Updating table stats fast for " + tbl.getTableName());
+        populateQuickStats(fileStatus, params);
+        LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE));
+        if (environmentContext != null
+            && environmentContext.isSetProperties()
+            && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
+            StatsSetupConst.STATS_GENERATED))) {
+          StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
+        } else {
+          StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
+        }
+      }
+      tbl.setParameters(params);
+      updated = true;
+    }
+    return updated;
+  }
+
+  public static void populateQuickStats(FileStatus[] fileStatus, Map<String, String>
params) {
+    int numFiles = 0;
+    long tableSize = 0L;
+    for (FileStatus status : fileStatus) {
+      // don't take directories into account for quick stats
+      if (!status.isDir()) {
+        tableSize += status.getLen();
+        numFiles += 1;
+      }
+    }
+    params.put(StatsSetupConst.NUM_FILES, Integer.toString(numFiles));
+    params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(tableSize));
+  }
+
  /** Returns true when the two column lists are equal: same elements, same
   *  order; two nulls also compare equal (ListUtils semantics). */
  public static boolean areSameColumns(List<FieldSchema> oldCols, List<FieldSchema> newCols) {
    return ListUtils.isEqualList(oldCols, newCols);
  }
+
+  public static void updateBasicState(EnvironmentContext environmentContext, Map<String,String>
+      params) {
+    if (params == null) {
+      return;
+    }
+    if (environmentContext != null
+        && environmentContext.isSetProperties()
+        && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
+        StatsSetupConst.STATS_GENERATED))) {
+      StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
+    } else {
+      StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
+    }
+  }
+
  /** Convenience overload: directory not newly made, no forced recompute. */
  public static boolean updatePartitionStatsFast(Partition part, Warehouse wh,
      EnvironmentContext environmentContext)
      throws MetaException {
    return updatePartitionStatsFast(part, wh, false, false, environmentContext);
  }

  /** Convenience overload: no forced recompute. */
  public static boolean updatePartitionStatsFast(Partition part, Warehouse wh, boolean madeDir,
      EnvironmentContext environmentContext)
      throws MetaException {
    return updatePartitionStatsFast(part, wh, madeDir, false, environmentContext);
  }

  /**
   * Updates the numFiles and totalSize parameters for the passed Partition by querying
   *  the warehouse if the passed Partition does not already have values for these parameters.
   * @param part partition to update
   * @param wh warehouse used to list the partition's files
   * @param madeDir if true, the directory was just created and can be assumed to be empty
   * @param forceRecompute Recompute stats even if the passed Partition already has
   * these parameters set
   * @return true if the stats were updated, false otherwise
   */
  public static boolean updatePartitionStatsFast(Partition part, Warehouse wh,
                                                 boolean madeDir, boolean forceRecompute,
                                                 EnvironmentContext environmentContext) throws MetaException {
    // Wrap the single partition in an iterator so the shared implementation
    // below also serves the PartitionSpecProxy-based callers.
    return updatePartitionStatsFast(new PartitionSpecProxy.SimplePartitionWrapperIterator(part),
        wh, madeDir, forceRecompute, environmentContext);
  }
  /**
   * Updates the numFiles and totalSize parameters for the passed Partition by querying
   *  the warehouse if the passed Partition does not already have values for these parameters.
   * @param part iterator positioned at the partition to update
   * @param wh warehouse used to list the partition's files
   * @param madeDir if true, the directory was just created and can be assumed to be empty
   * @param forceRecompute Recompute stats even if the passed Partition already has
   * these parameters set
   * @return true if the stats were updated, false otherwise
   */
  public static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionIterator part,
                                                 Warehouse wh, boolean madeDir, boolean forceRecompute,
                                                 EnvironmentContext environmentContext) throws MetaException {
    Map<String,String> params = part.getParameters();
    boolean updated = false;
    if (forceRecompute ||
        params == null ||
        !containsAllFastStats(params)) {
      if (params == null) {
        params = new HashMap<String,String>();
      }
      if (!madeDir) {
        // The partition location already existed and may contain data. Lets try to
        // populate those statistics that don't require a full scan of the data.
        // NOTE(review): these messages log at WARN while the table-stats path
        // logs the same events at INFO - looks unintentional; confirm level.
        LOG.warn("Updating partition stats fast for: " + part.getTableName());
        FileStatus[] fileStatus = wh.getFileStatusesForLocation(part.getLocation());
        populateQuickStats(fileStatus, params);
        LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE));
        updateBasicState(environmentContext, params);
      }
      part.setParameters(params);
      updated = true;
    }
    return updated;
  }
+
+  /*
+     * This method is to check if the new column list includes all the old columns with same
name and
+     * type. The column comment does not count.
+     */
+  public static boolean columnsIncludedByNameType(List<FieldSchema> oldCols,
+                                                  List<FieldSchema> newCols) {
+    if (oldCols.size() > newCols.size()) {
+      return false;
+    }
+
+    Map<String, String> columnNameTypePairMap = new HashMap<String, String>(newCols.size());
+    for (FieldSchema newCol : newCols) {
+      columnNameTypePairMap.put(newCol.getName().toLowerCase(), newCol.getType());
+    }
+    for (final FieldSchema oldCol : oldCols) {
+      if (!columnNameTypePairMap.containsKey(oldCol.getName())
+          || !columnNameTypePairMap.get(oldCol.getName()).equalsIgnoreCase(oldCol.getType()))
{
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  /** Duplicates AcidUtils; used in a couple places in metastore. */
+  public static boolean isInsertOnlyTableParam(Map<String, String> params) {
+    String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
+    return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
new file mode 100644
index 0000000..03ea7fc
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.api.*;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.util.Arrays;
+
/**
 * Verifies that HiveAlterHandler.alterTableUpdateTableColumnStats reads
 * existing column statistics only when an alter removes columns; adding or
 * reordering columns must leave the stats untouched.
 */
public class TestHiveAlterHandler {

  // Adding a column: the mocked RawStore throws if the handler tries to read
  // the old columns' statistics, so completing without exception proves the
  // stats were not touched.
  @Test
  public void testAlterTableAddColNotUpdateStats() throws MetaException,
      InvalidObjectException, NoSuchObjectException {
    FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
    FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
    FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
    FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");

    StorageDescriptor oldSd = new StorageDescriptor();
    oldSd.setCols(Arrays.asList(col1, col2, col3));
    Table oldTable = new Table();
    oldTable.setDbName("default");
    oldTable.setTableName("test_table");
    oldTable.setSd(oldSd);

    StorageDescriptor newSd = new StorageDescriptor(oldSd);
    newSd.setCols(Arrays.asList(col1, col2, col3, col4));
    Table newTable = new Table(oldTable);
    newTable.setSd(newSd);

    RawStore msdb = Mockito.mock(RawStore.class);
    Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
        oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3"));
    HiveAlterHandler handler = new HiveAlterHandler();
    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
  }

  // Dropping a column: the handler must read the old columns' statistics
  // (so the dropped column's stats can be discarded) - verify exactly one read.
  @Test
  public void testAlterTableDelColUpdateStats() throws MetaException,
      InvalidObjectException, NoSuchObjectException {
    FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
    FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
    FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
    FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");

    StorageDescriptor oldSd = new StorageDescriptor();
    oldSd.setCols(Arrays.asList(col1, col2, col3, col4));
    Table oldTable = new Table();
    oldTable.setDbName("default");
    oldTable.setTableName("test_table");
    oldTable.setSd(oldSd);

    StorageDescriptor newSd = new StorageDescriptor(oldSd);
    newSd.setCols(Arrays.asList(col1, col2, col3));
    Table newTable = new Table(oldTable);
    newTable.setSd(newSd);

    RawStore msdb = Mockito.mock(RawStore.class);
    HiveAlterHandler handler = new HiveAlterHandler();
    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
    Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics(
        oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")
    );
  }

  // Reordering columns (same set, different positions): like the add case,
  // the throwing mock proves the handler does not read statistics.
  @Test
  public void testAlterTableChangePosNotUpdateStats() throws MetaException,
      InvalidObjectException, NoSuchObjectException {
    FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
    FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
    FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
    FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");

    StorageDescriptor oldSd = new StorageDescriptor();
    oldSd.setCols(Arrays.asList(col1, col2, col3, col4));
    Table oldTable = new Table();
    oldTable.setDbName("default");
    oldTable.setTableName("test_table");
    oldTable.setSd(oldSd);

    StorageDescriptor newSd = new StorageDescriptor(oldSd);
    newSd.setCols(Arrays.asList(col1, col4, col2, col3));
    Table newTable = new Table(oldTable);
    newTable.setSd(newSd);

    RawStore msdb = Mockito.mock(RawStore.class);
    Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
        oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"));
    HiveAlterHandler handler = new HiveAlterHandler();
    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
  }

}

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java
b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java
new file mode 100644
index 0000000..8d44bf8
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.util.concurrent.TimeUnit;
+
+import javax.jdo.JDOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class TestRetriesInRetryingHMSHandler {
+
+  private static Configuration conf;
+  private static final int RETRY_ATTEMPTS = 3;
+
+  @BeforeClass
+  public static void setup() throws IOException {
+    conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setLongVar(conf, ConfVars.HMSHANDLERATTEMPTS, RETRY_ATTEMPTS);
+    MetastoreConf.setTimeVar(conf, ConfVars.HMSHANDLERINTERVAL, 10, TimeUnit.MILLISECONDS);
+    MetastoreConf.setBoolVar(conf, ConfVars.HMSHANDLERFORCERELOADCONF, false);
+  }
+
+  /*
+   * If the init method of HMSHandler throws exception for the first time
+   * while creating RetryingHMSHandler it should be retried
+   */
+  @Test
+  public void testRetryInit() throws MetaException {
+    IHMSHandler mockBaseHandler = Mockito.mock(IHMSHandler.class);
+    Mockito.when(mockBaseHandler.getConf()).thenReturn(conf);
+    Mockito
+    .doThrow(JDOException.class)
+    .doNothing()
+    .when(mockBaseHandler).init();
+    RetryingHMSHandler.getProxy(conf, mockBaseHandler, false);
+    Mockito.verify(mockBaseHandler, Mockito.times(2)).init();
+  }
+
+  /*
+   * init method in HMSHandler should not be retried if there are no exceptions
+   */
+  @Test
+  public void testNoRetryInit() throws MetaException {
+    IHMSHandler mockBaseHandler = Mockito.mock(IHMSHandler.class);
+    Mockito.when(mockBaseHandler.getConf()).thenReturn(conf);
+    Mockito.doNothing().when(mockBaseHandler).init();
+    RetryingHMSHandler.getProxy(conf, mockBaseHandler, false);
+    Mockito.verify(mockBaseHandler, Mockito.times(1)).init();
+  }
+
+  /*
+   * If the init method in HMSHandler throws exception all the times it should be retried
until
+   * HiveConf.ConfVars.HMSHANDLERATTEMPTS is reached before giving up
+   */
+  @Test(expected = MetaException.class)
+  public void testRetriesLimit() throws MetaException {
+    IHMSHandler mockBaseHandler = Mockito.mock(IHMSHandler.class);
+    Mockito.when(mockBaseHandler.getConf()).thenReturn(conf);
+    Mockito.doThrow(JDOException.class).when(mockBaseHandler).init();
+    RetryingHMSHandler.getProxy(conf, mockBaseHandler, false);
+    Mockito.verify(mockBaseHandler, Mockito.times(RETRY_ATTEMPTS)).init();
+  }
+
+  /*
+   * Test retries when InvocationException wrapped in MetaException wrapped in JDOException
+   * is thrown
+   */
+  @Test
+  public void testWrappedMetaExceptionRetry() throws MetaException {
+    IHMSHandler mockBaseHandler = Mockito.mock(IHMSHandler.class);
+    Mockito.when(mockBaseHandler.getConf()).thenReturn(conf);
+    //JDOException wrapped in MetaException wrapped in InvocationException
+    MetaException me = new MetaException("Dummy exception");
+    me.initCause(new JDOException());
+    InvocationTargetException ex = new InvocationTargetException(me);
+    Mockito
+    .doThrow(me)
+    .doNothing()
+    .when(mockBaseHandler).init();
+    RetryingHMSHandler.getProxy(conf, mockBaseHandler, false);
+    Mockito.verify(mockBaseHandler, Mockito.times(2)).init();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/messaging/json/TestJSONMessageDeserializer.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/messaging/json/TestJSONMessageDeserializer.java
b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/messaging/json/TestJSONMessageDeserializer.java
new file mode 100644
index 0000000..2f98855
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/messaging/json/TestJSONMessageDeserializer.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.messaging.json;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.json.JSONException;
+import org.junit.Test;
+import org.skyscreamer.jsonassert.JSONAssert;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.*;
+
+public class TestJSONMessageDeserializer {
+
+  public static class MyClass {
+    @JsonProperty
+    private int a;
+    @JsonProperty
+    private Map<String, String> map;
+    private long l;
+    private String shouldNotSerialize = "shouldNotSerialize";
+
+    //for jackson to instantiate
+    MyClass() {
+    }
+
+    MyClass(int a, Map<String, String> map, long l) {
+      this.a = a;
+      this.map = map;
+      this.l = l;
+    }
+
+    @JsonProperty
+    long getL() {
+      return l;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o)
+        return true;
+      if (o == null || getClass() != o.getClass())
+        return false;
+
+      MyClass myClass = (MyClass) o;
+
+      if (a != myClass.a)
+        return false;
+      if (l != myClass.l)
+        return false;
+      if (!map.equals(myClass.map))
+        return false;
+      return shouldNotSerialize.equals(myClass.shouldNotSerialize);
+    }
+
+    @Override
+    public int hashCode() {
+      int result = a;
+      result = 31 * result + map.hashCode();
+      result = 31 * result + (int) (l ^ (l >>> 32));
+      result = 31 * result + shouldNotSerialize.hashCode();
+      return result;
+    }
+  }
+
+  @Test
+  public void shouldNotSerializePropertiesNotAnnotated() throws IOException, JSONException
{
+    MyClass obj = new MyClass(Integer.MAX_VALUE, new HashMap<String, String>() {{
+      put("a", "a");
+      put("b", "b");
+    }}, Long.MAX_VALUE);
+    String json = JSONMessageDeserializer.mapper.writeValueAsString(obj);
+    JSONAssert.assertEquals(
+        "{\"a\":2147483647,\"map\":{\"b\":\"b\",\"a\":\"a\"},\"l\":9223372036854775807}",
json,
+        false);
+  }
+
+  @Test
+  public void shouldDeserializeJsonStringToObject() throws IOException {
+    String json = "{\"a\":47,\"map\":{\"a\":\"a\",\"b\":\"a value for b\"},\"l\":98}";
+    MyClass actual = JSONMessageDeserializer.mapper.readValue(json, MyClass.class);
+    MyClass expected = new MyClass(47, new HashMap<String, String>() {{
+      put("a", "a");
+      put("b", "a value for b");
+    }}, 98L);
+    assertEquals(expected, actual);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreUtils.java
b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreUtils.java
new file mode 100644
index 0000000..32ad63a
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreUtils.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestMetaStoreUtils {
+
+  @Test
+  public void testTrimMapNullsXform() throws Exception {
+    Map<String,String> m = new HashMap<>();
+    m.put("akey","aval");
+    m.put("blank","");
+    m.put("null",null);
+
+    Map<String,String> xformed = MetaStoreUtils.trimMapNulls(m,true);
+    assertEquals(3,xformed.size());
+    assert(xformed.containsKey("akey"));
+    assert(xformed.containsKey("blank"));
+    assert(xformed.containsKey("null"));
+    assertEquals("aval",xformed.get("akey"));
+    assertEquals("",xformed.get("blank"));
+    assertEquals("",xformed.get("null"));
+  }
+
+  @Test
+  public void testTrimMapNullsPrune() throws Exception {
+    Map<String,String> m = new HashMap<>();
+    m.put("akey","aval");
+    m.put("blank","");
+    m.put("null",null);
+
+    Map<String,String> pruned = MetaStoreUtils.trimMapNulls(m,false);
+    assertEquals(2,pruned.size());
+    assert(pruned.containsKey("akey"));
+    assert(pruned.containsKey("blank"));
+    assert(!pruned.containsKey("null"));
+    assertEquals("aval",pruned.get("akey"));
+    assertEquals("",pruned.get("blank"));
+    assert(!pruned.containsValue(null));
+  }
+
+  @Test
+  public void testcolumnsIncludedByNameType() {
+    FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
+    FieldSchema col1a = new FieldSchema("col1", "string", "col1 but with a different comment");
+    FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
+    FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
+    Assert.assertTrue(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.columnsIncludedByNameType(Arrays.asList(col1),
Arrays.asList(col1)));
+    Assert.assertTrue(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.columnsIncludedByNameType(Arrays.asList(col1),
Arrays.asList(col1a)));
+    Assert.assertTrue(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.columnsIncludedByNameType(Arrays.asList(col1,
col2), Arrays.asList(col1, col2)));
+    Assert.assertTrue(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.columnsIncludedByNameType(Arrays.asList(col1,
col2), Arrays.asList(col2, col1)));
+    Assert.assertTrue(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.columnsIncludedByNameType(Arrays.asList(col1,
col2), Arrays.asList(col1, col2, col3)));
+    Assert.assertTrue(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.columnsIncludedByNameType(Arrays.asList(col1,
col2), Arrays.asList(col3, col2, col1)));
+    Assert.assertFalse(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.columnsIncludedByNameType(Arrays.asList(col1,
col2), Arrays.asList(col1)));
+  }
+
+
+
+}


Mime
View raw message