hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hashut...@apache.org
Subject hive git commit: HIVE-12320 : hive.metastore.disallow.incompatible.col.type.changes should be true by default (Ashutosh Chauhan via Jason Dere)
Date Thu, 05 Nov 2015 23:56:58 GMT
Repository: hive
Updated Branches:
  refs/heads/master a8eb4aef4 -> 0add63786


HIVE-12320 : hive.metastore.disallow.incompatible.col.type.changes should be true by default
(Ashutosh Chauhan via Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0add6378
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0add6378
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0add6378

Branch: refs/heads/master
Commit: 0add63786d293e7323ef147a85b0c61523c1973a
Parents: a8eb4ae
Author: Ashutosh Chauhan <hashutosh@apache.org>
Authored: Thu Nov 5 15:55:39 2015 -0800
Committer: Ashutosh Chauhan <hashutosh@apache.org>
Committed: Thu Nov 5 15:55:39 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +-
 .../hive/hcatalog/cli/TestSemanticAnalysis.java |   1 +
 .../hive/hcatalog/api/TestHCatClient.java       |   2 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |  13 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   | 118 ++-----------------
 .../hive/ql/parse/TypeCheckProcFactory.java     |   5 +-
 .../hive/ql/exec/TestFunctionRegistry.java      |   2 +-
 .../disallow_incompatible_type_change_on1.q     |   6 +-
 ql/src/test/queries/clientpositive/alter1.q     |   6 +-
 .../queries/clientpositive/avro_partitioned.q   |   3 +-
 .../columnarserde_create_shortcut.q             |   2 +
 ql/src/test/queries/clientpositive/input3.q     |  10 +-
 ql/src/test/queries/clientpositive/lineage3.q   |   3 +-
 .../clientpositive/orc_int_type_promotion.q     |   2 +
 .../clientpositive/parquet_schema_evolution.q   |   6 +-
 .../partition_wise_fileformat11.q               |   4 +-
 .../partition_wise_fileformat12.q               |   4 +-
 .../partition_wise_fileformat13.q               |   5 +-
 .../partition_wise_fileformat15.q               |   4 +-
 .../partition_wise_fileformat16.q               |   4 +-
 .../test/queries/clientpositive/rename_column.q |   4 +-
 .../disallow_incompatible_type_change_on1.q.out |   3 +-
 .../hive/serde2/typeinfo/TypeInfoUtils.java     |  95 ++++++++++++++-
 23 files changed, 154 insertions(+), 150 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 3ab73ad..98f9206 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -637,7 +637,7 @@ public class HiveConf extends Configuration {
        "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
        "pruning is the correct behaviour"),
    METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
-        "hive.metastore.disallow.incompatible.col.type.changes", false,
+        "hive.metastore.disallow.incompatible.col.type.changes", true,
        "If true (default is false), ALTER TABLE operations which change the type of a\n" +
        "column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
        "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
index 606cb3a..cf15ff2 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
@@ -68,6 +68,7 @@ public class TestSemanticAnalysis extends HCatBaseTest {
           "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe");
       hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
           HCatSemanticAnalyzer.class.getName());
+      hcatConf.setBoolVar(HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES, false);
       hcatDriver = new Driver(hcatConf);
       SessionState.start(new CliSessionState(hcatConf));
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
index 891322a..aa9c7d3 100644
--- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
+++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
@@ -565,7 +565,7 @@ public class TestHCatClient {
       client.createTable(HCatCreateTableDesc.create(dbName, tableName, oldSchema).build());
 
       List<HCatFieldSchema> newSchema = Arrays.asList(new HCatFieldSchema("completely", Type.DOUBLE, ""),
-          new HCatFieldSchema("new", Type.FLOAT, ""),
+          new HCatFieldSchema("new", Type.STRING, ""),
           new HCatFieldSchema("fields", Type.STRING, ""));
 
       client.updateTableSchema(dbName, tableName, newSchema);

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index bbaa1ce..02cbd76 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -51,11 +51,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -79,6 +77,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
 import org.apache.hive.common.util.ReflectionUtil;
@@ -632,9 +631,6 @@ public class MetaStoreUtils {
    * Two types are compatible if we have internal functions to cast one to another.
    */
   static private boolean areColTypesCompatible(String oldType, String newType) {
-    if (oldType.equals(newType)) {
-      return true;
-    }
 
     /*
      * RCFile default serde (ColumnarSerde) serializes the values in such a way that the
@@ -645,12 +641,9 @@ public class MetaStoreUtils {
      * Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are
      * not blocked.
      */
-    if(serdeConstants.PrimitiveTypes.contains(oldType.toLowerCase()) &&
-        serdeConstants.PrimitiveTypes.contains(newType.toLowerCase())) {
-      return true;
-    }
 
-    return false;
+    return TypeInfoUtils.implicitConvertible(TypeInfoUtils.getTypeInfoFromTypeString(oldType),
+      TypeInfoUtils.getTypeInfoFromTypeString(newType));
   }
 
  public static final int MAX_MS_TYPENAME_LENGTH = 2000; // 4000/2, for an unlikely unicode case

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 2196ca9..5353062 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -22,7 +22,6 @@ import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.EnumMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
@@ -558,30 +557,6 @@ public final class FunctionRegistry {
     return synonyms;
   }
 
-  // The ordering of types here is used to determine which numeric types
-  // are common/convertible to one another. Probably better to rely on the
-  // ordering explicitly defined here than to assume that the enum values
-  // that were arbitrarily assigned in PrimitiveCategory work for our purposes.
-  static EnumMap<PrimitiveCategory, Integer> numericTypes =
-      new EnumMap<PrimitiveCategory, Integer>(PrimitiveCategory.class);
-  static List<PrimitiveCategory> numericTypeList = new ArrayList<PrimitiveCategory>();
-
-  static void registerNumericType(PrimitiveCategory primitiveCategory, int level) {
-    numericTypeList.add(primitiveCategory);
-    numericTypes.put(primitiveCategory, level);
-  }
-
-  static {
-    registerNumericType(PrimitiveCategory.BYTE, 1);
-    registerNumericType(PrimitiveCategory.SHORT, 2);
-    registerNumericType(PrimitiveCategory.INT, 3);
-    registerNumericType(PrimitiveCategory.LONG, 4);
-    registerNumericType(PrimitiveCategory.FLOAT, 5);
-    registerNumericType(PrimitiveCategory.DOUBLE, 6);
-    registerNumericType(PrimitiveCategory.DECIMAL, 7);
-    registerNumericType(PrimitiveCategory.STRING, 8);
-  }
-
   /**
    * Check if the given type is numeric. String is considered numeric when used in
    * numeric operators.
@@ -702,15 +677,15 @@ public final class FunctionRegistry {
           (PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b,PrimitiveCategory.STRING);
     }
 
-    if (FunctionRegistry.implicitConvertible(a, b)) {
+    if (TypeInfoUtils.implicitConvertible(a, b)) {
      return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, pcB);
     }
-    if (FunctionRegistry.implicitConvertible(b, a)) {
+    if (TypeInfoUtils.implicitConvertible(b, a)) {
      return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, pcA);
     }
-    for (PrimitiveCategory t : numericTypeList) {
-      if (FunctionRegistry.implicitConvertible(pcA, t)
-          && FunctionRegistry.implicitConvertible(pcB, t)) {
+    for (PrimitiveCategory t : TypeInfoUtils.numericTypeList) {
+      if (TypeInfoUtils.implicitConvertible(pcA, t)
+          && TypeInfoUtils.implicitConvertible(pcB, t)) {
        return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, t);
       }
     }
@@ -759,9 +734,9 @@ public final class FunctionRegistry {
       return TypeInfoFactory.doubleTypeInfo;
     }
 
-    for (PrimitiveCategory t : numericTypeList) {
-      if (FunctionRegistry.implicitConvertible(pcA, t)
-          && FunctionRegistry.implicitConvertible(pcB, t)) {
+    for (PrimitiveCategory t : TypeInfoUtils.numericTypeList) {
+      if (TypeInfoUtils.implicitConvertible(pcA, t)
+          && TypeInfoUtils.implicitConvertible(pcB, t)) {
        return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, t);
       }
     }
@@ -790,8 +765,8 @@ public final class FunctionRegistry {
    if (pgB == PrimitiveGrouping.DATE_GROUP && pgA == PrimitiveGrouping.STRING_GROUP) {
       return PrimitiveCategory.STRING;
     }
-    Integer ai = numericTypes.get(pcA);
-    Integer bi = numericTypes.get(pcB);
+    Integer ai = TypeInfoUtils.numericTypes.get(pcA);
+    Integer bi = TypeInfoUtils.numericTypes.get(pcB);
     if (ai == null || bi == null) {
       // If either is not a numeric type, return null.
       return null;
@@ -870,73 +845,6 @@ public final class FunctionRegistry {
     return TypeInfoFactory.getStructTypeInfo(names, typeInfos);
   }
 
-  public static boolean implicitConvertible(PrimitiveCategory from, PrimitiveCategory to) {
-    if (from == to) {
-      return true;
-    }
-
-    PrimitiveGrouping fromPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(from);
-    PrimitiveGrouping toPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(to);
-
-    // Allow implicit String to Double conversion
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DOUBLE) {
-      return true;
-    }
-    // Allow implicit String to Decimal conversion
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DECIMAL) {
-      return true;
-    }
-    // Void can be converted to any type
-    if (from == PrimitiveCategory.VOID) {
-      return true;
-    }
-
-    // Allow implicit String to Date conversion
-    if (fromPg == PrimitiveGrouping.DATE_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-    // Allow implicit Numeric to String conversion
-    if (fromPg == PrimitiveGrouping.NUMERIC_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-    // Allow implicit String to varchar conversion, and vice versa
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-
-    // Allow implicit conversion from Byte -> Integer -> Long -> Float -> Double
-    // Decimal -> String
-    Integer f = numericTypes.get(from);
-    Integer t = numericTypes.get(to);
-    if (f == null || t == null) {
-      return false;
-    }
-    if (f.intValue() > t.intValue()) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * Returns whether it is possible to implicitly convert an object of Class
-   * from to Class to.
-   */
-  public static boolean implicitConvertible(TypeInfo from, TypeInfo to) {
-    if (from.equals(to)) {
-      return true;
-    }
-
-    // Reimplemented to use PrimitiveCategory rather than TypeInfo, because
-    // 2 TypeInfos from the same qualified type (varchar, decimal) should still be
-    // seen as equivalent.
-    if (from.getCategory() == Category.PRIMITIVE && to.getCategory() == Category.PRIMITIVE) {
-      return implicitConvertible(
-          ((PrimitiveTypeInfo) from).getPrimitiveCategory(),
-          ((PrimitiveTypeInfo) to).getPrimitiveCategory());
-    }
-    return false;
-  }
-
   /**
    * Get the GenericUDAF evaluator for the name and argumentClasses.
    *
@@ -1105,7 +1013,7 @@ public final class FunctionRegistry {
       // but there is a conversion cost.
       return 1;
     }
-    if (!exact && implicitConvertible(argumentPassed, argumentAccepted)) {
+    if (!exact && TypeInfoUtils.implicitConvertible(argumentPassed, argumentAccepted)) {
       return 1;
     }
 
@@ -1273,9 +1181,9 @@ public final class FunctionRegistry {
             acceptedIsPrimitive = true;
             acceptedPrimCat = ((PrimitiveTypeInfo) accepted).getPrimitiveCategory();
           }
-          if (acceptedIsPrimitive && numericTypes.containsKey(acceptedPrimCat)) {
+          if (acceptedIsPrimitive && TypeInfoUtils.numericTypes.containsKey(acceptedPrimCat)) {
             // We're looking for the udf with the smallest maximum numeric type.
-            int typeValue = numericTypes.get(acceptedPrimCat);
+            int typeValue = TypeInfoUtils.numericTypes.get(acceptedPrimCat);
             maxNumericType = typeValue > maxNumericType ? typeValue : maxNumericType;
           } else if (!accepted.equals(reference)) {
             // There are non-numeric arguments that don't match from one UDF to

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index 3a6535b..7f5d72a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hive.common.util.DateUtils;
@@ -903,7 +904,7 @@ public class TypeCheckProcFactory {
 
         if (myt.getCategory() == Category.LIST) {
           // Only allow integer index for now
-          if (!FunctionRegistry.implicitConvertible(children.get(1).getTypeInfo(),
+          if (!TypeInfoUtils.implicitConvertible(children.get(1).getTypeInfo(),
               TypeInfoFactory.intTypeInfo)) {
             throw new SemanticException(SemanticAnalyzer.generateErrorMessage(
                   expr, ErrorMsg.INVALID_ARRAYINDEX_TYPE.getMsg()));
@@ -913,7 +914,7 @@ public class TypeCheckProcFactory {
           TypeInfo t = ((ListTypeInfo) myt).getListElementTypeInfo();
          desc = new ExprNodeGenericFuncDesc(t, FunctionRegistry.getGenericUDFForIndex(), children);
         } else if (myt.getCategory() == Category.MAP) {
-          if (!FunctionRegistry.implicitConvertible(children.get(1).getTypeInfo(),
+          if (!TypeInfoUtils.implicitConvertible(children.get(1).getTypeInfo(),
               ((MapTypeInfo) myt).getMapKeyTypeInfo())) {
             throw new SemanticException(ErrorMsg.INVALID_MAPINDEX_TYPE
                 .getMsg(expr));

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
index 068bdee..6a83c32 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
@@ -80,7 +80,7 @@ public class TestFunctionRegistry extends TestCase {
   }
 
   private void implicit(TypeInfo a, TypeInfo b, boolean convertible) {
-    assertEquals(convertible, FunctionRegistry.implicitConvertible(a, b));
+    assertEquals(convertible, TypeInfoUtils.implicitConvertible(a, b));
   }
 
   public void testImplicitConversion() {

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q b/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
index d0d748c..cec9a0d 100644
--- a/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
+++ b/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
@@ -1,4 +1,4 @@
-SET hive.metastore.disallow.incompatible.col.type.changes=true;
+SET hive.metastore.disallow.incompatible.col.type.changes=false;
 SELECT * FROM src LIMIT 1;
 CREATE TABLE test_table123 (a INT, b MAP<STRING, STRING>) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE;
 INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, MAP("a1", "b1") FROM src LIMIT 1;
@@ -11,7 +11,7 @@ ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>);
 ALTER TABLE test_table123 REPLACE COLUMNS (a BOOLEAN, b MAP<STRING, STRING>);
 ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>);
 ALTER TABLE test_table123 CHANGE COLUMN a a_new BOOLEAN;
+
+SET hive.metastore.disallow.incompatible.col.type.changes=true;
 -- All the above ALTERs will succeed since they are between compatible types.
 -- The following ALTER will fail as MAP<STRING, STRING> and STRING are not
 -- compatible.
+
 ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING);
+reset hive.metastore.disallow.incompatible.col.type.changes;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/alter1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter1.q b/ql/src/test/queries/clientpositive/alter1.q
index 2fac195..767ab5c 100644
--- a/ql/src/test/queries/clientpositive/alter1.q
+++ b/ql/src/test/queries/clientpositive/alter1.q
@@ -21,8 +21,9 @@ describe extended alter1;
 
 alter table alter1 set serde 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe';
 describe extended alter1;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table alter1 replace columns (a int, b int, c string);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 describe alter1;
 
 -- Cleanup
@@ -61,8 +62,9 @@ DESCRIBE EXTENDED alter1_db.alter1;
 
 ALTER TABLE alter1_db.alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe';
 DESCRIBE EXTENDED alter1_db.alter1;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 ALTER TABLE alter1_db.alter1 REPLACE COLUMNS (a int, b int, c string);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DESCRIBE alter1_db.alter1;
 
 DROP TABLE alter1_db.alter1;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/avro_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/avro_partitioned.q b/ql/src/test/queries/clientpositive/avro_partitioned.q
index a06e7c4..9e6c79a 100644
--- a/ql/src/test/queries/clientpositive/avro_partitioned.q
+++ b/ql/src/test/queries/clientpositive/avro_partitioned.q
@@ -112,7 +112,7 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat';
 
 -- Insert data into a partition
 INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 -- Evolve the table schema by adding new array field "cast_and_crew"
 ALTER TABLE episodes_partitioned_serdeproperties
 SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
@@ -144,5 +144,6 @@ WITH SERDEPROPERTIES ('avro.schema.literal'='{
   ]
 }');
 
+reset hive.metastore.disallow.incompatible.col.type.changes;
 -- Try selecting from the evolved table
 SELECT * FROM episodes_partitioned_serdeproperties;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q b/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
index 8d8cb6b..851a821 100644
--- a/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
+++ b/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
@@ -22,5 +22,7 @@ SELECT * FROM columnShortcutTable;
 
 ALTER TABLE columnShortcutTable ADD COLUMNS (c string);
 SELECT * FROM columnShortcutTable;
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 ALTER TABLE columnShortcutTable REPLACE COLUMNS (key int);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 SELECT * FROM columnShortcutTable;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/input3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input3.q b/ql/src/test/queries/clientpositive/input3.q
index 2efa7a4..1925fff 100644
--- a/ql/src/test/queries/clientpositive/input3.q
+++ b/ql/src/test/queries/clientpositive/input3.q
@@ -1,7 +1,3 @@
-
-
-
-
 CREATE TABLE TEST3a(A INT, B DOUBLE) STORED AS TEXTFILE; 
 DESCRIBE TEST3a; 
 CREATE TABLE TEST3b(A ARRAY<INT>, B DOUBLE, C MAP<DOUBLE, INT>) STORED AS TEXTFILE;

@@ -16,11 +12,9 @@ ALTER TABLE TEST3b RENAME TO TEST3c;
 ALTER TABLE TEST3b RENAME TO TEST3c;
 DESCRIBE TEST3c; 
 SHOW TABLES;
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 EXPLAIN
 ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE);
 ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DESCRIBE EXTENDED TEST3c;
-
-
-
-

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/lineage3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lineage3.q b/ql/src/test/queries/clientpositive/lineage3.q
index 70d4e57..d1fb454 100644
--- a/ql/src/test/queries/clientpositive/lineage3.q
+++ b/ql/src/test/queries/clientpositive/lineage3.q
@@ -1,5 +1,5 @@
 set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 drop table if exists d1;
 create table d1(a int);
 
@@ -202,3 +202,4 @@ insert into dest_dp3 partition (y=2, m, d) select first, word, month m, day d wh
 insert into dest_dp2 partition (y=1, m) select f, w, m
 insert into dest_dp1 partition (year=0) select f, w;
 
+reset hive.metastore.disallow.incompatible.col.type.changes;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
index 4a805a0..c3e2cf9 100644
--- a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
+++ b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
@@ -1,3 +1,4 @@
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 create table if not exists alltypes (
  bo boolean,
  ti tinyint,
@@ -77,3 +78,4 @@ select * from src_part_orc limit 10;
 
 alter table src_part_orc change key key bigint;
 select * from src_part_orc limit 10;
+reset hive.metastore.disallow.incompatible.col.type.changes;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_schema_evolution.q b/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
index af0cf99..d2f2996 100644
--- a/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
+++ b/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
@@ -11,10 +11,10 @@ INSERT OVERWRITE TABLE NewStructField SELECT named_struct('a1', map('k1','v1'),
 
 DESCRIBE NewStructField;
 SELECT * FROM NewStructField;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 -- Adds new fields to the struct types
 ALTER TABLE NewStructField REPLACE COLUMNS (a struct<a1:map<string,string>, a2:struct<e1:int,e2:string>, a3:int>, b int);
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DESCRIBE NewStructField;
 SELECT * FROM NewStructField;
 
@@ -24,4 +24,4 @@ DESCRIBE NewStructFieldTable;
 SELECT * FROM NewStructFieldTable;
 
 DROP TABLE NewStructField;
-DROP TABLE NewStructFieldTable;
\ No newline at end of file
+DROP TABLE NewStructFieldTable;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
index 1a4291f..b2db2f1 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
@@ -7,9 +7,9 @@ insert overwrite table partition_test_partitioned partition(dt='1') select * fro
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
index bc51cb5..632d022 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
@@ -7,9 +7,9 @@ insert overwrite table partition_test_partitioned partition(dt='1') select * fro
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
index 2e4ae69..f124ec3 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
@@ -4,8 +4,9 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 create table T1(key string, value string) partitioned by (dt string) stored as rcfile;
 alter table T1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
 insert overwrite table T1 partition (dt='1') select * from src where key = 238 or key = 97;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table T1 change key key int;
+reset hive.metastore.disallow.incompatible.col.type.changes;
 insert overwrite table T1 partition (dt='2') select * from src where key = 238 or key = 97;
 
 alter table T1 change key key string;
@@ -14,4 +15,4 @@ create table T2(key string, value string) partitioned by (dt string) stored as r
 insert overwrite table T2 partition (dt='1') select * from src where key = 238 or key = 97;
 
 select /* + MAPJOIN(a) */ count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
-select count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
\ No newline at end of file
+select count(*) FROM T1 a JOIN T2 b ON a.key = b.key;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
index 6fce1e0..70a454f 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
@@ -8,9 +8,9 @@ select * from src where key = 238;
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
index 37bb1a7..92757f6 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
@@ -8,9 +8,9 @@ select * from src where key = 238;
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/rename_column.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/rename_column.q b/ql/src/test/queries/clientpositive/rename_column.q
index a3f3f30..a211cfa 100644
--- a/ql/src/test/queries/clientpositive/rename_column.q
+++ b/ql/src/test/queries/clientpositive/rename_column.q
@@ -3,7 +3,7 @@ DESCRIBE kv_rename_test;
 
 ALTER TABLE kv_rename_test CHANGE a a STRING;
 DESCRIBE kv_rename_test;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 ALTER TABLE kv_rename_test CHANGE a a1 INT;
 DESCRIBE kv_rename_test;
 
@@ -52,6 +52,6 @@ DESCRIBE kv_rename_test;
 
 ALTER TABLE kv_rename_test CHANGE COLUMN a2 a INT AFTER b;
 DESCRIBE kv_rename_test;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DROP TABLE kv_rename_test;
 SHOW TABLES;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
index 96600eb..69b2b41 100644
--- a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
+++ b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
@@ -103,9 +103,10 @@ POSTHOOK: Output: default@test_table123
 PREHOOK: query: -- All the above ALTERs will succeed since they are between compatible types.
 -- The following ALTER will fail as MAP<STRING, STRING> and STRING are not
 -- compatible.
+
 ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@test_table123
 PREHOOK: Output: default@test_table123
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
-b
+a,b

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
index 24361c7..1d79880 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
@@ -23,6 +23,7 @@ import java.lang.reflect.Method;
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
 import java.util.ArrayList;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveGrouping;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveTypeEntry;
 
 /**
@@ -53,6 +55,25 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
  */
 public final class TypeInfoUtils {
 
+  public static List<PrimitiveCategory> numericTypeList = new ArrayList<PrimitiveCategory>();
+  // The ordering of types here is used to determine which numeric types
+  // are common/convertible to one another. Probably better to rely on the
+  // ordering explicitly defined here than to assume that the enum values
+  // that were arbitrarily assigned in PrimitiveCategory work for our purposes.
+  public static EnumMap<PrimitiveCategory, Integer> numericTypes =
+      new EnumMap<PrimitiveCategory, Integer>(PrimitiveCategory.class);
+
+  static {
+    registerNumericType(PrimitiveCategory.BYTE, 1);
+    registerNumericType(PrimitiveCategory.SHORT, 2);
+    registerNumericType(PrimitiveCategory.INT, 3);
+    registerNumericType(PrimitiveCategory.LONG, 4);
+    registerNumericType(PrimitiveCategory.FLOAT, 5);
+    registerNumericType(PrimitiveCategory.DOUBLE, 6);
+    registerNumericType(PrimitiveCategory.DECIMAL, 7);
+    registerNumericType(PrimitiveCategory.STRING, 8);
+  }
+
   private TypeInfoUtils() {
     // prevent instantiation
   }
@@ -266,7 +287,7 @@ public final class TypeInfoUtils {
      *
      * tokenize("map<int,string>") should return
      * ["map","<","int",",","string",">"]
-     * 
+     *
      * Note that we add '$' in new Calcite return path. As '$' will not appear
      * in any type in Hive, it is safe to do so.
      */
@@ -810,4 +831,76 @@ public final class TypeInfoUtils {
         return 0;
     }
   }
+
+  public static void registerNumericType(PrimitiveCategory primitiveCategory, int level) {
+    numericTypeList.add(primitiveCategory);
+    numericTypes.put(primitiveCategory, level);
+  }
+
+  public static boolean implicitConvertible(PrimitiveCategory from, PrimitiveCategory to) {
+    if (from == to) {
+      return true;
+    }
+
+    PrimitiveGrouping fromPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(from);
+    PrimitiveGrouping toPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(to);
+
+    // Allow implicit String to Double conversion
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DOUBLE) {
+      return true;
+    }
+    // Allow implicit String to Decimal conversion
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DECIMAL) {
+      return true;
+    }
+    // Void can be converted to any type
+    if (from == PrimitiveCategory.VOID) {
+      return true;
+    }
+
+    // Allow implicit String to Date conversion
+    if (fromPg == PrimitiveGrouping.DATE_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+    // Allow implicit Numeric to String conversion
+    if (fromPg == PrimitiveGrouping.NUMERIC_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+    // Allow implicit String to varchar conversion, and vice versa
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+
+    // Allow implicit conversion from Byte -> Integer -> Long -> Float -> Double
+    // Decimal -> String
+    Integer f = numericTypes.get(from);
+    Integer t = numericTypes.get(to);
+    if (f == null || t == null) {
+      return false;
+    }
+    if (f.intValue() > t.intValue()) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Returns whether it is possible to implicitly convert an object of Class
+   * from to Class to.
+   */
+  public static boolean implicitConvertible(TypeInfo from, TypeInfo to) {
+    if (from.equals(to)) {
+      return true;
+    }
+
+    // Reimplemented to use PrimitiveCategory rather than TypeInfo, because
+    // 2 TypeInfos from the same qualified type (varchar, decimal) should still be
+    // seen as equivalent.
+    if (from.getCategory() == Category.PRIMITIVE && to.getCategory() == Category.PRIMITIVE) {
+      return implicitConvertible(
+          ((PrimitiveTypeInfo) from).getPrimitiveCategory(),
+          ((PrimitiveTypeInfo) to).getPrimitiveCategory());
+    }
+    return false;
+  }
 }


Mime
View raw message