hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From w...@apache.org
Subject [05/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)
Date Tue, 16 May 2017 22:53:10 GMT
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
index 762d946..5bb52b4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
@@ -42,7 +42,8 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
   // Need to remember whether this is an acid compliant operation, and if so whether it is an
   // insert, update, or delete.
   private AcidUtils.Operation writeType;
-  private Long mmWriteId;
+  private Long txnId;
+  private int stmtId;
 
   // TODO: the below seems like they should just be combined into partitionDesc
   private org.apache.hadoop.hive.ql.plan.TableDesc table;
@@ -65,11 +66,11 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
       final Map<String, String> partitionSpec,
       final boolean replace,
-      final AcidUtils.Operation writeType, Long mmWriteId) {
+      final AcidUtils.Operation writeType, Long txnId) {
     super(sourcePath);
     Utilities.LOG14535.info("creating part LTD from " + sourcePath + " to "
         + ((table.getProperties() == null) ? "null" : table.getTableName()));
-    init(table, partitionSpec, replace, writeType, mmWriteId);
+    init(table, partitionSpec, replace, writeType, txnId);
   }
 
   /**
@@ -83,15 +84,15 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
                        final TableDesc table,
                        final Map<String, String> partitionSpec,
                        final boolean replace,
-                       final Long mmWriteId) {
-    this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID, mmWriteId);
+                       final Long txnId) {
+    this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID, txnId);
   }
 
   public LoadTableDesc(final Path sourcePath,
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
       final Map<String, String> partitionSpec,
-      final AcidUtils.Operation writeType, Long mmWriteId) {
-    this(sourcePath, table, partitionSpec, true, writeType, mmWriteId);
+      final AcidUtils.Operation writeType, Long txnId) {
+    this(sourcePath, table, partitionSpec, true, writeType, txnId);
   }
 
   /**
@@ -102,22 +103,22 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
    */
   public LoadTableDesc(final Path sourcePath,
                        final org.apache.hadoop.hive.ql.plan.TableDesc table,
-                       final Map<String, String> partitionSpec, Long mmWriteId) {
-    this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID, mmWriteId);
+                       final Map<String, String> partitionSpec, Long txnId) {
+    this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID, txnId);
   }
 
   public LoadTableDesc(final Path sourcePath,
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
       final DynamicPartitionCtx dpCtx,
       final AcidUtils.Operation writeType,
-      boolean isReplace, Long mmWriteId) {
+      boolean isReplace, Long txnId) {
     super(sourcePath);
     Utilities.LOG14535.info("creating LTD from " + sourcePath + " to " + table.getTableName()/*, new Exception()*/);
     this.dpCtx = dpCtx;
     if (dpCtx != null && dpCtx.getPartSpec() != null && partitionSpec == null) {
-      init(table, dpCtx.getPartSpec(), isReplace, writeType, mmWriteId);
+      init(table, dpCtx.getPartSpec(), isReplace, writeType, txnId);
     } else {
-      init(table, new LinkedHashMap<String, String>(), isReplace, writeType, mmWriteId);
+      init(table, new LinkedHashMap<String, String>(), isReplace, writeType, txnId);
     }
   }
 
@@ -125,12 +126,12 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
       final Map<String, String> partitionSpec,
       final boolean replace,
-      AcidUtils.Operation writeType, Long mmWriteId) {
+      AcidUtils.Operation writeType, Long txnId) {
     this.table = table;
     this.partitionSpec = partitionSpec;
     this.replace = replace;
     this.writeType = writeType;
-    this.mmWriteId = mmWriteId;
+    this.txnId = txnId;
   }
 
   @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -158,11 +159,11 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
 
   @Explain(displayName = "micromanaged table")
   public Boolean isMmTableExplain() {
-    return mmWriteId != null? true : null;
+    return txnId != null? true : null;
   }
 
   public boolean isMmTable() {
-    return mmWriteId != null;
+    return txnId != null;
   }
 
   public void setReplace(boolean replace) {
@@ -203,8 +204,20 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
     return writeType;
   }
 
-  public Long getMmWriteId() {
-    return mmWriteId;
+  public Long getTxnId() {
+    return txnId;
+  }
+
+  public void setTxnId(Long txnId) {
+    this.txnId = txnId;
+  }
+
+  public int getStmtId() {
+    return stmtId;
+  }
+
+  public void setStmtId(int stmtId) {
+    this.stmtId = stmtId;
   }
 
   public void setIntermediateInMmWrite(boolean b) {

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
index 4a13e1f..55b9da9 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
@@ -141,7 +141,7 @@ public class TestExecDriver extends TestCase {
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             HiveIgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, true, false, false, false, null);
+        db.loadTable(hadoopDataFile[i], src, false, true, false, false, false, null, 0);
         i++;
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/queries/clientpositive/mm_all.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q
index e2c8e97..8971292 100644
--- a/ql/src/test/queries/clientpositive/mm_all.q
+++ b/ql/src/test/queries/clientpositive/mm_all.q
@@ -33,7 +33,6 @@ drop table part_mm;
 drop table simple_mm;
 create table simple_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 insert into table simple_mm select key from intermediate;
-insert overwrite table simple_mm select key from intermediate;
 select * from simple_mm order by key;
 insert into table simple_mm select key from intermediate;
 select * from simple_mm order by key;
@@ -193,47 +192,6 @@ set hive.merge.mapredfiles=false;
 -- TODO: need to include merge+union+DP, but it's broken for now
 
 
-drop table ctas0_mm;
-create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only")
as select * from intermediate;
-select * from ctas0_mm;
-drop table ctas0_mm;
-
-drop table ctas1_mm;
-create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only")
as
-  select * from intermediate union all select * from intermediate;
-select * from ctas1_mm;
-drop table ctas1_mm;
-
-
-
-drop table iow0_mm;
-create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
-insert overwrite table iow0_mm select key from intermediate;
-insert into table iow0_mm select key + 1 from intermediate;
-select * from iow0_mm order by key;
-insert overwrite table iow0_mm select key + 2 from intermediate;
-select * from iow0_mm order by key;
-drop table iow0_mm;
-
-
-drop table iow1_mm; 
-create table iow1_mm(key int) partitioned by (key2 int)  tblproperties("transactional"="true",
"transactional_properties"="insert_only");
-insert overwrite table iow1_mm partition (key2)
-select key as k1, key from intermediate union all select key as k1, key from intermediate;
-insert into table iow1_mm partition (key2)
-select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate;
-select * from iow1_mm order by key, key2;
-insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate;
-select * from iow1_mm order by key, key2;
-insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from
intermediate;
-select * from iow1_mm order by key, key2;
-drop table iow1_mm;
-
-
-
-
 drop table load0_mm;
 create table load0_mm (key string, value string) stored as textfile tblproperties("transactional"="true",
"transactional_properties"="insert_only");
 load data local inpath '../../data/files/kv1.txt' into table load0_mm;
@@ -279,174 +237,11 @@ drop table load2_mm;
 drop table intermediate2;
 
 
-drop table intermediate_nonpart;
-drop table intermmediate_part;
-drop table intermmediate_nonpart;
-create table intermediate_nonpart(key int, p int);
-insert into intermediate_nonpart select * from intermediate;
-create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true",
"transactional_properties"="insert_only");
-insert into intermmediate_nonpart select * from intermediate;
-create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true",
"transactional_properties"="insert_only");
-insert into table intermmediate partition(p) select key, p from intermediate;
-
-set hive.exim.test.mode=true;
-
-export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart';
-export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart';
-export table intermediate to 'ql/test/data/exports/intermediate_part';
-export table intermmediate to 'ql/test/data/exports/intermmediate_part';
-
-drop table intermediate_nonpart;
-drop table intermmediate_part;
-drop table intermmediate_nonpart;
-
--- non-MM export to MM table, with and without partitions
-
-drop table import0_mm;
-create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
-import table import0_mm from 'ql/test/data/exports/intermediate_nonpart';
-select * from import0_mm order by key, p;
-drop table import0_mm;
-
-
-
-drop table import1_mm;
-create table import1_mm(key int) partitioned by (p int)
-  stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only");
-import table import1_mm from 'ql/test/data/exports/intermediate_part';
-select * from import1_mm order by key, p;
-drop table import1_mm;
-
-
--- MM export into new MM table, non-part and part
-
---drop table import2_mm;
---import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart';
---desc import2_mm;
---select * from import2_mm order by key, p;
---drop table import2_mm;
---
---drop table import3_mm;
---import table import3_mm from 'ql/test/data/exports/intermmediate_part';
---desc import3_mm;
---select * from import3_mm order by key, p;
---drop table import3_mm;
-
--- MM export into existing MM table, non-part and partial part
-
-drop table import4_mm;
-create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
-import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart';
-select * from import4_mm order by key, p;
-drop table import4_mm;
-
-drop table import5_mm;
-create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true",
"transactional_properties"="insert_only");
-import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part';
-select * from import5_mm order by key, p;
-drop table import5_mm;
-
--- MM export into existing non-MM table, non-part and part
-
-drop table import6_mm;
-create table import6_mm(key int, p int);
-import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart';
-select * from import6_mm order by key, p;
-drop table import6_mm;
-
-drop table import7_mm;
-create table import7_mm(key int) partitioned by (p int);
-import table import7_mm from 'ql/test/data/exports/intermmediate_part';
-select * from import7_mm order by key, p;
-drop table import7_mm;
-
-set hive.exim.test.mode=false;
-
-
-
 drop table multi0_1_mm;
 drop table multi0_2_mm;
 create table multi0_1_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only");
 create table multi0_2_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only");
 
-from intermediate
-insert overwrite table multi0_1_mm select key, p
-insert overwrite table multi0_2_mm select p, key;
-
-select * from multi0_1_mm order by key, key2;
-select * from multi0_2_mm order by key, key2;
-
-set hive.merge.mapredfiles=true;
-set hive.merge.sparkfiles=true;
-set hive.merge.tezfiles=true;
-
-from intermediate
-insert into table multi0_1_mm select p, key
-insert overwrite table multi0_2_mm select key, p;
-select * from multi0_1_mm order by key, key2;
-select * from multi0_2_mm order by key, key2;
-
-set hive.merge.mapredfiles=false;
-set hive.merge.sparkfiles=false;
-set hive.merge.tezfiles=false;
-
-drop table multi0_1_mm;
-drop table multi0_2_mm;
-
-
-drop table multi1_mm;
-create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true",
"transactional_properties"="insert_only");
-from intermediate
-insert into table multi1_mm partition(p=1) select p, key
-insert into table multi1_mm partition(p=2) select key, p;
-select * from multi1_mm order by key, key2, p;
-from intermediate
-insert into table multi1_mm partition(p=2) select p, key
-insert overwrite table multi1_mm partition(p=1) select key, p;
-select * from multi1_mm order by key, key2, p;
-
-from intermediate
-insert into table multi1_mm partition(p) select p, key, p
-insert into table multi1_mm partition(p=1) select key, p;
-select key, key2, p from multi1_mm order by key, key2, p;
-
-from intermediate
-insert into table multi1_mm partition(p) select p, key, 1
-insert into table multi1_mm partition(p=1) select key, p;
-select key, key2, p from multi1_mm order by key, key2, p;
-drop table multi1_mm;
-
-
-
-
-set datanucleus.cache.collections=false;
-set hive.stats.autogather=true;
-
-drop table stats_mm;
-create table stats_mm(key int)  tblproperties("transactional"="true", "transactional_properties"="insert_only");
-insert overwrite table stats_mm  select key from intermediate;
-desc formatted stats_mm;
-
-insert into table stats_mm  select key from intermediate;
-desc formatted stats_mm;
-drop table stats_mm;
-
-drop table stats2_mm;
-create table stats2_mm tblproperties("transactional"="true", "transactional_properties"="insert_only")
as select array(key, value) from src;
-desc formatted stats2_mm;
-drop table stats2_mm;
-
-
-set hive.optimize.skewjoin=true;
-set hive.skewjoin.key=2;
-set hive.optimize.metadataonly=false;
-
-CREATE TABLE skewjoin_mm(key INT, value STRING) STORED AS TEXTFILE tblproperties ("transactional"="true",
"transactional_properties"="insert_only");
-FROM src src1 JOIN src src2 ON (src1.key = src2.key) INSERT OVERWRITE TABLE skewjoin_mm SELECT
src1.key, src2.value;
-select count(distinct key) from skewjoin_mm;
-drop table skewjoin_mm;
-
-set hive.optimize.skewjoin=false;
 
 set hive.optimize.index.filter=true;
 set hive.auto.convert.join=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/queries/clientpositive/mm_conversions.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_conversions.q b/ql/src/test/queries/clientpositive/mm_conversions.q
index 2dc7a74..62faeac 100644
--- a/ql/src/test/queries/clientpositive/mm_conversions.q
+++ b/ql/src/test/queries/clientpositive/mm_conversions.q
@@ -4,7 +4,8 @@ set hive.fetch.task.conversion=none;
 set tez.grouping.min-size=1;
 set tez.grouping.max-size=2;
 set hive.exec.dynamic.partition.mode=nonstrict;
-
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
 -- Force multiple writers when reading
 drop table intermediate;
@@ -13,29 +14,31 @@ insert into table intermediate partition(p='455') select distinct key
from src w
 insert into table intermediate partition(p='456') select distinct key from src where key
is not null order by key asc limit 1;
 insert into table intermediate partition(p='457') select distinct key from src where key
>= 100 order by key asc limit 1;
 
-drop table simple_from_mm;
-create table simple_from_mm(key int) stored as orc tblproperties ("transactional"="true",
"transactional_properties"="insert_only");
-insert into table simple_from_mm select key from intermediate;
-insert into table simple_from_mm select key from intermediate;
-select * from simple_from_mm s1 order by key;
-alter table simple_from_mm unset tblproperties('transactional_properties', 'transactional');
-select * from simple_from_mm s2 order by key;
-insert into table simple_from_mm select key from intermediate;
-select * from simple_from_mm s3 order by key;
-alter table simple_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only");
-select * from simple_from_mm s4 order by key;
-insert into table simple_from_mm select key from intermediate;
-select * from simple_from_mm s5 order by key;
-alter table simple_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false');
-select * from simple_from_mm s6 order by key;
-insert into table simple_from_mm select key from intermediate;
-select * from simple_from_mm s7 order by key;
-drop table simple_from_mm;
+drop table simple_from_mm1;
+create table simple_from_mm1(key int) stored as orc tblproperties ("transactional"="true",
"transactional_properties"="insert_only");
+insert into table simple_from_mm1 select key from intermediate;
+insert into table simple_from_mm1 select key from intermediate;
+select * from simple_from_mm1 s1 order by key;
+alter table simple_from_mm1 unset tblproperties('transactional_properties', 'transactional');
+select * from simple_from_mm1 s2 order by key;
+insert into table simple_from_mm1 select key from intermediate;
+select * from simple_from_mm1 s3 order by key;
+drop table simple_from_mm1;
+
+drop table simple_from_mm2;
+create table simple_from_mm2(key int) stored as orc tblproperties ("transactional"="true",
"transactional_properties"="insert_only");
+insert into table simple_from_mm2 select key from intermediate;
+insert into table simple_from_mm2 select key from intermediate;
+select * from simple_from_mm2 s1 order by key;
+alter table simple_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false');
+select * from simple_from_mm2 s2 order by key;
+insert into table simple_from_mm2 select key from intermediate;
+select * from simple_from_mm2 s3 order by key;
+drop table simple_from_mm2;
 
 drop table simple_to_mm;
 create table simple_to_mm(key int) stored as orc;
 insert into table simple_to_mm select key from intermediate;
-insert into table simple_to_mm select key from intermediate;
 select * from simple_to_mm s1 order by key;
 alter table simple_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only");
 select * from simple_to_mm s2 order by key;
@@ -44,27 +47,29 @@ insert into table simple_to_mm select key from intermediate;
 select * from simple_to_mm s3 order by key;
 drop table simple_to_mm;
 
-drop table part_from_mm;
-create table part_from_mm(key int) partitioned by (key_mm int) stored as orc tblproperties
("transactional"="true", "transactional_properties"="insert_only");
-insert into table part_from_mm partition(key_mm='455') select key from intermediate;
-insert into table part_from_mm partition(key_mm='455') select key from intermediate;
-insert into table part_from_mm partition(key_mm='456') select key from intermediate;
-select * from part_from_mm s1 order by key, key_mm;
-alter table part_from_mm unset tblproperties('transactional_properties', 'transactional');
-select * from part_from_mm s2 order by key, key_mm;
-insert into table part_from_mm partition(key_mm='456') select key from intermediate;
-insert into table part_from_mm partition(key_mm='457') select key from intermediate;
-select * from part_from_mm s3 order by key, key_mm;
-alter table part_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only");
-select * from part_from_mm s4 order by key, key_mm;
-insert into table part_from_mm partition(key_mm='456') select key from intermediate;
-insert into table part_from_mm partition(key_mm='455') select key from intermediate;
-select * from part_from_mm s5 order by key, key_mm;
-alter table part_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false');
-select * from part_from_mm s6 order by key, key_mm;
-insert into table part_from_mm partition(key_mm='457') select key from intermediate;
-select * from part_from_mm s7 order by key, key_mm;
-drop table part_from_mm;
+drop table part_from_mm1;
+create table part_from_mm1(key int) partitioned by (key_mm int) stored as orc tblproperties
("transactional"="true", "transactional_properties"="insert_only");
+insert into table part_from_mm1 partition(key_mm='455') select key from intermediate;
+insert into table part_from_mm1 partition(key_mm='455') select key from intermediate;
+insert into table part_from_mm1 partition(key_mm='456') select key from intermediate;
+select * from part_from_mm1 s1 order by key, key_mm;
+alter table part_from_mm1 unset tblproperties('transactional_properties', 'transactional');
+select * from part_from_mm1 s2 order by key, key_mm;
+insert into table part_from_mm1 partition(key_mm='456') select key from intermediate;
+insert into table part_from_mm1 partition(key_mm='457') select key from intermediate;
+select * from part_from_mm1 s3 order by key, key_mm;
+drop table part_from_mm1;
+
+drop table part_from_mm2;
+create table part_from_mm2(key int) partitioned by (key_mm int) stored as orc tblproperties
("transactional"="true", "transactional_properties"="insert_only");
+insert into table part_from_mm2 partition(key_mm='456') select key from intermediate;--fails here
+insert into table part_from_mm2 partition(key_mm='455') select key from intermediate;
+select * from part_from_mm2 s1 order by key, key_mm;
+alter table part_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false');
+select * from part_from_mm2 s2 order by key, key_mm;
+insert into table part_from_mm2 partition(key_mm='457') select key from intermediate;
+select * from part_from_mm2 s3 order by key, key_mm;
+drop table part_from_mm2;
 
 drop table part_to_mm;
 create table part_to_mm(key int) partitioned by (key_mm int) stored as orc;

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/queries/clientpositive/mm_exim.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_exim.q b/ql/src/test/queries/clientpositive/mm_exim.q
new file mode 100644
index 0000000..2cdb001
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/mm_exim.q
@@ -0,0 +1,98 @@
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+set hive.fetch.task.conversion=none;
+set tez.grouping.min-size=1;
+set tez.grouping.max-size=2;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+
+drop table intermediate;
+create table intermediate(key int) partitioned by (p int) stored as orc;
+insert into table intermediate partition(p='455') select distinct key from src where key
>= 0 order by key desc limit 2;
+insert into table intermediate partition(p='456') select distinct key from src where key
is not null order by key asc limit 2;
+insert into table intermediate partition(p='457') select distinct key from src where key
>= 100 order by key asc limit 2;
+
+drop table intermediate_nonpart;
+drop table intermmediate_part;
+drop table intermmediate_nonpart;
+create table intermediate_nonpart(key int, p int);
+insert into intermediate_nonpart select * from intermediate;
+create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true",
"transactional_properties"="insert_only");
+insert into intermmediate_nonpart select * from intermediate;
+create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true",
"transactional_properties"="insert_only");
+insert into table intermmediate partition(p) select key, p from intermediate;
+
+set hive.exim.test.mode=true;
+
+export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart';
+export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart';
+export table intermediate to 'ql/test/data/exports/intermediate_part';
+export table intermmediate to 'ql/test/data/exports/intermmediate_part';
+
+drop table intermediate_nonpart;
+drop table intermmediate_part;
+drop table intermmediate_nonpart;
+
+-- non-MM export to MM table, with and without partitions
+
+drop table import0_mm;
+create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
+import table import0_mm from 'ql/test/data/exports/intermediate_nonpart';
+select * from import0_mm order by key, p;
+drop table import0_mm;
+
+
+
+drop table import1_mm;
+create table import1_mm(key int) partitioned by (p int)
+  stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only");
+import table import1_mm from 'ql/test/data/exports/intermediate_part';
+select * from import1_mm order by key, p;
+drop table import1_mm;
+
+
+-- MM export into new MM table, non-part and part
+
+--drop table import2_mm;
+--import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart';
+--desc import2_mm;
+--select * from import2_mm order by key, p;
+--drop table import2_mm;
+--
+--drop table import3_mm;
+--import table import3_mm from 'ql/test/data/exports/intermmediate_part';
+--desc import3_mm;
+--select * from import3_mm order by key, p;
+--drop table import3_mm;
+
+-- MM export into existing MM table, non-part and partial part
+
+drop table import4_mm;
+create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
+import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart';
+select * from import4_mm order by key, p;
+drop table import4_mm;
+
+drop table import5_mm;
+create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true",
"transactional_properties"="insert_only");
+import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part';
+select * from import5_mm order by key, p;
+drop table import5_mm;
+
+-- MM export into existing non-MM table, non-part and part
+
+drop table import6_mm;
+create table import6_mm(key int, p int);
+import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart';
+select * from import6_mm order by key, p;
+drop table import6_mm;
+
+drop table import7_mm;
+create table import7_mm(key int) partitioned by (p int);
+import table import7_mm from 'ql/test/data/exports/intermmediate_part';
+select * from import7_mm order by key, p;
+drop table import7_mm;
+
+set hive.exim.test.mode=false;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/queries/clientpositive/mm_insertonly_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_insertonly_acid.q b/ql/src/test/queries/clientpositive/mm_insertonly_acid.q
deleted file mode 100644
index 7da99c5..0000000
--- a/ql/src/test/queries/clientpositive/mm_insertonly_acid.q
+++ /dev/null
@@ -1,16 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
-set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.support.concurrency=true;
-set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-
-
-drop table qtr_acid;
-create table qtr_acid (key int) partitioned by (p int) tblproperties ("transactional"="true",
"transactional_properties"="insert_only");
-insert into table qtr_acid partition(p='123') select distinct key from src where key >
0 order by key asc limit 10;
-insert into table qtr_acid partition(p='456') select distinct key from src where key >
0 order by key desc limit 10;
-explain
-select * from qtr_acid order by key;
-select * from qtr_acid order by key;
-drop table qtr_acid;
\ No newline at end of file


Mime
View raw message