hive-commits mailing list archives

From the...@apache.org
Subject hive git commit: HIVE-17527 : Support replication for rename/move table across database (Sankar Hariappan, reviewed by Anishek Agarwal, Thejas Nair)
Date Fri, 22 Sep 2017 17:23:58 GMT
Repository: hive
Updated Branches:
  refs/heads/master 266c50554 -> 2765e544f


HIVE-17527 : Support replication for rename/move table across database (Sankar Hariappan,
reviewed by Anishek Agarwal, Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2765e544
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2765e544
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2765e544

Branch: refs/heads/master
Commit: 2765e544ffbcddd01c95004530fff2ac3c4a3d55
Parents: 266c505
Author: Sankar Hariappan <mailtosankarh@gmail.com>
Authored: Fri Sep 22 10:23:52 2017 -0700
Committer: Thejas M Nair <thejas@hortonworks.com>
Committed: Fri Sep 22 10:23:52 2017 -0700

----------------------------------------------------------------------
 .../hive/ql/parse/TestReplicationScenarios.java | 70 ++++++++++++++++++++
 .../hadoop/hive/metastore/HiveAlterHandler.java | 31 +++++++--
 .../hadoop/hive/metastore/HiveMetaStore.java    | 27 ++++++--
 3 files changed, 119 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
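Context for readers following the replication path: the core of this change is that a table rename which also moves the table to a different database is no longer notified as a single ALTER_TABLE event. Instead the metastore emits DROP_TABLE for the source database, CREATE_TABLE for the destination database, and ADD_PARTITION when the table is partitioned, so incremental REPL DUMP/LOAD on each database can replay its side of the move. Below is a minimal, self-contained sketch of that event mapping only; the class name, EventKind and eventsForRename are illustrative stand-ins, not metastore API.

import java.util.ArrayList;
import java.util.List;

public class RenameEventMappingSketch {

  // Hypothetical stand-in for the metastore event types touched by this commit.
  enum EventKind { ALTER_TABLE, DROP_TABLE, CREATE_TABLE, ADD_PARTITION }

  // Same database: the rename stays a single ALTER_TABLE notification.
  // Different database: notify DROP_TABLE (source) + CREATE_TABLE (target),
  // plus ADD_PARTITION when the renamed table is partitioned.
  static List<EventKind> eventsForRename(String oldDb, String newDb, boolean partitioned) {
    List<EventKind> events = new ArrayList<>();
    if (oldDb.equalsIgnoreCase(newDb)) {
      events.add(EventKind.ALTER_TABLE);
    } else {
      events.add(EventKind.DROP_TABLE);
      events.add(EventKind.CREATE_TABLE);
      if (partitioned) {
        events.add(EventKind.ADD_PARTITION);
      }
    }
    return events;
  }

  public static void main(String[] args) {
    // e.g. ALTER TABLE db1.ptned RENAME TO db2.ptned_renamed on a partitioned table
    System.out.println(eventsForRename("db1", "db2", true));
    // prints [DROP_TABLE, CREATE_TABLE, ADD_PARTITION]
  }
}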


http://git-wip-us.apache.org/repos/asf/hive/blob/2765e544/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 896ec1a..a8c3a0b 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -1950,6 +1950,76 @@ public class TestReplicationScenarios {
   }
 
   @Test
+  public void testRenameTableAcrossDatabases() throws IOException {
+    String testName = "renameTableAcrossDatabases";
+    LOG.info("Testing " + testName);
+    String dbName1 = testName + "_" + tid + "_1";
+    String dbName2 = testName + "_" + tid + "_2";
+    String replDbName1 = dbName1 + "_dupe";
+    String replDbName2 = dbName2 + "_dupe";
+
+    run("CREATE DATABASE " + dbName1, driver);
+    run("CREATE DATABASE " + dbName2, driver);
+    run("CREATE TABLE " + dbName1 + ".unptned(a string) STORED AS TEXTFILE", driver);
+
+    String[] unptn_data = new String[] { "ten", "twenty" };
+    String unptn_locn = new Path(TEST_PATH, testName + "_unptn").toUri().getPath();
+
+    createTestDataFile(unptn_locn, unptn_data);
+    run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName1 + ".unptned", driver);
+
+    Tuple bootstrap1 = bootstrapLoadAndVerify(dbName1, replDbName1);
+    Tuple bootstrap2 = bootstrapLoadAndVerify(dbName2, replDbName2);
+
+    verifyRun("SELECT a from " + replDbName1 + ".unptned ORDER BY a", unptn_data, driverMirror);
+    verifyIfTableNotExist(replDbName2, "unptned", metaStoreClientMirror);
+
+    run("ALTER TABLE " + dbName1 + ".unptned RENAME TO " + dbName2 + ".unptned_renamed", driver);
+
+    incrementalLoadAndVerify(dbName1, bootstrap1.lastReplId, replDbName1);
+    incrementalLoadAndVerify(dbName2, bootstrap2.lastReplId, replDbName2);
+
+    verifyIfTableNotExist(replDbName1, "unptned", metaStoreClientMirror);
+    verifyIfTableNotExist(replDbName1, "unptned_renamed", metaStoreClientMirror);
+    verifyRun("SELECT a from " + replDbName2 + ".unptned_renamed ORDER BY a", unptn_data, driverMirror);
+  }
+
+  @Test
+  public void testRenamePartitionedTableAcrossDatabases() throws IOException {
+    String testName = "renamePartitionedTableAcrossDatabases";
+    LOG.info("Testing " + testName);
+    String dbName1 = testName + "_" + tid + "_1";
+    String dbName2 = testName + "_" + tid + "_2";
+    String replDbName1 = dbName1 + "_dupe";
+    String replDbName2 = dbName2 + "_dupe";
+
+    run("CREATE DATABASE " + dbName1, driver);
+    run("CREATE DATABASE " + dbName2, driver);
+    run("CREATE TABLE " + dbName1 + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver);
+
+    String[] ptn_data = new String[] { "fifteen", "fourteen" };
+    String ptn_locn = new Path(TEST_PATH, testName + "_ptn").toUri().getPath();
+
+    createTestDataFile(ptn_locn, ptn_data);
+    run("LOAD DATA LOCAL INPATH '" + ptn_locn + "' OVERWRITE INTO TABLE " + dbName1 + ".ptned PARTITION(b=1)", driver);
+
+    Tuple bootstrap1 = bootstrapLoadAndVerify(dbName1, replDbName1);
+    Tuple bootstrap2 = bootstrapLoadAndVerify(dbName2, replDbName2);
+
+    verifyRun("SELECT a from " + replDbName1 + ".ptned where (b=1) ORDER BY a", ptn_data, driverMirror);
+    verifyIfTableNotExist(replDbName2, "ptned", metaStoreClientMirror);
+
+    run("ALTER TABLE " + dbName1 + ".ptned RENAME TO " + dbName2 + ".ptned_renamed", driver);
+
+    incrementalLoadAndVerify(dbName1, bootstrap1.lastReplId, replDbName1);
+    incrementalLoadAndVerify(dbName2, bootstrap2.lastReplId, replDbName2);
+
+    verifyIfTableNotExist(replDbName1, "ptned", metaStoreClientMirror);
+    verifyIfTableNotExist(replDbName1, "ptned_renamed", metaStoreClientMirror);
+    verifyRun("SELECT a from " + replDbName2 + ".ptned_renamed where (b=1) ORDER BY a", ptn_data, driverMirror);
+  }
+
+  @Test
   public void testViewsReplication() throws IOException {
     String testName = "viewsReplication";
     String dbName = createDB(testName, driver);

http://git-wip-us.apache.org/repos/asf/hive/blob/2765e544/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index b279e1d..65551ad 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -21,8 +21,11 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -301,10 +304,28 @@ public class HiveAlterHandler implements AlterHandler {
       }
 
       if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
-        MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                              EventMessage.EventType.ALTER_TABLE,
-                                              new AlterTableEvent(oldt, newt, false, true, handler),
-                                              environmentContext);
+        if (oldt.getDbName().equalsIgnoreCase(newt.getDbName())) {
+          MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                  EventMessage.EventType.ALTER_TABLE,
+                  new AlterTableEvent(oldt, newt, false, true, handler),
+                  environmentContext);
+        } else {
+          MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                  EventMessage.EventType.DROP_TABLE,
+                  new DropTableEvent(oldt, true, false, handler),
+                  environmentContext);
+          MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                  EventMessage.EventType.CREATE_TABLE,
+                  new CreateTableEvent(newt, true, handler),
+                  environmentContext);
+          if (isPartitionedTable) {
+            parts = msdb.getPartitions(newt.getDbName(), newt.getTableName(), -1);
+            MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                    EventMessage.EventType.ADD_PARTITION,
+                    new AddPartitionEvent(newt, parts, true, handler),
+                    environmentContext);
+          }
+        }
       }
       // commit the changes
       success = msdb.commitTransaction();
@@ -591,7 +612,7 @@ public class HiveAlterHandler implements AlterHandler {
     return alterPartitions(msdb, wh, dbname, name, new_parts, environmentContext, null);
   }
 
-    @Override
+  @Override
+  public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
     final String name, final List<Partition> new_parts, EnvironmentContext environmentContext,
     HMSHandler handler)

http://git-wip-us.apache.org/repos/asf/hive/blob/2765e544/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index c5140e5..d5de4f2 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -4437,10 +4437,29 @@ public class HiveMetaStore extends ThriftHiveMetastore {
                 envContext, this);
         success = true;
         if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.ALTER_TABLE,
-                                                new AlterTableEvent(oldt, newTable, false, true, this),
-                                                envContext);
+          if (oldt.getDbName().equalsIgnoreCase(newTable.getDbName())) {
+            MetaStoreListenerNotifier.notifyEvent(listeners,
+                    EventType.ALTER_TABLE,
+                    new AlterTableEvent(oldt, newTable, false, true, this),
+                    envContext);
+          } else {
+            MetaStoreListenerNotifier.notifyEvent(listeners,
+                    EventType.DROP_TABLE,
+                    new DropTableEvent(oldt, true, false, this),
+                    envContext);
+            MetaStoreListenerNotifier.notifyEvent(listeners,
+                    EventType.CREATE_TABLE,
+                    new CreateTableEvent(newTable, true, this),
+                    envContext);
+            if (newTable.getPartitionKeysSize() != 0) {
+              List<Partition> partitions
+                      = getMS().getPartitions(newTable.getDbName(), newTable.getTableName(), -1);
+              MetaStoreListenerNotifier.notifyEvent(listeners,
+                      EventType.ADD_PARTITION,
+                      new AddPartitionEvent(newTable, partitions, true, this),
+                      envContext);
+            }
+          }
         }
       } catch (NoSuchObjectException e) {
         // thrown when the table to be altered does not exist

