hive-commits mailing list archives

From khorg...@apache.org
Subject svn commit: r1674557 [1/3] - in /hive/trunk: common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ metastore/src/java/org/apache/hadoop/...
Date Sat, 18 Apr 2015 19:04:47 GMT
Author: khorgath
Date: Sat Apr 18 19:04:46 2015
New Revision: 1674557

URL: http://svn.apache.org/r1674557
Log:
HIVE-10228 : Changes to Hive Export/Import/DropTable/DropPartition to support replication semantics (Sushanth Sowmyan, reviewed by Alan Gates)

Added:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
    hive/trunk/ql/src/test/queries/clientpositive/repl_1_drop.q
    hive/trunk/ql/src/test/queries/clientpositive/repl_2_exim_basic.q
    hive/trunk/ql/src/test/queries/clientpositive/repl_3_exim_metadata.q
    hive/trunk/ql/src/test/results/clientpositive/repl_1_drop.q.out
    hive/trunk/ql/src/test/results/clientpositive/repl_2_exim_basic.q.out
    hive/trunk/ql/src/test/results/clientpositive/repl_3_exim_metadata.q.out
Modified:
    hive/trunk/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
    hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/trunk/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationUtils.java
    hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
    hive/trunk/ql/src/test/results/clientnegative/alter_table_add_partition.q.out
    hive/trunk/ql/src/test/results/clientnegative/alter_view_failure5.q.out
    hive/trunk/ql/src/test/results/clientnegative/alter_view_failure7.q.out
    hive/trunk/ql/src/test/results/clientnegative/truncate_partition_column.q.out

Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java Sat Apr 18 19:04:46 2015
@@ -135,6 +135,11 @@ public class StatsSetupConst {
   // alterPartition/alterTable is happening via statsTask.
   public static final String STATS_GENERATED_VIA_STATS_TASK = "STATS_GENERATED_VIA_STATS_TASK";
 
+  // This string constant is used by AlterHandler to figure out that it should not attempt to
+  // update stats. It is set by any client-side task which wishes to signal that no stats
+  // update should take place, such as with replication.
+  public static final String DO_NOT_UPDATE_STATS = "DO_NOT_UPDATE_STATS";
+
   // This string constant will be persisted in metastore to indicate whether corresponding
   // table or partition's statistics are accurate or not.
   public static final String COLUMN_STATS_ACCURATE = "COLUMN_STATS_ACCURATE";
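
The DO_NOT_UPDATE_STATS marker is consumed (and removed) by the MetaStoreUtils change further below, which skips the fast stats update once when the marker is true. In this patch it is set programmatically by the replication code path; as a hedged illustration only, any client that writes the same table parameter would have the same effect (the table name below is made up):

    -- hypothetical sketch: set the marker as a table parameter so that the next
    -- fast stats update in MetaStoreUtils consumes it and skips updating stats once
    ALTER TABLE sales SET TBLPROPERTIES ('DO_NOT_UPDATE_STATS'='true');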

Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Sat Apr 18 19:04:46 2015
@@ -1584,6 +1584,11 @@ public class HiveConf extends Configurat
     // temporary variable for testing. This is added just to turn off this feature in case of a bug in
     // deployment. It has not been documented in hive-default.xml intentionally, this should be removed
     // once the feature is stable
+    HIVE_EXIM_RESTRICT_IMPORTS_INTO_REPLICATED_TABLES("hive.exim.strict.repl.tables",true,
+        "Parameter that determines if 'regular' (non-replication) export dumps can be\n" +
+        "imported on to tables that are the target of replication. If this parameter is\n" +
+        "set, regular imports will check if the destination table(if it exists) has a " +
+        "'repl.last.id' set on it. If so, it will fail."),
     HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS("hive.mapper.cannot.span.multiple.partitions", false, ""),
     HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false,
         "should rework the mapred work or not.\n" +

Modified: hive/trunk/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationUtils.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationUtils.java (original)
+++ hive/trunk/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationUtils.java Sat Apr 18 19:04:46 2015
@@ -23,6 +23,7 @@ import com.google.common.base.Function;
 import com.google.common.base.Objects;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.io.IOExceptionWithCause;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hive.hcatalog.api.HCatDatabase;
 import org.apache.hive.hcatalog.api.HCatPartition;
 import org.apache.hive.hcatalog.api.HCatTable;
@@ -40,7 +41,7 @@ import java.util.Map;
 
 public class ReplicationUtils {
 
-  private final static String REPL_STATE_ID = "repl.last.id"; // TODO : define in ReplicationSpec, and point this to that once that's patched in.
+  private final static String REPL_STATE_ID = ReplicationSpec.KEY.CURR_STATE_ID.toString();
 
   private ReplicationUtils(){
     // dummy private constructor, since this class is a collection of static utility methods.

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Sat Apr 18 19:04:46 2015
@@ -194,6 +194,16 @@ public class MetaStoreUtils {
       FileStatus[] fileStatus, boolean newDir, boolean forceRecompute) throws MetaException {
 
     Map<String,String> params = tbl.getParameters();
+
+    if ((params!=null) && params.containsKey(StatsSetupConst.DO_NOT_UPDATE_STATS)){
+      boolean doNotUpdateStats = Boolean.valueOf(params.get(StatsSetupConst.DO_NOT_UPDATE_STATS));
+      params.remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
+      tbl.setParameters(params); // to make sure we remove this marker property
+      if (doNotUpdateStats){
+        return false;
+      }
+    }
+
     boolean updated = false;
     if (forceRecompute ||
         params == null ||

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Sat Apr 18 19:04:46 2015
@@ -430,6 +430,7 @@ public enum ErrorMsg {
 
   DROP_NATIVE_FUNCTION(10301, "Cannot drop native function"),
   UPDATE_CANNOT_UPDATE_BUCKET_VALUE(10302, "Updating values of bucketing columns is not supported.  Column {0}.", true),
+  IMPORT_INTO_STRICT_REPL_TABLE(10303,"Non-repl import disallowed against table that is a destination of replication."),
 
   //========================== 20000 range starts here ========================//
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Sat Apr 18 19:04:46 2015
@@ -47,6 +47,7 @@ import java.util.SortedSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
+import com.google.common.collect.Iterables;
 import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
@@ -115,12 +116,14 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils;
 import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter;
 import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
@@ -3693,6 +3696,39 @@ public class DDLTask extends Task<DDLWor
   }
 
   private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException {
+
+    ReplicationSpec replicationSpec = dropTbl.getReplicationSpec();
+    if (replicationSpec.isInReplicationScope()){
+      /**
+       * ALTER TABLE DROP PARTITION ... FOR REPLICATION(x) behaves as a DROP PARTITION IF OLDER THAN x
+       *
+       * So, we check each partition that matches our DropTableDesc.getPartSpecs(), and drop it only
+       * if it's older than the event that spawned this replicated request to drop partition
+       */
+      // TODO: The current implementation of replication will result in DROP_PARTITION under
+      // replication scope being called once per partition rather than once for multiple
+      // partitions. However, to be robust, we must still handle the multi-partition case in
+      // case this assumption changes in the future. If it does change, fetching each partition
+      // one-by-one and then deciding on inspection whether or not it is a candidate for dropping
+      // will not be very performant. Thus, we need a way to push this filter
+      // (replicationSpec.allowEventReplacementInto) down to the metastore, letting it decide
+      // whether or not to drop a partition based on a Predicate over the parameter key values.
+      for (DropTableDesc.PartSpec partSpec : dropTbl.getPartSpecs()){
+        try {
+          for (Partition p : Iterables.filter(
+              db.getPartitionsByFilter(tbl, partSpec.getPartSpec().getExprString()),
+              replicationSpec.allowEventReplacementInto())){
+            db.dropPartition(tbl.getDbName(),tbl.getTableName(),p.getValues(),true);
+          }
+        } catch (NoSuchObjectException e){
+          // ignore NSOE because that means there's nothing to drop.
+        } catch (Exception e) {
+          throw new HiveException(e.getMessage(), e);
+        }
+      }
+      return;
+    }
+
     // ifExists is currently verified in DDLSemanticAnalyzer
     List<Partition> droppedParts
         = db.dropPartitions(dropTbl.getTableName(),
@@ -3735,6 +3771,46 @@ public class DDLTask extends Task<DDLWor
           " is protected from being dropped");
     }
 
+    ReplicationSpec replicationSpec = dropTbl.getReplicationSpec();
+    if ((tbl!= null) && replicationSpec.isInReplicationScope()){
+      /**
+       * DROP TABLE FOR REPLICATION behaves differently from DROP TABLE IF EXISTS - it more closely
+       * matches a DROP TABLE IF OLDER THAN(x) semantic.
+       *
+       * Ideally, commands executed under the scope of replication need to be idempotent and resilient
+       * to repeats. What can happen, sometimes, is that a worker processing a replication task is
+       * abandoned for not returning in time, but still executes its task after a while; that late
+       * execution should not clobber data that has been written later on. So, for example, if we
+       * create partition P1, then drop it, then create it yet again, the replication of that drop
+       * should not drop the newer partition if it runs after the destination object is already in
+       * the newer state.
+       *
+       * Thus, we check replicationSpec.allowEventReplacementInto to determine whether or not we can
+       * drop the object in question (it returns false if the object is newer than the event, true if not).
+       *
+       * In addition, since DROP TABLE FOR REPLICATION can result in a table not being dropped, while DROP
+       * TABLE will always drop the table, and the included partitions, DROP TABLE FOR REPLICATION must
+       * do one more thing - if it does not drop the table because the table is in a newer state, it must
+       * drop the partitions inside it that are older than this event. To wit, DROP TABLE FOR REPL
+       * acts like a recursive DROP TABLE IF OLDER.
+       */
+      if (!replicationSpec.allowEventReplacementInto(tbl)){
+        // Drop occurred as part of replicating a drop, but the destination
+        // table was newer than the event being replicated. Ignore, but drop
+        // any partitions inside that are older.
+        if (tbl.isPartitioned()){
+
+          PartitionIterable partitions = new PartitionIterable(db,tbl,null,conf.getIntVar(
+              HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
+
+          for (Partition p : Iterables.filter(partitions, replicationSpec.allowEventReplacementInto())){
+            db.dropPartition(tbl.getDbName(),tbl.getTableName(),p.getValues(),true);
+          }
+        }
+        return; // table is newer, leave it be.
+      }
+    }
+
     int partitionBatchSize = HiveConf.getIntVar(conf,
         ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX);
 
@@ -3889,7 +3965,12 @@ public class DDLTask extends Task<DDLWor
    */
   private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
     // create the table
-    Table tbl = db.newTable(crtTbl.getTableName());
+    Table tbl;
+    if (crtTbl.getDatabaseName() == null || (crtTbl.getTableName().contains("."))){
+      tbl = db.newTable(crtTbl.getTableName());
+    }else {
+      tbl = new Table(crtTbl.getDatabaseName(),crtTbl.getTableName());
+    }
 
     if (crtTbl.getTblProps() != null) {
       tbl.getTTable().getParameters().putAll(crtTbl.getTblProps());
@@ -4043,7 +4124,16 @@ public class DDLTask extends Task<DDLWor
     }
 
     // create the table
-    db.createTable(tbl, crtTbl.getIfNotExists());
+    if (crtTbl.getReplaceMode()){
+      // replace-mode creates are really alters using CreateTableDesc.
+      try {
+        db.alterTable(tbl.getDbName()+"."+tbl.getTableName(),tbl);
+      } catch (InvalidOperationException e) {
+        throw new HiveException("Unable to alter table. " + e.getMessage(), e);
+      }
+    } else {
+      db.createTable(tbl, crtTbl.getIfNotExists());
+    }
     work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     return 0;
   }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Sat Apr 18 19:04:46 2015
@@ -1704,10 +1704,22 @@ private void constructOneLBLocationMap(F
     }
     List<Partition> out = new ArrayList<Partition>();
     try {
-      // TODO: normally, the result is not necessary; might make sense to pass false
-      for (org.apache.hadoop.hive.metastore.api.Partition outPart
-          : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) {
-        out.add(new Partition(tbl, outPart));
+      if (!addPartitionDesc.getReplaceMode()){
+        // TODO: normally, the result is not necessary; might make sense to pass false
+        for (org.apache.hadoop.hive.metastore.api.Partition outPart
+            : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) {
+          out.add(new Partition(tbl, outPart));
+        }
+      } else {
+        getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), in);
+        List<String> part_names = new ArrayList<String>();
+        for (org.apache.hadoop.hive.metastore.api.Partition p: in){
+          part_names.add(Warehouse.makePartName(tbl.getPartitionKeys(), p.getValues()));
+        }
+        for ( org.apache.hadoop.hive.metastore.api.Partition outPart :
+        getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(),part_names)){
+          out.add(new Partition(tbl,outPart));
+        }
       }
     } catch (Exception e) {
       LOG.error(StringUtils.stringifyException(e));

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java Sat Apr 18 19:04:46 2015
@@ -336,17 +336,27 @@ public class Table implements Serializab
     return outputFormatClass;
   }
 
+  /**
+   * Marker SemanticException, so that processing that allows for table validation failures
+   * and appropriately handles them can recover from these types of SemanticExceptions
+   */
+  public class ValidationFailureSemanticException extends SemanticException{
+    public ValidationFailureSemanticException(String s) {
+      super(s);
+    }
+  };
+
   final public void validatePartColumnNames(
       Map<String, String> spec, boolean shouldBeFull) throws SemanticException {
     List<FieldSchema> partCols = tTable.getPartitionKeys();
     if (partCols == null || (partCols.size() == 0)) {
       if (spec != null) {
-        throw new SemanticException("table is not partitioned but partition spec exists: " + spec);
+        throw new ValidationFailureSemanticException("table is not partitioned but partition spec exists: " + spec);
       }
       return;
     } else if (spec == null) {
       if (shouldBeFull) {
-        throw new SemanticException("table is partitioned but partition spec is not specified");
+        throw new ValidationFailureSemanticException("table is partitioned but partition spec is not specified");
       }
       return;
     }
@@ -358,10 +368,10 @@ public class Table implements Serializab
       if (columnsFound == spec.size()) break;
     }
     if (columnsFound < spec.size()) {
-      throw new SemanticException("Partition spec " + spec + " contains non-partition columns");
+      throw new ValidationFailureSemanticException("Partition spec " + spec + " contains non-partition columns");
     }
     if (shouldBeFull && (spec.size() != partCols.size())) {
-      throw new SemanticException("partition spec " + spec
+      throw new ValidationFailureSemanticException("partition spec " + spec
           + " doesn't contain all (" + partCols.size() + ") partition columns");
     }
   }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java Sat Apr 18 19:04:46 2015
@@ -37,6 +37,7 @@ import java.util.Set;
 import org.antlr.runtime.tree.CommonTree;
 import org.antlr.runtime.tree.Tree;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
@@ -304,16 +305,28 @@ public abstract class BaseSemanticAnalyz
     return getUnescapedName(tableOrColumnNode, null);
   }
 
+  public static Map.Entry<String,String> getDbTableNamePair(ASTNode tableNameNode) {
+    assert(tableNameNode.getToken().getType() == HiveParser.TOK_TABNAME);
+    if (tableNameNode.getChildCount() == 2) {
+      String dbName = unescapeIdentifier(tableNameNode.getChild(0).getText());
+      String tableName = unescapeIdentifier(tableNameNode.getChild(1).getText());
+      return Pair.of(dbName, tableName);
+    } else {
+      String tableName = unescapeIdentifier(tableNameNode.getChild(0).getText());
+      return Pair.of(null,tableName);
+    }
+  }
+
   public static String getUnescapedName(ASTNode tableOrColumnNode, String currentDatabase) {
     int tokenType = tableOrColumnNode.getToken().getType();
     if (tokenType == HiveParser.TOK_TABNAME) {
       // table node
-      if (tableOrColumnNode.getChildCount() == 2) {
-        String dbName = unescapeIdentifier(tableOrColumnNode.getChild(0).getText());
-        String tableName = unescapeIdentifier(tableOrColumnNode.getChild(1).getText());
+      Map.Entry<String,String> dbTablePair = getDbTableNamePair(tableOrColumnNode);
+      String dbName = dbTablePair.getKey();
+      String tableName = dbTablePair.getValue();
+      if (dbName != null){
         return dbName + "." + tableName;
       }
-      String tableName = unescapeIdentifier(tableOrColumnNode.getChild(0).getText());
       if (currentDatabase != null) {
         return currentDatabase + "." + tableName;
       }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Sat Apr 18 19:04:46 2015
@@ -829,6 +829,9 @@ public class DDLSemanticAnalyzer extends
     // configured not to fail silently
     boolean throwException =
         !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
+
+    ReplicationSpec replicationSpec = new ReplicationSpec(ast);
+
     Table tab = getTable(tableName, throwException);
     if (tab != null) {
       inputs.add(new ReadEntity(tab));
@@ -836,7 +839,7 @@ public class DDLSemanticAnalyzer extends
     }
 
     boolean ifPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null);
-    DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists, ifPurge);
+    DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists, ifPurge, replicationSpec);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         dropTblDesc), conf));
   }
@@ -2630,7 +2633,29 @@ public class DDLSemanticAnalyzer extends
     boolean canGroupExprs = ifExists;
 
     boolean mustPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null);
-    Table tab = getTable(qualified);
+    ReplicationSpec replicationSpec = new ReplicationSpec(ast);
+
+    Table tab = null;
+    try {
+      tab = getTable(qualified);
+    } catch (SemanticException se){
+      if (replicationSpec.isInReplicationScope() &&
+            (
+                (se.getCause() instanceof InvalidTableException)
+                ||  (se.getMessage().contains(ErrorMsg.INVALID_TABLE.getMsg()))
+            )){
+        // If we're inside a replication scope, then the table not existing is not an error.
+        // We just return in that case, no drop needed.
+        return;
+        // TODO : the contains message check is fragile, we should refactor SemanticException to be
+        // queriable for error code, and not simply have a message
+        // NOTE : IF_EXISTS might also want to invoke this, but there's a good possibility
+        // that IF_EXISTS is stricter about table existence, and applies only to the ptn.
+        // Therefore, ignoring IF_EXISTS here.
+      } else {
+        throw se;
+      }
+    }
     Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs =
         getFullPartitionSpecs(ast, tab, canGroupExprs);
     if (partSpecs.isEmpty()) return; // nothing to do
@@ -2644,7 +2669,7 @@ public class DDLSemanticAnalyzer extends
     addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists, ignoreProtection);
 
     DropTableDesc dropTblDesc =
-        new DropTableDesc(getDotName(qualified), partSpecs, expectView, ignoreProtection, mustPurge);
+        new DropTableDesc(getDotName(qualified), partSpecs, expectView, ignoreProtection, mustPurge, replicationSpec);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf));
   }
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java Sat Apr 18 19:04:46 2015
@@ -32,6 +32,7 @@ import java.util.Map;
 import java.util.StringTokenizer;
 import java.util.TreeMap;
 
+import com.google.common.base.Function;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,6 +40,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -53,6 +55,8 @@ import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;
 
+import javax.annotation.Nullable;
+
 /**
  *
  * EximUtil. Utility methods for the export/import semantic
@@ -169,8 +173,18 @@ public class EximUtil {
   /* If null, then the major version number should match */
   public static final String METADATA_FORMAT_FORWARD_COMPATIBLE_VERSION = null;
 
-  public static void createExportDump(FileSystem fs, Path metadataPath, org.apache.hadoop.hive.ql.metadata.Table tableHandle,
-      Iterable<org.apache.hadoop.hive.ql.metadata.Partition> partitions) throws SemanticException, IOException {
+  public static void createExportDump(FileSystem fs, Path metadataPath,
+      org.apache.hadoop.hive.ql.metadata.Table tableHandle,
+      Iterable<org.apache.hadoop.hive.ql.metadata.Partition> partitions,
+      ReplicationSpec replicationSpec) throws SemanticException, IOException {
+
+    if (replicationSpec == null){
+      replicationSpec = new ReplicationSpec(); // instantiate default values if not specified
+    }
+    if (tableHandle == null){
+      replicationSpec.setNoop(true);
+    }
+
     OutputStream out = fs.create(metadataPath);
     JsonGenerator jgen = (new JsonFactory()).createJsonGenerator(out);
     jgen.writeStartObject();
@@ -178,22 +192,63 @@ public class EximUtil {
     if (METADATA_FORMAT_FORWARD_COMPATIBLE_VERSION != null) {
       jgen.writeStringField("fcversion",METADATA_FORMAT_FORWARD_COMPATIBLE_VERSION);
     }
-    TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
-    try {
-      jgen.writeStringField("table", serializer.toString(tableHandle.getTTable(), "UTF-8"));
-      jgen.writeFieldName("partitions");
-      jgen.writeStartArray();
-      if (partitions != null) {
-        for (org.apache.hadoop.hive.ql.metadata.Partition partition : partitions) {
-          jgen.writeString(serializer.toString(partition.getTPartition(), "UTF-8"));
-          jgen.flush();
+
+    if (replicationSpec.isInReplicationScope()){
+      for (ReplicationSpec.KEY key : ReplicationSpec.KEY.values()){
+        String value = replicationSpec.get(key);
+        if (value != null){
+          jgen.writeStringField(key.toString(), value);
         }
       }
-      jgen.writeEndArray();
-    } catch (TException e) {
-      throw new SemanticException(
-          ErrorMsg.GENERIC_ERROR
-              .getMsg("Exception while serializing the metastore objects"), e);
+      if (tableHandle != null){
+        Table ttable = tableHandle.getTTable();
+        ttable.putToParameters(
+            ReplicationSpec.KEY.CURR_STATE_ID.toString(), replicationSpec.getCurrentReplicationState());
+        if ((ttable.getParameters().containsKey("EXTERNAL")) &&
+            (ttable.getParameters().get("EXTERNAL").equalsIgnoreCase("TRUE"))){
+          // Replication destination will not be external - override if set
+          ttable.putToParameters("EXTERNAL","FALSE");
+        }
+        if (ttable.isSetTableType() && ttable.getTableType().equalsIgnoreCase(TableType.EXTERNAL_TABLE.toString())){
+          // Replication dest will not be external - override if set
+          ttable.setTableType(TableType.MANAGED_TABLE.toString());
+        }
+      }
+    } else {
+      // ReplicationSpec.KEY scopeKey = ReplicationSpec.KEY.REPL_SCOPE;
+      // write(out, ",\""+ scopeKey.toString() +"\":\"" + replicationSpec.get(scopeKey) + "\"");
+      // TODO: if we want to be explicit about this dump not being a replication dump, we can
+      // uncomment this else section, but it is currently unneeded. Will require a lot of golden file
+      // regen if we do so.
+    }
+    if ((tableHandle != null) && (!replicationSpec.isNoop())){
+      TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
+      try {
+        jgen.writeStringField("table", serializer.toString(tableHandle.getTTable(), "UTF-8"));
+        jgen.writeFieldName("partitions");
+        jgen.writeStartArray();
+        if (partitions != null) {
+          for (org.apache.hadoop.hive.ql.metadata.Partition partition : partitions) {
+            Partition tptn = partition.getTPartition();
+            if (replicationSpec.isInReplicationScope()){
+              tptn.putToParameters(
+                  ReplicationSpec.KEY.CURR_STATE_ID.toString(), replicationSpec.getCurrentReplicationState());
+              if ((tptn.getParameters().containsKey("EXTERNAL")) &&
+                  (tptn.getParameters().get("EXTERNAL").equalsIgnoreCase("TRUE"))){
+                // Replication destination will not be external
+                tptn.putToParameters("EXTERNAL", "FALSE");
+              }
+            }
+            jgen.writeString(serializer.toString(tptn, "UTF-8"));
+            jgen.flush();
+          }
+        }
+        jgen.writeEndArray();
+      } catch (TException e) {
+        throw new SemanticException(
+            ErrorMsg.GENERIC_ERROR
+                .getMsg("Exception while serializing the metastore objects"), e);
+      }
     }
     jgen.writeEndObject();
     jgen.close(); // JsonGenerator owns the OutputStream, so it closes it when we call close.
@@ -203,8 +258,37 @@ public class EximUtil {
     out.write(s.getBytes("UTF-8"));
   }
 
-  public static Map.Entry<Table, List<Partition>>
-      readMetaData(FileSystem fs, Path metadataPath)
+  /**
+   * Utility class to help return complex value from readMetaData function
+   */
+  public static class ReadMetaData {
+    private final Table table;
+    private final Iterable<Partition> partitions;
+    private final ReplicationSpec replicationSpec;
+
+    public ReadMetaData(){
+      this(null,null,new ReplicationSpec());
+    }
+    public ReadMetaData(Table table, Iterable<Partition> partitions, ReplicationSpec replicationSpec){
+      this.table = table;
+      this.partitions = partitions;
+      this.replicationSpec = replicationSpec;
+    }
+
+    public Table getTable() {
+      return table;
+    }
+
+    public Iterable<Partition> getPartitions() {
+      return partitions;
+    }
+
+    public ReplicationSpec getReplicationSpec() {
+      return replicationSpec;
+    }
+  };
+
+  public static ReadMetaData readMetaData(FileSystem fs, Path metadataPath)
       throws IOException, SemanticException {
     FSDataInputStream mdstream = null;
     try {
@@ -219,24 +303,27 @@ public class EximUtil {
       String md = new String(sb.toByteArray(), "UTF-8");
       JSONObject jsonContainer = new JSONObject(md);
       String version = jsonContainer.getString("version");
-      String fcversion = null;
-      try {
-        fcversion = jsonContainer.getString("fcversion");
-      } catch (JSONException ignored) {}
+      String fcversion = getJSONStringEntry(jsonContainer, "fcversion");
       checkCompatibility(version, fcversion);
-      String tableDesc = jsonContainer.getString("table");
-      Table table = new Table();
-      TDeserializer deserializer = new TDeserializer(new TJSONProtocol.Factory());
-      deserializer.deserialize(table, tableDesc, "UTF-8");
-      JSONArray jsonPartitions = new JSONArray(jsonContainer.getString("partitions"));
-      List<Partition> partitionsList = new ArrayList<Partition>(jsonPartitions.length());
-      for (int i = 0; i < jsonPartitions.length(); ++i) {
-        String partDesc = jsonPartitions.getString(i);
-        Partition partition = new Partition();
-        deserializer.deserialize(partition, partDesc, "UTF-8");
-        partitionsList.add(partition);
+      String tableDesc = getJSONStringEntry(jsonContainer,"table");
+      Table table = null;
+      List<Partition> partitionsList = null;
+      if (tableDesc != null){
+        table = new Table();
+        TDeserializer deserializer = new TDeserializer(new TJSONProtocol.Factory());
+        deserializer.deserialize(table, tableDesc, "UTF-8");
+        // TODO : jackson-streaming-iterable-redo this
+        JSONArray jsonPartitions = new JSONArray(jsonContainer.getString("partitions"));
+        partitionsList = new ArrayList<Partition>(jsonPartitions.length());
+        for (int i = 0; i < jsonPartitions.length(); ++i) {
+          String partDesc = jsonPartitions.getString(i);
+          Partition partition = new Partition();
+          deserializer.deserialize(partition, partDesc, "UTF-8");
+          partitionsList.add(partition);
+        }
       }
-      return new AbstractMap.SimpleEntry<Table, List<Partition>>(table, partitionsList);
+
+      return new ReadMetaData(table, partitionsList,readReplicationSpec(jsonContainer));
     } catch (JSONException e) {
       throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg("Error in serializing metadata"), e);
     } catch (TException e) {
@@ -248,6 +335,24 @@ public class EximUtil {
     }
   }
 
+  private static ReplicationSpec readReplicationSpec(final JSONObject jsonContainer){
+    Function<String,String> keyFetcher = new Function<String, String>() {
+      @Override
+      public String apply(@Nullable String s) {
+        return getJSONStringEntry(jsonContainer,s);
+      }
+    };
+    return new ReplicationSpec(keyFetcher);
+  }
+
+  private static String getJSONStringEntry(JSONObject jsonContainer, String name) {
+    String retval = null;
+    try {
+      retval = jsonContainer.getString(name);
+    } catch (JSONException ignored) {}
+    return retval;
+  }
+
   /* check the forward and backward compatibility */
   private static void checkCompatibility(String version, String fcVersion) throws SemanticException {
     doCheckCompatibility(

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java Sat Apr 18 19:04:46 2015
@@ -33,8 +33,10 @@ import org.apache.hadoop.hive.ql.ErrorMs
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.CopyWork;
 
 /**
@@ -43,6 +45,8 @@ import org.apache.hadoop.hive.ql.plan.Co
  */
 public class ExportSemanticAnalyzer extends BaseSemanticAnalyzer {
 
+  private ReplicationSpec replicationSpec;
+
   public ExportSemanticAnalyzer(HiveConf conf) throws SemanticException {
     super(conf);
   }
@@ -52,13 +56,57 @@ public class ExportSemanticAnalyzer exte
     Tree tableTree = ast.getChild(0);
     Tree toTree = ast.getChild(1);
 
+    if (ast.getChildCount() > 2) {
+      replicationSpec = new ReplicationSpec((ASTNode) ast.getChild(2));
+    } else {
+      replicationSpec = new ReplicationSpec();
+    }
+
     // initialize export path
     String tmpPath = stripQuotes(toTree.getText());
     URI toURI = EximUtil.getValidatedURI(conf, tmpPath);
 
     // initialize source table/partition
-    TableSpec ts = new TableSpec(db, conf, (ASTNode) tableTree, false, true);
-    EximUtil.validateTable(ts.tableHandle);
+    TableSpec ts;
+
+    try {
+      ts = new TableSpec(db, conf, (ASTNode) tableTree, false, true);
+    } catch (SemanticException sme){
+      if ((replicationSpec.isInReplicationScope()) &&
+            ((sme.getCause() instanceof InvalidTableException)
+            || (sme instanceof Table.ValidationFailureSemanticException)
+            )
+          ){
+        // If we're in replication scope, it's possible that we're running the export long after
+        // the table was dropped, so the table not existing currently or being a different kind of
+        // table is not an error - it simply means we should no-op, and let a future export
+        // capture the appropriate state
+        ts = null;
+      } else {
+        throw sme;
+      }
+    }
+
+    if (ts != null) {
+      try {
+        EximUtil.validateTable(ts.tableHandle);
+        if (replicationSpec.isInReplicationScope()
+            && ts.tableHandle.isTemporary()){
+          // No replication for temporary tables either
+          ts = null;
+        }
+
+      } catch (SemanticException e) {
+        // table was a view, a non-native table or an offline table.
+        // ignore for replication, error if not.
+        if (replicationSpec.isInReplicationScope()){
+          ts = null; // null out ts so we can't use it.
+        } else {
+          throw e;
+        }
+      }
+    }
+
     try {
       FileSystem fs = FileSystem.get(toURI, conf);
       Path toPath = new Path(toURI.getScheme(), toURI.getAuthority(), toURI.getPath());
@@ -83,14 +131,34 @@ public class ExportSemanticAnalyzer exte
 
     PartitionIterable partitions = null;
     try {
-      if (ts.tableHandle.isPartitioned()) {
-        partitions = (ts.partitions != null) ?
-            new PartitionIterable(ts.partitions) :
-            new PartitionIterable(db,ts.tableHandle,null,conf.getIntVar(
+      replicationSpec.setCurrentReplicationState(String.valueOf(db.getMSC().getCurrentNotificationEventId().getEventId()));
+      if ( (ts != null) && (ts.tableHandle.isPartitioned())){
+        if (ts.specType == TableSpec.SpecType.TABLE_ONLY){
+          // TABLE-ONLY, fetch partitions if regular export, don't if metadata-only
+          if (replicationSpec.isMetadataOnly()){
+            partitions = null;
+          } else {
+            partitions = new PartitionIterable(db,ts.tableHandle,null,conf.getIntVar(
                 HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
+          }
+        } else {
+          // PARTITIONS specified - partitions inside tableSpec
+          partitions = new PartitionIterable(ts.partitions);
+        }
+      } else {
+        // Either tableHandle isn't partitioned => null, or repl-export after ts becomes null => null.
+        // or this is a noop-replication export, so we can skip looking at ptns.
+        partitions = null;
       }
+
       Path path = new Path(ctx.getLocalTmpPath(), "_metadata");
-      EximUtil.createExportDump(FileSystem.getLocal(conf), path, ts.tableHandle, partitions);
+      EximUtil.createExportDump(
+          FileSystem.getLocal(conf),
+          path,
+          (ts != null ? ts.tableHandle: null),
+          partitions,
+          replicationSpec);
+
       Task<? extends Serializable> rTask = TaskFactory.get(new CopyWork(
           path, new Path(toURI), false), conf);
       rootTasks.add(rTask);
@@ -102,26 +170,29 @@ public class ExportSemanticAnalyzer exte
               .getMsg("Exception while writing out the local file"), e);
     }
 
-    Path parentPath = new Path(toURI);
-
-    if (ts.tableHandle.isPartitioned()) {
-      for (Partition partition : partitions) {
-        Path fromPath = partition.getDataLocation();
-        Path toPartPath = new Path(parentPath, partition.getName());
-        Task<? extends Serializable> rTask = TaskFactory.get(
-            new CopyWork(fromPath, toPartPath, false),
-            conf);
+    if (!(replicationSpec.isMetadataOnly() || (ts == null))) {
+      Path parentPath = new Path(toURI);
+      if (ts.tableHandle.isPartitioned()) {
+        for (Partition partition : partitions) {
+          Path fromPath = partition.getDataLocation();
+          Path toPartPath = new Path(parentPath, partition.getName());
+          Task<? extends Serializable> rTask = TaskFactory.get(
+              new CopyWork(fromPath, toPartPath, false),
+              conf);
+          rootTasks.add(rTask);
+          inputs.add(new ReadEntity(partition));
+        }
+      } else {
+        Path fromPath = ts.tableHandle.getDataLocation();
+        Path toDataPath = new Path(parentPath, "data");
+        Task<? extends Serializable> rTask = TaskFactory.get(new CopyWork(
+            fromPath, toDataPath, false), conf);
         rootTasks.add(rTask);
-        inputs.add(new ReadEntity(partition));
+        inputs.add(new ReadEntity(ts.tableHandle));
       }
-    } else {
-      Path fromPath = ts.tableHandle.getDataLocation();
-      Path toDataPath = new Path(parentPath, "data");
-      Task<? extends Serializable> rTask = TaskFactory.get(new CopyWork(
-          fromPath, toDataPath, false), conf);
-      rootTasks.add(rTask);
-      inputs.add(new ReadEntity(ts.tableHandle));
+      outputs.add(toWriteEntity(parentPath));
     }
-    outputs.add(toWriteEntity(parentPath));
+
   }
+
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g Sat Apr 18 19:04:46 2015
@@ -94,6 +94,8 @@ KW_UNION: 'UNION';
 KW_LOAD: 'LOAD';
 KW_EXPORT: 'EXPORT';
 KW_IMPORT: 'IMPORT';
+KW_REPLICATION: 'REPLICATION';
+KW_METADATA: 'METADATA';
 KW_DATA: 'DATA';
 KW_INPATH: 'INPATH';
 KW_IS: 'IS';

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Sat Apr 18 19:04:46 2015
@@ -96,6 +96,8 @@ TOK_CROSSJOIN;
 TOK_LOAD;
 TOK_EXPORT;
 TOK_IMPORT;
+TOK_REPLICATION;
+TOK_METADATA;
 TOK_NULL;
 TOK_ISNULL;
 TOK_ISNOTNULL;
@@ -687,17 +689,30 @@ loadStatement
     -> ^(TOK_LOAD $path $tab $islocal? $isoverwrite?)
     ;
 
+replicationClause
+@init { pushMsg("replication clause", state); }
+@after { popMsg(state); }
+    : KW_FOR (isMetadataOnly=KW_METADATA)? KW_REPLICATION LPAREN (replId=StringLiteral) RPAREN
+    -> ^(TOK_REPLICATION $replId $isMetadataOnly?)
+    ;
+
 exportStatement
 @init { pushMsg("export statement", state); }
 @after { popMsg(state); }
-    : KW_EXPORT KW_TABLE (tab=tableOrPartition) KW_TO (path=StringLiteral)
-    -> ^(TOK_EXPORT $tab $path)
+    : KW_EXPORT
+      KW_TABLE (tab=tableOrPartition)
+      KW_TO (path=StringLiteral)
+      replicationClause?
+    -> ^(TOK_EXPORT $tab $path replicationClause?)
     ;
 
 importStatement
 @init { pushMsg("import statement", state); }
 @after { popMsg(state); }
-	: KW_IMPORT ((ext=KW_EXTERNAL)? KW_TABLE (tab=tableOrPartition))? KW_FROM (path=StringLiteral) tableLocation?
+       : KW_IMPORT
+         ((ext=KW_EXTERNAL)? KW_TABLE (tab=tableOrPartition))?
+         KW_FROM (path=StringLiteral)
+         tableLocation?
     -> ^(TOK_IMPORT $path $tab? $ext? tableLocation?)
     ;
 
@@ -958,7 +973,8 @@ dropIndexStatement
 dropTableStatement
 @init { pushMsg("drop statement", state); }
 @after { popMsg(state); }
-    : KW_DROP KW_TABLE ifExists? tableName KW_PURGE? -> ^(TOK_DROPTABLE tableName ifExists? KW_PURGE?)
+    : KW_DROP KW_TABLE ifExists? tableName KW_PURGE? replicationClause?
+    -> ^(TOK_DROPTABLE tableName ifExists? KW_PURGE? replicationClause?)
     ;
 
 alterStatement
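
With the optional replicationClause, DROP TABLE under replication scope gets the "drop only if older than the event" behaviour implemented in DDLTask above; for example (name and event id illustrative):

    DROP TABLE IF EXISTS sales FOR REPLICATION('40');
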
@@ -1135,9 +1151,9 @@ partitionLocation
 alterStatementSuffixDropPartitions[boolean table]
 @init { pushMsg("drop partition statement", state); }
 @after { popMsg(state); }
-    : KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection? KW_PURGE?
-    -> { table }? ^(TOK_ALTERTABLE_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection? KW_PURGE?)
-    ->            ^(TOK_ALTERVIEW_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection?)
+    : KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection? replicationClause?
+    -> { table }? ^(TOK_ALTERTABLE_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection? replicationClause?)
+    ->            ^(TOK_ALTERVIEW_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection? replicationClause?)
     ;
 
 alterStatementSuffixProperties
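
Likewise for partition drops under replication scope, where only partitions older than the event are removed (again, names and event id are illustrative):

    ALTER TABLE sales DROP IF EXISTS PARTITION (dt='2015-04-17') FOR REPLICATION('41');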

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g?rev=1674557&r1=1674556&r2=1674557&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g Sat Apr 18 19:04:46 2015
@@ -620,10 +620,10 @@ nonReserved
     | KW_FIRST | KW_FORMAT | KW_FORMATTED | KW_FUNCTIONS | KW_HOLD_DDLTIME | KW_HOUR | KW_IDXPROPERTIES | KW_IGNORE
     | KW_INDEX | KW_INDEXES | KW_INPATH | KW_INPUTDRIVER | KW_INPUTFORMAT | KW_ITEMS | KW_JAR
     | KW_KEYS | KW_KEY_TYPE | KW_LIMIT | KW_LINES | KW_LOAD | KW_LOCATION | KW_LOCK | KW_LOCKS | KW_LOGICAL | KW_LONG
-    | KW_MAPJOIN | KW_MATERIALIZED | KW_MINUS | KW_MINUTE | KW_MONTH | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_OFFLINE | KW_OPTION
-    | KW_OUTPUTDRIVER | KW_OUTPUTFORMAT | KW_OVERWRITE | KW_OWNER | KW_PARTITIONED | KW_PARTITIONS | KW_PLUS | KW_PRETTY | KW_PRINCIPALS
-    | KW_PROTECTION | KW_PURGE | KW_READ | KW_READONLY | KW_REBUILD | KW_RECORDREADER | KW_RECORDWRITER
-    | KW_REGEXP | KW_RELOAD | KW_RENAME | KW_REPAIR | KW_REPLACE | KW_RESTRICT | KW_REWRITE | KW_RLIKE
+    | KW_MAPJOIN | KW_MATERIALIZED | KW_METADATA | KW_MINUS | KW_MINUTE | KW_MONTH | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_OFFLINE
+    | KW_OPTION | KW_OUTPUTDRIVER | KW_OUTPUTFORMAT | KW_OVERWRITE | KW_OWNER | KW_PARTITIONED | KW_PARTITIONS | KW_PLUS | KW_PRETTY
+    | KW_PRINCIPALS | KW_PROTECTION | KW_PURGE | KW_READ | KW_READONLY | KW_REBUILD | KW_RECORDREADER | KW_RECORDWRITER
+    | KW_REGEXP | KW_RELOAD | KW_RENAME | KW_REPAIR | KW_REPLACE | KW_REPLICATION | KW_RESTRICT | KW_REWRITE | KW_RLIKE
     | KW_ROLE | KW_ROLES | KW_SCHEMA | KW_SCHEMAS | KW_SECOND | KW_SEMI | KW_SERDE | KW_SERDEPROPERTIES | KW_SERVER | KW_SETS | KW_SHARED
     | KW_SHOW | KW_SHOW_DATABASE | KW_SKEWED | KW_SORT | KW_SORTED | KW_SSL | KW_STATISTICS | KW_STORED
     | KW_STREAMTABLE | KW_STRING | KW_STRUCT | KW_TABLES | KW_TBLPROPERTIES | KW_TEMPORARY | KW_TERMINATED


