hive-commits mailing list archives

From ser...@apache.org
Subject svn commit: r1654355 [2/27] - in /hive/branches/llap: ./ beeline/src/java/org/apache/hive/beeline/ cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ data/conf/ data/conf...
Date Fri, 23 Jan 2015 19:59:24 GMT

Propchange: hive/branches/llap/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jan 23 19:59:11 2015
@@ -1,5 +1,6 @@
 /hive/branches/branch-0.11:1480385,1480458,1481120,1481344,1481346,1481348,1481352,1483872,1505184
 /hive/branches/cbo:1605012-1627125
+/hive/branches/spark:1608589-1651242
 /hive/branches/tez:1494760-1622766
 /hive/branches/vectorization:1466908-1527856
-/hive/trunk:1624170-1649725
+/hive/trunk:1624170-1654300

Modified: hive/branches/llap/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java (original)
+++ hive/branches/llap/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java Fri Jan 23 19:59:11 2015
@@ -17,6 +17,7 @@
  */
 package org.apache.hive.beeline;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaException;
@@ -97,55 +98,65 @@ public class HiveSchemaHelper {
     }
 
     static final String DEFAUTL_DELIMITER = ";";
-    /***
+
+    /**
     * Check whether the given command is a partial SQL statement
+     *
      * @param dbCommand
      * @return
      */
     public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException;
 
-    /** Parse the DB specific nesting format and extract the inner script name if any
+    /**
+     * Parse the DB specific nesting format and extract the inner script name if any
+     *
      * @param dbCommand command from parent script
      * @return
     * @throws IllegalArgumentException
      */
     public String getScriptName(String dbCommand) throws IllegalArgumentException;
 
-    /***
+    /**
      * Find if the given command is a nested script execution
+     *
      * @param dbCommand
      * @return
      */
     public boolean isNestedScript(String dbCommand);
 
-    /***
+    /**
      * Find if the given command should not be passed to DB
+     *
      * @param dbCommand
      * @return
      */
     public boolean isNonExecCommand(String dbCommand);
 
-    /***
+    /**
      * Get the SQL statement delimiter
+     *
      * @return
      */
     public String getDelimiter();
 
-    /***
+    /**
      * Clear any client specific tags
+     *
      * @return
      */
     public String cleanseCommand(String dbCommand);
 
-    /***
+    /**
     * Does the DB require table/column names to be quoted?
+     *
      * @return
      */
     public boolean needsQuotedIdentifier();
 
-    /***
+    /**
      * Flatten the nested upgrade script into a buffer
-     * @param scriptDir upgrade script directory
+     *
+     * @param scriptDir  upgrade script directory
      * @param scriptFile upgrade script file
      * @return string of sql commands
      */
@@ -258,6 +269,8 @@ public class HiveSchemaHelper {
     private void setDbOpts(String dbOpts) {
       if (dbOpts != null) {
         this.dbOpts = Lists.newArrayList(dbOpts.split(","));
+      } else {
+        this.dbOpts = Lists.newArrayList();
       }
     }
 
@@ -369,6 +382,10 @@ public class HiveSchemaHelper {
   // Postgres specific parser
   public static class PostgresCommandParser extends AbstractCommandParser {
     private static String POSTGRES_NESTING_TOKEN = "\\i";
+    @VisibleForTesting
+    public static String POSTGRES_STANDARD_STRINGS_OPT = "SET standard_conforming_strings";
+    @VisibleForTesting
+    public static String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81";
 
     public PostgresCommandParser(String dbOpts, String msUsername, String msPassword,
         HiveConf hiveConf) {
@@ -394,6 +411,19 @@ public class HiveSchemaHelper {
     public boolean needsQuotedIdentifier() {
       return true;
     }
+
+    @Override
+    public boolean isNonExecCommand(String dbCommand) {
+      // Skip "standard_conforming_strings" command which is read-only in older
+      // Postgres versions like 8.1
+      // See: http://www.postgresql.org/docs/8.2/static/release-8-1.html
+      if (getDbOpts().contains(POSTGRES_SKIP_STANDARD_STRINGS_DBOPT)) {
+        if (dbCommand.startsWith(POSTGRES_STANDARD_STRINGS_OPT)) {
+          return true;
+        }
+      }
+      return super.isNonExecCommand(dbCommand);
+    }
   }
 
   //Oracle specific parser

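For context, the PostgresCommandParser change above makes the schema tool treat the read-only "SET standard_conforming_strings" statement as non-executable whenever the "postgres.filter.81" dbOpt is supplied. A minimal sketch of the intended behavior (nulls stand in for the metastore credentials, which this check does not consult):

    HiveSchemaHelper.PostgresCommandParser parser =
        new HiveSchemaHelper.PostgresCommandParser(
            "postgres.filter.81", null, null, new HiveConf());
    // With the dbOpt present, the 8.1-incompatible command is skipped:
    boolean skipped = parser.isNonExecCommand("SET standard_conforming_strings = off;"); // true
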
Modified: hive/branches/llap/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java (original)
+++ hive/branches/llap/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java Fri Jan 23 19:59:11 2015
@@ -178,7 +178,8 @@ public class HiveSchemaTool {
         getConnectionToMetastore(false));
     // verify that the new version is added to schema
     if (!MetaStoreSchemaInfo.getHiveSchemaVersion().equalsIgnoreCase(newSchemaVersion)) {
-      throw new HiveMetaException("Found unexpected schema version " + newSchemaVersion);
+      throw new HiveMetaException("Expected schema version " + MetaStoreSchemaInfo.getHiveSchemaVersion() +
+        ", found version " + newSchemaVersion);
     }
   }
 

Modified: hive/branches/llap/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java (original)
+++ hive/branches/llap/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java Fri Jan 23 19:59:11 2015
@@ -40,6 +40,7 @@ import com.google.common.base.Splitter;
 import jline.console.ConsoleReader;
 import jline.console.completer.Completer;
 import jline.console.history.FileHistory;
+import jline.console.history.PersistentHistory;
 import jline.console.completer.StringsCompleter;
 import jline.console.completer.ArgumentCompleter;
 import jline.console.completer.ArgumentCompleter.ArgumentDelimiter;
@@ -721,10 +722,12 @@ public class CliDriver {
     String line;
     final String HISTORYFILE = ".hivehistory";
     String historyDirectory = System.getProperty("user.home");
+    PersistentHistory history = null;
     try {
       if ((new File(historyDirectory)).exists()) {
         String historyFile = historyDirectory + File.separator + HISTORYFILE;
-        reader.setHistory(new FileHistory(new File(historyFile)));
+        history = new FileHistory(new File(historyFile));
+        reader.setHistory(history);
       } else {
         System.err.println("WARNING: Directory for Hive history file: " + historyDirectory +
                            " does not exist.   History will not be available during this session.");
@@ -759,6 +762,10 @@ public class CliDriver {
         continue;
       }
     }
+
+    if (history != null) {
+      history.flush();
+    }
     return ret;
   }
 

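The CliDriver hunks above type the history as PersistentHistory so it can be flushed once the read loop finishes; FileHistory otherwise buffers the session's entries only in memory. A minimal sketch of the pattern, assuming jline2 on the classpath:

    static void runReadLoop() throws IOException {
      ConsoleReader reader = new ConsoleReader();
      PersistentHistory history = new FileHistory(
          new File(System.getProperty("user.home"), ".hivehistory"));
      reader.setHistory(history);
      try {
        String line;
        while ((line = reader.readLine("hive> ")) != null) {
          // dispatch the command here ...
        }
      } finally {
        history.flush();  // persist buffered entries to ~/.hivehistory
      }
    }
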
Modified: hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/FileUtils.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/FileUtils.java (original)
+++ hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/FileUtils.java Fri Jan 23 19:59:11 2015
@@ -53,17 +53,19 @@ import org.apache.hadoop.util.Shell;
 public final class FileUtils {
   private static final Log LOG = LogFactory.getLog(FileUtils.class.getName());
 
-  /**
-   * Accept all paths.
-   */
-  private static class AcceptAllPathFilter implements PathFilter {
-    @Override
-    public boolean accept(Path path) {
-      return true;
+  public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
+    public boolean accept(Path p) {
+      String name = p.getName();
+      return !name.startsWith("_") && !name.startsWith(".");
+    }
+  };
+
+  public static final PathFilter STAGING_DIR_PATH_FILTER = new PathFilter() {
+    public boolean accept(Path p) {
+      String name = p.getName();
+      return !name.startsWith(".");
     }
-  }
-
-  private static final PathFilter allPathFilter = new AcceptAllPathFilter();
+  };
 
   /**
    * Variant of Path.makeQualified that qualifies the input path against the default file system
@@ -319,14 +321,7 @@ public final class FileUtils {
       List<FileStatus> results) throws IOException {
 
     if (fileStatus.isDir()) {
-      for (FileStatus stat : fs.listStatus(fileStatus.getPath(), new PathFilter() {
-
-        @Override
-        public boolean accept(Path p) {
-          String name = p.getName();
-          return !name.startsWith("_") && !name.startsWith(".");
-        }
-      })) {
+      for (FileStatus stat : fs.listStatus(fileStatus.getPath(), HIDDEN_FILES_PATH_FILTER)) {
         listStatusRecursively(fs, stat, results);
       }
     } else {
@@ -366,7 +361,6 @@ public final class FileUtils {
    *             check will be performed within a doAs() block to use the access privileges
    *             of this user. In this case the user must be configured to impersonate other
    *             users, otherwise this check will fail with error.
-   * @param groups  List of groups for the user
    * @throws IOException
    * @throws AccessControlException
    * @throws InterruptedException
@@ -547,10 +541,25 @@ public final class FileUtils {
     boolean deleteSource,
     boolean overwrite,
     HiveConf conf) throws IOException {
-    boolean copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
+
+    HadoopShims shims = ShimLoader.getHadoopShims();
+    boolean copied;
+
+    /* Run distcp if source file/dir is too big */
+    if (srcFS.getUri().getScheme().equals("hdfs") &&
+        srcFS.getFileStatus(src).getLen() > conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE)) {
+      LOG.info("Source is " + srcFS.getFileStatus(src).getLen() + " bytes. (MAX: " + conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE) + ")");
+      LOG.info("Launch distributed copy (distcp) job.");
+      copied = shims.runDistCp(src, dst, conf);
+      if (copied && deleteSource) {
+        srcFS.delete(src, true);
+      }
+    } else {
+      copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
+    }
+
     boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
     if (copied && inheritPerms) {
-      HadoopShims shims = ShimLoader.getHadoopShims();
       HdfsFileStatus fullFileStatus = shims.getFullFileStatus(conf, dstFS, dst);
       try {
         shims.setFullFileStatus(conf, fullFileStatus, dstFS, dst);
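
In the hunk above, a distcp job replaces the single-threaded FileUtil.copy whenever the source is on HDFS and larger than hive.exec.copyfile.maxsize (added to HiveConf later in this commit). A sketch of driving the cutoff from configuration; the wrapper method and the 64 MB value are illustrative:

    static boolean copyWithThreshold(FileSystem srcFS, Path src,
        FileSystem dstFS, Path dst) throws IOException {
      HiveConf conf = new HiveConf();
      // Sources above the threshold are copied by a distributed distcp job;
      // smaller ones still go through the regular FileUtil.copy path.
      conf.setLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE, 64L * 1024 * 1024);
      return FileUtils.copy(srcFS, src, dstFS, dst,
          false /* deleteSource */, true /* overwrite */, conf);
    }
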
@@ -571,7 +580,7 @@ public final class FileUtils {
    * @throws IOException
    */
   public static boolean trashFilesUnderDir(FileSystem fs, Path f, Configuration conf) throws FileNotFoundException, IOException {
-    FileStatus[] statuses = fs.listStatus(f, allPathFilter);
+    FileStatus[] statuses = fs.listStatus(f, HIDDEN_FILES_PATH_FILTER);
     boolean result = true;
     for (FileStatus status : statuses) {
       result = result & moveToTrash(fs, status.getPath(), conf);
@@ -603,6 +612,25 @@ public final class FileUtils {
     return result;
   }
 
+  /**
+   * Check if first path is a subdirectory of second path.
+   * Both paths must belong to the same filesystem.
+   *
+   * @param p1 first path
+   * @param p2 second path
+   * @param fs FileSystem, both paths must belong to the same filesystem
+   * @return true if the first path is a subdirectory of the second
+   */
+  public static boolean isSubDir(Path p1, Path p2, FileSystem fs) {
+    String path1 = fs.makeQualified(p1).toString();
+    String path2 = fs.makeQualified(p2).toString();
+    if (path1.startsWith(path2)) {
+      return true;
+    }
+
+    return false;
+  }
+
   public static boolean renameWithPerms(FileSystem fs, Path sourcePath,
                                Path destPath, boolean inheritPerms,
                                Configuration conf) throws IOException {

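One caveat on the new isSubDir: a bare startsWith test also matches sibling paths that merely share a prefix, e.g. /user/tab10 against /user/tab1. A sketch of a stricter variant a caller could apply; this helper is hypothetical and not part of the commit:

    // Hypothetical stricter check: require a separator boundary after the parent.
    public static boolean isStrictSubDir(Path p1, Path p2, FileSystem fs) {
      String path1 = fs.makeQualified(p1).toString();
      String path2 = fs.makeQualified(p2).toString();
      return path1.equals(path2) || path1.startsWith(path2 + Path.SEPARATOR);
    }
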
Modified: hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java (original)
+++ hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java Fri Jan 23 19:59:11 2015
@@ -70,7 +70,7 @@ public class HiveStatsUtils {
       sb.append(Path.SEPARATOR).append("*");
     }
     Path pathPattern = new Path(path, sb.toString());
-    return fs.globStatus(pathPattern);
+    return fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER);
   }
 
 }

Modified: hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java (original)
+++ hive/branches/llap/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java Fri Jan 23 19:59:11 2015
@@ -55,6 +55,8 @@ public class StatsSetupConst {
       public String getAggregator(Configuration conf) {
         if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
           return "org.apache.hadoop.hive.ql.stats.CounterStatsAggregatorTez";
+        } else if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
+          return "org.apache.hadoop.hive.ql.stats.CounterStatsAggregatorSpark";
         }
         return "org.apache.hadoop.hive.ql.stats.CounterStatsAggregator"; }
     },

Modified: hive/branches/llap/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/llap/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Fri Jan 23 19:59:11 2015
@@ -79,6 +79,15 @@ public class HiveConf extends Configurat
   private final List<String> restrictList = new ArrayList<String>();
 
   private Pattern modWhiteListPattern = null;
+  private boolean isSparkConfigUpdated = false;
+
+  public boolean getSparkConfigUpdated() {
+    return isSparkConfigUpdated;
+  }
+
+  public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
+    this.isSparkConfigUpdated = isSparkConfigUpdated;
+  }
 
   static {
     ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
@@ -206,6 +215,10 @@ public class HiveConf extends Configurat
     PLAN_SERIALIZATION("hive.plan.serialization.format", "kryo",
         "Query plan format serialization between client and task nodes. \n" +
         "Two supported values are : kryo and javaXML. Kryo is default."),
+    STAGINGDIR("hive.exec.stagingdir", ".hive-staging",
+        "Directory name that will be created inside table locations in order to support HDFS encryption. " +
+        "This replaces ${hive.exec.scratchdir} for query results, with the exception of read-only tables. " +
+        "In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."),
     SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive",
         "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
         "For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
@@ -258,6 +271,10 @@ public class HiveConf extends Configurat
         "Comma-separated list of on-failure hooks to be invoked for each statement. \n" +
         "An on-failure hook is specified as the name of Java class which implements the \n" +
         "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
+    QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "",
+        "Comma-separated list of hooks to be invoked for each query which can \n" +
+        "transform the query before it's placed in the job.xml file. Must be a Java class which \n" +
+        "extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class."),
     CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "",
         "Comma-separated list of statistics publishers to be invoked on counters on each job. \n" +
         "A client stats publisher is specified as the name of a Java class which implements the \n" +
@@ -412,6 +429,9 @@ public class HiveConf extends Configurat
         "If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos."),
     METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false,
         "If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."),
+    METASTORE_USE_THRIFT_COMPACT_PROTOCOL("hive.metastore.thrift.compact.protocol.enabled", false,
+        "If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\n" +
+        "Setting it to true will break compatibility with older clients running TBinaryProtocol."),
     METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS("hive.cluster.delegation.token.store.class",
         "org.apache.hadoop.hive.thrift.MemoryTokenStore",
         "The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster."),
@@ -477,6 +497,9 @@ public class HiveConf extends Configurat
     METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", "",
         "List of comma separated listeners for metastore events."),
     METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "", ""),
+    METASTORE_EVENT_DB_LISTENER_TTL("hive.metastore.event.db.listener.timetolive", "86400s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "Time after which events will be removed from the database listener queue"),
     METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false,
         "Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" +
         "for operations like drop-partition (disallow the drop-partition if the user in\n" +
@@ -684,13 +707,6 @@ public class HiveConf extends Configurat
     HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
         "Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,\n" +
         "because memory-optimized hashtable cannot be serialized."),
-    HIVEMAPJOINUSEOPTIMIZEDKEYS("hive.mapjoin.optimized.keys", true,
-        "Whether MapJoin hashtable should use optimized (size-wise), keys, allowing the table to take less\n" +
-        "memory. Depending on key, the memory savings for entire table can be 5-15% or so."),
-    HIVEMAPJOINLAZYHASHTABLE("hive.mapjoin.lazy.hashtable", true,
-        "Whether MapJoin hashtable should deserialize values on demand. Depending on how many values in\n" +
-        "the table the join will actually touch, it can save a lot of memory by not creating objects for\n" +
-        "rows that are not needed. If all rows are needed obviously there's no gain."),
     HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 10 * 1024 * 1024,
         "Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" +
         "store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" +
@@ -734,6 +750,10 @@ public class HiveConf extends Configurat
         "cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" +
         "assumption that the original group by will reduce the data size."),
 
+    // Max filesize used to do a single copy (after that, distcp is used)
+    HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
+        "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories. " +
+        "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
 
     // for hive udtf operator
     HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false,
@@ -836,6 +856,7 @@ public class HiveConf extends Configurat
     HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false,
         "Merge small files at the end of a map-reduce job"),
     HIVEMERGETEZFILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"),
+    HIVEMERGESPARKFILES("hive.merge.sparkfiles", false, "Merge small files at the end of a Spark DAG Transformation"),
     HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000),
         "Size of merged files at the end of the job"),
     HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000),
@@ -1307,13 +1328,20 @@ public class HiveConf extends Configurat
         "The port of ZooKeeper servers to talk to.\n" +
         "If the list of Zookeeper servers specified in hive.zookeeper.quorum\n" +
         "does not contain port numbers, this value is used."),
-    HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", 600*1000,
-        "ZooKeeper client's session timeout. The client is disconnected, and as a result, all locks released, \n" +
+    HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", "600000ms",
+        new TimeValidator(TimeUnit.MILLISECONDS),
+        "ZooKeeper client's session timeout (in milliseconds). The client is disconnected, and as a result, all locks are released, \n" +
         "if a heartbeat is not sent in the timeout."),
     HIVE_ZOOKEEPER_NAMESPACE("hive.zookeeper.namespace", "hive_zookeeper_namespace",
         "The parent node under which all ZooKeeper nodes are created."),
     HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES("hive.zookeeper.clean.extra.nodes", false,
         "Clean extra nodes at the end of the session."),
+    HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES("hive.zookeeper.connection.max.retries", 3,
+        "Max number of times to retry when connecting to the ZooKeeper server."),
+    HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME("hive.zookeeper.connection.basesleeptime", "1000ms",
+        new TimeValidator(TimeUnit.MILLISECONDS),
+        "Initial amount of time (in milliseconds) to wait between retries\n" +
+        "when connecting to the ZooKeeper server when using ExponentialBackoffRetry policy."),
 
     // Transactions
     HIVE_TXN_MANAGER("hive.txn.manager",
@@ -1592,6 +1620,10 @@ public class HiveConf extends Configurat
         "inheriting the permission of the warehouse or database directory."),
     HIVE_INSERT_INTO_EXTERNAL_TABLES("hive.insert.into.external.tables", true,
         "whether insert into external tables is allowed"),
+    HIVE_TEMPORARY_TABLE_STORAGE(
+        "hive.exec.temporary.table.storage", "default", new StringSet("memory",
+         "ssd", "default"), "Define the storage policy for temporary tables. " +
+         "Choices are memory, ssd and default."),
 
     HIVE_DRIVER_RUN_HOOKS("hive.exec.driver.run.hooks", "",
         "A comma separated list of hooks which implement HiveDriverRunHook. Will be run at the beginning " +
@@ -1849,8 +1881,8 @@ public class HiveConf extends Configurat
     HIVE_DECODE_PARTITION_NAME("hive.decode.partition.name", false,
         "Whether to show the unquoted partition names in query results."),
 
-    HIVE_EXECUTION_ENGINE("hive.execution.engine", "mr", new StringSet("mr", "tez"),
-        "Chooses execution engine. Options are: mr (Map reduce, default) or tez (hadoop 2 only)"),
+    HIVE_EXECUTION_ENGINE("hive.execution.engine", "mr", new StringSet("mr", "tez", "spark"),
+        "Chooses execution engine. Options are: mr (Map reduce, default), tez (hadoop 2 only), spark"),
     HIVE_JAR_DIRECTORY("hive.jar.directory", null,
         "This is the location hive in tez mode will look for to find a site wide \n" +
         "installed hive instance."),
@@ -1967,6 +1999,11 @@ public class HiveConf extends Configurat
         "hive.tez.exec.inplace.progress",
         true,
         "Updates tez job execution progress in-place in the terminal."),
+    SPARK_CLIENT_FUTURE_TIMEOUT(
+        "hive.spark.client.future.timeout",
+        "60s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "Remote Spark client JobHandle future timeout value in seconds."),
 
     LLAP_ENABLED("hive.llap.enabled", true, ""),
     LLAP_LOW_LEVEL_CACHE("hive.llap.use.lowlevel.cache", true, ""),
@@ -1977,7 +2014,6 @@ public class HiveConf extends Configurat
     LLAP_REQUEST_THREAD_COUNT("hive.llap.request.thread.count", 16, ""),
     LLAP_USE_LRFU("hive.llap.use.lrfu", true, ""),
     LLAP_LRFU_LAMBDA("hive.llap.lrfu.lambda", 0.01f, "")
-
     ;
 
     public final String varname;
@@ -2218,6 +2254,7 @@ public class HiveConf extends Configurat
       throw new IllegalArgumentException("Cannot modify " + name + " at runtime. It is in the list"
           + "of parameters that can't be modified at runtime");
     }
+    isSparkConfigUpdated = name.startsWith("spark");
     set(name, value);
   }
 

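The HiveConf hunks above register "spark" as a third execution engine and flag runtime changes to spark* properties through isSparkConfigUpdated. A minimal sketch of the intended use; the session re-creation step is an assumption about the caller, not code from this commit:

    HiveConf conf = new HiveConf();
    conf.setVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "spark");
    // After a runtime "set spark.executor.memory=..." passes through the
    // modification path shown above, the flag reads true:
    if (conf.getSparkConfigUpdated()) {
      // re-create the remote Spark session with the new settings ...
      conf.setSparkConfigUpdated(false);
    }
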
Modified: hive/branches/llap/data/conf/hive-log4j.properties
URL: http://svn.apache.org/viewvc/hive/branches/llap/data/conf/hive-log4j.properties?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/data/conf/hive-log4j.properties (original)
+++ hive/branches/llap/data/conf/hive-log4j.properties Fri Jan 23 19:59:11 2015
@@ -42,7 +42,7 @@ log4j.appender.DRFA.layout=org.apache.lo
 # Pattern format: Date LogLevel LoggerName LogMessage
 #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 # Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
 
 # Warning: If you enable the following appender it will fill up your disk if you don't have a cleanup job!
 # This uses the updated rolling file appender from log4j-extras that supports a reliable time-based rolling policy.

Modified: hive/branches/llap/dev-support/jenkins-submit-build.sh
URL: http://svn.apache.org/viewvc/hive/branches/llap/dev-support/jenkins-submit-build.sh?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/dev-support/jenkins-submit-build.sh (original)
+++ hive/branches/llap/dev-support/jenkins-submit-build.sh Fri Jan 23 19:59:11 2015
@@ -30,12 +30,18 @@ case "$BUILD_PROFILE" in
    curl -v -i "$url"
    exit 0
   ;;
-  spark-mr2|spark2-mr2)
+  spark-mr2)
    test -n "$SPARK_URL" || fail "SPARK_URL must be specified"
    url="$SPARK_URL&ISSUE_NUM=$ISSUE_NUM"
    curl -v -i "$url"
    exit 0
   ;;
+  encryption-mr2)
+   test -n "$ENCRYPTION_URL" || fail "ENCRYPTION_URL must be specified"
+   url="$ENCRYPTION_URL&ISSUE_NUM=$ISSUE_NUM"
+   curl -v -i "$url"
+   exit 0
+  ;;
   *)
   echo "Unknown profile '$BUILD_PROFILE'"
   exit 1

Propchange: hive/branches/llap/hbase-handler/pom.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jan 23 19:59:11 2015
@@ -1,6 +1,6 @@
 /hive/branches/branch-0.11/hbase-handler/pom.xml:1480385,1480458,1481120,1481344,1481346,1481348,1481352,1483872,1505184
 /hive/branches/cbo/hbase-handler/pom.xml:1605012-1627125
-/hive/branches/spark/hbase-handler/pom.xml:1608589-1621357
+/hive/branches/spark/hbase-handler/pom.xml:1608589-1651242
 /hive/branches/tez/hbase-handler/pom.xml:1494760-1622766
 /hive/branches/vectorization/hbase-handler/pom.xml:1466908-1527856
-/hive/trunk/hbase-handler/pom.xml:1494760-1537575,1624170-1649725
+/hive/trunk/hbase-handler/pom.xml:1494760-1537575,1624170-1654300

Modified: hive/branches/llap/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java (original)
+++ hive/branches/llap/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java Fri Jan 23 19:59:11 2015
@@ -488,6 +488,14 @@ public class HBaseStorageHandler extends
     }
   }
 
+  private static Class counterClass = null;
+  static {
+    try {
+      counterClass = Class.forName("org.cliffc.high_scale_lib.Counter");
+    } catch (ClassNotFoundException cnfe) {
+      // this dependency is removed for HBase 1.0
+    }
+  }
   @Override
   public void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
     try {
@@ -498,9 +506,13 @@ public class HBaseStorageHandler extends
        * will not be required once Hive bumps up its hbase version). At that time , we will
        * only need TableMapReduceUtil.addDependencyJars(jobConf) here.
        */
-      TableMapReduceUtil.addDependencyJars(
-          jobConf, HBaseStorageHandler.class, TableInputFormatBase.class,
-          org.cliffc.high_scale_lib.Counter.class); // this will be removed for HBase 1.0
+      if (counterClass != null) {
+        TableMapReduceUtil.addDependencyJars(
+          jobConf, HBaseStorageHandler.class, TableInputFormatBase.class, counterClass);
+      } else {
+        TableMapReduceUtil.addDependencyJars(
+          jobConf, HBaseStorageHandler.class, TableInputFormatBase.class);
+      }
       Set<String> merged = new LinkedHashSet<String>(jobConf.getStringCollection("tmpjars"));
 
       Job copy = new Job(jobConf);

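The counterClass change above is the classic optional-dependency pattern: probe for the class once at load time, keep null when it is absent, and branch at the call site. A self-contained sketch of the same pattern; the wrapper class name is hypothetical:

    final class OptionalCounter {
      static final Class<?> COUNTER_CLASS;
      static {
        Class<?> c = null;
        try {
          c = Class.forName("org.cliffc.high_scale_lib.Counter");
        } catch (ClassNotFoundException e) {
          // Dependency removed as of HBase 1.0; callers must tolerate null.
        }
        COUNTER_CLASS = c;
      }
      private OptionalCounter() {}
    }
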
Modified: hive/branches/llap/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java (original)
+++ hive/branches/llap/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java Fri Jan 23 19:59:11 2015
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.KeyValueU
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -149,7 +150,7 @@ public class HiveHFileOutputFormat exten
           fs.mkdirs(columnFamilyPath);
           Path srcDir = outputdir;
           for (;;) {
-            FileStatus [] files = fs.listStatus(srcDir);
+            FileStatus [] files = fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER);
             if ((files == null) || (files.length == 0)) {
               throw new IOException("No family directories found in " + srcDir);
             }
@@ -161,7 +162,7 @@ public class HiveHFileOutputFormat exten
               break;
             }
           }
-          for (FileStatus regionFile : fs.listStatus(srcDir)) {
+          for (FileStatus regionFile : fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER)) {
             fs.rename(
               regionFile.getPath(),
               new Path(

Modified: hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java (original)
+++ hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java Fri Jan 23 19:59:11 2015
@@ -96,6 +96,19 @@ public final class HCatConstants {
   public static final String HCAT_DESIRED_PARTITION_NUM_SPLITS =
     "hcat.desired.partition.num.splits";
 
+  /**
+   * hcat.append.limit allows an HCat user to specify a custom append limit.
+   * While appending to an existing directory, HCat avoids naming clashes by
+   * appending _a_NNN, where NNN is a number, to the desired filename. By
+   * default, it only tries NNN from 0 to 999 before giving up, which can be a
+   * problem for tables with an extraordinarily large number of files. Ideally,
+   * this should be fixed by the user changing their usage pattern and doing
+   * some manner of compaction, but in the meantime, setting this parameter can
+   * be used to bump that limit.
+   */
+  public static final String HCAT_APPEND_LIMIT = "hcat.append.limit";
+
   // IMPORTANT IMPORTANT IMPORTANT!!!!!
   //The keys used to store info into the job Configuration.
   //If any new keys are added, the HCatStorer needs to be updated. The HCatStorer
@@ -132,8 +145,10 @@ public final class HCatConstants {
   public static final String HCAT_EVENT = "HCAT_EVENT";
   public static final String HCAT_ADD_PARTITION_EVENT = "ADD_PARTITION";
   public static final String HCAT_DROP_PARTITION_EVENT = "DROP_PARTITION";
+  public static final String HCAT_ALTER_PARTITION_EVENT = "ALTER_PARTITION";
   public static final String HCAT_PARTITION_DONE_EVENT = "PARTITION_DONE";
   public static final String HCAT_CREATE_TABLE_EVENT = "CREATE_TABLE";
+  public static final String HCAT_ALTER_TABLE_EVENT = "ALTER_TABLE";
   public static final String HCAT_DROP_TABLE_EVENT = "DROP_TABLE";
   public static final String HCAT_CREATE_DATABASE_EVENT = "CREATE_DATABASE";
   public static final String HCAT_DROP_DATABASE_EVENT = "DROP_DATABASE";

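A short sketch of raising the append limit documented above; the key is read by the FileOutputCommitterContainer hunk later in this commit, and the 5000 value is purely illustrative:

    Configuration jobConf = new Configuration();
    // Allow up to 5000 _a_NNN rename attempts instead of the default 1000.
    jobConf.setInt(HCatConstants.HCAT_APPEND_LIMIT, 5000);
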
Modified: hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java (original)
+++ hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java Fri Jan 23 19:59:11 2015
@@ -75,6 +75,8 @@ class FileOutputCommitterContainer exten
   static final String DYNTEMP_DIR_NAME = "_DYN";
   static final String SCRATCH_DIR_NAME = "_SCRATCH";
   private static final String APPEND_SUFFIX = "_a_";
+  private static final int APPEND_COUNTER_WARN_THRESHOLD = 1000;
+  private final int maxAppendAttempts;
 
   private static final Logger LOG = LoggerFactory.getLogger(FileOutputCommitterContainer.class);
   private final boolean dynamicPartitioningUsed;
@@ -112,6 +114,8 @@ class FileOutputCommitterContainer exten
     } else {
       customDynamicLocationUsed = false;
     }
+
+    this.maxAppendAttempts = context.getConfiguration().getInt(HCatConstants.HCAT_APPEND_LIMIT, APPEND_COUNTER_WARN_THRESHOLD);
   }
 
   @Override
@@ -646,19 +650,23 @@ class FileOutputCommitterContainer exten
           filetype = "";
         }
 
-        // Attempt to find COUNTER_MAX possible alternatives to a filename by
+        // Attempt to find maxAppendAttempts possible alternatives to a filename by
         // appending _a_N and seeing if that destination also clashes. If we're
         // still clashing after that, give up.
-        final int COUNTER_MAX = 1000;
         int counter = 1;
-        for (; fs.exists(itemDest) && counter < COUNTER_MAX ; counter++) {
+        for (; fs.exists(itemDest) && counter < maxAppendAttempts; counter++) {
           itemDest = new Path(dest, name + (APPEND_SUFFIX + counter) + filetype);
         }
 
-        if (counter == COUNTER_MAX){
+        if (counter == maxAppendAttempts){
           throw new HCatException(ErrorType.ERROR_MOVE_FAILED,
               "Could not find a unique destination path for move: file = "
                   + file + " , src = " + src + ", dest = " + dest);
+        } else if (counter > APPEND_COUNTER_WARN_THRESHOLD) {
+          LOG.warn("Append job used filename clash counter [" + counter
+              +"] which is greater than warning limit [" + APPEND_COUNTER_WARN_THRESHOLD
+              +"]. Please compact this table so that performance is not impacted."
+              + " Please see HIVE-9381 for details.");
         }
 
       }
@@ -696,7 +704,7 @@ class FileOutputCommitterContainer exten
 
       //      LOG.info("Searching for "+dynPathSpec);
       Path pathPattern = new Path(dynPathSpec);
-      FileStatus[] status = fs.globStatus(pathPattern);
+      FileStatus[] status = fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER);
 
       partitionsDiscoveredByPath = new LinkedHashMap<String, Map<String, String>>();
       contextDiscoveredByPath = new LinkedHashMap<String, JobContext>();

Modified: hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java (original)
+++ hive/branches/llap/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java Fri Jan 23 19:59:11 2015
@@ -20,9 +20,7 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;

Modified: hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java (original)
+++ hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java Fri Jan 23 19:59:11 2015
@@ -60,6 +60,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.events.ListenerEvent;
 import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
 import org.apache.hive.hcatalog.common.HCatConstants;
+import org.apache.hive.hcatalog.messaging.AlterTableMessage;
 import org.apache.hive.hcatalog.messaging.HCatEventMessage;
 import org.apache.hive.hcatalog.messaging.MessageFactory;
 import org.slf4j.Logger;
@@ -116,7 +117,7 @@ public class NotificationListener extend
     testAndCreateConnection();
   }
 
-  private static String getTopicName(Table table, ListenerEvent partitionEvent) {
+  private static String getTopicName(Table table) {
     return table.getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME);
   }
 
@@ -129,7 +130,7 @@ public class NotificationListener extend
     if (partitionEvent.getStatus()) {
       Table table = partitionEvent.getTable();
       List<Partition> partitions = partitionEvent.getPartitions();
-      String topicName = getTopicName(table, partitionEvent);
+      String topicName = getTopicName(table);
       if (topicName != null && !topicName.equals("")) {
         send(messageFactory.buildAddPartitionMessage(table, partitions), topicName);
       } else {
@@ -144,6 +145,17 @@ public class NotificationListener extend
     }
   }
 
+  @Override
+  public void onAlterPartition(AlterPartitionEvent ape) throws MetaException {
+    if (ape.getStatus()) {
+      Partition before = ape.getOldPartition();
+      Partition after = ape.getNewPartition();
+
+      String topicName = getTopicName(ape.getTable());
+      send(messageFactory.buildAlterPartitionMessage(before, after), topicName);
+    }
+  }
+
   /**
    * Send dropped partition notifications. Subscribers can receive these notifications for a
    * particular table by listening on a topic named "dbName.tableName" with message selector
@@ -165,7 +177,7 @@ public class NotificationListener extend
       sd.setParameters(new HashMap<String, String>());
       sd.getSerdeInfo().setParameters(new HashMap<String, String>());
       sd.getSkewedInfo().setSkewedColNames(new ArrayList<String>());
-      String topicName = getTopicName(partitionEvent.getTable(), partitionEvent);
+      String topicName = getTopicName(partitionEvent.getTable());
       if (topicName != null && !topicName.equals("")) {
         send(messageFactory.buildDropPartitionMessage(partitionEvent.getTable(), partition), topicName);
       } else {
@@ -241,6 +253,35 @@ public class NotificationListener extend
   }
 
   /**
+   * Send altered table notifications. Subscribers can receive these notifications for
+   * altered tables by listening on topic "HCAT" with message selector string
+   * {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_EVENT} =
+   * {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_ALTER_TABLE_EVENT}
+   */
+  @Override
+  public void onAlterTable(AlterTableEvent tableEvent) throws MetaException {
+    if (tableEvent.getStatus()) {
+      Table before = tableEvent.getOldTable();
+      Table after = tableEvent.getNewTable();
+
+      // onCreateTable alters the table to add the topic name.  Since this class is generating
+      // that alter, we don't want to notify on that alter.  So take a quick look and see if
+      // that's what this alter is, and if so swallow it.
+      if (after.getParameters() != null &&
+          after.getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME) != null &&
+          (before.getParameters() == null ||
+              before.getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME) == null)) {
+        return;
+      }
+      // I think this is wrong; the alter table statement should come on the table topic, not the
+      // DB topic - Alan.
+      String topicName = getTopicPrefix(tableEvent.getHandler().getHiveConf()) + "." +
+          after.getDbName().toLowerCase();
+      send(messageFactory.buildAlterTableMessage(before, after), topicName);
+    }
+  }
+
+  /**
    * Send dropped table notifications. Subscribers can receive these notifications for
    * dropped tables by listening on topic "HCAT" with message selector string
    * {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_EVENT} =
@@ -262,6 +303,8 @@ public class NotificationListener extend
 
     if (tableEvent.getStatus()) {
       Table table = tableEvent.getTable();
+      // I think this is wrong; the drop table statement should come on the table topic, not the
+      // DB topic - Alan.
       String topicName = getTopicPrefix(tableEvent.getHandler().getHiveConf()) + "." + table.getDbName().toLowerCase();
       send(messageFactory.buildDropTableMessage(table), topicName);
     }
@@ -435,14 +478,4 @@ public class NotificationListener extend
 //        if(lpde.getStatus())
 //            send(lpde.getPartitionName(),lpde.getTable().getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME),HCatConstants.HCAT_PARTITION_DONE_EVENT);
   }
-
-  @Override
-  public void onAlterPartition(AlterPartitionEvent ape) throws MetaException {
-    // no-op
-  }
-
-  @Override
-  public void onAlterTable(AlterTableEvent ate) throws MetaException {
-    // no-op
-  }
 }

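Per the javadoc above, subscribers filter notifications with a JMS message selector on HCAT_EVENT. A hedged sketch of a consumer for the new alter-partition events; the JMS connection setup is elided, and the "default.web_logs" topic name is an assumption:

    Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    // Partition-level events arrive on a "dbName.tableName" topic.
    Topic topic = session.createTopic("default.web_logs");
    MessageConsumer consumer = session.createConsumer(topic,
        HCatConstants.HCAT_EVENT + " = '" + HCatConstants.HCAT_ALTER_PARTITION_EVENT + "'");
    Message msg = consumer.receive();  // blocks until an ALTER_PARTITION message arrives
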
Modified: hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java (original)
+++ hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java Fri Jan 23 19:59:11 2015
@@ -37,7 +37,9 @@ public abstract class HCatEventMessage {
     CREATE_TABLE(HCatConstants.HCAT_CREATE_TABLE_EVENT),
     DROP_TABLE(HCatConstants.HCAT_DROP_TABLE_EVENT),
     ADD_PARTITION(HCatConstants.HCAT_ADD_PARTITION_EVENT),
-    DROP_PARTITION(HCatConstants.HCAT_DROP_PARTITION_EVENT);
+    DROP_PARTITION(HCatConstants.HCAT_DROP_PARTITION_EVENT),
+    ALTER_TABLE(HCatConstants.HCAT_ALTER_TABLE_EVENT),
+    ALTER_PARTITION(HCatConstants.HCAT_ALTER_PARTITION_EVENT);
 
     private String typeString;
 

Modified: hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageDeserializer.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageDeserializer.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageDeserializer.java (original)
+++ hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageDeserializer.java Fri Jan 23 19:59:11 2015
@@ -36,10 +36,14 @@ public abstract class MessageDeserialize
       return getDropDatabaseMessage(messageBody);
     case CREATE_TABLE:
       return getCreateTableMessage(messageBody);
+    case ALTER_TABLE:
+      return getAlterTableMessage(messageBody);
     case DROP_TABLE:
       return getDropTableMessage(messageBody);
     case ADD_PARTITION:
       return getAddPartitionMessage(messageBody);
+    case ALTER_PARTITION:
+      return getAlterPartitionMessage(messageBody);
     case DROP_PARTITION:
       return getDropPartitionMessage(messageBody);
 
@@ -64,6 +68,13 @@ public abstract class MessageDeserialize
   public abstract CreateTableMessage getCreateTableMessage(String messageBody);
 
   /**
+   * Method to de-serialize AlterTableMessage instance.
+   * @param messageBody the message in serialized form
+   * @return message in object form
+   */
+  public abstract AlterTableMessage getAlterTableMessage(String messageBody);
+
+  /**
    * Method to de-serialize DropTableMessage instance.
    */
   public abstract DropTableMessage getDropTableMessage(String messageBody);
@@ -74,6 +85,13 @@ public abstract class MessageDeserialize
   public abstract AddPartitionMessage getAddPartitionMessage(String messageBody);
 
   /**
+   * Method to deserialize AlterPartitionMessage
+   * @param messageBody the message in serialized form
+   * @return message in object form
+   */
+  public abstract AlterPartitionMessage getAlterPartitionMessage(String messageBody);
+
+  /**
    * Method to de-serialize DropPartitionMessage instance.
    */
   public abstract DropPartitionMessage getDropPartitionMessage(String messageBody);

Modified: hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java (original)
+++ hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java Fri Jan 23 19:59:11 2015
@@ -119,6 +119,17 @@ public abstract class MessageFactory {
   public abstract CreateTableMessage buildCreateTableMessage(Table table);
 
   /**
+   * Factory method for AlterTableMessage.  Unlike most of these calls, this one can return null,
+   * which means no message should be sent.  This is because there are many flavors of alter
+   * table (add column, add partition, etc.).  Some are covered elsewhere (like add partition)
+   * and some are not yet supported.
+   * @param before The table before the alter
+   * @param after The table after the alter
+   * @return the AlterTableMessage, or null if no message should be sent
+   */
+  public abstract AlterTableMessage buildAlterTableMessage(Table before, Table after);
+
+  /**
    * Factory method for DropTableMessage.
    * @param table The Table being dropped.
    * @return DropTableMessage instance.
@@ -144,6 +155,15 @@ public abstract class MessageFactory {
   public abstract AddPartitionMessage buildAddPartitionMessage(Table table, PartitionSpecProxy partitionSpec);
 
   /**
+   * Factory method for building AlterPartitionMessage
+   * @param before The partition before it was altered
+   * @param after The partition after it was altered
+   * @return a new AlterPartitionMessage
+   */
+  public abstract AlterPartitionMessage buildAlterPartitionMessage(Partition before,
+                                                                   Partition after);
+
+  /**
    * Factory method for DropPartitionMessage.
    * @param table The Table from which the partition is dropped.
    * @param partition The Partition being dropped.

Modified: hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageDeserializer.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageDeserializer.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageDeserializer.java (original)
+++ hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageDeserializer.java Fri Jan 23 19:59:11 2015
@@ -19,7 +19,10 @@
 
 package org.apache.hive.hcatalog.messaging.json;
 
+import org.apache.hive.hcatalog.common.HCatConstants;
 import org.apache.hive.hcatalog.messaging.AddPartitionMessage;
+import org.apache.hive.hcatalog.messaging.AlterPartitionMessage;
+import org.apache.hive.hcatalog.messaging.AlterTableMessage;
 import org.apache.hive.hcatalog.messaging.CreateDatabaseMessage;
 import org.apache.hive.hcatalog.messaging.CreateTableMessage;
 import org.apache.hive.hcatalog.messaging.DropDatabaseMessage;
@@ -71,6 +74,17 @@ public class JSONMessageDeserializer ext
   }
 
   @Override
+  public AlterTableMessage getAlterTableMessage(String messageBody) {
+    try {
+      return mapper.readValue(messageBody, JSONAlterTableMessage.class);
+    }
+    catch (Exception exception) {
+      throw new IllegalArgumentException("Could not construct appropriate alter table type.",
+          exception);
+    }
+  }
+
+  @Override
   public DropTableMessage getDropTableMessage(String messageBody) {
     try {
       return mapper.readValue(messageBody, JSONDropTableMessage.class);
@@ -90,6 +104,15 @@ public class JSONMessageDeserializer ext
     }
   }
 
+  @Override
+  public AlterPartitionMessage getAlterPartitionMessage(String messageBody) {
+    try {
+      return mapper.readValue(messageBody, JSONAlterPartitionMessage.class);
+    } catch (Exception e) {
+      throw new IllegalArgumentException("Could not construct AlterPartitionMessage.", e);
+    }
+  }
+
   @Override
   public DropPartitionMessage getDropPartitionMessage(String messageBody) {
     try {

Modified: hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java (original)
+++ hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java Fri Jan 23 19:59:11 2015
@@ -19,6 +19,8 @@
 
 package org.apache.hive.hcatalog.messaging.json;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -26,6 +28,8 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hive.hcatalog.messaging.AddPartitionMessage;
+import org.apache.hive.hcatalog.messaging.AlterPartitionMessage;
+import org.apache.hive.hcatalog.messaging.AlterTableMessage;
 import org.apache.hive.hcatalog.messaging.CreateDatabaseMessage;
 import org.apache.hive.hcatalog.messaging.CreateTableMessage;
 import org.apache.hive.hcatalog.messaging.DropDatabaseMessage;
@@ -42,6 +46,9 @@ import java.util.*;
  */
 public class JSONMessageFactory extends MessageFactory {
 
+  private static final Log LOG = LogFactory.getLog(JSONMessageFactory.class.getName());
+
+
   private static JSONMessageDeserializer deserializer = new JSONMessageDeserializer();
 
   @Override
@@ -62,31 +69,37 @@ public class JSONMessageFactory extends
   @Override
   public CreateDatabaseMessage buildCreateDatabaseMessage(Database db) {
     return new JSONCreateDatabaseMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, db.getName(),
-        System.currentTimeMillis() / 1000);
+        now());
   }
 
   @Override
   public DropDatabaseMessage buildDropDatabaseMessage(Database db) {
     return new JSONDropDatabaseMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, db.getName(),
-        System.currentTimeMillis() / 1000);
+        now());
   }
 
   @Override
   public CreateTableMessage buildCreateTableMessage(Table table) {
     return new JSONCreateTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
-        table.getTableName(), System.currentTimeMillis()/1000);
+        table.getTableName(), now());
+  }
+
+  @Override
+  public AlterTableMessage buildAlterTableMessage(Table before, Table after) {
+    return new JSONAlterTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, before.getDbName(),
+        before.getTableName(), now());
   }
 
   @Override
   public DropTableMessage buildDropTableMessage(Table table) {
     return new JSONDropTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(), table.getTableName(),
-        System.currentTimeMillis()/1000);
+        now());
   }
 
   @Override
   public AddPartitionMessage buildAddPartitionMessage(Table table, List<Partition> partitions) {
     return new JSONAddPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
-        table.getTableName(), getPartitionKeyValues(table, partitions), System.currentTimeMillis()/1000);
+        table.getTableName(), getPartitionKeyValues(table, partitions), now());
   }
 
   @Override
@@ -94,14 +107,23 @@ public class JSONMessageFactory extends
   @InterfaceStability.Evolving
   public AddPartitionMessage buildAddPartitionMessage(Table table, PartitionSpecProxy partitionSpec) {
     return new JSONAddPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
-        table.getTableName(), getPartitionKeyValues(table, partitionSpec), System.currentTimeMillis()/1000);
+        table.getTableName(), getPartitionKeyValues(table, partitionSpec), now());
+  }
+
+  @Override
+  public AlterPartitionMessage buildAlterPartitionMessage(Partition before, Partition after) {
+    return new JSONAlterPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL,
+        before.getDbName(), before.getTableName(), before.getValues(), now());
   }
 
   @Override
   public DropPartitionMessage buildDropPartitionMessage(Table table, Partition partition) {
     return new JSONDropPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, partition.getDbName(),
-        partition.getTableName(), Arrays.asList(getPartitionKeyValues(table, partition)),
-        System.currentTimeMillis()/1000);
+        partition.getTableName(), Arrays.asList(getPartitionKeyValues(table, partition)), now());
+  }
+
+  private long now() {
+    return System.currentTimeMillis() / 1000;
   }
 
   private static Map<String, String> getPartitionKeyValues(Table table, Partition partition) {

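A note on the now() refactoring above: every message builder stamps events in
seconds since the epoch, not milliseconds. A minimal sketch of what a consumer
has to do when comparing against its own clock, assuming the message type
exposes the stamp via getTimestamp() (the eventAgeSeconds name is illustrative):

  // Message timestamps are epoch seconds (System.currentTimeMillis() / 1000),
  // so divide the local millisecond clock before comparing.
  long nowSeconds = System.currentTimeMillis() / 1000;
  long eventAgeSeconds = nowSeconds - message.getTimestamp();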
Modified: hive/branches/llap/hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestNotificationListener.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestNotificationListener.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestNotificationListener.java (original)
+++ hive/branches/llap/hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestNotificationListener.java Fri Jan 23 19:59:11 2015
@@ -46,6 +46,8 @@ import org.apache.hive.hcatalog.common.H
 import org.apache.hive.hcatalog.mapreduce.HCatBaseTest;
 
 import org.apache.hive.hcatalog.messaging.AddPartitionMessage;
+import org.apache.hive.hcatalog.messaging.AlterPartitionMessage;
+import org.apache.hive.hcatalog.messaging.AlterTableMessage;
 import org.apache.hive.hcatalog.messaging.CreateDatabaseMessage;
 import org.apache.hive.hcatalog.messaging.CreateTableMessage;
 import org.apache.hive.hcatalog.messaging.DropDatabaseMessage;
@@ -104,7 +106,9 @@ public class TestNotificationListener ex
         HCatConstants.HCAT_CREATE_DATABASE_EVENT,
         HCatConstants.HCAT_CREATE_TABLE_EVENT,
         HCatConstants.HCAT_ADD_PARTITION_EVENT,
+        HCatConstants.HCAT_ALTER_PARTITION_EVENT,
         HCatConstants.HCAT_DROP_PARTITION_EVENT,
+        HCatConstants.HCAT_ALTER_TABLE_EVENT,
         HCatConstants.HCAT_DROP_TABLE_EVENT,
         HCatConstants.HCAT_DROP_DATABASE_EVENT);
     Assert.assertEquals(expectedMessages, actualMessages);
@@ -120,7 +124,9 @@ public class TestNotificationListener ex
     kvs.put("b", "2011");
     client.markPartitionForEvent("mydb", "mytbl", kvs,
         PartitionEventType.LOAD_DONE);
+    driver.run("alter table mytbl partition (b='2011') set fileformat orc");
     driver.run("alter table mytbl drop partition(b='2011')");
+    driver.run("alter table mytbl add columns (c int comment 'this is an int', d decimal(3,2))");
     driver.run("drop table mytbl");
     driver.run("drop database mydb");
   }
@@ -170,6 +176,20 @@ public class TestNotificationListener ex
         Assert.assertEquals("mytbl", ((AddPartitionMessage) message2).getTable());
         Assert.assertEquals(1, ((AddPartitionMessage) message2).getPartitions().size());
         Assert.assertEquals("2011", ((AddPartitionMessage) message2).getPartitions().get(0).get("b"));
+      } else if (event.equals(HCatConstants.HCAT_ALTER_PARTITION_EVENT)) {
+        Assert.assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination().toString());
+        // for alter partition events
+        AlterPartitionMessage message = deserializer.getAlterPartitionMessage(messageBody);
+        Assert.assertEquals("mytbl", message.getTable());
+        Assert.assertEquals("mydb", message.getDB());
+        Assert.assertEquals(1, message.getValues().size());
+        Assert.assertEquals("2011", message.getValues().get(0));
+        HCatEventMessage message2 = MessagingUtils.getMessage(msg);
+        Assert.assertTrue("Unexpected message-type.", message2 instanceof AlterPartitionMessage);
+        Assert.assertEquals("mydb", message2.getDB());
+        Assert.assertEquals("mytbl", ((AlterPartitionMessage) message2).getTable());
+        Assert.assertEquals(1, ((AlterPartitionMessage) message2).getValues().size());
+        Assert.assertEquals("2011", ((AlterPartitionMessage) message2).getValues().get(0));
       } else if (event.equals(HCatConstants.HCAT_DROP_PARTITION_EVENT)) {
 
         Assert.assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
@@ -184,7 +204,8 @@ public class TestNotificationListener ex
         Assert.assertEquals("mydb", message2.getDB());
         Assert.assertEquals("mytbl", ((DropPartitionMessage) message2).getTable());
         Assert.assertEquals(1, ((DropPartitionMessage) message2).getPartitions().size());
-        Assert.assertEquals("2011", ((DropPartitionMessage) message2).getPartitions().get(0).get("b"));
+        Assert.assertEquals("2011", ((DropPartitionMessage) message2).getPartitions().get(0).get(
+            "b"));
       } else if (event.equals(HCatConstants.HCAT_DROP_TABLE_EVENT)) {
 
         Assert.assertEquals("topic://hcat.mydb", msg.getJMSDestination().toString());
@@ -199,11 +220,20 @@ public class TestNotificationListener ex
 
         Assert.assertEquals("topic://" + HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX, msg
             .getJMSDestination().toString());
-        DropDatabaseMessage message =  deserializer.getDropDatabaseMessage(messageBody);
+        DropDatabaseMessage message = deserializer.getDropDatabaseMessage(messageBody);
         Assert.assertEquals("mydb", message.getDB());
         HCatEventMessage message2 = MessagingUtils.getMessage(msg);
         Assert.assertTrue("Unexpected message-type.", message2 instanceof DropDatabaseMessage);
         Assert.assertEquals("mydb", message2.getDB());
+      } else if (event.equals(HCatConstants.HCAT_ALTER_TABLE_EVENT)) {
+        Assert.assertEquals("topic://hcat.mydb", msg.getJMSDestination().toString());
+        AlterTableMessage message = deserializer.getAlterTableMessage(messageBody);
+        Assert.assertEquals("mytbl", message.getTable());
+        Assert.assertEquals("mydb", message.getDB());
+        HCatEventMessage message2 = MessagingUtils.getMessage(msg);
+        Assert.assertTrue("Unexpected message-type.", message2 instanceof AlterTableMessage);
+        Assert.assertEquals("mydb", message2.getDB());
+        Assert.assertEquals("mytbl", ((AlterTableMessage) message2).getTable());
       } else if (event.equals(HCatConstants.HCAT_PARTITION_DONE_EVENT)) {
         // TODO: Fill in when PARTITION_DONE_EVENT is supported.
         Assert.assertTrue("Unexpected: HCAT_PARTITION_DONE_EVENT not supported (yet).", false);

Modified: hive/branches/llap/hcatalog/src/test/e2e/templeton/README.txt
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/src/test/e2e/templeton/README.txt?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/src/test/e2e/templeton/README.txt (original)
+++ hive/branches/llap/hcatalog/src/test/e2e/templeton/README.txt Fri Jan 23 19:59:11 2015
@@ -223,3 +223,14 @@ enough map slots (10?) (mapred.tasktrack
 Adding Tests
 ------------
 ToDo: add some guidelines
+
+Running on Tez
+--------------
+1. set up Tez as in http://tez.apache.org/install.html
+2. set hive.execution.engine=tez in hive-site.xml (actually is this needed?)
+3. add hive.execution.engine=tez to templeton.hive.properties in webhcat-site.xml
+4. add the following to mapred-env.sh/yarn-env.sh (using the values you defined in step 1)
+export TEZ_VERSION=0.5.3
+export TEZ_JARS=/Users/ekoifman/dev/apache-tez-client-${TEZ_VERSION}
+export TEZ_CONF_DIR=${TEZ_JARS}/conf
+export HADOOP_CLASSPATH=${TEZ_CONF_DIR}:${TEZ_JARS}/*:${TEZ_JARS}/lib/*:${HADOOP_CLASSPATH}
+(without these settings you'll see something like "java.lang.NoClassDefFoundError: org/apache/tez/dag/api/SessionNotRunning")

Modified: hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/deploy_e2e_artifacts.sh
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/deploy_e2e_artifacts.sh?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/deploy_e2e_artifacts.sh (original)
+++ hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/deploy_e2e_artifacts.sh Fri Jan 23 19:59:11 2015
@@ -48,5 +48,17 @@ ${HADOOP_HOME}/bin/hadoop fs -put ${PIG_
 
 ${HADOOP_HOME}/bin/hadoop fs -put /Users/ekoifman/dev/sqoop-1.4.5.bin__hadoop-2.0.4-alpha.tar.gz /apps/templeton/sqoop-1.4.5.bin__hadoop-2.0.4-alpha.tar.gz
 ${HADOOP_HOME}/bin/hadoop fs -put /Users/ekoifman/dev/mysql-connector-java-5.1.30/mysql-connector-java-5.1.30-bin.jar /apps/templeton/jdbc/mysql-connector-java.jar
+
+#Tez set up (http://tez.apache.org/install.html)
+#if not using Tez - ignore this
+${HADOOP_HOME}/bin/hdfs dfs -put /Users/ekoifman/dev/apache-tez-${TEZ_VERSION}-src/tez-dist/target/tez-${TEZ_VERSION}.tar.gz /apps/tez-${TEZ_VERSION}.tar.gz
+${HADOOP_HOME}/bin/hdfs dfs -mkdir /tmp/tezin
+${HADOOP_HOME}/bin/hdfs dfs -mkdir /tmp/tezout
+${HADOOP_HOME}/bin/hdfs dfs -put /Users/ekoifman/dev/hive/build.sh /tmp/tezin
+#The above line is a sanity check: it stages input so you can run step 6 in http://tez.apache.org/install.html
+#$HADOOP_HOME/bin/hadoop jar tez-examples-0.5.3.jar orderedwordcount /tmp/tezin /tmp/tezout
+
 #check what got deployed
-${HADOOP_HOME}/bin/hdfs dfs -ls -R /apps/templeton webhcate2e /user/templeton /user/hive/warehouse
+${HADOOP_HOME}/bin/hdfs dfs -ls -R /apps webhcate2e /user/templeton /user/hive/warehouse

Modified: hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/env.sh
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/env.sh?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/env.sh (original)
+++ hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/env.sh Fri Jan 23 19:59:11 2015
@@ -22,14 +22,29 @@
 
 # define necessary env vars here and source it in other files
 
-export HADOOP_VERSION=2.4.1-SNAPSHOT
-#export HIVE_VERSION=0.14.0-SNAPSHOT
-export PIG_VERSION=0.12.2-SNAPSHOT
+echo ${HADOOP_VERSION};
+
+if [ -z "${HADOOP_VERSION}" ]; then
+  export HADOOP_VERSION=2.4.1-SNAPSHOT
+fi
+
+if [ -z "${HIVE_VERSION}" ]; then
+  export HIVE_VERSION=0.14.0-SNAPSHOT
+fi
+
+if [ -z "${PIG_VERSION}" ]; then
+  export PIG_VERSION=0.12.2-SNAPSHOT
+fi
 
 #Root of project source tree
-export PROJ_HOME=/Users/${USER}/dev/hive
+if [ -z "${PROJ_HOME}" ]; then
+  export PROJ_HOME=/Users/${USER}/dev/hive
+fi
 export HIVE_HOME=${PROJ_HOME}/packaging/target/apache-hive-${HIVE_VERSION}-bin/apache-hive-${HIVE_VERSION}-bin
-export HADOOP_HOME=/Users/${USER}/dev/hwxhadoop/hadoop-dist/target/hadoop-${HADOOP_VERSION}
+
+if [ -z "${HADOOP_HOME}" ]; then
+  export HADOOP_HOME=/Users/${USER}/dev/hwxhadoop/hadoop-dist/target/hadoop-${HADOOP_VERSION}
+fi
 
 #Make sure Pig is built for the Hadoop version you are running
 export PIG_TAR_PATH=/Users/${USER}/dev/pig-${PIG_VERSION}-src/build

Modified: hive/branches/llap/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm (original)
+++ hive/branches/llap/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm Fri Jan 23 19:59:11 2015
@@ -878,7 +878,7 @@ sub compare
             if (defined($testCmd->{'check_job_percent_complete'})) {
               my $pcValue = $res_hash->{'percentComplete'};
               my $expectedPercentComplete = $testCmd->{'check_job_percent_complete'};
-              if ( (!defined $pcValue) || $pcValue ne $expectedPercentComplete ) {
+              if ( (!defined $pcValue) || $pcValue !~ m/$expectedPercentComplete/ ) {
                 print $log "check_job_percent_complete failed. got percentComplete $pcValue,  expected  $expectedPercentComplete";
                 $result = 0;
               }

Modified: hive/branches/llap/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf (original)
+++ hive/branches/llap/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf Fri Jan 23 19:59:11 2015
@@ -405,7 +405,7 @@ $cfg =
 
     {
                                 #test select a,b
-     'num' => 7,
     'num' => 7, # seems to be the same as test 6, except for the percent_complete check
      'method' => 'POST',
      'url' => ':TEMPLETON_URL:/templeton/v1/hive',
      'post_options' => ['user.name=:UNAME:','execute=select count(*) from mynums', ],
@@ -414,7 +414,7 @@ $cfg =
      'status_code' => 200,
      'check_job_created' => 1,
      'check_job_complete' => 'SUCCESS', 
-     'check_job_percent_complete' => 'map 100% reduce 100%',
+     'check_job_percent_complete' => 'map 100% reduce 100%|100% complete',
      'check_job_exit_value' => 0,
      'check_call_back' => 1,
 
@@ -524,7 +524,7 @@ $cfg =
      'num' => 1,
      'method' => 'POST',
      'url' => ':TEMPLETON_URL:/templeton/v1/sqoop?user.name=:UNAME:',
-     'post_options' => ['libdir=hdfs:///apps/templeton/jdbc', 'command=export --connect :DB_CONNECTION_STRING: --username :DB_USER_NAME: --password :DB_PASSWORD: --export-dir :INPDIR_HDFS:/sqoop --table person','statusdir=TestSqoop_:TNUM:' ],
+     'post_options' => ['libdir=/apps/templeton/jdbc', 'command=export --connect :DB_CONNECTION_STRING: --username :DB_USER_NAME: --password :DB_PASSWORD: --export-dir :INPDIR_HDFS:/sqoop --table person','statusdir=TestSqoop_:TNUM:' ],
      'json_field_substr_match' => { 'id' => '\d+'},
                    #results
      'status_code' => 200,
@@ -539,7 +539,7 @@ $cfg =
      'num' => 2,
      'method' => 'POST',
      'url' => ':TEMPLETON_URL:/templeton/v1/sqoop?user.name=:UNAME:',
-     'post_options' => ['libdir=hdfs:///apps/templeton/jdbc', 'files=:INPDIR_HDFS:/sqoopcommand.txt','command=import --connect :DB_CONNECTION_STRING: --username :DB_USER_NAME: --password :DB_PASSWORD: --options-file sqoopcommand.txt','statusdir=TestSqoop_:TNUM:' ],
+     'post_options' => ['libdir=/apps/templeton/jdbc', 'files=:INPDIR_HDFS:/sqoopcommand.txt','command=import --connect :DB_CONNECTION_STRING: --username :DB_USER_NAME: --password :DB_PASSWORD: --options-file sqoopcommand.txt','statusdir=TestSqoop_:TNUM:' ],
      'json_field_substr_match' => { 'id' => '\d+'},
                    #results
      'status_code' => 200,

Modified: hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java (original)
+++ hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java Fri Jan 23 19:59:11 2015
@@ -24,6 +24,7 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hive.hcatalog.common.HCatException;
@@ -467,6 +468,31 @@ public abstract class HCatClient {
   public abstract String getMessageBusTopicName(String dbName, String tableName) throws HCatException;
 
   /**
+   * Get a list of notifications.
+   * @param lastEventId The last event id that was consumed by this reader.  The returned
+   *                    notifications will start at the first eventId after this one that
+   *                    matches the filter.
+   * @param maxEvents Maximum number of events to return.  If < 1, then all available events will
+   *                  be returned.
+   * @param filter Filter to determine if message should be accepted.  If null, then all
+   *               available events up to maxEvents will be returned.
+   * @return list of notifications, sorted by eventId.  It is guaranteed that the events are in
+   * the order that the operations were done on the database.
+   * @throws HCatException
+   */
+  public abstract List<HCatNotificationEvent> getNextNotification(long lastEventId,
+                                                                  int maxEvents,
+                                                                  IMetaStoreClient.NotificationFilter filter)
+      throws HCatException;
+
+  /**
+   * Get the id of the most recent notification event.
+   * @return the current notification event id
+   * @throws HCatException
+   */
+  public abstract long getCurrentNotificationEventId() throws HCatException;
+
+  /**
    * Close the hcatalog client.
    *
    * @throws HCatException

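A hedged sketch of how the two new abstract methods might be driven together by
a reader. The database filter and the NotificationPoller wrapper are
illustrative, not part of the patch; per the javadoc above, passing a maxEvents
value below 1 returns all available events.

  import java.util.List;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.NotificationEvent;
  import org.apache.hive.hcatalog.api.HCatClient;
  import org.apache.hive.hcatalog.api.HCatNotificationEvent;
  import org.apache.hive.hcatalog.common.HCatException;

  public class NotificationPoller {
    // Fetch every event recorded after the last id this reader consumed,
    // keeping only events against one database.
    static List<HCatNotificationEvent> poll(HCatClient client, long lastConsumedId)
        throws HCatException {
      return client.getNextNotification(lastConsumedId, -1 /* all available */,
          new IMetaStoreClient.NotificationFilter() {
            @Override
            public boolean accept(NotificationEvent event) {
              return "mydb".equals(event.getDbName());
            }
          });
    }
  }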
Modified: hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java (original)
+++ hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java Fri Jan 23 19:59:11 2015
@@ -23,20 +23,25 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -825,7 +830,8 @@ public class HCatClientHMSImpl extends H
   @Override
   public String getMessageBusTopicName(String dbName, String tableName) throws HCatException {
     try {
-      return hmsClient.getTable(dbName, tableName).getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME);
+      return hmsClient.getTable(dbName, tableName).getParameters().get(
+          HCatConstants.HCAT_MSGBUS_TOPIC_NAME);
     }
     catch (MetaException e) {
       throw new HCatException("MetaException while retrieving JMS Topic name.", e);
@@ -836,7 +842,36 @@ public class HCatClientHMSImpl extends H
           "TException while retrieving JMS Topic name.", e);
     }
   }
-  
+
+  @Override
+  public List<HCatNotificationEvent> getNextNotification(long lastEventId, int maxEvents,
+                                                         IMetaStoreClient.NotificationFilter filter)
+      throws HCatException {
+    try {
+      List<HCatNotificationEvent> events = new ArrayList<HCatNotificationEvent>();
+      NotificationEventResponse rsp = hmsClient.getNextNotification(lastEventId, maxEvents, filter);
+      if (rsp != null && rsp.getEvents() != null) {
+        for (NotificationEvent event : rsp.getEvents()) {
+          events.add(new HCatNotificationEvent(event));
+        }
+      }
+      return events;
+    } catch (TException e) {
+      throw new ConnectionFailureException("TException while getting notifications", e);
+    }
+  }
+
+  @Override
+  public long getCurrentNotificationEventId() throws HCatException {
+    try {
+      CurrentNotificationEventId id = hmsClient.getCurrentNotificationEventId();
+      return id.getEventId();
+    } catch (TException e) {
+      throw new ConnectionFailureException(
+          "TException while getting current notification event id", e);
+    }
+  }
+
   @Override
   public String serializeTable(HCatTable hcatTable) throws HCatException {
     return MetadataSerializer.get().serializeTable(hcatTable);
@@ -905,8 +940,10 @@ public class HCatClientHMSImpl extends H
 
   @Override
   public HCatPartitionSpec deserializePartitionSpec(List<String> hcatPartitionSpecStrings) throws HCatException {
-    HCatPartitionSpec hcatPartitionSpec = MetadataSerializer.get().deserializePartitionSpec(hcatPartitionSpecStrings);
-    hcatPartitionSpec.hcatTable(getTable(hcatPartitionSpec.getDbName(), hcatPartitionSpec.getTableName()));
+    HCatPartitionSpec hcatPartitionSpec = MetadataSerializer.get()
+        .deserializePartitionSpec(hcatPartitionSpecStrings);
+    hcatPartitionSpec
+        .hcatTable(getTable(hcatPartitionSpec.getDbName(), hcatPartitionSpec.getTableName()));
     return hcatPartitionSpec;
   }
 }

Modified: hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/HDFSCleanup.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/HDFSCleanup.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/HDFSCleanup.java (original)
+++ hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/HDFSCleanup.java Fri Jan 23 19:59:11 2015
@@ -91,18 +91,25 @@ public class HDFSCleanup extends Thread
    *
    */
   public void run() {
-    FileSystem fs = null;
     while (!stop) {
       try {
         // Put each check in a separate try/catch, so if that particular
         // cycle fails, it'll try again on the next cycle.
+        FileSystem fs = null;
         try {
-          if (fs == null) {
-            fs = new Path(storage_root).getFileSystem(appConf);
-          }
+          fs = new Path(storage_root).getFileSystem(appConf);
           checkFiles(fs);
         } catch (Exception e) {
           LOG.error("Cleanup cycle failed: " + e.getMessage());
+        } finally {
+          if (fs != null) {
+            try {
+              fs.close();
+            } catch (Exception e) {
+              LOG.error("Closing file system failed: " + e.getMessage());
+            }
+          }
         }
 
         long sleepMillis = (long) (Math.random() * interval);

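One caveat worth noting on the close-per-cycle pattern above: Hadoop caches
FileSystem instances per scheme and UGI by default, so fs.close() here can
invalidate a handle that other code in the same JVM obtained from the cache. A
sketch of opting this thread out of the cache, assuming the storage root is on
HDFS (the key follows Hadoop's fs.<scheme>.impl.disable.cache convention):

  Configuration conf = new Configuration(appConf);
  // Request a private, uncached instance so fs.close() below cannot
  // invalidate FileSystem handles held elsewhere in the process.
  conf.setBoolean("fs.hdfs.impl.disable.cache", true);
  FileSystem fs = new Path(storage_root).getFileSystem(conf);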
Modified: hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonUtils.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonUtils.java (original)
+++ hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonUtils.java Fri Jan 23 19:59:11 2015
@@ -42,6 +42,8 @@ import java.util.regex.Pattern;
 
 import javax.ws.rs.core.UriBuilder;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -57,6 +59,8 @@ import org.apache.hive.hcatalog.templeto
  * General utility methods.
  */
 public class TempletonUtils {
+  private static final Log LOG = LogFactory.getLog(TempletonUtils.class);
+
   /**
    * Is the object non-empty?
    */
@@ -98,6 +102,24 @@ public class TempletonUtils {
   public static final Pattern PIG_COMPLETE = Pattern.compile(" \\d+% complete$");
   //looking for map = 100%,  reduce = 100%
   public static final Pattern HIVE_COMPLETE = Pattern.compile(" map = (\\d+%),\\s+reduce = (\\d+%).*$");
+  /**
+   * Hive on Tez produces a progress report that looks like this:
+   * Map 1: -/-	Reducer 2: 0/1
+   * Map 1: -/-	Reducer 2: 0(+1)/1
+   * Map 1: -/-	Reducer 2: 1/1
+   *
+   * -/- means there are no tasks (yet)
+   * 0/1 means 1 task in total, 0 completed
+   * 1(+2)/3 means 3 total, 1 completed and 2 running
+   *
+   * HIVE-8495, in particular https://issues.apache.org/jira/secure/attachment/12675504/Screen%20Shot%202014-10-16%20at%209.35.26%20PM.png
+   * has more examples.
+   * To report progress, we'll assume all tasks are of equal size and compute "completed" as a percent of "total".
+   * "(Map|Reducer) (\\d+:) ((-/-)|(\\d+(\\(\\+\\d+\\))?/\\d+))" is the complete pattern, but we drop "-/-" to exclude
+   * groups that add no information, such as "Map 1: -/-".
+   */
+  public static final Pattern TEZ_COMPLETE = Pattern.compile("(Map|Reducer) (\\d+:) (\\d+(\\(\\+\\d+\\))?/\\d+)");
+  public static final Pattern TEZ_COUNTERS = Pattern.compile("\\d+");
 
   /**
    * Extract the percent complete line from Pig or Jar jobs.
@@ -115,6 +137,31 @@ public class TempletonUtils {
     if(hive.find()) {
       return "map " + hive.group(1) + " reduce " + hive.group(2);
     }
+    Matcher tez = TEZ_COMPLETE.matcher(line);
+    if(tez.find()) {
+      int totalTasks = 0;
+      int completedTasks = 0;
+      do {
+        //here each group looks something like "Map 2: 2/4" "Reducer 3: 1(+2)/4"
+        //just parse the numbers and ignore one from "Map 2" and from "(+2)" if it's there
+        Matcher counts = TEZ_COUNTERS.matcher(tez.group());
+        List<String> items = new ArrayList<String>(4);
+        while(counts.find()) {
+          items.add(counts.group());
+        }
+        completedTasks += Integer.parseInt(items.get(1));
+        if(items.size() == 3) {
+          totalTasks += Integer.parseInt(items.get(2));
+        } else {
+          totalTasks += Integer.parseInt(items.get(3));
+        }
+      } while(tez.find());
+      if(totalTasks == 0) {
+        return "0% complete (0 total tasks)";
+      }
+      return completedTasks * 100 / totalTasks + "% complete";
+    }
     return null;
   }
 

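To make the parsing above concrete, a self-contained sketch that runs the two
patterns against a sample Tez status line. The TezProgressDemo class and the
sample line are illustrative; the patterns are copied verbatim from the patch.

  import java.util.ArrayList;
  import java.util.List;
  import java.util.regex.Matcher;
  import java.util.regex.Pattern;

  public class TezProgressDemo {
    static final Pattern TEZ_COMPLETE =
        Pattern.compile("(Map|Reducer) (\\d+:) (\\d+(\\(\\+\\d+\\))?/\\d+)");
    static final Pattern TEZ_COUNTERS = Pattern.compile("\\d+");

    public static void main(String[] args) {
      String line = "Map 1: 2/4\tReducer 2: 1(+2)/4";
      int totalTasks = 0, completedTasks = 0;
      Matcher tez = TEZ_COMPLETE.matcher(line);
      while (tez.find()) {
        // each match looks like "Map 1: 2/4" or "Reducer 2: 1(+2)/4";
        // pull out the bare numbers and skip the vertex index (items[0])
        Matcher counts = TEZ_COUNTERS.matcher(tez.group());
        List<String> items = new ArrayList<String>(4);
        while (counts.find()) {
          items.add(counts.group());
        }
        completedTasks += Integer.parseInt(items.get(1));
        totalTasks += Integer.parseInt(items.get(items.size() - 1));
      }
      // 3 of 8 tasks done -> prints "37% complete" (integer percent)
      System.out.println(completedTasks * 100 / totalTasks + "% complete");
    }
  }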
Modified: hive/branches/llap/itests/hcatalog-unit/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hcatalog-unit/pom.xml?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/itests/hcatalog-unit/pom.xml (original)
+++ hive/branches/llap/itests/hcatalog-unit/pom.xml Fri Jan 23 19:59:11 2015
@@ -60,6 +60,18 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hive.hcatalog</groupId>
+      <artifactId>hive-hcatalog-server-extensions</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive.hcatalog</groupId>
+      <artifactId>hive-webhcat-java-client</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-hbase-handler</artifactId>
       <version>${project.version}</version>

Modified: hive/branches/llap/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoop20SAuthBridge.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoop20SAuthBridge.java?rev=1654355&r1=1654354&r2=1654355&view=diff
==============================================================================
--- hive/branches/llap/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoop20SAuthBridge.java (original)
+++ hive/branches/llap/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoop20SAuthBridge.java Fri Jan 23 19:59:11 2015
@@ -131,7 +131,7 @@ public class TestHadoop20SAuthBridge ext
     }
     builder.append("127.0.1.1,");
     builder.append(InetAddress.getLocalHost().getCanonicalHostName());
-    conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(superUserShortName),
+    conf.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(superUserShortName),
         builder.toString());
   }
 
@@ -294,7 +294,7 @@ public class TestHadoop20SAuthBridge ext
   private void setGroupsInConf(String[] groupNames, String proxyUserName)
   throws IOException {
    conf.set(
-      DefaultImpersonationProvider.getProxySuperuserGroupConfKey(proxyUserName),
+      DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(proxyUserName),
       StringUtils.join(",", Arrays.asList(groupNames)));
     configureSuperUserIPAddresses(conf, proxyUserName);
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);


