From: sershe@apache.org
To: commits@hive.apache.org
Message-Id: <30395f94705441aab3f8b56e57d291bd@git.apache.org>
Subject: hive git commit: HIVE-12078 : LLAP: document config settings (Sergey Shelukhin)
Date: Mon, 12 Oct 2015 18:33:00 +0000 (UTC)

Repository: hive
Updated Branches:
  refs/heads/llap 4e53bfd10 -> 3cfcad660


HIVE-12078 : LLAP: document config settings (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3cfcad66
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3cfcad66
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3cfcad66

Branch: refs/heads/llap
Commit: 3cfcad660b905a0791638e82649b27cf3be7053a
Parents: 4e53bfd
Author: Sergey Shelukhin
Authored: Mon Oct 12 11:35:17 2015 -0700
Committer: Sergey Shelukhin
Committed: Mon Oct 12 11:35:17 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java | 66 ++++++++++++--------
 1 file changed, 39 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3cfcad66/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 109b19b..0fcd39b 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -891,10 +891,10 @@ public class HiveConf extends Configuration {
     HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC"),
         "Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"),
     HIVEDEFAULTMANAGEDFILEFORMAT("hive.default.fileformat.managed", "none",
-      new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
-      "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" +
-      "created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" +
-      "for all tables."),
+        new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
+        "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" +
+        "created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" +
+        "for all tables."),
     HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile"),
         "Default file format for storing result of the query."),
     HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"),
@@ -2225,8 +2225,8 @@ public class HiveConf extends Configuration {
         "Whether to send the query plan via local resource or RPC"),
     HIVE_AM_SPLIT_GENERATION("hive.compute.splits.in.am", true,
         "Whether to generate the splits locally or in the AM (tez only)"),
-    HIVE_TEZ_GENERATE_CONSISTENT_SPLITS("hive.tez.input.generate.consistent.splits", true, "Whether to generate consisten split" +
-        "locations when generating splits in the AM"),
+    HIVE_TEZ_GENERATE_CONSISTENT_SPLITS("hive.tez.input.generate.consistent.splits", true,
+        "Whether to generate consistent split locations when generating splits in the AM"),
     HIVE_PREWARM_ENABLED("hive.prewarm.enabled", false, "Enables container prewarm for Tez/Spark (Hadoop 2 only)"),
     HIVE_PREWARM_NUM_CONTAINERS("hive.prewarm.numcontainers", 10, "Controls the number of containers to prewarm for Tez/Spark (Hadoop 2 only)"),
@@ -2302,37 +2302,49 @@ public class HiveConf extends Configuration {
         "hive.tez.exec.inplace.progress",
         true,
         "Updates tez job execution progress in-place in the terminal."),
-    LLAP_IO_ENABLED("hive.llap.io.enabled", false, ""),
-    LLAP_LOW_LEVEL_CACHE("hive.llap.io.use.lowlevel.cache", true, ""),
-    LLAP_ORC_CACHE_MIN_ALLOC("hive.llap.io.cache.orc.alloc.min", 128 * 1024, ""),
-    LLAP_ORC_CACHE_MAX_ALLOC("hive.llap.io.cache.orc.alloc.max", 16 * 1024 * 1024, ""),
-    LLAP_ORC_CACHE_ARENA_SIZE("hive.llap.io.cache.orc.arena.size", 128 * 1024 * 1024, ""),
-    LLAP_ORC_CACHE_MAX_SIZE("hive.llap.io.cache.orc.size", 1024L * 1024 * 1024, ""),
-    LLAP_ORC_CACHE_ALLOCATE_DIRECT("hive.llap.io.cache.direct", true, ""),
-    LLAP_USE_LRFU("hive.llap.io.use.lrfu", false, ""),
-    LLAP_LRFU_LAMBDA("hive.llap.io.lrfu.lambda", 0.01f, ""),
-    LLAP_ORC_ENABLE_TIME_COUNTERS("hive.llap.io.orc.time.counters", true, ""),
+    LLAP_IO_ENABLED("hive.llap.io.enabled", false, "Whether the LLAP IO layer is enabled."),
+    LLAP_LOW_LEVEL_CACHE("hive.llap.io.use.lowlevel.cache", true, "Must always be true for now"),
+    LLAP_ORC_CACHE_MIN_ALLOC("hive.llap.io.cache.orc.alloc.min", 128 * 1024,
+        "Minimum allocation possible from LLAP low-level cache for ORC. Allocations below that\n" +
+        "will be padded to minimum allocation. Should generally be the same as expected ORC\n" +
+        "compression buffer size, or next lowest power of 2. Must be power of 2."),
+    LLAP_ORC_CACHE_MAX_ALLOC("hive.llap.io.cache.orc.alloc.max", 16 * 1024 * 1024,
+        "Maximum allocation possible from LLAP low-level cache for ORC. Should be as large as\n" +
+        "the largest expected ORC compression buffer size. Must be power of 2."),
+    LLAP_ORC_CACHE_ARENA_SIZE("hive.llap.io.cache.orc.arena.size", 128 * 1024 * 1024,
+        "Arena size for ORC low-level cache; cache will be allocated in arena-sized steps.\n" +
+        "Must presently be a power of two."),
+    LLAP_ORC_CACHE_MAX_SIZE("hive.llap.io.cache.orc.size", 1024L * 1024 * 1024,
+        "Maximum size for ORC low-level cache; must be a multiple of arena size."),
+    LLAP_ORC_CACHE_ALLOCATE_DIRECT("hive.llap.io.cache.direct", true,
+        "Whether ORC low-level cache should use direct allocation."),
+    LLAP_USE_LRFU("hive.llap.io.use.lrfu", false,
+        "Whether ORC low-level cache should use LRFU cache policy instead of default (FIFO)."),
+    LLAP_LRFU_LAMBDA("hive.llap.io.lrfu.lambda", 0.01f,
+        "Lambda for ORC low-level cache LRFU cache policy."),
+    LLAP_ORC_ENABLE_TIME_COUNTERS("hive.llap.io.orc.time.counters", true,
+        "Whether to enable time counters for LLAP IO layer (time spent in HDFS, etc.)"),
     LLAP_AUTO_ALLOW_UBER("hive.llap.auto.allow.uber", true,
-      "Whether or not to allow the planner to run vertices in the AM"),
+        "Whether or not to allow the planner to run vertices in the AM."),
     LLAP_AUTO_ENFORCE_TREE("hive.llap.auto.enforce.tree", true,
-      "Enforce that all parents are in llap, before considering vertex"),
+        "Enforce that all parents are in llap, before considering vertex"),
     LLAP_AUTO_ENFORCE_VECTORIZED("hive.llap.auto.enforce.vectorized", true,
-      "Enforce that inputs are vectorized, before considering vertex"),
+        "Enforce that inputs are vectorized, before considering vertex"),
     LLAP_AUTO_ENFORCE_STATS("hive.llap.auto.enforce.stats", true,
-      "Enforce that col stats are available, before considering vertex"),
+        "Enforce that col stats are available, before considering vertex"),
     LLAP_AUTO_MAX_INPUT("hive.llap.auto.max.input.size", 10*1024*1024*1024L,
-      "Check input size, before considering vertex (-1 disables check)"),
+        "Check input size, before considering vertex (-1 disables check)"),
     LLAP_AUTO_MAX_OUTPUT("hive.llap.auto.max.output.size", 1*1024*1024*1024L,
-      "Check output size, before considering vertex (-1 disables check)"),
+        "Check output size, before considering vertex (-1 disables check)"),
     LLAP_EXECUTION_MODE("hive.llap.execution.mode", "none",
-      new StringSet("auto", "none", "all", "map"),
+        new StringSet("auto", "none", "all", "map"),
         "Chooses whether query fragments will run in container or in llap"),
     LLAP_OBJECT_CACHE_ENABLED("hive.llap.object.cache.enabled", true,
-      "Cache objects (plans, hashtables, etc) in llap"),
+        "Cache objects (plans, hashtables, etc) in llap"),
     LLAP_QUEUE_METRICS_PERCENTILE_INTERVALS("hive.llap.queue.metrics.percentiles.intervals", "",
-      "Comma-delimited set of integers denoting the desired rollover intervals (in seconds) for" +
-      "percentile latency metrics on the LLAP daemon producer-consumer queue. By default," +
-      "percentile latency metrics are disabled."),
+        "Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n" +
+        "for percentile latency metrics on the LLAP daemon producer-consumer queue.\n" +
+        "By default, percentile latency metrics are disabled."),
     LLAP_IO_THREADPOOL_SIZE("hive.llap.io.threadpool.size", 10,
        "Specify the number of threads to use for low-level IO thread pool."),
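
[Editor's note] The new descriptions spell out how the LLAP IO cache settings relate to each other (power-of-2 allocation sizes, arena-sized growth, cache size a multiple of the arena size). As a minimal illustration only, not part of the patch, the sketch below sets these properties programmatically. Since HiveConf extends Hadoop's Configuration (visible in the hunk headers above), plain Configuration setters are used; the class name and the chosen values are hypothetical examples picked to satisfy the documented constraints, not recommended defaults.

// Illustrative sketch only; values are hypothetical examples.
import org.apache.hadoop.hive.conf.HiveConf;

public class LlapIoCacheConfigExample {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();

    conf.setBoolean("hive.llap.io.enabled", true);            // turn on the LLAP IO layer
    conf.setBoolean("hive.llap.io.use.lowlevel.cache", true);  // "must always be true for now"

    // Allocation bounds: both must be powers of 2; min should match the expected
    // ORC compression buffer size (or the next lower power of 2).
    conf.setInt("hive.llap.io.cache.orc.alloc.min", 256 * 1024);
    conf.setInt("hive.llap.io.cache.orc.alloc.max", 16 * 1024 * 1024);

    // Cache grows in arena-sized steps; total size must be a multiple of the arena size.
    conf.setInt("hive.llap.io.cache.orc.arena.size", 128 * 1024 * 1024);
    conf.setLong("hive.llap.io.cache.orc.size", 4L * 1024 * 1024 * 1024); // 4 GB = 32 arenas

    conf.setBoolean("hive.llap.io.cache.direct", true); // direct (off-heap) allocation
    conf.setBoolean("hive.llap.io.use.lrfu", true);     // LRFU instead of the default FIFO policy
    conf.setFloat("hive.llap.io.lrfu.lambda", 0.01f);

    System.out.println("ORC cache size = " + conf.getLong("hive.llap.io.cache.orc.size", 0L));
  }
}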
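
[Editor's note] The size relationships the new descriptions document can be checked with simple arithmetic: the min/max allocation and arena sizes must be powers of two, and the total cache size must be a multiple of the arena size. A small standalone sketch of such a check follows; the class and method names are hypothetical and this is not code from the patch.

// Illustrative sketch of the documented constraints; not part of the Hive codebase.
public final class LlapCacheSettingsCheck {
  private LlapCacheSettingsCheck() {}

  static boolean isPowerOfTwo(long v) {
    // A positive value is a power of two iff it has exactly one bit set.
    return v > 0 && (v & (v - 1)) == 0;
  }

  /** Checks the relationships described in the config documentation above. */
  static void validate(long minAlloc, long maxAlloc, long arenaSize, long cacheSize) {
    if (!isPowerOfTwo(minAlloc) || !isPowerOfTwo(maxAlloc) || !isPowerOfTwo(arenaSize)) {
      throw new IllegalArgumentException("min/max allocation and arena size must be powers of 2");
    }
    if (minAlloc > maxAlloc) {
      throw new IllegalArgumentException("minimum allocation cannot exceed maximum allocation");
    }
    if (cacheSize % arenaSize != 0) {
      throw new IllegalArgumentException("cache size must be a multiple of arena size");
    }
  }

  public static void main(String[] args) {
    // Defaults from the patch: 128K min, 16M max, 128M arena, 1G cache -> consistent.
    validate(128 * 1024, 16 * 1024 * 1024, 128 * 1024 * 1024, 1024L * 1024 * 1024);
    System.out.println("default LLAP ORC cache settings are consistent");
  }
}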