hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hashut...@apache.org
Subject svn commit: r1665716 [2/7] - in /hive/trunk: common/src/java/org/apache/hadoop/hive/conf/ contrib/src/test/queries/clientnegative/ contrib/src/test/queries/clientpositive/ contrib/src/test/results/clientnegative/ contrib/src/test/results/clientpositive...
Date Tue, 10 Mar 2015 22:00:43 GMT
Added: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSQL11ReservedKeyWordsPositive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSQL11ReservedKeyWordsPositive.java?rev=1665716&view=auto
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSQL11ReservedKeyWordsPositive.java (added)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSQL11ReservedKeyWordsPositive.java Tue Mar 10 22:00:41 2015
@@ -0,0 +1,801 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Parser tests for SQL11 Reserved KeyWords. Please find more information in
+ * HIVE-6617. Total number : 74
+ */
+public class TestSQL11ReservedKeyWordsPositive {
+  private static HiveConf conf;
+
+  private ParseDriver pd;
+
+  @BeforeClass
+  public static void initialize() {
+    conf = new HiveConf(SemanticAnalyzer.class);
+    conf.setBoolVar(ConfVars.HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS, false);
+    SessionState.start(conf);
+  }
+
+  @Before
+  public void setup() throws SemanticException, IOException {
+    pd = new ParseDriver();
+  }
+
+  ASTNode parse(String query) throws ParseException {
+    ASTNode nd = null;
+    try {
+      nd = pd.parse(query, new Context(conf));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    return (ASTNode) nd.getChild(0);
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ALL() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE ALL (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME ALL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ALTER() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE ALTER (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME ALTER) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ARRAY() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE ARRAY (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME ARRAY) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_AS() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE AS (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME AS) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_AUTHORIZATION() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE AUTHORIZATION (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME AUTHORIZATION) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BETWEEN() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE BETWEEN (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME BETWEEN) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BIGINT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE BIGINT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME BIGINT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BINARY() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE BINARY (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME BINARY) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BOOLEAN() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE BOOLEAN (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME BOOLEAN) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BOTH() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE BOTH (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME BOTH) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BY() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE BY (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME BY) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_CREATE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE CREATE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME CREATE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_CUBE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE CUBE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME CUBE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_CURRENT_DATE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE CURRENT_DATE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME CURRENT_DATE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_CURRENT_TIMESTAMP() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE CURRENT_TIMESTAMP (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME CURRENT_TIMESTAMP) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_CURSOR() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE CURSOR (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME CURSOR) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DATE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE DATE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME DATE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DECIMAL() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE DECIMAL (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME DECIMAL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DELETE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE DELETE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME DELETE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DESCRIBE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE DESCRIBE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME DESCRIBE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DOUBLE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE DOUBLE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME DOUBLE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DROP() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE DROP (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME DROP) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_EXISTS() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE EXISTS (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME EXISTS) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_EXTERNAL() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE EXTERNAL (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME EXTERNAL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_FALSE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE FALSE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME FALSE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_FETCH() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE FETCH (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME FETCH) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_FLOAT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE FLOAT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME FLOAT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_FOR() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE FOR (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME FOR) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_FULL() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE FULL (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME FULL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_GRANT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE GRANT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME GRANT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_GROUP() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE GROUP (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME GROUP) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_GROUPING() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE GROUPING (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME GROUPING) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_IMPORT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE IMPORT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME IMPORT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_IN() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE IN (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME IN) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_INNER() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE INNER (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME INNER) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_INSERT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE INSERT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME INSERT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_INT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE INT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME INT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_INTERSECT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE INTERSECT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME INTERSECT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_INTO() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE INTO (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME INTO) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_IS() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE IS (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME IS) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_LATERAL() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE LATERAL (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME LATERAL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_LEFT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE LEFT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME LEFT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_LIKE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE LIKE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME LIKE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_LOCAL() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE LOCAL (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME LOCAL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_NONE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE NONE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME NONE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_NULL() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE NULL (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME NULL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_OF() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE OF (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME OF) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ORDER() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE ORDER (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME ORDER) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_OUT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE OUT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME OUT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_OUTER() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE OUTER (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME OUTER) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_PARTITION() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE PARTITION (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME PARTITION) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_PERCENT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE PERCENT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME PERCENT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_PROCEDURE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE PROCEDURE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME PROCEDURE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_RANGE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE RANGE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME RANGE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_READS() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE READS (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME READS) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_REVOKE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE REVOKE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME REVOKE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_RIGHT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE RIGHT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME RIGHT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ROLLUP() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE ROLLUP (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME ROLLUP) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ROW() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE ROW (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME ROW) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ROWS() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE ROWS (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME ROWS) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_SET() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE SET (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME SET) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_SMALLINT() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE SMALLINT (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME SMALLINT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TABLE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE TABLE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME TABLE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TIMESTAMP() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE TIMESTAMP (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME TIMESTAMP) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TO() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE TO (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME TO) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TRIGGER() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE TRIGGER (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME TRIGGER) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TRUE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE TRUE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME TRUE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TRUNCATE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE TRUNCATE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME TRUNCATE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_UNION() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE UNION (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME UNION) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_UPDATE() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE UPDATE (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME UPDATE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_USER() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE USER (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME USER) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_USING() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE USING (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME USING) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_VALUES() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE VALUES (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME VALUES) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_WITH() throws ParseException {
+    ASTNode ast = parse("CREATE TABLE WITH (col STRING)");
+    Assert
+        .assertEquals(
+            "AST doesn't match",
+            "(TOK_CREATETABLE (TOK_TABNAME WITH) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))",
+            ast.toStringTree());
+  }
+}

Modified: hive/trunk/ql/src/test/queries/clientnegative/serde_regex.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/serde_regex.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/serde_regex.q (original)
+++ hive/trunk/ql/src/test/queries/clientnegative/serde_regex.q Tue Mar 10 22:00:41 2015
@@ -3,7 +3,7 @@ USE default;
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time TIMESTAMP,
   request STRING,
   status INT,

Modified: hive/trunk/ql/src/test/queries/clientnegative/serde_regex2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/serde_regex2.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/serde_regex2.q (original)
+++ hive/trunk/ql/src/test/queries/clientnegative/serde_regex2.q Tue Mar 10 22:00:41 2015
@@ -3,7 +3,7 @@ USE default;
  CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status STRING,

Modified: hive/trunk/ql/src/test/queries/clientnegative/serde_regex3.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/serde_regex3.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/serde_regex3.q (original)
+++ hive/trunk/ql/src/test/queries/clientnegative/serde_regex3.q Tue Mar 10 22:00:41 2015
@@ -3,7 +3,7 @@ USE default;
  CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status STRING,

Added: hive/trunk/ql/src/test/queries/clientpositive/ambiguitycheck.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/ambiguitycheck.q?rev=1665716&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/ambiguitycheck.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/ambiguitycheck.q Tue Mar 10 22:00:41 2015
@@ -0,0 +1,44 @@
+set hive.cbo.enable=false;
+
+-- check cluster/distribute/partitionBy
+SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) ;
+
+SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),value) ;
+
+SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,(value)) ;
+
+SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(value)) ;
+
+SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(((value))));
+
+-- HIVE-6950
+SELECT tab1.key,
+       tab1.value,
+       SUM(1)
+FROM src as tab1
+GROUP BY tab1.key,
+         tab1.value
+GROUPING SETS ((tab1.key, tab1.value));
+
+SELECT key,
+       src.value,
+       SUM(1)
+FROM src
+GROUP BY key,
+         src.value
+GROUPING SETS ((key, src.value));
+
+explain extended select int(1.2) from src limit 1;
+select int(1.2) from src limit 1;
+select bigint(1.34) from src limit 1;
+select binary('1') from src limit 1;
+select boolean(1) from src limit 1;
+select date('1') from src limit 2;
+select double(1) from src limit 1;
+select float(1) from src limit 1;
+select smallint(0.9) from src limit 1;
+select timestamp('1') from src limit 2;
+
+explain extended desc default.src key;
+
+desc default.src key;

Modified: hive/trunk/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q Tue Mar 10 22:00:41 2015
@@ -1,11 +1,11 @@
 set hive.fetch.task.conversion=more;
 
-create table array_table (array array<string>, index int );
+create table array_table (`array` array<string>, index int );
 insert into table array_table select array('first', 'second', 'third'), key%3 from src tablesample (4 rows);
 
 explain
-select index, array[index] from array_table;
-select index, array[index] from array_table;
+select index, `array`[index] from array_table;
+select index, `array`[index] from array_table;
 
 create table map_table (data map<string,string>, key int );
 insert into table map_table select map('1','one','2','two','3','three'), cast((key%3+1) as int) from src tablesample (4 rows);

Modified: hive/trunk/ql/src/test/queries/clientpositive/decimal_10_0.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/decimal_10_0.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/decimal_10_0.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/decimal_10_0.q Tue Mar 10 22:00:41 2015
@@ -1,9 +1,9 @@
-DROP TABLE IF EXISTS DECIMAL;
+DROP TABLE IF EXISTS `DECIMAL`;
 
-CREATE TABLE DECIMAL (dec decimal);
+CREATE TABLE `DECIMAL` (dec decimal);
 
-LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL;
+LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE `DECIMAL`;
 
-SELECT dec FROM DECIMAL;
+SELECT dec FROM `DECIMAL`;
 
-DROP TABLE DECIMAL;
\ No newline at end of file
+DROP TABLE `DECIMAL`;
\ No newline at end of file

Modified: hive/trunk/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q Tue Mar 10 22:00:41 2015
@@ -8,49 +8,49 @@ set hive.optimize.index.filter=true;
 select distinct ds from srcpart;
 select distinct hr from srcpart;
 
-EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds;
-create table srcpart_date as select ds as ds, ds as date from srcpart group by ds;
+EXPLAIN create table srcpart_date as select ds as ds, ds as `date`  from srcpart group by ds;
+create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds;
 create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr;
-create table srcpart_date_hour as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr;
+create table srcpart_date_hour as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr;
 create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr;
 
 -- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = '2008-04-08';
 
 -- multiple sources, single key
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=false;
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where hr = 11 and ds = '2008-04-08';
 
 -- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = '2008-04-08' and hr = 11;
 
 -- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = 'I DONT EXIST';
 
@@ -73,34 +73,34 @@ select count(*) from srcpart where cast(
 
 
 -- parent is reduce tasks
-EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08';
-select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
+select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
 select count(*) from srcpart where ds = '2008-04-08';
 
 -- non-equi join
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
-select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
+select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
 
 -- old style join syntax
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
-select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
+select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
 
 -- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
-EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 
 -- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 
 -- with static pruning
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
 
 -- union + subquery
 EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
@@ -115,26 +115,26 @@ set hive.auto.convert.join.noconditional
 set hive.auto.convert.join.noconditionaltask.size = 10000000;
 
 -- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 select count(*) from srcpart where ds = '2008-04-08';
 
 -- multiple sources, single key
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart where hr = 11 and ds = '2008-04-08';
 
 -- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 select count(*) from srcpart where ds = '2008-04-08' and hr = 11;
 
 -- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 -- Disabled until TEZ-1486 is fixed
--- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 
 -- expressions
 EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
@@ -144,27 +144,27 @@ select count(*) from srcpart join srcpar
 select count(*) from srcpart where hr = 11;
 
 -- parent is reduce tasks
-EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08';
-select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
+select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
 select count(*) from srcpart where ds = '2008-04-08';
 
 -- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
-EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 
 -- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 
 -- with static pruning
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
 -- Disabled until TEZ-1486 is fixed
 -- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
--- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+-- where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
 
 -- union + subquery
 EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
@@ -180,8 +180,8 @@ set hive.vectorized.execution.enabled=fa
 set hive.exec.max.dynamic.partitions=1000;
 
 insert into table srcpart_orc partition (ds, hr) select key, value, ds, hr from srcpart;
-EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09');
-select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09');
+EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09');
+select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09');
 select count(*) from srcpart where (ds = '2008-04-08' or ds = '2008-04-09') and hr = 11;
 
 drop table srcpart_orc;

Modified: hive/trunk/ql/src/test/queries/clientpositive/innerjoin.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/innerjoin.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/innerjoin.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/innerjoin.q Tue Mar 10 22:00:41 2015
@@ -14,13 +14,14 @@ INSERT OVERWRITE TABLE dest_j1 SELECT sr
 SELECT dest_j1.* FROM dest_j1;
 
 -- verify that INNER is a non-reserved word for backwards compatibility
-create table inner(i int);
+-- change from HIVE-6617, inner is a SQL2011 reserved keyword.
+create table `inner`(i int);
 
-select i from inner;
+select i from `inner`;
 
-create table i(inner int);
+create table i(`inner` int);
 
-select inner from i;
+select `inner` from i;
 
-explain select * from (select * from src) inner left outer join src
-on inner.key=src.key;
+explain select * from (select * from src) `inner` left outer join src
+on `inner`.key=src.key;

Modified: hive/trunk/ql/src/test/queries/clientpositive/keyword_1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/keyword_1.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/keyword_1.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/keyword_1.q Tue Mar 10 22:00:41 2015
@@ -1,9 +1,9 @@
 -- SORT_BEFORE_DIFF
 
-create table test_user (user string, `group` string);
+create table test_user (`user` string, `group` string);
 grant select on table test_user to user hive_test;
 
-explain select user from test_user;
+explain select `user` from test_user;
 
 show grant user hive_test on table test_user;
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q Tue Mar 10 22:00:41 2015
@@ -1,12 +1,12 @@
-CREATE TABLE table(string string) STORED AS TEXTFILE;
+CREATE TABLE `table`(`string` string) STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE table;
+LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE `table`;
 
-SELECT table, count(1)
+SELECT `table`, count(1)
 FROM
 (
-  FROM table
-  SELECT TRANSFORM (table.string)
-  USING 'java -cp ../util/target/classes/ org.apache.hadoop.hive.scripts.extracturl' AS (table, count)
+  FROM `table`
+  SELECT TRANSFORM (`table`.`string`)
+  USING 'java -cp ../util/target/classes/ org.apache.hadoop.hive.scripts.extracturl' AS (`table`, count)
 ) subq
-GROUP BY table;
+GROUP BY `table`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q Tue Mar 10 22:00:41 2015
@@ -1,26 +1,26 @@
-DROP TABLE insert;
+DROP TABLE `insert`;
 
-CREATE TABLE insert (key INT, as STRING);
+CREATE TABLE `insert` (key INT, `as` STRING);
 
-EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100;
-INSERT INTO TABLE insert SELECT * FROM src LIMIT 100;
+EXPLAIN INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100;
+INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100;
 SELECT SUM(HASH(hash)) FROM (
-    SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM insert
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM `insert`
 ) t;
 
-EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100;
-INSERT INTO TABLE insert SELECT * FROM src LIMIT 100;
+EXPLAIN INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100;
+INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100;
 SELECT SUM(HASH(sum)) FROM (
-    SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM insert
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM `insert`
 ) t;
 
-SELECT COUNT(*) FROM insert;
+SELECT COUNT(*) FROM `insert`;
 
-EXPLAIN INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10;
-INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10;
+EXPLAIN INSERT OVERWRITE TABLE `insert` SELECT * FROM src LIMIT 10;
+INSERT OVERWRITE TABLE `insert` SELECT * FROM src LIMIT 10;
 SELECT SUM(HASH(add)) FROM (
-    SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM insert
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM `insert`
 ) t;
 
 
-DROP TABLE insert;
+DROP TABLE `insert`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q Tue Mar 10 22:00:41 2015
@@ -4,7 +4,7 @@
 
 DROP TABLE PW17;
 ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-custom-serde/${system:hive.version}/hive-it-custom-serde-${system:hive.version}.jar;
-CREATE TABLE PW17(USER STRING, COMPLEXDT ARRAY<INT>) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1';
+CREATE TABLE PW17(`USER` STRING, COMPLEXDT ARRAY<INT>) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1';
 LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17 PARTITION (YEAR='1');
 ALTER TABLE PW17 PARTITION(YEAR='1') SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe2';
 ALTER TABLE PW17 SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1';
@@ -13,13 +13,13 @@ SELECT * FROM PW17;
 
 -- Test for non-parititioned table. 
 DROP TABLE PW17_2;
-CREATE TABLE PW17_2(USER STRING, COMPLEXDT ARRAY<INT>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1';
+CREATE TABLE PW17_2(`USER` STRING, COMPLEXDT ARRAY<INT>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1';
 LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17_2;
 -- Without the fix HIVE-5199, will throw cast exception via MapOperator
 SELECT COUNT(*) FROM PW17_2;
 
 DROP TABLE PW17_3;
-CREATE TABLE PW17_3(USER STRING, COMPLEXDT ARRAY<ARRAY<INT> >) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3';
+CREATE TABLE PW17_3(`USER` STRING, COMPLEXDT ARRAY<ARRAY<INT> >) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3';
 LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17_3 PARTITION (YEAR='1');
 ALTER TABLE PW17_3 PARTITION(YEAR='1') SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe2';
 ALTER TABLE PW17_3 SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3';
@@ -27,7 +27,7 @@ ALTER TABLE PW17_3 SET SERDE 'org.apache
 SELECT * FROM PW17;
 
 DROP TABLE PW17_4;
-CREATE TABLE PW17_4(USER STRING, COMPLEXDT ARRAY<ARRAY<INT> >) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3';
+CREATE TABLE PW17_4(`USER` STRING, COMPLEXDT ARRAY<ARRAY<INT> >) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3';
 LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17_4;
 -- Without the fix HIVE-5285, will throw cast exception via MapOperator
 SELECT COUNT(*) FROM PW17_4;

Modified: hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q Tue Mar 10 22:00:41 2015
@@ -5,7 +5,7 @@
 
 DROP TABLE PW18;
 ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-custom-serde/${system:hive.version}/hive-it-custom-serde-${system:hive.version}.jar;
-CREATE TABLE PW18(USER STRING, COMPLEXDT UNIONTYPE<INT, DOUBLE>) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5';
+CREATE TABLE PW18(`USER` STRING, COMPLEXDT UNIONTYPE<INT, DOUBLE>) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5';
 LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW18 PARTITION (YEAR='1');
 ALTER TABLE PW18 PARTITION(YEAR='1') SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe4';
 -- Without the fix HIVE-5202, will throw unsupported data type exception.
@@ -13,7 +13,7 @@ SELECT * FROM PW18;
 
 -- Test for non-partitioned table. 
 DROP TABLE PW18_2;
-CREATE TABLE PW18_2(USER STRING, COMPLEXDT UNIONTYPE<INT, DOUBLE>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5';
+CREATE TABLE PW18_2(`USER` STRING, COMPLEXDT UNIONTYPE<INT, DOUBLE>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5';
 LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW18_2;
 -- Without the fix HIVE-5202, will throw unsupported data type exception
 SELECT COUNT(*) FROM PW18_2;

Modified: hive/trunk/ql/src/test/queries/clientpositive/ppd_field_garbage.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/ppd_field_garbage.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/ppd_field_garbage.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/ppd_field_garbage.q Tue Mar 10 22:00:41 2015
@@ -1,3 +1,5 @@
+set hive.support.sql11.reserved.keywords=false;
+-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011
 -- ppd leaves invalid expr in field expr
 CREATE TABLE test_issue (fileid int, infos ARRAY<STRUCT<user:INT>>, test_c STRUCT<user_c:STRUCT<age:INT>>);
 CREATE VIEW v_test_issue AS SELECT fileid, i.user, test_c.user_c.age FROM test_issue LATERAL VIEW explode(infos) info AS i;

Modified: hive/trunk/ql/src/test/queries/clientpositive/serde_regex.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/serde_regex.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/serde_regex.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/serde_regex.q Tue Mar 10 22:00:41 2015
@@ -2,7 +2,7 @@ EXPLAIN
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status STRING,
@@ -18,7 +18,7 @@ STORED AS TEXTFILE;
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status STRING,

Modified: hive/trunk/ql/src/test/queries/clientpositive/tez_union_group_by.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/tez_union_group_by.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/tez_union_group_by.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/tez_union_group_by.q Tue Mar 10 22:00:41 2015
@@ -4,7 +4,7 @@ u bigint,
 t string,
 st string
 )
-PARTITIONED BY (date string)
+PARTITIONED BY (`date` string)
 STORED AS ORC 
 TBLPROPERTIES ("orc.compress"="ZLIB");
 
@@ -12,7 +12,7 @@ CREATE TABLE y
 (
 u bigint
 )
-PARTITIONED BY (date string)
+PARTITIONED BY (`date` string)
 STORED AS ORC 
 TBLPROPERTIES ("orc.compress"="ZLIB");
 
@@ -20,7 +20,7 @@ CREATE TABLE z
 (
 u bigint
 )
-PARTITIONED BY (date string)
+PARTITIONED BY (`date` string)
 STORED AS ORC 
 TBLPROPERTIES ("orc.compress"="ZLIB");
 
@@ -37,14 +37,14 @@ EXPLAIN
 SELECT o.u, n.u
 FROM 
 (
-SELECT m.u, Min(date) as ft
+SELECT m.u, Min(`date`) as ft
 FROM 
 (
-SELECT u, date FROM x WHERE date < '2014-09-02' 
+SELECT u, `date` FROM x WHERE `date` < '2014-09-02' 
 UNION ALL
-SELECT u, date FROM y WHERE date < '2014-09-02' 
+SELECT u, `date` FROM y WHERE `date` < '2014-09-02' 
 UNION ALL
-SELECT u, date FROM z WHERE date < '2014-09-02' 
+SELECT u, `date` FROM z WHERE `date` < '2014-09-02' 
 ) m
 GROUP BY m.u
 ) n 
@@ -54,7 +54,7 @@ SELECT x.u
 FROM x
 JOIN v 
 ON (x.t = v.t AND x.st <=> v.st)
-WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03'
+WHERE x.`date` >= '2014-03-04' AND x.`date` < '2014-09-03'
 GROUP BY x.u
 ) o
 ON n.u = o.u 
@@ -63,14 +63,14 @@ WHERE n.u <> 0 AND n.ft <= '2014-09-02';
 SELECT o.u, n.u
 FROM 
 (
-SELECT m.u, Min(date) as ft
+SELECT m.u, Min(`date`) as ft
 FROM 
 (
-SELECT u, date FROM x WHERE date < '2014-09-02' 
+SELECT u, `date` FROM x WHERE `date` < '2014-09-02' 
 UNION ALL
-SELECT u, date FROM y WHERE date < '2014-09-02' 
+SELECT u, `date` FROM y WHERE `date` < '2014-09-02' 
 UNION ALL
-SELECT u, date FROM z WHERE date < '2014-09-02' 
+SELECT u, `date` FROM z WHERE `date` < '2014-09-02' 
 ) m
 GROUP BY m.u
 ) n 
@@ -80,7 +80,7 @@ SELECT x.u
 FROM x
 JOIN v 
 ON (x.t = v.t AND x.st <=> v.st)
-WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03'
+WHERE x.`date` >= '2014-03-04' AND x.`date` < '2014-09-03'
 GROUP BY x.u
 ) o
 ON n.u = o.u 

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_remove_1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_remove_1.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_remove_1.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_remove_1.q Tue Mar 10 22:00:41 2015
@@ -18,7 +18,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23
 
 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as textfile;
+create table outputTbl1(key string, `values` bigint) stored as textfile;
 
 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
 
@@ -26,20 +26,20 @@ explain
 insert overwrite table outputTbl1
 SELECT *
 FROM (
-  SELECT key, count(1) as values from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1 group by key
   UNION ALL
-  SELECT key, count(1) as values from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;
 
 insert overwrite table outputTbl1
 SELECT *
 FROM (
-  SELECT key, count(1) as values from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1 group by key
   UNION ALL
-  SELECT key, count(1) as values from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;
 
 desc formatted outputTbl1;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_remove_10.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_remove_10.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_remove_10.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_remove_10.q Tue Mar 10 22:00:41 2015
@@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23
 
 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;
 
 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
 
@@ -31,28 +31,28 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
 select * FROM (
-  SELECT key, count(1) as values from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1 group by key
   UNION ALL
-  SELECT key, 2 as values from inputTbl1
+  SELECT key, 2 as `values` from inputTbl1
 ) a
 )b;
 
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
 select * FROM (
-  SELECT key, count(1) as values from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1 group by key
   UNION ALL
-  SELECT key, 2 as values from inputTbl1
+  SELECT key, 2 as `values` from inputTbl1
 ) a
 )b;
 
 desc formatted outputTbl1;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_remove_11.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_remove_11.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_remove_11.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_remove_11.q Tue Mar 10 22:00:41 2015
@@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23
 
 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;
 
 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
 
@@ -31,28 +31,28 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
 select * FROM (
-  SELECT key, 2 values from inputTbl1 
+  SELECT key, 2 `values` from inputTbl1 
   UNION ALL
-  SELECT key, 3 as values from inputTbl1
+  SELECT key, 3 as `values` from inputTbl1
 ) a
 )b;
 
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
 select * FROM (
-  SELECT key, 2 as values from inputTbl1 
+  SELECT key, 2 as `values` from inputTbl1 
   UNION ALL
-  SELECT key, 3 as values from inputTbl1
+  SELECT key, 3 as `values` from inputTbl1
 ) a
 )b;
 
 desc formatted outputTbl1;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_remove_12.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_remove_12.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_remove_12.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_remove_12.q Tue Mar 10 22:00:41 2015
@@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true;
 -- on
 
 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;
 
 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
 
@@ -31,22 +31,22 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;
 
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;
 
 desc formatted outputTbl1;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_remove_13.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_remove_13.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_remove_13.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_remove_13.q Tue Mar 10 22:00:41 2015
@@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true;
 -- on
 
 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;
 
 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
 
@@ -31,22 +31,22 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-select key, count(1) as values from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1 group by key
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;
 
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-select key, count(1) as values from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1 group by key
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;
 
 desc formatted outputTbl1;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_remove_14.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_remove_14.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_remove_14.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_remove_14.q Tue Mar 10 22:00:41 2015
@@ -24,7 +24,7 @@ set mapred.input.dir.recursive=true;
 -- on
 
 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;
 
 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
 
@@ -32,22 +32,22 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;
 
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;
 
 desc formatted outputTbl1;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_remove_15.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_remove_15.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_remove_15.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_remove_15.q Tue Mar 10 22:00:41 2015
@@ -24,7 +24,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23
 
 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile;
 
 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
 
@@ -32,17 +32,17 @@ explain
 insert overwrite table outputTbl1 partition (ds)
 SELECT *
 FROM (
-  SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
   UNION ALL
-  SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
 ) a;
 
 insert overwrite table outputTbl1 partition (ds)
 SELECT *
 FROM (
-  SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
   UNION ALL
-  SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
 ) a;
 
 desc formatted outputTbl1;
@@ -50,5 +50,5 @@ desc formatted outputTbl1;
 show partitions outputTbl1;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 where ds = '1' order by key, values;
-select * from outputTbl1 where ds = '2' order by key, values;
+select * from outputTbl1 where ds = '1' order by key, `values`;
+select * from outputTbl1 where ds = '2' order by key, `values`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_remove_16.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_remove_16.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_remove_16.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_remove_16.q Tue Mar 10 22:00:41 2015
@@ -24,7 +24,7 @@ set hive.exec.dynamic.partition=true;
 -- to run the test only on hadoop 23
 
 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile ;
+create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile ;
 
 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
 
@@ -32,22 +32,22 @@ explain
 insert overwrite table outputTbl1 partition (ds)
 SELECT *
 FROM (
-  SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
   UNION ALL
-  SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
 ) a;
 
 insert overwrite table outputTbl1 partition (ds)
 SELECT *
 FROM (
-  SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
   UNION ALL
-  SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
 ) a;
 
 desc formatted outputTbl1;
 show partitions outputTbl1;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 where ds = '1' order by key, values;
-select * from outputTbl1 where ds = '2' order by key, values;
+select * from outputTbl1 where ds = '1' order by key, `values`;
+select * from outputTbl1 where ds = '2' order by key, `values`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_remove_17.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_remove_17.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_remove_17.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_remove_17.q Tue Mar 10 22:00:41 2015
@@ -21,7 +21,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23
 
 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile;
 
 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
 
@@ -29,22 +29,22 @@ explain
 insert overwrite table outputTbl1 partition (ds)
 SELECT *
 FROM (
-  SELECT key, 1 as values, '1' as ds from inputTbl1
+  SELECT key, 1 as `values`, '1' as ds from inputTbl1
   UNION ALL
-  SELECT key, 2 as values, '2' as ds from inputTbl1
+  SELECT key, 2 as `values`, '2' as ds from inputTbl1
 ) a;
 
 insert overwrite table outputTbl1 partition (ds)
 SELECT *
 FROM (
-  SELECT key, 1 as values, '1' as ds from inputTbl1
+  SELECT key, 1 as `values`, '1' as ds from inputTbl1
   UNION ALL
-  SELECT key, 2 as values, '2' as ds from inputTbl1
+  SELECT key, 2 as `values`, '2' as ds from inputTbl1
 ) a;
 
 desc formatted outputTbl1;
 show partitions outputTbl1;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 where ds = '1' order by key, values;
-select * from outputTbl1 where ds = '2' order by key, values;
+select * from outputTbl1 where ds = '1' order by key, `values`;
+select * from outputTbl1 where ds = '2' order by key, `values`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_remove_18.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_remove_18.q?rev=1665716&r1=1665715&r2=1665716&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_remove_18.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_remove_18.q Tue Mar 10 22:00:41 2015
@@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23
 
 create table inputTbl1(key string, ds string) stored as textfile;
-create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile;
+create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile;
 
 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
 
@@ -31,17 +31,17 @@ explain
 insert overwrite table outputTbl1 partition (ds)
 SELECT *
 FROM (
-  SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
   UNION ALL
-  SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
 ) a;
 
 insert overwrite table outputTbl1 partition (ds)
 SELECT *
 FROM (
-  SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
   UNION ALL
-  SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
 ) a;
 
 desc formatted outputTbl1;
@@ -49,6 +49,6 @@ desc formatted outputTbl1;
 show partitions outputTbl1;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 where ds = '11' order by key, values;
-select * from outputTbl1 where ds = '18' order by key, values;
-select * from outputTbl1 where ds is not null order by key, values, ds;
+select * from outputTbl1 where ds = '11' order by key, `values`;
+select * from outputTbl1 where ds = '18' order by key, `values`;
+select * from outputTbl1 where ds is not null order by key, `values`, ds;



Mime
View raw message