impala-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From k...@apache.org
Subject [09/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)
Date Fri, 30 Sep 2016 02:14:26 GMT
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java
new file mode 100644
index 0000000..e9c9a14
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java
@@ -0,0 +1,158 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.cloudera.impala.catalog.Column;
+import com.cloudera.impala.catalog.ColumnStats;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.cloudera.impala.thrift.TAlterTableUpdateStatsParams;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+
+/**
+* Represents an ALTER TABLE [<dbName>.]<tableName> SET COLUMN STATS <colName>
+* ('statsKey'='val','statsKey2'='val2') statement.
+*
+* The keys as well as the values are specified as string literals to be consistent
+* with the existing DDL for setting TBLPROPERTIES/SERDEPROPERTIES, in particular,
+* setting the 'numRows' table/partition property.
+*
+* Stats key comparisons are case-insensitive.
+*/
+public class AlterTableSetColumnStats extends AlterTableStmt {
+  // Name of the column whose stats are being altered.
+  private final String colName_;
+  // Raw user-supplied key/value pairs; keys are validated in analyze().
+  private final HashMap<String, String> statsMap_;
+
+  // Complete column stats reflecting this alteration. Existing stats values
+  // are preserved. Result of analysis.
+  private ColumnStats colStats_;
+
+  public AlterTableSetColumnStats(TableName tableName, String colName,
+      HashMap<String, String> statsMap) {
+    super(tableName);
+    colName_ = colName;
+    statsMap_ = statsMap;
+  }
+
+  /**
+   * Validates the target column and every entry in statsMap_, then populates
+   * colStats_ with a copy of the column's current stats updated with the new
+   * values. Throws an AnalysisException if the column does not exist, is a
+   * partition column, has an unsupported type, or if any key/value is invalid.
+   */
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+
+    Column col = getTargetTable().getColumn(colName_);
+    if (col == null) {
+      throw new AnalysisException(
+          String.format("Column '%s' does not exist in table: %s",
+              colName_, getTargetTable().getFullName()));
+    }
+    // Cannot update stats on partition columns because the HMS has no entries
+    // for them, and the stats can be computed directly from the metadata.
+    if (col.getPosition() < getTargetTable().getNumClusteringCols()) {
+      throw new AnalysisException(
+          "Updating the stats of a partition column is not allowed: " + colName_);
+    }
+    // Cannot update the stats if they are not supported for the column's type.
+    if (!ColumnStats.isSupportedColType(col.getType())) {
+      throw new AnalysisException(String.format(
+          "Statistics for column '%s' are not supported because " +
+          "it has type '%s'.", col.getName(), col.getType().toSql()));
+    }
+
+    // Copy the existing stats and then change the values according to the
+    // stats map of this stmt. The existing stats are first copied to preserve
+    // those stats values that are not changed by this stmt because all stats
+    // values are updated when altering the stats in the HMS.
+    colStats_ = col.getStats().clone();
+    for (Map.Entry<String, String> entry: statsMap_.entrySet()) {
+      // StatsKey.fromString() returns null for an unrecognized key
+      // (comparison is case-insensitive per the class comment).
+      ColumnStats.StatsKey statsKey = ColumnStats.StatsKey.fromString(entry.getKey());
+      if (statsKey == null) {
+        throw new AnalysisException(String.format(
+            "Invalid column stats key: %s\nValid keys are: %s",
+            entry.getKey(), Joiner.on(',').join(ColumnStats.StatsKey.values())));
+      }
+      setStatsValue(statsKey, entry.getValue(), col, colStats_);
+    }
+  }
+
+  /**
+   * Updates the given column stats based on statsKey and statsValue.
+   * Throws an AnalysisException if the statsValue is invalid or not applicable to the
+   * column (e.g., trying to update the avg/max size of a fixed-length column).
+   */
+  private void setStatsValue(ColumnStats.StatsKey statsKey, String statsValue,
+      Column col, ColumnStats stats) throws AnalysisException {
+    // Updating max/avg size is only allowed for variable length columns.
+    if (col.getType().isFixedLengthType()
+        && (statsKey == ColumnStats.StatsKey.AVG_SIZE
+            || statsKey == ColumnStats.StatsKey.MAX_SIZE)) {
+      throw new AnalysisException(String.format(
+          "Cannot update the '%s' stats of column '%s' with type '%s'.\n" +
+          "Changing '%s' is only allowed for variable-length columns.",
+          statsKey, col.getName(), col.getType().toSql(), statsKey));
+    }
+
+    // Integer-valued stats keys.
+    if (statsKey == ColumnStats.StatsKey.NUM_DISTINCT_VALUES ||
+        statsKey == ColumnStats.StatsKey.NUM_NULLS ||
+        statsKey == ColumnStats.StatsKey.MAX_SIZE) {
+      Long statsVal = null;
+      try {
+        statsVal = Long.parseLong(statsValue);
+      } catch (Exception e) {
+        // Ignored: a parse failure leaves statsVal null, reported just below.
+      }
+      // NOTE(review): the check accepts 0 and -1, although the error message
+      // below says "positive integer or -1" — confirm whether 0 is intended.
+      if (statsVal == null || statsVal < -1) {
+        throw new AnalysisException(String.format(
+            "Invalid stats value '%s' for column stats key: %s\n" +
+            "Expected a positive integer or -1 for unknown.",
+            statsValue, statsKey));
+      }
+      stats.update(statsKey, statsVal);
+    } else if (statsKey == ColumnStats.StatsKey.AVG_SIZE) {
+      // Float-valued stats key; must be a finite non-negative value or -1.
+      Float statsVal = null;
+      try {
+        statsVal = Float.parseFloat(statsValue);
+      } catch (Exception e) {
+        // Ignored: a parse failure leaves statsVal null, reported just below.
+      }
+      if (statsVal == null || (statsVal < 0 && statsVal != -1) ||
+          statsVal.isNaN() || statsVal.isInfinite()) {
+        throw new AnalysisException(String.format(
+            "Invalid stats value '%s' for column stats key: %s\n" +
+            "Expected a positive floating-point number or -1 for unknown.",
+            statsValue, statsKey));
+      }
+      stats.update(statsKey, statsVal);
+    } else {
+      // Defensive: fail loudly if a new StatsKey enum value is added without
+      // extending this method.
+      Preconditions.checkState(false, "Unhandled StatsKey value: " + statsKey);
+    }
+  }
+
+  /** Builds the thrift UPDATE_STATS request carrying the analyzed colStats_. */
+  @Override
+  public TAlterTableParams toThrift() {
+   TAlterTableParams params = super.toThrift();
+   params.setAlter_type(TAlterTableType.UPDATE_STATS);
+   TAlterTableUpdateStatsParams updateStatsParams =
+       new TAlterTableUpdateStatsParams();
+   updateStatsParams.setTable_name(getTargetTable().getTableName().toThrift());
+   // NOTE(review): colName_ is already a String; toString() is a no-op here.
+   updateStatsParams.putToColumn_stats(colName_.toString(), colStats_.toThrift());
+   params.setUpdate_stats_params(updateStatsParams);
+   return params;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
new file mode 100644
index 0000000..7e18aa6
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
@@ -0,0 +1,57 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableSetFileFormatParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.cloudera.impala.thrift.THdfsFileFormat;
+
+/**
+ * Represents an ALTER TABLE [PARTITION partitionSpec] SET FILEFORMAT statement.
+ */
+public class AlterTableSetFileFormatStmt extends AlterTableSetStmt {
+  // Target file format; assumed to have been resolved by the parser.
+  private final THdfsFileFormat fileFormat_;
+
+  public AlterTableSetFileFormatStmt(TableName tableName,
+      PartitionSpec partitionSpec, THdfsFileFormat fileFormat) {
+    super(tableName, partitionSpec);
+    this.fileFormat_ = fileFormat;
+  }
+
+  public THdfsFileFormat getFileFormat() { return fileFormat_; }
+
+  /**
+   * Builds the thrift SET_FILE_FORMAT request, including the partition spec
+   * when the statement targets a single partition rather than the whole table.
+   */
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.SET_FILE_FORMAT);
+    TAlterTableSetFileFormatParams fileFormatParams =
+        new TAlterTableSetFileFormatParams(fileFormat_);
+    if (getPartitionSpec() != null) {
+      fileFormatParams.setPartition_spec(getPartitionSpec().toThrift());
+    }
+    params.setSet_file_format_params(fileFormatParams);
+    return params;
+  }
+
+  // NOTE(review): this override only delegates to the base class and adds no
+  // behavior; it could be removed.
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
new file mode 100644
index 0000000..fcc9b53
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
@@ -0,0 +1,86 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.HdfsPartition;
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableSetLocationParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.permission.FsAction;
+
+/**
+ * Represents an ALTER TABLE [PARTITION partitionSpec] SET LOCATION statement.
+ */
+public class AlterTableSetLocationStmt extends AlterTableSetStmt {
+  // New HDFS location; never null (enforced in the constructor).
+  private final HdfsUri location_;
+
+  public AlterTableSetLocationStmt(TableName tableName,
+      PartitionSpec partitionSpec, HdfsUri location) {
+    super(tableName, partitionSpec);
+    Preconditions.checkNotNull(location);
+    this.location_ = location;
+  }
+
+  public HdfsUri getLocation() { return location_; }
+
+  /**
+   * Builds the thrift SET_LOCATION request, including the partition spec
+   * when the statement targets a single partition rather than the whole table.
+   */
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.SET_LOCATION);
+    TAlterTableSetLocationParams locationParams =
+        new TAlterTableSetLocationParams(location_.toString());
+    if (getPartitionSpec() != null) {
+      locationParams.setPartition_spec(getPartitionSpec().toThrift());
+    }
+    params.setSet_location_params(locationParams);
+    return params;
+  }
+
+  /**
+   * In addition to the base-class checks, verifies that the new location is
+   * readable/writable by the user and that the target table or partition is
+   * not HDFS-cached (changing the location of a cached entity is disallowed).
+   */
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    // Requires ALL privileges on the URI and read/write access to the path.
+    location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
+
+    Table table = getTargetTable();
+    Preconditions.checkNotNull(table);
+    if (table instanceof HdfsTable) {
+      HdfsTable hdfsTable = (HdfsTable) table;
+      if (getPartitionSpec() != null) {
+        // Targeting a partition rather than a table.
+        PartitionSpec partitionSpec = getPartitionSpec();
+        HdfsPartition partition = hdfsTable.getPartition(
+            partitionSpec.getPartitionSpecKeyValues());
+        // super.analyze() marked the partition as required to exist (see
+        // AlterTableSetStmt), so a null here indicates an internal error.
+        Preconditions.checkNotNull(partition);
+        if (partition.isMarkedCached()) {
+          throw new AnalysisException(String.format("Target partition is cached, " +
+              "please uncache before changing the location using: ALTER TABLE %s %s " +
+              "SET UNCACHED", table.getFullName(), partitionSpec.toSql()));
+        }
+      } else if (hdfsTable.isMarkedCached()) {
+        throw new AnalysisException(String.format("Target table is cached, please " +
+            "uncache before changing the location using: ALTER TABLE %s SET UNCACHED",
+            table.getFullName()));
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
new file mode 100644
index 0000000..2857211
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
@@ -0,0 +1,58 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.HBaseTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+
+/**
+ * Base class for all ALTER TABLE ... SET statements
+ */
+public class AlterTableSetStmt extends AlterTableStmt {
+  // Optional partition target; null when the statement alters the table itself.
+  protected final PartitionSpec partitionSpec_;
+
+  public AlterTableSetStmt(TableName tableName, PartitionSpec partitionSpec) {
+    super(tableName);
+    partitionSpec_ = partitionSpec;
+    // Propagate the table name so the spec can resolve/authorize itself.
+    if (partitionSpec_ != null) partitionSpec_.setTableName(tableName);
+  }
+
+  public PartitionSpec getPartitionSpec() { return partitionSpec_; }
+
+  /**
+   * Performs the base ALTER TABLE checks, rejects HBase tables, and, if a
+   * partition is targeted, analyzes the partition spec requiring that the
+   * partition exists and that the user holds ALTER privileges on it.
+   */
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Table t = getTargetTable();
+    // TODO: Support ALTER TABLE SET on HBase tables. Requires validating changes
+    // to the SERDEPROPERTIES and TBLPROPERTIES to ensure the table metadata does not
+    // become invalid.
+    if (t instanceof HBaseTable) {
+      throw new AnalysisException("ALTER TABLE SET not currently supported on " +
+          "HBase tables.");
+    }
+
+    // Altering the table rather than the partition.
+    if (partitionSpec_ == null) return;
+
+    partitionSpec_.setPartitionShouldExist();
+    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
+    partitionSpec_.analyze(analyzer);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
new file mode 100644
index 0000000..da92267
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
@@ -0,0 +1,150 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.avro.SchemaParseException;
+import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
+
+import com.cloudera.impala.catalog.HdfsFileFormat;
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.*;
+import com.cloudera.impala.util.AvroSchemaParser;
+import com.cloudera.impala.util.AvroSchemaUtils;
+import com.cloudera.impala.util.MetaStoreUtil;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+
+/**
+* Represents an ALTER TABLE SET [PARTITION ('k1'='a', 'k2'='b'...)]
+* TBLPROPERTIES|SERDEPROPERTIES ('p1'='v1', ...) statement.
+*/
+public class AlterTableSetTblProperties extends AlterTableSetStmt {
+  // Whether the statement targets TBLPROPERTIES or SERDEPROPERTIES.
+  private final TTablePropertyType targetProperty_;
+  // Property key/value pairs; unescaped in the constructor.
+  private final HashMap<String, String> tblProperties_;
+
+  public AlterTableSetTblProperties(TableName tableName, PartitionSpec partitionSpec,
+      TTablePropertyType targetProperty, HashMap<String, String> tblProperties) {
+    super(tableName, partitionSpec);
+    Preconditions.checkNotNull(tblProperties);
+    Preconditions.checkNotNull(targetProperty);
+    targetProperty_ = targetProperty;
+    tblProperties_ = tblProperties;
+    // Mutates tblProperties_ in place to undo SQL string-literal escaping.
+    CreateTableStmt.unescapeProperties(tblProperties_);
+  }
+
+  public HashMap<String, String> getTblProperties() { return tblProperties_; }
+
+  /**
+   * Builds the thrift SET_TBL_PROPERTIES request, including the partition
+   * spec when the statement targets a single partition.
+   */
+  @Override
+  public TAlterTableParams toThrift() {
+   TAlterTableParams params = super.toThrift();
+   params.setAlter_type(TAlterTableType.SET_TBL_PROPERTIES);
+   TAlterTableSetTblPropertiesParams tblPropertyParams =
+       new TAlterTableSetTblPropertiesParams();
+   tblPropertyParams.setTarget(targetProperty_);
+   tblPropertyParams.setProperties(tblProperties_);
+   if (partitionSpec_ != null) {
+     tblPropertyParams.setPartition_spec(partitionSpec_.toThrift());
+   }
+   params.setSet_tbl_properties_params(tblPropertyParams);
+   return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+
+    // Enforce the HMS length limit on property keys/values.
+    MetaStoreUtil.checkShortPropertyMap("Property", tblProperties_);
+
+    // Check avro schema when it is set in avro.schema.url or avro.schema.literal to
+    // avoid potential metadata corruption (see IMPALA-2042).
+    // If both properties are set then only check avro.schema.literal and ignore
+    // avro.schema.url.
+    if (tblProperties_.containsKey(
+            AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()) ||
+        tblProperties_.containsKey(
+            AvroSerdeUtils.AvroTableProperties.SCHEMA_URL.getPropName())) {
+      analyzeAvroSchema(analyzer);
+    }
+
+    // Analyze 'skip.header.line.format' property.
+    analyzeSkipHeaderLineCount(getTargetTable(), tblProperties_);
+  }
+
+  /**
+   * Check that Avro schema provided in avro.schema.url or avro.schema.literal is valid
+   * Json and contains only supported Impala types. If both properties are set, then
+   * avro.schema.url is ignored.
+   */
+  private void analyzeAvroSchema(Analyzer analyzer)
+      throws AnalysisException {
+    List<Map<String, String>> schemaSearchLocations = Lists.newArrayList();
+    schemaSearchLocations.add(tblProperties_);
+
+    String avroSchema = AvroSchemaUtils.getAvroSchema(schemaSearchLocations);
+    avroSchema = Strings.nullToEmpty(avroSchema);
+    if (avroSchema.isEmpty()) {
+      throw new AnalysisException("Avro schema is null or empty: " +
+          table_.getFullName());
+    }
+
+    // Check if the schema is valid and is supported by Impala
+    try {
+      AvroSchemaParser.parse(avroSchema);
+    } catch (SchemaParseException e) {
+      throw new AnalysisException(String.format(
+          "Error parsing Avro schema for table '%s': %s", table_.getFullName(),
+          e.getMessage()));
+    }
+  }
+
+  /**
+   * Analyze the 'skip.header.line.count' property to make sure it is set to a valid
+   * value. It is looked up in 'tblProperties', which must not be null.
+   */
+  public static void analyzeSkipHeaderLineCount(Map<String, String> tblProperties)
+      throws AnalysisException {
+    analyzeSkipHeaderLineCount(null, tblProperties);
+  }
+
+  /**
+   * Analyze the 'skip.header.line.count' property to make sure it is set to a valid
+   * value. It is looked up in 'tblProperties', which must not be null. If 'table' is not
+   * null, then the method ensures that 'skip.header.line.count' is supported for its
+   * table type. If it is null, then this check is omitted.
+   */
+  public static void analyzeSkipHeaderLineCount(Table table,
+      Map<String, String> tblProperties) throws AnalysisException {
+    if (tblProperties.containsKey(HdfsTable.TBL_PROP_SKIP_HEADER_LINE_COUNT)) {
+      if (table != null && !(table instanceof HdfsTable)) {
+        // NOTE(review): String.format with no format arguments is redundant here.
+        throw new AnalysisException(String.format("Table property " +
+            "'skip.header.line.count' is only supported for HDFS tables."));
+      }
+      // parseSkipHeaderLineCount appends a message to 'error' on invalid values.
+      StringBuilder error = new StringBuilder();
+      HdfsTable.parseSkipHeaderLineCount(tblProperties, error);
+      if (error.length() > 0) throw new AnalysisException(error.toString());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
new file mode 100644
index 0000000..aebd009
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
@@ -0,0 +1,91 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.DataSourceTable;
+import com.cloudera.impala.catalog.KuduTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.catalog.View;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TTableName;
+import com.google.common.base.Preconditions;
+
+/**
+ * Base class for all ALTER TABLE statements.
+ */
+public abstract class AlterTableStmt extends StatementBase {
+  // Fully- or partially-qualified name of the target table; never null/empty.
+  protected final TableName tableName_;
+
+  // Set during analysis.
+  protected Table table_;
+
+  protected AlterTableStmt(TableName tableName) {
+    Preconditions.checkState(tableName != null && !tableName.isEmpty());
+    tableName_ = tableName;
+    table_ = null;
+  }
+
+  public String getTbl() { return tableName_.getTbl(); }
+
+  /**
+   * Can only be called after analysis, returns the parent database name of the target
+   * table for this ALTER TABLE statement.
+   */
+  public String getDb() {
+    return getTargetTable().getDb().getName();
+  }
+
+  /**
+   * Can only be called after analysis, returns the Table object of the target of this
+   * ALTER TABLE statement.
+   */
+  protected Table getTargetTable() {
+    Preconditions.checkNotNull(table_);
+    return table_;
+  }
+
+  /**
+   * Builds the common portion of the thrift request (the target table name);
+   * subclasses add their operation-specific parameters.
+   */
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = new TAlterTableParams();
+    params.setTable_name(new TTableName(getDb(), getTbl()));
+    return params;
+  }
+
+  /**
+   * Resolves the target table, requiring ALTER privileges, and rejects table
+   * types on which the concrete ALTER TABLE operation is not supported:
+   * Kudu tables (with a whitelist of allowed subclasses), views, and data
+   * source tables (except for setting column stats).
+   */
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    table_ = analyzer.getTable(tableName_, Privilege.ALTER);
+    if (table_ instanceof KuduTable
+        && !(this instanceof AlterTableSetTblProperties)
+        && !(this instanceof AlterTableSetColumnStats)
+        && !(this instanceof AlterTableOrViewRenameStmt)) {
+      throw new AnalysisException(String.format(
+          "ALTER TABLE not allowed on Kudu table: %s", table_.getFullName()));
+    }
+    if (table_ instanceof View) {
+      throw new AnalysisException(String.format(
+          "ALTER TABLE not allowed on a view: %s", table_.getFullName()));
+    }
+    if (table_ instanceof DataSourceTable
+        && !(this instanceof AlterTableSetColumnStats)) {
+      throw new AnalysisException(String.format(
+          "ALTER TABLE not allowed on a table produced by a data source: %s",
+          table_.getFullName()));
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
new file mode 100644
index 0000000..640b3a1
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.catalog.View;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.RuntimeEnv;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER VIEW AS statement.
+ */
+public class AlterViewStmt extends CreateOrAlterViewStmtBase {
+
+  public AlterViewStmt(TableName tableName, QueryStmt viewDefStmt) {
+    // ALTER VIEW has no IF NOT EXISTS clause, column definitions, or comment,
+    // hence the false/null arguments to the base constructor.
+    super(false, tableName, null, null, viewDefStmt);
+  }
+
+  /**
+   * Analyzes the new view definition, verifies the target is actually a view
+   * (requiring ALTER privileges on it), and recomputes the column/view
+   * definitions and, when enabled, the lineage graph.
+   */
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    // Enforce Hive column labels for view compatibility.
+    analyzer.setUseHiveColLabels(true);
+    viewDefStmt_.analyze(analyzer);
+
+    Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
+    dbName_ = analyzer.getTargetDbName(tableName_);
+    owner_ = analyzer.getUser().getName();
+
+    Table table = analyzer.getTable(tableName_, Privilege.ALTER);
+    Preconditions.checkNotNull(table);
+    if (!(table instanceof View)) {
+      throw new AnalysisException(String.format(
+          "ALTER VIEW not allowed on a table: %s.%s", dbName_, getTbl()));
+    }
+
+    createColumnAndViewDefs(analyzer);
+    if (RuntimeEnv.INSTANCE.computeLineage() || RuntimeEnv.INSTANCE.isTestEnv()) {
+      computeLineageGraph(analyzer);
+    }
+  }
+
+  /** Returns the SQL text of this statement, qualifying the view name if a db was given. */
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("ALTER VIEW ");
+    if (tableName_.getDb() != null) {
+      sb.append(tableName_.getDb() + ".");
+    }
+    sb.append(tableName_.getTbl());
+    sb.append(" AS " + viewDefStmt_.toSql());
+    return sb.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
new file mode 100644
index 0000000..006474d
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
@@ -0,0 +1,544 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.io.StringReader;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.authorization.AuthorizationChecker;
+import com.cloudera.impala.authorization.AuthorizationConfig;
+import com.cloudera.impala.authorization.AuthorizeableColumn;
+import com.cloudera.impala.authorization.AuthorizeableTable;
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.authorization.PrivilegeRequest;
+import com.cloudera.impala.catalog.AuthorizationException;
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.ImpaladCatalog;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.InternalException;
+import com.cloudera.impala.common.Pair;
+import com.cloudera.impala.thrift.TAccessEvent;
+import com.cloudera.impala.thrift.TLineageGraph;
+import com.cloudera.impala.thrift.TQueryCtx;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+/**
+ * Wrapper class for parser and analyzer.
+ */
+public class AnalysisContext {
+  private final static Logger LOG = LoggerFactory.getLogger(AnalysisContext.class);
+  private final ImpaladCatalog catalog_;
+  private final TQueryCtx queryCtx_;
+  private final AuthorizationConfig authzConfig_;
+
+  // Set in analyze()
+  private AnalysisResult analysisResult_;
+
+  /**
+   * Creates a context bound to a catalog snapshot, the query context of the
+   * statement being analyzed, and the authorization configuration used later
+   * by authorize().
+   */
+  public AnalysisContext(ImpaladCatalog catalog, TQueryCtx queryCtx,
+      AuthorizationConfig authzConfig) {
+    catalog_ = catalog;
+    queryCtx_ = queryCtx;
+    authzConfig_ = authzConfig;
+  }
+
+  static public class AnalysisResult {
+    private StatementBase stmt_;
+    private Analyzer analyzer_;
+    private CreateTableStmt tmpCreateTableStmt_;
+
+    // Statement-type predicates. Each tests the runtime type of the parsed
+    // statement; isExplainStmt() below instead checks the explain flag, so it
+    // can be true together with another predicate.
+    public boolean isAlterTableStmt() { return stmt_ instanceof AlterTableStmt; }
+    public boolean isAlterViewStmt() { return stmt_ instanceof AlterViewStmt; }
+    public boolean isComputeStatsStmt() { return stmt_ instanceof ComputeStatsStmt; }
+    public boolean isQueryStmt() { return stmt_ instanceof QueryStmt; }
+    public boolean isInsertStmt() { return stmt_ instanceof InsertStmt; }
+    public boolean isDropDbStmt() { return stmt_ instanceof DropDbStmt; }
+    public boolean isDropTableOrViewStmt() {
+      return stmt_ instanceof DropTableOrViewStmt;
+    }
+    public boolean isDropFunctionStmt() { return stmt_ instanceof DropFunctionStmt; }
+    public boolean isDropDataSrcStmt() { return stmt_ instanceof DropDataSrcStmt; }
+    public boolean isDropStatsStmt() { return stmt_ instanceof DropStatsStmt; }
+    public boolean isCreateTableLikeStmt() {
+      return stmt_ instanceof CreateTableLikeStmt;
+    }
+    public boolean isCreateViewStmt() { return stmt_ instanceof CreateViewStmt; }
+    public boolean isCreateTableAsSelectStmt() {
+      return stmt_ instanceof CreateTableAsSelectStmt;
+    }
+    public boolean isCreateTableStmt() { return stmt_ instanceof CreateTableStmt; }
+    public boolean isCreateDbStmt() { return stmt_ instanceof CreateDbStmt; }
+    public boolean isCreateUdfStmt() { return stmt_ instanceof CreateUdfStmt; }
+    public boolean isCreateUdaStmt() { return stmt_ instanceof CreateUdaStmt; }
+    public boolean isCreateDataSrcStmt() { return stmt_ instanceof CreateDataSrcStmt; }
+    public boolean isLoadDataStmt() { return stmt_ instanceof LoadDataStmt; }
+    public boolean isUseStmt() { return stmt_ instanceof UseStmt; }
+    public boolean isSetStmt() { return stmt_ instanceof SetStmt; }
+    public boolean isShowTablesStmt() { return stmt_ instanceof ShowTablesStmt; }
+    public boolean isShowDbsStmt() { return stmt_ instanceof ShowDbsStmt; }
+    public boolean isShowDataSrcsStmt() { return stmt_ instanceof ShowDataSrcsStmt; }
+    public boolean isShowStatsStmt() { return stmt_ instanceof ShowStatsStmt; }
+    public boolean isShowFunctionsStmt() { return stmt_ instanceof ShowFunctionsStmt; }
+    public boolean isShowCreateTableStmt() {
+      return stmt_ instanceof ShowCreateTableStmt;
+    }
+    public boolean isShowCreateFunctionStmt() {
+      return stmt_ instanceof ShowCreateFunctionStmt;
+    }
+    public boolean isShowFilesStmt() { return stmt_ instanceof ShowFilesStmt; }
+    public boolean isDescribeDbStmt() { return stmt_ instanceof DescribeDbStmt; }
+    public boolean isDescribeTableStmt() { return stmt_ instanceof DescribeTableStmt; }
+    public boolean isResetMetadataStmt() { return stmt_ instanceof ResetMetadataStmt; }
+    // Orthogonal to the type checks above: EXPLAIN is a flag on the statement.
+    public boolean isExplainStmt() { return stmt_.isExplain(); }
+    public boolean isShowRolesStmt() { return stmt_ instanceof ShowRolesStmt; }
+    public boolean isShowGrantRoleStmt() { return stmt_ instanceof ShowGrantRoleStmt; }
+    public boolean isCreateDropRoleStmt() { return stmt_ instanceof CreateDropRoleStmt; }
+    public boolean isGrantRevokeRoleStmt() {
+      return stmt_ instanceof GrantRevokeRoleStmt;
+    }
+    public boolean isGrantRevokePrivStmt() {
+      return stmt_ instanceof GrantRevokePrivStmt;
+    }
+    public boolean isTruncateStmt() { return stmt_ instanceof TruncateStmt; }
+    public boolean isUpdateStmt() { return stmt_ instanceof UpdateStmt; }
+    public UpdateStmt getUpdateStmt() { return (UpdateStmt) stmt_; }
+    public boolean isDeleteStmt() { return stmt_ instanceof DeleteStmt; }
+    public DeleteStmt getDeleteStmt() { return (DeleteStmt) stmt_; }
+
+    // True for statements served by the catalog/metadata path rather than by
+    // query execution.
+    public boolean isCatalogOp() {
+      return isUseStmt() || isViewMetadataStmt() || isDdlStmt();
+    }
+
+    // Statements that mutate catalog state (CREATE/DROP/ALTER/GRANT/etc.).
+    private boolean isDdlStmt() {
+      return isCreateTableLikeStmt() || isCreateTableStmt() ||
+          isCreateViewStmt() || isCreateDbStmt() || isDropDbStmt() ||
+          isDropTableOrViewStmt() || isResetMetadataStmt() || isAlterTableStmt() ||
+          isAlterViewStmt() || isComputeStatsStmt() || isCreateUdfStmt() ||
+          isCreateUdaStmt() || isDropFunctionStmt() || isCreateTableAsSelectStmt() ||
+          isCreateDataSrcStmt() || isDropDataSrcStmt() || isDropStatsStmt() ||
+          isCreateDropRoleStmt() || isGrantRevokeStmt() || isTruncateStmt();
+    }
+
+    // Statements that only read catalog metadata (SHOW/DESCRIBE).
+    private boolean isViewMetadataStmt() {
+      return isShowFilesStmt() || isShowTablesStmt() || isShowDbsStmt() ||
+          isShowFunctionsStmt() || isShowRolesStmt() || isShowGrantRoleStmt() ||
+          isShowCreateTableStmt() || isShowDataSrcsStmt() || isShowStatsStmt() ||
+          isDescribeTableStmt() || isDescribeDbStmt() || isShowCreateFunctionStmt();
+    }
+
+    private boolean isGrantRevokeStmt() {
+      return isGrantRevokeRoleStmt() || isGrantRevokePrivStmt();
+    }
+
+    // NOTE(review): only INSERT counts as DML here; UPDATE/DELETE/TRUNCATE are
+    // handled elsewhere — confirm against callers before extending.
+    public boolean isDmlStmt() {
+      return isInsertStmt();
+    }
+
+    // Typed accessors. Each asserts the statement type via Preconditions
+    // before casting, so a wrong-type call fails fast with
+    // IllegalStateException instead of ClassCastException.
+    public AlterTableStmt getAlterTableStmt() {
+      Preconditions.checkState(isAlterTableStmt());
+      return (AlterTableStmt) stmt_;
+    }
+
+    public AlterViewStmt getAlterViewStmt() {
+      Preconditions.checkState(isAlterViewStmt());
+      return (AlterViewStmt) stmt_;
+    }
+
+    public ComputeStatsStmt getComputeStatsStmt() {
+      Preconditions.checkState(isComputeStatsStmt());
+      return (ComputeStatsStmt) stmt_;
+    }
+
+    public CreateTableLikeStmt getCreateTableLikeStmt() {
+      Preconditions.checkState(isCreateTableLikeStmt());
+      return (CreateTableLikeStmt) stmt_;
+    }
+
+    public CreateViewStmt getCreateViewStmt() {
+      Preconditions.checkState(isCreateViewStmt());
+      return (CreateViewStmt) stmt_;
+    }
+
+    public CreateTableAsSelectStmt getCreateTableAsSelectStmt() {
+      Preconditions.checkState(isCreateTableAsSelectStmt());
+      return (CreateTableAsSelectStmt) stmt_;
+    }
+
+    public CreateTableStmt getCreateTableStmt() {
+      Preconditions.checkState(isCreateTableStmt());
+      return (CreateTableStmt) stmt_;
+    }
+
+    // No type check: this copy is only populated (in analyze()) when the
+    // parsed statement was a CTAS; otherwise it is null.
+    public CreateTableStmt getTmpCreateTableStmt() {
+      return tmpCreateTableStmt_;
+    }
+
+    public CreateDbStmt getCreateDbStmt() {
+      Preconditions.checkState(isCreateDbStmt());
+      return (CreateDbStmt) stmt_;
+    }
+
+    public CreateUdfStmt getCreateUdfStmt() {
+      Preconditions.checkState(isCreateUdfStmt());
+      return (CreateUdfStmt) stmt_;
+    }
+
+    /**
+     * Returns the parsed statement as a CreateUdaStmt.
+     * Bug fix: the precondition previously checked isCreateUdfStmt(), which
+     * is false for a CREATE AGGREGATE FUNCTION statement (CreateUdaStmt and
+     * CreateUdfStmt are distinct types), so a valid UDA statement tripped the
+     * checkState and a UDF statement would have passed it only to fail the
+     * cast below. Assert the UDA type instead, matching every other getter.
+     */
+    public CreateUdaStmt getCreateUdaStmt() {
+      Preconditions.checkState(isCreateUdaStmt());
+      return (CreateUdaStmt) stmt_;
+    }
+
+    // Typed accessors (continued); same checkState-then-cast pattern.
+    public DropDbStmt getDropDbStmt() {
+      Preconditions.checkState(isDropDbStmt());
+      return (DropDbStmt) stmt_;
+    }
+
+    public DropTableOrViewStmt getDropTableOrViewStmt() {
+      Preconditions.checkState(isDropTableOrViewStmt());
+      return (DropTableOrViewStmt) stmt_;
+    }
+
+    public TruncateStmt getTruncateStmt() {
+      Preconditions.checkState(isTruncateStmt());
+      return (TruncateStmt) stmt_;
+    }
+
+    public DropFunctionStmt getDropFunctionStmt() {
+      Preconditions.checkState(isDropFunctionStmt());
+      return (DropFunctionStmt) stmt_;
+    }
+
+    public LoadDataStmt getLoadDataStmt() {
+      Preconditions.checkState(isLoadDataStmt());
+      return (LoadDataStmt) stmt_;
+    }
+
+    public QueryStmt getQueryStmt() {
+      Preconditions.checkState(isQueryStmt());
+      return (QueryStmt) stmt_;
+    }
+
+    // Special case: for a CTAS, returns the INSERT embedded in the CTAS
+    // statement rather than casting stmt_ itself.
+    public InsertStmt getInsertStmt() {
+      if (isCreateTableAsSelectStmt()) {
+        return getCreateTableAsSelectStmt().getInsertStmt();
+      } else {
+        Preconditions.checkState(isInsertStmt());
+        return (InsertStmt) stmt_;
+      }
+    }
+
+    public UseStmt getUseStmt() {
+      Preconditions.checkState(isUseStmt());
+      return (UseStmt) stmt_;
+    }
+
+    public SetStmt getSetStmt() {
+      Preconditions.checkState(isSetStmt());
+      return (SetStmt) stmt_;
+    }
+
+    public ShowTablesStmt getShowTablesStmt() {
+      Preconditions.checkState(isShowTablesStmt());
+      return (ShowTablesStmt) stmt_;
+    }
+
+    public ShowDbsStmt getShowDbsStmt() {
+      Preconditions.checkState(isShowDbsStmt());
+      return (ShowDbsStmt) stmt_;
+    }
+
+    public ShowDataSrcsStmt getShowDataSrcsStmt() {
+      Preconditions.checkState(isShowDataSrcsStmt());
+      return (ShowDataSrcsStmt) stmt_;
+    }
+
+    public ShowStatsStmt getShowStatsStmt() {
+      Preconditions.checkState(isShowStatsStmt());
+      return (ShowStatsStmt) stmt_;
+    }
+
+    public ShowFunctionsStmt getShowFunctionsStmt() {
+      Preconditions.checkState(isShowFunctionsStmt());
+      return (ShowFunctionsStmt) stmt_;
+    }
+
+    public ShowFilesStmt getShowFilesStmt() {
+      Preconditions.checkState(isShowFilesStmt());
+      return (ShowFilesStmt) stmt_;
+    }
+
+    public DescribeDbStmt getDescribeDbStmt() {
+      Preconditions.checkState(isDescribeDbStmt());
+      return (DescribeDbStmt) stmt_;
+    }
+
+    public DescribeTableStmt getDescribeTableStmt() {
+      Preconditions.checkState(isDescribeTableStmt());
+      return (DescribeTableStmt) stmt_;
+    }
+
+    public ShowCreateTableStmt getShowCreateTableStmt() {
+      Preconditions.checkState(isShowCreateTableStmt());
+      return (ShowCreateTableStmt) stmt_;
+    }
+
+    public ShowCreateFunctionStmt getShowCreateFunctionStmt() {
+      Preconditions.checkState(isShowCreateFunctionStmt());
+      return (ShowCreateFunctionStmt) stmt_;
+    }
+
+    public StatementBase getStmt() { return stmt_; }
+    public Analyzer getAnalyzer() { return analyzer_; }
+    public Set<TAccessEvent> getAccessEvents() { return analyzer_.getAccessEvents(); }
+    // Statements containing subqueries must be rewritten (unnested) before
+    // planning, except view definitions, which are stored with their
+    // subqueries intact.
+    public boolean requiresRewrite() {
+      return analyzer_.containsSubquery() && !(stmt_ instanceof CreateViewStmt)
+          && !(stmt_ instanceof AlterViewStmt);
+    }
+    public TLineageGraph getThriftLineageGraph() {
+      return analyzer_.getThriftSerializedLineageGraph();
+    }
+  }
+
+  /**
+   * Parse and analyze 'stmt'. If 'stmt' is a nested query (i.e. query that
+   * contains subqueries), it is also rewritten by performing subquery unnesting.
+   * The transformed stmt is then re-analyzed in a new analysis context.
+   *
+   * The result of analysis can be retrieved by calling
+   * getAnalysisResult().
+   *
+   * @throws AnalysisException
+   *           On any other error, including parsing errors. Also thrown when any
+   *           missing tables are detected as a result of running analysis.
+   */
+  public void analyze(String stmt) throws AnalysisException {
+    // Convenience overload: analyze with a fresh Analyzer bound to this
+    // context's catalog, query context and authorization config.
+    Analyzer analyzer = new Analyzer(catalog_, queryCtx_, authzConfig_);
+    analyze(stmt, analyzer);
+  }
+
+  /**
+   * Parse and analyze 'stmt' using a specified Analyzer. If 'analyzer' is
+   * null, a fresh one is created from this context. On success the outcome is
+   * stored in analysisResult_; on failure analysisResult_ may be left
+   * partially populated.
+   */
+  public void analyze(String stmt, Analyzer analyzer) throws AnalysisException {
+    SqlScanner input = new SqlScanner(new StringReader(stmt));
+    SqlParser parser = new SqlParser(input);
+    try {
+      analysisResult_ = new AnalysisResult();
+      analysisResult_.analyzer_ = analyzer;
+      // Fall back to a fresh Analyzer when the caller passed null.
+      if (analysisResult_.analyzer_ == null) {
+        analysisResult_.analyzer_ = new Analyzer(catalog_, queryCtx_, authzConfig_);
+      }
+      analysisResult_.stmt_ = (StatementBase) parser.parse().value;
+      // Empty input parses to a null statement; nothing further to do.
+      if (analysisResult_.stmt_ == null) return;
+
+      // For CTAS, we copy the create statement in case we have to create a new CTAS
+      // statement after a query rewrite.
+      if (analysisResult_.stmt_ instanceof CreateTableAsSelectStmt) {
+        analysisResult_.tmpCreateTableStmt_ =
+            ((CreateTableAsSelectStmt)analysisResult_.stmt_).getCreateStmt().clone();
+      }
+
+      analysisResult_.stmt_.analyze(analysisResult_.analyzer_);
+      // Remember the explain flag: the rewritten statement below is rebuilt
+      // and must have it re-applied.
+      boolean isExplain = analysisResult_.isExplainStmt();
+
+      // Check if we need to rewrite the statement.
+      if (analysisResult_.requiresRewrite()) {
+        StatementBase rewrittenStmt = StmtRewriter.rewrite(analysisResult_);
+        // Re-analyze the rewritten statement in a brand-new result/analyzer to
+        // avoid stale state from the first analysis pass.
+        Preconditions.checkNotNull(rewrittenStmt);
+        analysisResult_ = new AnalysisResult();
+        analysisResult_.analyzer_ = new Analyzer(catalog_, queryCtx_, authzConfig_);
+        analysisResult_.stmt_ = rewrittenStmt;
+        analysisResult_.stmt_.analyze(analysisResult_.analyzer_);
+        LOG.trace("rewrittenStmt: " + rewrittenStmt.toSql());
+        if (isExplain) analysisResult_.stmt_.setIsExplain();
+        // Rewriting must be a fixed point: one pass removes all subqueries.
+        Preconditions.checkState(!analysisResult_.requiresRewrite());
+      }
+    } catch (AnalysisException e) {
+      // Don't wrap AnalysisExceptions in another AnalysisException
+      throw e;
+    } catch (Exception e) {
+      // Parse errors and any unexpected failure surface as an
+      // AnalysisException carrying the parser's error message for 'stmt'.
+      throw new AnalysisException(parser.getErrorMsg(stmt), e);
+    }
+  }
+
+  /**
+   * Authorize an analyzed statement.
+   * analyze() must have already been called. Throws an AuthorizationException if the
+   * user doesn't have sufficient privileges to run this statement.
+   */
+  public void authorize(AuthorizationChecker authzChecker)
+      throws AuthorizationException, InternalException {
+    Preconditions.checkNotNull(analysisResult_);
+    Analyzer analyzer = getAnalyzer();
+    // Process statements for which column-level privilege requests may be registered
+    // except for DESCRIBE TABLE or REFRESH/INVALIDATE statements
+    if (analysisResult_.isQueryStmt() || analysisResult_.isInsertStmt() ||
+        analysisResult_.isUpdateStmt() || analysisResult_.isDeleteStmt() ||
+        analysisResult_.isCreateTableAsSelectStmt() ||
+        analysisResult_.isCreateViewStmt() || analysisResult_.isAlterViewStmt()) {
+      // Map of table name to a list of privilege requests associated with that table.
+      // These include both table-level and column-level privilege requests.
+      Map<String, List<PrivilegeRequest>> tablePrivReqs = Maps.newHashMap();
+      // Privilege requests that are not column or table-level.
+      List<PrivilegeRequest> otherPrivReqs = Lists.newArrayList();
+      // Group the registered privilege requests based on the table they reference.
+      for (PrivilegeRequest privReq: analyzer.getPrivilegeReqs()) {
+        String tableName = privReq.getAuthorizeable().getFullTableName();
+        if (tableName == null) {
+          otherPrivReqs.add(privReq);
+        } else {
+          List<PrivilegeRequest> requests = tablePrivReqs.get(tableName);
+          if (requests == null) {
+            requests = Lists.newArrayList();
+            tablePrivReqs.put(tableName, requests);
+          }
+          // The table-level SELECT must be the first table-level request, and it
+          // must precede all column-level privilege requests.
+          // authorizeTableAccess() below depends on this ordering invariant.
+          Preconditions.checkState((requests.isEmpty() ||
+              !(privReq.getAuthorizeable() instanceof AuthorizeableColumn)) ||
+              (requests.get(0).getAuthorizeable() instanceof AuthorizeableTable &&
+              requests.get(0).getPrivilege() == Privilege.SELECT));
+          requests.add(privReq);
+        }
+      }
+
+      // Check any non-table, non-column privilege requests first.
+      for (PrivilegeRequest request: otherPrivReqs) {
+        authorizePrivilegeRequest(authzChecker, request);
+      }
+
+      // Authorize table accesses, one table at a time, by considering both table and
+      // column-level privilege requests.
+      for (Map.Entry<String, List<PrivilegeRequest>> entry: tablePrivReqs.entrySet()) {
+        authorizeTableAccess(authzChecker, entry.getValue());
+      }
+    } else {
+      // All other statement types: no column-level requests are expected
+      // except for DESCRIBE TABLE and REFRESH/INVALIDATE.
+      for (PrivilegeRequest privReq: analyzer.getPrivilegeReqs()) {
+        Preconditions.checkState(
+            !(privReq.getAuthorizeable() instanceof AuthorizeableColumn) ||
+            analysisResult_.isDescribeTableStmt() ||
+            analysisResult_.isResetMetadataStmt());
+        authorizePrivilegeRequest(authzChecker, privReq);
+      }
+    }
+
+    // Check any masked requests. These carry a custom error string (second
+    // pair element) so the real reason for the denial is not revealed.
+    for (Pair<PrivilegeRequest, String> maskedReq: analyzer.getMaskedPrivilegeReqs()) {
+      if (!authzChecker.hasAccess(analyzer.getUser(), maskedReq.first)) {
+        throw new AuthorizationException(maskedReq.second);
+      }
+    }
+  }
+
+  /**
+   * Authorize a privilege request.
+   * Throws an AuthorizationException if the user doesn't have sufficient privileges for
+   * this request. Also, checks if the request references a system database.
+   */
+  private void authorizePrivilegeRequest(AuthorizationChecker authzChecker,
+    PrivilegeRequest request) throws AuthorizationException, InternalException {
+    Preconditions.checkNotNull(request);
+    String dbName = null;
+    if (request.getAuthorizeable() != null) {
+      dbName = request.getAuthorizeable().getDbName();
+    }
+    // If this is a system database, some actions should always be allowed
+    // or disabled, regardless of what is in the auth policy.
+    // checkSystemDbAccess() either returns true (always allowed, skip the
+    // policy check) or throws (always disallowed).
+    if (dbName != null && checkSystemDbAccess(dbName, request.getPrivilege())) {
+      return;
+    }
+    authzChecker.checkAccess(getAnalyzer().getUser(), request);
+  }
+
+  /**
+   * Authorize a list of privilege requests associated with a single table.
+   * It checks if the user has sufficient table-level privileges and if that is
+   * not the case, it falls back on checking column-level privileges, if any. This
+   * function requires 'SELECT' requests to be ordered by table and then by column
+   * privilege requests. Throws an AuthorizationException if the user doesn't have
+   * sufficient privileges.
+   */
+  private void authorizeTableAccess(AuthorizationChecker authzChecker,
+      List<PrivilegeRequest> requests)
+      throws AuthorizationException, InternalException {
+    Preconditions.checkState(!requests.isEmpty());
+    Analyzer analyzer = getAnalyzer();
+    // Starts true so column requests are skipped until a table-level SELECT
+    // check actually fails.
+    boolean hasTableSelectPriv = true;
+    boolean hasColumnSelectPriv = false;
+    for (PrivilegeRequest request: requests) {
+      if (request.getAuthorizeable() instanceof AuthorizeableTable) {
+        try {
+          authorizePrivilegeRequest(authzChecker, request);
+        } catch (AuthorizationException e) {
+          // Authorization fails if we fail to authorize any table-level request that is
+          // not a SELECT privilege (e.g. INSERT).
+          if (request.getPrivilege() != Privilege.SELECT) throw e;
+          hasTableSelectPriv = false;
+        }
+      } else {
+        Preconditions.checkState(
+            request.getAuthorizeable() instanceof AuthorizeableColumn);
+        // Column checks only matter once the table-level SELECT has failed.
+        if (hasTableSelectPriv) continue;
+        if (authzChecker.hasAccess(analyzer.getUser(), request)) {
+          hasColumnSelectPriv = true;
+          continue;
+        }
+        // Make sure we don't reveal any column names in the error message.
+        throw new AuthorizationException(String.format("User '%s' does not have " +
+          "privileges to execute '%s' on: %s", analyzer.getUser().getName(),
+          request.getPrivilege().toString(),
+          request.getAuthorizeable().getFullTableName()));
+      }
+    }
+    // Neither table-level nor any column-level SELECT succeeded.
+    if (!hasTableSelectPriv && !hasColumnSelectPriv) {
+       throw new AuthorizationException(String.format("User '%s' does not have " +
+          "privileges to execute 'SELECT' on: %s", analyzer.getUser().getName(),
+          requests.get(0).getAuthorizeable().getFullTableName()));
+    }
+  }
+
+  /**
+   * System-database gate: returns true when 'dbName' names a system database
+   * and 'privilege' is a read-only action (VIEW_METADATA or ANY) that is
+   * always permitted on it; throws an AuthorizationException for any other
+   * action on a system database. Returns false for non-system or unknown
+   * databases, in which case the normal policy check applies.
+   */
+  private boolean checkSystemDbAccess(String dbName, Privilege privilege)
+      throws AuthorizationException {
+    Db db = catalog_.getDb(dbName);
+    if (db == null || !db.isSystemDb()) return false;
+    if (privilege == Privilege.VIEW_METADATA || privilege == Privilege.ANY) {
+      return true;
+    }
+    throw new AuthorizationException("Cannot modify system database.");
+  }
+
+  // Result of the most recent analyze() call; null before the first call.
+  public AnalysisResult getAnalysisResult() { return analysisResult_; }
+  public Analyzer getAnalyzer() { return getAnalysisResult().getAnalyzer(); }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java b/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java
new file mode 100644
index 0000000..9abd82d
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java
@@ -0,0 +1,839 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.analysis.AnalyticWindow.Boundary;
+import com.cloudera.impala.analysis.AnalyticWindow.BoundaryType;
+import com.cloudera.impala.catalog.AggregateFunction;
+import com.cloudera.impala.catalog.Function;
+import com.cloudera.impala.catalog.ScalarType;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.InternalException;
+import com.cloudera.impala.common.TreeNode;
+import com.cloudera.impala.service.FeSupport;
+import com.cloudera.impala.thrift.TColumnValue;
+import com.cloudera.impala.thrift.TExprNode;
+import com.cloudera.impala.util.TColumnValueUtil;
+import com.google.common.base.Joiner;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Representation of an analytic function call with OVER clause.
+ * All "subexpressions" (such as the actual function call parameters as well as the
+ * partition/ordering exprs, etc.) are embedded as children in order to allow expr
+ * substitution:
+ *   function call params: child 0 .. #params
+ *   partition exprs: children #params + 1 .. #params + #partition-exprs
+ *   ordering exprs:
+ *     children #params + #partition-exprs + 1 ..
+ *       #params + #partition-exprs + #order-by-elements
+ *   exprs in windowing clause: remaining children
+ *
+ * Note that it's wrong to embed the FunctionCallExpr itself as a child,
+ * because in 'COUNT(..) OVER (..)' the 'COUNT(..)' is not part of a standard aggregate
+ * computation and must not be substituted as such. However, the parameters of the
+ * analytic function call might reference the output of an aggregate computation
+ * and need to be substituted as such; example: COUNT(COUNT(..)) OVER (..)
+ */
+public class AnalyticExpr extends Expr {
+  private final static Logger LOG = LoggerFactory.getLogger(AnalyticExpr.class);
+
+  private FunctionCallExpr fnCall_;
+  private final List<Expr> partitionExprs_;
+  // These elements are modified to point to the corresponding child exprs to keep them
+  // in sync through expr substitutions.
+  private List<OrderByElement> orderByElements_ = Lists.newArrayList();
+  private AnalyticWindow window_;
+
+  // If set, requires the window to be set to null in resetAnalysisState(). Required for
+  // proper substitution/cloning because standardization may set a window that is illegal
+  // in SQL, and hence, will fail analysis().
+  private boolean resetWindow_ = false;
+
+  // SQL string of this AnalyticExpr before standardization. Returned in toSqlImpl().
+  private String sqlString_;
+
+  // Names of the built-in analytic functions this class special-cases.
+  // Declared final: they are constants and were previously mutable statics.
+  private static final String LEAD = "lead";
+  private static final String LAG = "lag";
+  private static final String FIRST_VALUE = "first_value";
+  private static final String LAST_VALUE = "last_value";
+  private static final String FIRST_VALUE_IGNORE_NULLS = "first_value_ignore_nulls";
+  private static final String LAST_VALUE_IGNORE_NULLS = "last_value_ignore_nulls";
+  private static final String RANK = "rank";
+  private static final String DENSERANK = "dense_rank";
+  private static final String ROWNUMBER = "row_number";
+  private static final String MIN = "min";
+  private static final String MAX = "max";
+  private static final String PERCENT_RANK = "percent_rank";
+  private static final String CUME_DIST = "cume_dist";
+  private static final String NTILE = "ntile";
+
+  // Internal function used to implement FIRST_VALUE with a window rewrite and
+  // additional null handling in the backend.
+  // NOTE(review): left non-final because it is public; external writes cannot
+  // be ruled out from this file alone — confirm and make final if possible.
+  public static String FIRST_VALUE_REWRITE = "first_value_rewrite";
+
+  /**
+   * Creates an analytic expr from its parsed components. 'partitionExprs',
+   * 'orderByElements' and 'window' may each be null/absent.
+   */
+  public AnalyticExpr(FunctionCallExpr fnCall, List<Expr> partitionExprs,
+      List<OrderByElement> orderByElements, AnalyticWindow window) {
+    Preconditions.checkNotNull(fnCall);
+    fnCall_ = fnCall;
+    // Normalize a null partition list to empty so later code needn't check.
+    partitionExprs_ = partitionExprs != null ? partitionExprs : new ArrayList<Expr>();
+    if (orderByElements != null) orderByElements_.addAll(orderByElements);
+    window_ = window;
+    // Embed all subexpressions as children (see class comment for layout).
+    setChildren();
+  }
+
+  /**
+   * clone() c'tor. Deep-copies the function call, order-by elements,
+   * partition exprs and window so the copy shares no mutable state with
+   * 'other', then rebuilds the children list from the copies.
+   */
+  protected AnalyticExpr(AnalyticExpr other) {
+    super(other);
+    fnCall_ = (FunctionCallExpr) other.fnCall_.clone();
+    for (OrderByElement e: other.orderByElements_) {
+      orderByElements_.add(e.clone());
+    }
+    partitionExprs_ = Expr.cloneList(other.partitionExprs_);
+    window_ = (other.window_ != null ? other.window_.clone() : null);
+    resetWindow_ = other.resetWindow_;
+    sqlString_ = other.sqlString_;
+    setChildren();
+  }
+
+  // Simple accessors for the analytic expr's components.
+  public FunctionCallExpr getFnCall() { return fnCall_; }
+  public List<Expr> getPartitionExprs() { return partitionExprs_; }
+  public List<OrderByElement> getOrderByElements() { return orderByElements_; }
+  public AnalyticWindow getWindow() { return window_; }
+
+  @Override
+  public boolean equals(Object obj) {
+    // Equality covers the function call, the window (null-safely) and the
+    // order-by elements, on top of the base Expr comparison.
+    // NOTE(review): partition exprs are compared via the children in
+    // super.equals() — confirm against Expr.equals().
+    if (!super.equals(obj)) return false;
+    AnalyticExpr other = (AnalyticExpr) obj;
+    return fnCall_.equals(other.fnCall_)
+        && (window_ == null ? other.window_ == null : window_.equals(other.window_))
+        && orderByElements_.equals(other.orderByElements_);
+  }
+
+  /**
+   * Analytic exprs cannot be constant.
+   */
+  @Override
+  public boolean isConstant() { return false; }
+
+  // Delegates to the copy constructor above.
+  @Override
+  public Expr clone() { return new AnalyticExpr(this); }
+
+  @Override
+  public String toSqlImpl() {
+    // Prefer the pre-standardization SQL captured during analysis, since
+    // standardization may rewrite the window into a form that is not legal
+    // SQL (see resetWindow_).
+    if (sqlString_ != null) return sqlString_;
+    // Collect the OVER-clause pieces that are present and join them with a
+    // single space, matching "fn(...) OVER (PARTITION BY ... ORDER BY ... win)".
+    List<String> overParts = Lists.newArrayList();
+    if (!partitionExprs_.isEmpty()) {
+      overParts.add("PARTITION BY " + Expr.toSql(partitionExprs_));
+    }
+    if (!orderByElements_.isEmpty()) {
+      List<String> orderByStrings = Lists.newArrayList();
+      for (OrderByElement e: orderByElements_) orderByStrings.add(e.toSql());
+      overParts.add("ORDER BY " + Joiner.on(", ").join(orderByStrings));
+    }
+    if (window_ != null) overParts.add(window_.toSql());
+    return fnCall_.toSql() + " OVER (" + Joiner.on(" ").join(overParts) + ")";
+  }
+
+  @Override
+  public String debugString() {
+    return Objects.toStringHelper(this)
+        .add("fn", getFnCall())
+        .add("window", window_)
+        .addValue(super.debugString())
+        .toString();
+  }
+
+  // Intentionally empty: presumably an AnalyticExpr is substituted away
+  // (e.g. by a slot reference) before plan serialization and is never sent
+  // to the backend directly — TODO confirm against the planner.
+  @Override
+  protected void toThrift(TExprNode msg) {
+  }
+
+  // True if 'fn' is an aggregate function usable in an analytic context.
+  private static boolean isAnalyticFn(Function fn) {
+    return fn instanceof AggregateFunction
+        && ((AggregateFunction) fn).isAnalyticFn();
+  }
+
+  // True if 'fn' is an analytic function with the given name.
+  private static boolean isAnalyticFn(Function fn, String fnName) {
+    return isAnalyticFn(fn) && fn.functionName().equals(fnName);
+  }
+
+  // True if 'fn' is an aggregate function usable in a standard (non-analytic)
+  // aggregation.
+  public static boolean isAggregateFn(Function fn) {
+    return fn instanceof AggregateFunction
+        && ((AggregateFunction) fn).isAggregateFn();
+  }
+
+  // Predicates for the specific built-in analytic functions that receive
+  // special handling (rewrites or window restrictions).
+  public static boolean isPercentRankFn(Function fn) {
+    return isAnalyticFn(fn, PERCENT_RANK);
+  }
+
+  public static boolean isCumeDistFn(Function fn) {
+    return isAnalyticFn(fn, CUME_DIST);
+  }
+
+  public static boolean isNtileFn(Function fn) {
+    return isAnalyticFn(fn, NTILE);
+  }
+
+  // lead()/lag() take an offset argument.
+  static private boolean isOffsetFn(Function fn) {
+    return isAnalyticFn(fn, LEAD) || isAnalyticFn(fn, LAG);
+  }
+
+  static private boolean isMinMax(Function fn) {
+    return isAnalyticFn(fn, MIN) || isAnalyticFn(fn, MAX);
+  }
+
+  // rank()/dense_rank()/row_number().
+  static private boolean isRankingFn(Function fn) {
+    return isAnalyticFn(fn, RANK) || isAnalyticFn(fn, DENSERANK) ||
+        isAnalyticFn(fn, ROWNUMBER);
+  }
+
+  /**
+   * Rewrite the following analytic functions:
+   * percent_rank(), cume_dist() and ntile()
+   *
+   * Returns a new Expr if the analytic expr is rewritten, returns null if it's not one
+   * that we want to rewrite.
+   */
+  public static Expr rewrite(AnalyticExpr analyticExpr) {
+    Function fn = analyticExpr.getFnCall().getFn();
+    if (AnalyticExpr.isPercentRankFn(fn)) {
+      return createPercentRank(analyticExpr);
+    } else if (AnalyticExpr.isCumeDistFn(fn)) {
+      return createCumeDist(analyticExpr);
+    } else if (AnalyticExpr.isNtileFn(fn)) {
+      return createNtile(analyticExpr);
+    }
+    return null;
+  }
+
+  /**
+   * Rewrite percent_rank() to the following:
+   *
+   * percent_rank() over([partition by clause] order by clause)
+   *    = (Count == 1) ? 0:(Rank - 1)/(Count - 1)
+   * where,
+   *  Rank = rank() over([partition by clause] order by clause)
+   *  Count = count() over([partition by clause])
+   */
+  private static Expr createPercentRank(AnalyticExpr analyticExpr) {
+    Preconditions.checkState(
+        AnalyticExpr.isPercentRankFn(analyticExpr.getFnCall().getFn()));
+
+    NumericLiteral zero = new NumericLiteral(BigInteger.valueOf(0), ScalarType.BIGINT);
+    NumericLiteral one = new NumericLiteral(BigInteger.valueOf(1), ScalarType.BIGINT);
+    AnalyticExpr countExpr = create("count", analyticExpr, false, false);
+    AnalyticExpr rankExpr = create("rank", analyticExpr, true, false);
+
+    ArithmeticExpr arithmeticRewrite =
+      new ArithmeticExpr(ArithmeticExpr.Operator.DIVIDE,
+        new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, rankExpr, one),
+        new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, countExpr, one));
+
+    List<Expr> ifParams = Lists.newArrayList();
+    ifParams.add(
+      new BinaryPredicate(BinaryPredicate.Operator.EQ, one, countExpr));
+    ifParams.add(zero);
+    ifParams.add(arithmeticRewrite);
+    FunctionCallExpr resultantRewrite = new FunctionCallExpr("if", ifParams);
+
+    return resultantRewrite;
+  }
+
+  /**
+   * Rewrite cume_dist() to the following:
+   *
+   * cume_dist() over([partition by clause] order by clause)
+   *    = ((Count - Rank) + 1)/Count
+   * where,
+   *  Rank = rank() over([partition by clause] order by clause DESC)
+   *  Count = count() over([partition by clause])
+   */
+  private static Expr createCumeDist(AnalyticExpr analyticExpr) {
+    Preconditions.checkState(
+        AnalyticExpr.isCumeDistFn(analyticExpr.getFnCall().getFn()));
+    AnalyticExpr rankExpr = create("rank", analyticExpr, true, true);
+    AnalyticExpr countExpr = create("count", analyticExpr, false, false);
+    NumericLiteral one = new NumericLiteral(BigInteger.valueOf(1), ScalarType.BIGINT);
+    ArithmeticExpr arithmeticRewrite =
+        new ArithmeticExpr(ArithmeticExpr.Operator.DIVIDE,
+          new ArithmeticExpr(ArithmeticExpr.Operator.ADD,
+            new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, countExpr, rankExpr),
+          one),
+        countExpr);
+    return arithmeticRewrite;
+  }
+
+  /**
+   * Rewrite ntile() to the following:
+   *
+   * ntile(B) over([partition by clause] order by clause)
+   *    = floor(min(Count, B) * (RowNumber - 1)/Count) + 1
+   * where,
+   *  RowNumber = row_number() over([partition by clause] order by clause)
+   *  Count = count() over([partition by clause])
+   */
+  private static Expr createNtile(AnalyticExpr analyticExpr) {
+    Preconditions.checkState(
+        AnalyticExpr.isNtileFn(analyticExpr.getFnCall().getFn()));
+    Expr bucketExpr = analyticExpr.getChild(0);
+    AnalyticExpr rowNumExpr = create("row_number", analyticExpr, true, false);
+    AnalyticExpr countExpr = create("count", analyticExpr, false, false);
+
+    List<Expr> ifParams = Lists.newArrayList();
+    ifParams.add(
+        new BinaryPredicate(BinaryPredicate.Operator.LT, bucketExpr, countExpr));
+    ifParams.add(bucketExpr);
+    ifParams.add(countExpr);
+
+    NumericLiteral one = new NumericLiteral(BigInteger.valueOf(1), ScalarType.BIGINT);
+    ArithmeticExpr minMultiplyRowMinusOne =
+        new ArithmeticExpr(ArithmeticExpr.Operator.MULTIPLY,
+          new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, rowNumExpr, one),
+          new FunctionCallExpr("if", ifParams));
+    ArithmeticExpr divideAddOne =
+        new ArithmeticExpr(ArithmeticExpr.Operator.ADD,
+          new ArithmeticExpr(ArithmeticExpr.Operator.INT_DIVIDE,
+            minMultiplyRowMinusOne, countExpr),
+        one);
+    return divideAddOne;
+  }
+
+  /**
+   * Create a new Analytic Expr and associate it with a new function.
+   * Takes a reference analytic expression and clones the partition expressions and the
+   * order by expressions if 'copyOrderBy' is set and optionally reverses it if
+   * 'reverseOrderBy' is set. The new function that it will be associated with is
+   * specified by fnName.
+   */
+  private static AnalyticExpr create(String fnName,
+      AnalyticExpr referenceExpr, boolean copyOrderBy, boolean reverseOrderBy) {
+    FunctionCallExpr fnExpr = new FunctionCallExpr(fnName, new ArrayList<Expr>());
+    fnExpr.setIsAnalyticFnCall(true);
+    List<OrderByElement> orderByElements = null;
+    if (copyOrderBy) {
+      if (reverseOrderBy) {
+        orderByElements = OrderByElement.reverse(referenceExpr.getOrderByElements());
+      } else {
+        orderByElements = Lists.newArrayList();
+        for (OrderByElement elem: referenceExpr.getOrderByElements()) {
+          orderByElements.add(elem.clone());
+        }
+      }
+    }
+    AnalyticExpr analyticExpr = new AnalyticExpr(fnExpr,
+        Expr.cloneList(referenceExpr.getPartitionExprs()), orderByElements, null);
+    return analyticExpr;
+  }
+
+  /**
+   * Checks that the value expr of an offset boundary of a RANGE window is compatible
+   * with orderingExprs (and that there's only a single ordering expr).
+   */
+  private void checkRangeOffsetBoundaryExpr(AnalyticWindow.Boundary boundary)
+      throws AnalysisException {
+    Preconditions.checkState(boundary.getType().isOffset());
+    if (orderByElements_.size() > 1) {
+      throw new AnalysisException("Only one ORDER BY expression allowed if used with "
+          + "a RANGE window with PRECEDING/FOLLOWING: " + toSql());
+    }
+    Expr rangeExpr = boundary.getExpr();
+    if (!Type.isImplicitlyCastable(
+        rangeExpr.getType(), orderByElements_.get(0).getExpr().getType(), false)) {
+      throw new AnalysisException(
+          "The value expression of a PRECEDING/FOLLOWING clause of a RANGE window must "
+            + "be implicitly convertable to the ORDER BY expression's type: "
+            + rangeExpr.toSql() + " cannot be implicitly converted to "
+            + orderByElements_.get(0).getExpr().getType().toSql());
+    }
+  }
+
+  /**
+   * Checks offset of lag()/lead().
+   */
+  void checkOffset(Analyzer analyzer) throws AnalysisException {
+    Preconditions.checkState(isOffsetFn(getFnCall().getFn()));
+    Preconditions.checkState(getFnCall().getChildren().size() > 1);
+    Expr offset = getFnCall().getChild(1);
+    Preconditions.checkState(offset.getType().isIntegerType());
+    boolean isPosConstant = true;
+    if (!offset.isConstant()) {
+      isPosConstant = false;
+    } else {
+      try {
+        TColumnValue val = FeSupport.EvalConstExpr(offset, analyzer.getQueryCtx());
+        if (TColumnValueUtil.getNumericVal(val) <= 0) isPosConstant = false;
+      } catch (InternalException exc) {
+        throw new AnalysisException(
+            "Couldn't evaluate LEAD/LAG offset: " + exc.getMessage());
+      }
+    }
+    if (!isPosConstant) {
+      throw new AnalysisException(
+          "The offset parameter of LEAD/LAG must be a constant positive integer: "
+            + getFnCall().toSql());
+    }
+  }
+
  /**
   * Analyzes the analytic expression: analyzes the wrapped function call, validates
   * the PARTITION BY / ORDER BY clauses and the window, rejects illegal compositions
   * (DISTINCT, IGNORE NULLS on unsupported functions, nesting), then standardizes the
   * expression into the canonical form expected by the backend. Order of the checks
   * matters: error messages reference state that later steps (standardize) rewrite.
   */
  @Override
  public void analyze(Analyzer analyzer) throws AnalysisException {
    if (isAnalyzed_) return;
    fnCall_.analyze(analyzer);
    super.analyze(analyzer);
    type_ = getFnCall().getType();

    // PARTITION BY exprs must be non-constant, non-complex.
    for (Expr e: partitionExprs_) {
      if (e.isConstant()) {
        throw new AnalysisException(
            "Expressions in the PARTITION BY clause must not be constant: "
              + e.toSql() + " (in " + toSql() + ")");
      } else if (e.getType().isComplexType()) {
        throw new AnalysisException(String.format("PARTITION BY expression '%s' with " +
            "complex type '%s' is not supported.", e.toSql(),
            e.getType().toSql()));
      }
    }
    // Same restrictions for ORDER BY exprs.
    for (OrderByElement e: orderByElements_) {
      if (e.getExpr().isConstant()) {
        throw new AnalysisException(
            "Expressions in the ORDER BY clause must not be constant: "
              + e.getExpr().toSql() + " (in " + toSql() + ")");
      } else if (e.getExpr().getType().isComplexType()) {
        throw new AnalysisException(String.format("ORDER BY expression '%s' with " +
            "complex type '%s' is not supported.", e.getExpr().toSql(),
            e.getExpr().getType().toSql()));
      }
    }

    if (getFnCall().getParams().isDistinct()) {
      throw new AnalysisException(
          "DISTINCT not allowed in analytic function: " + getFnCall().toSql());
    }

    // IGNORE NULLS is only accepted by first_value()/last_value().
    if (getFnCall().getParams().isIgnoreNulls()) {
      String fnName = getFnCall().getFnName().getFunction();
      if (!fnName.equals(LAST_VALUE) && !fnName.equals(FIRST_VALUE)) {
        throw new AnalysisException("Function " + fnName.toUpperCase()
            + " does not accept the keyword IGNORE NULLS.");
      }
    }

    // check for correct composition of analytic expr
    Function fn = getFnCall().getFn();
    if (!(fn instanceof AggregateFunction)) {
        throw new AnalysisException(
            "OVER clause requires aggregate or analytic function: "
              + getFnCall().toSql());
    }

    // check for non-analytic aggregate functions
    if (!isAnalyticFn(fn)) {
      throw new AnalysisException(
          String.format("Aggregate function '%s' not supported with OVER clause.",
              getFnCall().toSql()));
    }

    // Restrictions specific to pure analytic (non-aggregate) functions such as
    // rank(), lead()/lag() and ntile().
    if (isAnalyticFn(fn) && !isAggregateFn(fn)) {
      if (orderByElements_.isEmpty()) {
        throw new AnalysisException(
            "'" + getFnCall().toSql() + "' requires an ORDER BY clause");
      }
      if ((isRankingFn(fn) || isOffsetFn(fn)) && window_ != null) {
        throw new AnalysisException(
            "Windowing clause not allowed with '" + getFnCall().toSql() + "'");
      }
      if (isOffsetFn(fn) && getFnCall().getChildren().size() > 1) {
        checkOffset(analyzer);
        // check the default, which needs to be a constant at the moment
        // TODO: remove this check when the backend can handle non-constants
        if (getFnCall().getChildren().size() > 2) {
          if (!getFnCall().getChild(2).isConstant()) {
            throw new AnalysisException(
                "The default parameter (parameter 3) of LEAD/LAG must be a constant: "
                  + getFnCall().toSql());
          }
        }
      }
      if (isNtileFn(fn)) {
        // TODO: IMPALA-2171:Remove this when ntile() can handle a non-constant argument.
        if (!getFnCall().getChild(0).isConstant()) {
          throw new AnalysisException("NTILE() requires a constant argument");
        }
        // Check if argument value is zero or negative and throw an exception if found.
        try {
          TColumnValue bucketValue =
              FeSupport.EvalConstExpr(getFnCall().getChild(0), analyzer.getQueryCtx());
          Long arg = bucketValue.getLong_val();
          if (arg <= 0) {
            throw new AnalysisException("NTILE() requires a positive argument: " + arg);
          }
        } catch (InternalException e) {
          throw new AnalysisException(e.toString());
        }
      }
    }

    if (window_ != null) {
      if (orderByElements_.isEmpty()) {
        throw new AnalysisException("Windowing clause requires ORDER BY clause: "
            + toSql());
      }
      window_.analyze(analyzer);

      if (!orderByElements_.isEmpty()
          && window_.getType() == AnalyticWindow.Type.RANGE) {
        // check that preceding/following ranges match ordering
        if (window_.getLeftBoundary().getType().isOffset()) {
          checkRangeOffsetBoundaryExpr(window_.getLeftBoundary());
        }
        if (window_.getRightBoundary() != null
            && window_.getRightBoundary().getType().isOffset()) {
          checkRangeOffsetBoundaryExpr(window_.getRightBoundary());
        }
      }
    }

    // check nesting
    if (TreeNode.contains(getChildren(), AnalyticExpr.class)) {
      throw new AnalysisException(
          "Nesting of analytic expressions is not allowed: " + toSql());
    }
    // Capture the SQL string before standardize() rewrites fn/window/ordering.
    sqlString_ = toSql();

    standardize(analyzer);

    // min/max is not currently supported on sliding windows (i.e. start bound is not
    // unbounded).
    if (window_ != null && isMinMax(fn) &&
        window_.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING) {
      throw new AnalysisException(
          "'" + getFnCall().toSql() + "' is only supported with an "
            + "UNBOUNDED PRECEDING start bound.");
    }

    setChildren();
  }
+
+  /**
+   * If necessary, rewrites the analytic function, window, and/or order-by elements into
+   * a standard format for the purpose of simpler backend execution, as follows:
+   * 1. row_number():
+   *    Set a window from UNBOUNDED PRECEDING to CURRENT_ROW.
+   * 2. lead()/lag():
+   *    Explicitly set the default arguments to for BE simplicity.
+   *    Set a window for lead(): UNBOUNDED PRECEDING to OFFSET FOLLOWING.
+   *    Set a window for lag(): UNBOUNDED PRECEDING to OFFSET PRECEDING.
+   * 3. FIRST_VALUE without UNBOUNDED PRECEDING or IGNORE NULLS gets rewritten to use a
+   *    different window and function. There are a few cases:
+   *     a) Start bound is X FOLLOWING or CURRENT ROW (X=0):
+   *        Use 'last_value' with a window where both bounds are X FOLLOWING (or
+   *        CURRENT ROW). Setting the start bound to X following is necessary because the
+   *        X rows at the end of a partition have no rows in their window. Note that X
+   *        FOLLOWING could be rewritten as lead(X) but that would not work for CURRENT
+   *        ROW.
+   *     b) Start bound is X PRECEDING and end bound is CURRENT ROW or FOLLOWING:
+   *        Use 'first_value_rewrite' and a window with an end bound X PRECEDING. An
+   *        extra parameter '-1' is added to indicate to the backend that NULLs should
+   *        not be added for the first X rows.
+   *     c) Start bound is X PRECEDING and end bound is Y PRECEDING:
+   *        Use 'first_value_rewrite' and a window with an end bound X PRECEDING. The
+   *        first Y rows in a partition have empty windows and should be NULL. An extra
+   *        parameter with the integer constant Y is added to indicate to the backend
+   *        that NULLs should be added for the first Y rows.
+   *    The performance optimization here and in 5. below cannot be applied in the case of
+   *    IGNORE NULLS because they change what values appear in the window, which in the
+   *    IGNORE NULLS case could mean the correct value to return isn't even in the window,
+   *    eg. if all of the values in the rewritten window are NULL but one of the values in
+   *    the original window isn't.
+   * 4. Start bound is not UNBOUNDED PRECEDING and either the end bound is UNBOUNDED
+   *    FOLLOWING or the function is first_value(... ignore nulls):
+   *    Reverse the ordering and window, and flip first_value() and last_value().
+   * 5. first_value() with UNBOUNDED PRECEDING and not IGNORE NULLS:
+   *    Set the end boundary to CURRENT_ROW.
+   * 6. Rewrite IGNORE NULLS as regular FunctionCallExprs with '_ignore_nulls'
+   *    appended to the function name, because the BE implements them as different
+   *    functions.
+   * 7. Explicitly set the default window if no window was given but there
+   *    are order-by elements.
+   * 8. first/last_value() with RANGE window:
+   *    Rewrite as a ROWS window.
+   */
+  private void standardize(Analyzer analyzer) {
+    FunctionName analyticFnName = getFnCall().getFnName();
+
+    // 1. Set a window from UNBOUNDED PRECEDING to CURRENT_ROW for row_number().
+    if (analyticFnName.getFunction().equals(ROWNUMBER)) {
+      Preconditions.checkState(window_ == null, "Unexpected window set for row_numer()");
+      window_ = new AnalyticWindow(AnalyticWindow.Type.ROWS,
+          new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
+          new Boundary(BoundaryType.CURRENT_ROW, null));
+      resetWindow_ = true;
+      return;
+    }
+
+    // 2. Explicitly set the default arguments to lead()/lag() for BE simplicity.
+    // Set a window for lead(): UNBOUNDED PRECEDING to OFFSET FOLLOWING,
+    // Set a window for lag(): UNBOUNDED PRECEDING to OFFSET PRECEDING.
+    if (isOffsetFn(getFnCall().getFn())) {
+      Preconditions.checkState(window_ == null);
+
+      // If necessary, create a new fn call with the default args explicitly set.
+      List<Expr> newExprParams = null;
+      if (getFnCall().getChildren().size() == 1) {
+        newExprParams = Lists.newArrayListWithExpectedSize(3);
+        newExprParams.addAll(getFnCall().getChildren());
+        // Default offset is 1.
+        newExprParams.add(new NumericLiteral(BigDecimal.valueOf(1)));
+        // Default default value is NULL.
+        newExprParams.add(new NullLiteral());
+      } else if (getFnCall().getChildren().size() == 2) {
+        newExprParams = Lists.newArrayListWithExpectedSize(3);
+        newExprParams.addAll(getFnCall().getChildren());
+        // Default default value is NULL.
+        newExprParams.add(new NullLiteral());
+      } else  {
+        Preconditions.checkState(getFnCall().getChildren().size() == 3);
+      }
+      if (newExprParams != null) {
+        fnCall_ = new FunctionCallExpr(getFnCall().getFnName(),
+            new FunctionParams(newExprParams));
+        fnCall_.setIsAnalyticFnCall(true);
+        fnCall_.analyzeNoThrow(analyzer);
+      }
+
+      // Set the window.
+      BoundaryType rightBoundaryType = BoundaryType.FOLLOWING;
+      if (analyticFnName.getFunction().equals(LAG)) {
+        rightBoundaryType = BoundaryType.PRECEDING;
+      }
+      window_ = new AnalyticWindow(AnalyticWindow.Type.ROWS,
+          new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
+          new Boundary(rightBoundaryType, getOffsetExpr(getFnCall())));
+      try {
+        window_.analyze(analyzer);
+      } catch (AnalysisException e) {
+        throw new IllegalStateException(e);
+      }
+      resetWindow_ = true;
+      return;
+    }
+
+    // 3.
+    if (analyticFnName.getFunction().equals(FIRST_VALUE)
+        && window_ != null
+        && window_.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING
+        && !getFnCall().getParams().isIgnoreNulls()) {
+      if (window_.getLeftBoundary().getType() != BoundaryType.PRECEDING) {
+        window_ = new AnalyticWindow(window_.getType(), window_.getLeftBoundary(),
+            window_.getLeftBoundary());
+        fnCall_ = new FunctionCallExpr(new FunctionName(LAST_VALUE),
+            getFnCall().getParams());
+      } else {
+        List<Expr> paramExprs = Expr.cloneList(getFnCall().getParams().exprs());
+        if (window_.getRightBoundary().getType() == BoundaryType.PRECEDING) {
+          // The number of rows preceding for the end bound determines the number of
+          // rows at the beginning of each partition that should have a NULL value.
+          paramExprs.add(new NumericLiteral(window_.getRightBoundary().getOffsetValue(),
+              Type.BIGINT));
+        } else {
+          // -1 indicates that no NULL values are inserted even though we set the end
+          // bound to the start bound (which is PRECEDING) below; this is different from
+          // the default behavior of windows with an end bound PRECEDING.
+          paramExprs.add(new NumericLiteral(BigInteger.valueOf(-1), Type.BIGINT));
+        }
+
+        window_ = new AnalyticWindow(window_.getType(),
+            new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
+            window_.getLeftBoundary());
+        fnCall_ = new FunctionCallExpr(new FunctionName(FIRST_VALUE_REWRITE),
+            new FunctionParams(paramExprs));
+        fnCall_.setIsInternalFnCall(true);
+      }
+      fnCall_.setIsAnalyticFnCall(true);
+      fnCall_.analyzeNoThrow(analyzer);
+      // Use getType() instead if getReturnType() because wildcard decimals
+      // have only been resolved in the former.
+      type_ = fnCall_.getType();
+      analyticFnName = getFnCall().getFnName();
+    }
+
+    // 4. Reverse the ordering and window for windows not starting with UNBOUNDED
+    // PRECEDING and either: ending with UNBOUNDED FOLLOWING or
+    // first_value(... ignore nulls)
+    if (window_ != null
+        && window_.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING
+        && (window_.getRightBoundary().getType() == BoundaryType.UNBOUNDED_FOLLOWING
+            || (analyticFnName.getFunction().equals(FIRST_VALUE)
+                && getFnCall().getParams().isIgnoreNulls()))) {
+      orderByElements_ = OrderByElement.reverse(orderByElements_);
+      window_ = window_.reverse();
+
+      // Also flip first_value()/last_value(). For other analytic functions there is no
+      // need to also change the function.
+      FunctionName reversedFnName = null;
+      if (analyticFnName.getFunction().equals(FIRST_VALUE)) {
+        reversedFnName = new FunctionName(LAST_VALUE);
+      } else if (analyticFnName.getFunction().equals(LAST_VALUE)) {
+        reversedFnName = new FunctionName(FIRST_VALUE);
+      }
+      if (reversedFnName != null) {
+        fnCall_ = new FunctionCallExpr(reversedFnName, getFnCall().getParams());
+        fnCall_.setIsAnalyticFnCall(true);
+        fnCall_.analyzeNoThrow(analyzer);
+      }
+      analyticFnName = getFnCall().getFnName();
+    }
+
+    // 5. Set the start boundary to CURRENT_ROW for first_value() if the end boundary
+    // is UNBOUNDED_PRECEDING and IGNORE NULLS is not set.
+    if (analyticFnName.getFunction().equals(FIRST_VALUE)
+        && window_ != null
+        && window_.getLeftBoundary().getType() == BoundaryType.UNBOUNDED_PRECEDING
+        && window_.getRightBoundary().getType() != BoundaryType.PRECEDING
+        && !getFnCall().getParams().isIgnoreNulls()) {
+      window_.setRightBoundary(new Boundary(BoundaryType.CURRENT_ROW, null));
+    }
+
+    // 6. Set the default window.
+    if (!orderByElements_.isEmpty() && window_ == null) {
+      window_ = AnalyticWindow.DEFAULT_WINDOW;
+      resetWindow_ = true;
+    }
+
+    // 7. Change first_value/last_value RANGE windows to ROWS.
+    if ((analyticFnName.getFunction().equals(FIRST_VALUE)
+         || analyticFnName.getFunction().equals(LAST_VALUE))
+        && window_ != null
+        && window_.getType() == AnalyticWindow.Type.RANGE) {
+      window_ = new AnalyticWindow(AnalyticWindow.Type.ROWS, window_.getLeftBoundary(),
+          window_.getRightBoundary());
+    }
+
+    // 8. Append IGNORE NULLS to fn name if set.
+    if (getFnCall().getParams().isIgnoreNulls()) {
+      if (analyticFnName.getFunction().equals(LAST_VALUE)) {
+        fnCall_ = new FunctionCallExpr(new FunctionName(LAST_VALUE_IGNORE_NULLS),
+            getFnCall().getParams());
+      } else {
+        Preconditions.checkState(analyticFnName.getFunction().equals(FIRST_VALUE));
+        fnCall_ = new FunctionCallExpr(new FunctionName(FIRST_VALUE_IGNORE_NULLS),
+            getFnCall().getParams());
+      }
+
+      fnCall_.setIsAnalyticFnCall(true);
+      fnCall_.setIsInternalFnCall(true);
+      fnCall_.analyzeNoThrow(analyzer);
+      analyticFnName = getFnCall().getFnName();
+      Preconditions.checkState(type_.equals(fnCall_.getType()));
+    }
+  }
+
+  /**
+   * Returns the explicit or implicit offset of an analytic function call.
+   */
+  private Expr getOffsetExpr(FunctionCallExpr offsetFnCall) {
+    Preconditions.checkState(isOffsetFn(getFnCall().getFn()));
+    if (offsetFnCall.getChild(1) != null) return offsetFnCall.getChild(1);
+    // The default offset is 1.
+    return new NumericLiteral(BigDecimal.valueOf(1));
+  }
+
+  /**
+   * Keep fnCall_, partitionExprs_ and orderByElements_ in sync with children_.
+   */
+  private void syncWithChildren() {
+    int numArgs = fnCall_.getChildren().size();
+    for (int i = 0; i < numArgs; ++i) {
+      fnCall_.setChild(i, getChild(i));
+    }
+    int numPartitionExprs = partitionExprs_.size();
+    for (int i = 0; i < numPartitionExprs; ++i) {
+      partitionExprs_.set(i, getChild(numArgs + i));
+    }
+    for (int i = 0; i < orderByElements_.size(); ++i) {
+      orderByElements_.get(i).setExpr(getChild(numArgs + numPartitionExprs + i));
+    }
+  }
+
+  /**
+   * Populate children_ from fnCall_, partitionExprs_, orderByElements_
+   */
+  private void setChildren() {
+    getChildren().clear();
+    addChildren(fnCall_.getChildren());
+    addChildren(partitionExprs_);
+    for (OrderByElement e: orderByElements_) {
+      addChild(e.getExpr());
+    }
+    if (window_ != null) {
+      if (window_.getLeftBoundary().getExpr() != null) {
+        addChild(window_.getLeftBoundary().getExpr());
+      }
+      if (window_.getRightBoundary() != null
+          && window_.getRightBoundary().getExpr() != null) {
+        addChild(window_.getRightBoundary().getExpr());
+      }
+    }
+  }
+
  @Override
  protected void resetAnalysisState() {
    super.resetAnalysisState();
    fnCall_.resetAnalysisState();
    // Drop any window that standardize() injected (resetWindow_ is set whenever a
    // window was created implicitly) so re-analysis starts from the user's original
    // expression.
    if (resetWindow_) window_ = null;
    resetWindow_ = false;
    // sync with children, now that they've been reset
    syncWithChildren();
  }
+
+  @Override
+  protected Expr substituteImpl(ExprSubstitutionMap smap, Analyzer analyzer)
+      throws AnalysisException {
+    Expr e = super.substituteImpl(smap, analyzer);
+    if (!(e instanceof AnalyticExpr)) return e;
+    // Re-sync state after possible child substitution.
+    ((AnalyticExpr) e).syncWithChildren();
+    return e;
+  }
+}



Mime
View raw message