hive-commits mailing list archives

From hashut...@apache.org
Subject svn commit: r1537949 [1/13] - in /hive/trunk: common/src/java/org/apache/hadoop/hive/common/ hbase-handler/src/java/org/apache/hadoop/hive/hbase/ hbase-handler/src/test/results/positive/ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/ ...
Date Fri, 01 Nov 2013 14:35:21 GMT
Author: hashutosh
Date: Fri Nov  1 14:35:17 2013
New Revision: 1537949

URL: http://svn.apache.org/r1537949
Log:
HIVE-3959 : Update Partition Statistics in Metastore Layer (Ashutosh Chauhan, Bhushan Mandhani, Gang Tim Liu via Thejas Nair)

Added:
    hive/trunk/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
    hive/trunk/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
Modified:
    hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsUtils.java
    hive/trunk/hbase-handler/src/test/results/positive/hbase_stats.q.out
    hive/trunk/hbase-handler/src/test/results/positive/hbase_stats2.q.out
    hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
    hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
    hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
    hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/MergeWork.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsSetupConst.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsUtils.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestStatsPublisherEnhanced.java
    hive/trunk/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out
    hive/trunk/ql/src/test/results/clientnegative/unset_table_property.q.out
    hive/trunk/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
    hive/trunk/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
    hive/trunk/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
    hive/trunk/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
    hive/trunk/ql/src/test/results/clientpositive/alter_skewed_table.q.out
    hive/trunk/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
    hive/trunk/ql/src/test/results/clientpositive/alter_table_serde2.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out
    hive/trunk/ql/src/test/results/clientpositive/binary_output_format.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucket1.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucket2.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucket3.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucket4.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucket5.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketcontext_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketcontext_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketcontext_3.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketcontext_4.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketcontext_5.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketcontext_6.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketcontext_7.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketcontext_8.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin1.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin10.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin11.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin12.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin13.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin2.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin3.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin4.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin7.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin8.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin9.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketsortoptimize_insert_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketsortoptimize_insert_7.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out
    hive/trunk/ql/src/test/results/clientpositive/columnstats_partlvl.q.out
    hive/trunk/ql/src/test/results/clientpositive/columnstats_tbllvl.q.out
    hive/trunk/ql/src/test/results/clientpositive/combine2_hadoop20.q.out
    hive/trunk/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out
    hive/trunk/ql/src/test/results/clientpositive/ctas_colname.q.out
    hive/trunk/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
    hive/trunk/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
    hive/trunk/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out
    hive/trunk/ql/src/test/results/clientpositive/describe_table.q.out
    hive/trunk/ql/src/test/results/clientpositive/disable_merge_for_bucketing.q.out
    hive/trunk/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out
    hive/trunk/ql/src/test/results/clientpositive/filter_join_breaktask.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_sort_6.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
    hive/trunk/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
    hive/trunk/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
    hive/trunk/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
    hive/trunk/ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
    hive/trunk/ql/src/test/results/clientpositive/input23.q.out
    hive/trunk/ql/src/test/results/clientpositive/input42.q.out
    hive/trunk/ql/src/test/results/clientpositive/input_part1.q.out
    hive/trunk/ql/src/test/results/clientpositive/input_part2.q.out
    hive/trunk/ql/src/test/results/clientpositive/input_part7.q.out
    hive/trunk/ql/src/test/results/clientpositive/input_part9.q.out
    hive/trunk/ql/src/test/results/clientpositive/insert_into5.q.out
    hive/trunk/ql/src/test/results/clientpositive/join17.q.out
    hive/trunk/ql/src/test/results/clientpositive/join26.q.out
    hive/trunk/ql/src/test/results/clientpositive/join32.q.out
    hive/trunk/ql/src/test/results/clientpositive/join32_lessSize.q.out
    hive/trunk/ql/src/test/results/clientpositive/join33.q.out
    hive/trunk/ql/src/test/results/clientpositive/join34.q.out
    hive/trunk/ql/src/test/results/clientpositive/join35.q.out
    hive/trunk/ql/src/test/results/clientpositive/join9.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_filters_overlap.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_map_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/list_bucket_dml_10.q.out
    hive/trunk/ql/src/test/results/clientpositive/load_dyn_part8.q.out
    hive/trunk/ql/src/test/results/clientpositive/louter_join_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/merge3.q.out
    hive/trunk/ql/src/test/results/clientpositive/merge4.q.out
    hive/trunk/ql/src/test/results/clientpositive/outer_join_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/parallel_orderby.q.out
    hive/trunk/ql/src/test/results/clientpositive/partition_date2.q.out
    hive/trunk/ql/src/test/results/clientpositive/pcr.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_join_filter.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_union_view.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_vc.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
    hive/trunk/ql/src/test/results/clientpositive/push_or.q.out
    hive/trunk/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
    hive/trunk/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
    hive/trunk/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
    hive/trunk/ql/src/test/results/clientpositive/rcfile_default_format.q.out
    hive/trunk/ql/src/test/results/clientpositive/reduce_deduplicate.q.out
    hive/trunk/ql/src/test/results/clientpositive/regexp_extract.q.out
    hive/trunk/ql/src/test/results/clientpositive/router_join_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/sample1.q.out
    hive/trunk/ql/src/test/results/clientpositive/sample10.q.out
    hive/trunk/ql/src/test/results/clientpositive/sample2.q.out
    hive/trunk/ql/src/test/results/clientpositive/sample4.q.out
    hive/trunk/ql/src/test/results/clientpositive/sample5.q.out
    hive/trunk/ql/src/test/results/clientpositive/sample6.q.out
    hive/trunk/ql/src/test/results/clientpositive/sample7.q.out
    hive/trunk/ql/src/test/results/clientpositive/sample8.q.out
    hive/trunk/ql/src/test/results/clientpositive/sample9.q.out
    hive/trunk/ql/src/test/results/clientpositive/serde_user_properties.q.out
    hive/trunk/ql/src/test/results/clientpositive/show_create_table_alter.q.out
    hive/trunk/ql/src/test/results/clientpositive/show_tblproperties.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_15.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out
    hive/trunk/ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out
    hive/trunk/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out
    hive/trunk/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats0.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats1.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats10.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats11.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats12.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats13.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats14.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats15.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats16.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats2.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats20.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats3.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats4.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats5.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats6.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats7.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats8.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats9.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats_noscan_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats_noscan_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats_partscan_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/transform_ppr1.q.out
    hive/trunk/ql/src/test/results/clientpositive/transform_ppr2.q.out
    hive/trunk/ql/src/test/results/clientpositive/truncate_column.q.out
    hive/trunk/ql/src/test/results/clientpositive/udf_explode.q.out
    hive/trunk/ql/src/test/results/clientpositive/udf_reflect2.q.out
    hive/trunk/ql/src/test/results/clientpositive/udtf_explode.q.out
    hive/trunk/ql/src/test/results/clientpositive/union22.q.out
    hive/trunk/ql/src/test/results/clientpositive/union24.q.out
    hive/trunk/ql/src/test/results/clientpositive/union_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/unset_table_view_property.q.out
    hive/trunk/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/cast1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby2.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby3.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby4.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby5.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby6.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input2.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input20.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input3.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input4.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input5.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input6.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input7.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input8.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input9.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input_part1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input_testxpath.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input_testxpath2.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join2.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join3.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join4.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join5.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join6.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join7.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join8.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample2.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample3.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample4.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample5.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample6.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample7.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/subq.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/udf1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/udf6.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/udf_case.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/udf_when.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/union.q.xml

Added: hive/trunk/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java?rev=1537949&view=auto
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java (added)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java Fri Nov  1 14:35:17 2013
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.common;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * HiveStatsUtils.
+ * A collection of utilities used for Hive statistics.
+ * Used by classes in both the metastore and ql packages.
+ */
+
+public class HiveStatsUtils {
+
+  /**
+   * Get all file statuses under a root path, descending a fixed number of directory levels.
+   *
+   * @param path
+   *          the root path
+   * @param level
+   *          the number of directory levels to descend
+   * @param fs
+   *          the file system
+   * @return array of FileStatus
+   * @throws IOException
+   */
+  public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem fs)
+      throws IOException {
+
+    // construct a path pattern (e.g., /*/*) to find all dynamically generated paths
+    StringBuilder sb = new StringBuilder(path.toUri().getPath());
+    for (int i = 0; i < level; i++) {
+      sb.append(Path.SEPARATOR).append("*");
+    }
+    Path pathPattern = new Path(path, sb.toString());
+    return fs.globStatus(pathPattern);
+  }
+
+}
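
For context, a minimal usage sketch of the new helper (not part of this commit; the table path is hypothetical). With level 1 it mirrors how Warehouse.getFileStatusesForUnpartitionedTable, added later in this commit, lists the files directly under a table directory, which is all the "fast" stats need:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.common.HiveStatsUtils;

    public class FastStatsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical unpartitioned table location; assumed to exist.
        Path tableDir = new Path("/user/hive/warehouse/example_table");
        FileSystem fs = tableDir.getFileSystem(conf);

        // level = 1 globs <tableDir>/*, i.e. the entries directly under the root.
        FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(tableDir, 1, fs);

        long totalSize = 0L;
        for (FileStatus status : statuses) {
          totalSize += status.getLen();
        }
        // numFiles and totalSize are exactly the "fast" stats recorded by the metastore.
        System.out.println("numFiles=" + statuses.length + ", totalSize=" + totalSize);
      }
    }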

Added: hive/trunk/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java?rev=1537949&view=auto
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java (added)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java Fri Nov  1 14:35:17 2013
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.common;
+
+import java.util.ArrayList;
+import java.util.List;
+
+
+/**
+ * A class that defines the constant strings used by the statistics implementation.
+ */
+
+public class StatsSetupConst {
+
+  /**
+   * The value of the user variable "hive.stats.dbclass" to use the HBase implementation.
+   */
+  public static final String HBASE_IMPL_CLASS_VAL = "hbase";
+
+  /**
+   * The value of the user variable "hive.stats.dbclass" to use the JDBC implementation.
+   */
+  public static final String JDBC_IMPL_CLASS_VAL = "jdbc";
+
+  // statistics stored in metastore
+  /**
+   * The name of the statistic Num Files to be published or gathered.
+   */
+  public static final String NUM_FILES = "numFiles";
+
+  /**
+   * The name of the statistic Num Partitions to be published or gathered.
+   */
+  public static final String NUM_PARTITIONS = "numPartitions";
+
+  /**
+   * The name of the statistic Total Size to be published or gathered.
+   */
+  public static final String TOTAL_SIZE = "totalSize";
+
+  /**
+   * The name of the statistic Row Count to be published or gathered.
+   */
+  public static final String ROW_COUNT = "numRows";
+
+  /**
+   * The name of the statistic Raw Data Size to be published or gathered.
+   */
+  public static final String RAW_DATA_SIZE = "rawDataSize";
+
+  /**
+   * @return List of all supported statistics
+   */
+  public static List<String> getSupportedStats() {
+    List<String> supportedStats = new ArrayList<String>();
+    supportedStats.add(NUM_FILES);
+    supportedStats.add(ROW_COUNT);
+    supportedStats.add(TOTAL_SIZE);
+    supportedStats.add(RAW_DATA_SIZE);
+    return supportedStats;
+  }
+
+  /**
+   * @return List of all statistics that need to be collected during query execution. These are
+   * statistics that inherently require a scan of the data.
+   */
+  public static List<String> getStatsToBeCollected() {
+    List<String> collectableStats = new ArrayList<String>();
+    collectableStats.add(ROW_COUNT);
+    collectableStats.add(RAW_DATA_SIZE);
+    return collectableStats;
+  }
+
+  /**
+   * @return List of statistics that can be collected quickly without requiring a scan of the data.
+   */
+  public static List<String> getStatsFastCollection() {
+    List<String> fastStats = new ArrayList<String>();
+    fastStats.add(NUM_FILES);
+    fastStats.add(TOTAL_SIZE);
+    return fastStats;
+  }
+}
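
The split these lists encode: numFiles and totalSize are "fast" stats derivable from the filesystem alone, while numRows and rawDataSize are "collectable" stats that require scanning the data. A minimal sketch of checking a parameters map for fast-stat completeness (it mirrors MetaStoreUtils.containsAllFastStats, added further down in this commit; the values are made up):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.common.StatsSetupConst;

    public class FastStatsCheck {
      // True only if every fast stat (numFiles, totalSize) is present.
      static boolean hasAllFastStats(Map<String, String> params) {
        for (String stat : StatsSetupConst.getStatsFastCollection()) {
          if (!params.containsKey(stat)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        Map<String, String> params = new HashMap<String, String>();
        params.put(StatsSetupConst.NUM_FILES, "3");
        System.out.println(hasAllFastStats(params)); // false: totalSize missing
        params.put(StatsSetupConst.TOTAL_SIZE, "17436");
        System.out.println(hasAllFastStats(params)); // true
      }
    }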

Modified: hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsUtils.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsUtils.java (original)
+++ hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsUtils.java Fri Nov  1 14:35:17 2013
@@ -25,7 +25,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 
 
 

Modified: hive/trunk/hbase-handler/src/test/results/positive/hbase_stats.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/hbase-handler/src/test/results/positive/hbase_stats.q.out?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/hbase-handler/src/test/results/positive/hbase_stats.q.out (original)
+++ hive/trunk/hbase-handler/src/test/results/positive/hbase_stats.q.out Fri Nov  1 14:35:17 2013
@@ -43,7 +43,6 @@ Retention:          	0                  
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
 	numFiles            	1                   
-	numPartitions       	0                   
 	numRows             	500                 
 	rawDataSize         	5312                
 	totalSize           	5812                
@@ -175,11 +174,6 @@ Retention:          	0                  
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	numFiles            	3                   
-	numPartitions       	3                   
-	numRows             	1500                
-	rawDataSize         	15936               
-	totalSize           	17436               
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -345,11 +339,6 @@ Retention:          	0                  
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	numFiles            	3                   
-	numPartitions       	3                   
-	numRows             	1500                
-	rawDataSize         	15936               
-	totalSize           	17436               
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

Modified: hive/trunk/hbase-handler/src/test/results/positive/hbase_stats2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/hbase-handler/src/test/results/positive/hbase_stats2.q.out?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/hbase-handler/src/test/results/positive/hbase_stats2.q.out (original)
+++ hive/trunk/hbase-handler/src/test/results/positive/hbase_stats2.q.out Fri Nov  1 14:35:17 2013
@@ -43,7 +43,6 @@ Retention:          	0                  
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
 	numFiles            	1                   
-	numPartitions       	0                   
 	numRows             	500                 
 	rawDataSize         	5312                
 	totalSize           	5812                
@@ -175,11 +174,6 @@ Retention:          	0                  
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	numFiles            	3                   
-	numPartitions       	3                   
-	numRows             	1500                
-	rawDataSize         	15936               
-	totalSize           	17436               
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -345,11 +339,6 @@ Retention:          	0                  
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	numFiles            	3                   
-	numPartitions       	3                   
-	numRows             	1500                
-	rawDataSize         	15936               
-	totalSize           	17436               
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

Modified: hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (original)
+++ hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java Fri Nov  1 14:35:17 2013
@@ -467,7 +467,7 @@ public abstract class TestHiveMetaStore 
   }
 
   private static Partition makePartitionObject(String dbName, String tblName,
-      List<String> ptnVals, Table tbl, String ptnLocationSuffix) {
+      List<String> ptnVals, Table tbl, String ptnLocationSuffix) throws MetaException {
     Partition part4 = new Partition();
     part4.setDbName(dbName);
     part4.setTableName(tblName);
@@ -476,6 +476,7 @@ public abstract class TestHiveMetaStore 
     part4.setSd(tbl.getSd().deepCopy());
     part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
     part4.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
+    MetaStoreUtils.updatePartitionStatsFast(part4, warehouse);
     return part4;
   }
 

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java Fri Nov  1 14:35:17 2013
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
@@ -195,6 +196,12 @@ public class HiveAlterHandler implements
             msdb.alterPartition(dbname, name, part.getValues(), part);
           }
         }
+      } else if (MetaStoreUtils.requireCalStats(hiveConf, null, null, newt) &&
+        (newt.getPartitionKeysSize() == 0)) {
+          Database db = msdb.getDatabase(newt.getDbName());
+          // Update table stats. For partitioned table, we update stats in
+          // alterPartition()
+          MetaStoreUtils.updateUnpartitionedTableStatsFast(db, newt, wh, false, true);
       }
       // now finally call alter table
       msdb.alterTable(dbname, name, newt);
@@ -254,10 +261,10 @@ public class HiveAlterHandler implements
     Path destPath = null;
     FileSystem srcFs = null;
     FileSystem destFs = null;
-    Table tbl = null;
     Partition oldPart = null;
     String oldPartLoc = null;
     String newPartLoc = null;
+
     // Set DDL time to now if not specified
     if (new_part.getParameters() == null ||
         new_part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
@@ -265,10 +272,15 @@ public class HiveAlterHandler implements
       new_part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
           .currentTimeMillis() / 1000));
     }
+
+    Table tbl = msdb.getTable(dbname, name);
     //alter partition
     if (part_vals == null || part_vals.size() == 0) {
       try {
         oldPart = msdb.getPartition(dbname, name, new_part.getValues());
+        if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl)) {
+          MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true);
+        }
         msdb.alterPartition(dbname, name, new_part.getValues(), new_part);
       } catch (InvalidObjectException e) {
         throw new InvalidOperationException("alter is not possible");
@@ -299,7 +311,6 @@ public class HiveAlterHandler implements
         throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." +
             new_part.getValues());
       }
-      tbl = msdb.getTable(dbname, name);
       if (tbl == null) {
         throw new InvalidObjectException(
             "Unable to rename partition because table or database do not exist");
@@ -351,6 +362,9 @@ public class HiveAlterHandler implements
               + tbl.getTableName() + " " + new_part.getValues());
           }
           new_part.getSd().setLocation(newPartLoc);
+          if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl)) {
+            MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true);
+          }
           msdb.alterPartition(dbname, name, part_vals, new_part);
         }
       }
@@ -399,6 +413,7 @@ public class HiveAlterHandler implements
       MetaException {
     List<Partition> oldParts = new ArrayList<Partition>();
     List<List<String>> partValsList = new ArrayList<List<String>>();
+    Table tbl = msdb.getTable(dbname, name);
     try {
       for (Partition tmpPart: new_parts) {
         // Set DDL time to now if not specified
@@ -408,9 +423,14 @@ public class HiveAlterHandler implements
           tmpPart.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
               .currentTimeMillis() / 1000));
         }
+
         Partition oldTmpPart = msdb.getPartition(dbname, name, tmpPart.getValues());
         oldParts.add(oldTmpPart);
         partValsList.add(tmpPart.getValues());
+
+        if (MetaStoreUtils.requireCalStats(hiveConf, oldTmpPart, tmpPart, tbl)) {
+          MetaStoreUtils.updatePartitionStatsFast(tmpPart, wh, false, true);
+        }
       }
       msdb.alterPartitions(dbname, name, partValsList, new_parts);
     } catch (InvalidObjectException e) {

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Fri Nov  1 14:35:17 2013
@@ -1029,7 +1029,8 @@ public class HiveMetaStore extends Thrif
 
         ms.openTransaction();
 
-        if (ms.getDatabase(tbl.getDbName()) == null) {
+        Database db = ms.getDatabase(tbl.getDbName());
+        if (db == null) {
           throw new NoSuchObjectException("The database " + tbl.getDbName() + " does not exist");
         }
 
@@ -1063,6 +1064,14 @@ public class HiveMetaStore extends Thrif
             madeDir = true;
           }
         }
+        if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) &&
+            !MetaStoreUtils.isView(tbl)) {
+          if (tbl.getPartitionKeysSize() == 0)  { // Unpartitioned table
+            MetaStoreUtils.updateUnpartitionedTableStatsFast(db, tbl, wh, madeDir);
+          } else { // Partitioned table with no partitions.
+            MetaStoreUtils.updateUnpartitionedTableStatsFast(db, tbl, wh, true);
+          }
+        }
 
         // set create time
         long time = System.currentTimeMillis() / 1000;
@@ -1540,6 +1549,11 @@ public class HiveMetaStore extends Thrif
         part.setCreateTime((int) time);
         part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
 
+        if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) &&
+            !MetaStoreUtils.isView(tbl)) {
+          MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir);
+        }
+
         success = ms.addPartition(part);
         if (success) {
           success = ms.commitTransaction();
@@ -1760,6 +1774,11 @@ public class HiveMetaStore extends Thrif
           }
         }
 
+        if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) &&
+            !MetaStoreUtils.isView(tbl)) {
+          MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir);
+        }
+
         // set create time
         long time = System.currentTimeMillis() / 1000;
         part.setCreateTime((int) time);

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Fri Nov  1 14:35:17 2013
@@ -41,13 +41,17 @@ import org.apache.commons.lang.StringUti
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -140,6 +144,166 @@ public class MetaStoreUtils {
   }
 
   /**
+   * @param partParams
+   * @return True if the passed Parameters Map contains values for all "Fast Stats".
+   */
+  public static boolean containsAllFastStats(Map<String, String> partParams) {
+    List<String> fastStats = StatsSetupConst.getStatsFastCollection();
+    for (String stat : fastStats) {
+      if (!partParams.containsKey(stat)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, Warehouse wh)
+      throws MetaException {
+    return updateUnpartitionedTableStatsFast(db, tbl, wh, false, false);
+  }
+
+  public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, Warehouse wh,
+      boolean madeDir) throws MetaException {
+    return updateUnpartitionedTableStatsFast(db, tbl, wh, madeDir, false);
+  }
+
+  /**
+   * Updates the numFiles and totalSize parameters for the passed unpartitioned Table by querying
+   * the warehouse if the passed Table does not already have values for these parameters.
+   * @param db
+   * @param tbl
+   * @param wh
+   * @param newDir if true, the directory was just created and can be assumed to be empty
+   * @param forceRecompute Recompute stats even if the passed Table already has
+   * these parameters set
+   * @return true if the stats were updated, false otherwise
+   */
+  public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, Warehouse wh,
+      boolean newDir, boolean forceRecompute) throws MetaException {
+    Map<String,String> params = tbl.getParameters();
+    boolean updated = false;
+    if (forceRecompute ||
+        params == null ||
+        !containsAllFastStats(params)) {
+      if (params == null) {
+        params = new HashMap<String,String>();
+      }
+      if (!newDir) {
+        // The table location already exists and may contain data.
+        // Let's try to populate those stats that don't require full scan.
+        LOG.info("Updating table stats fast for " + tbl.getTableName());
+        FileStatus[] fileStatus = wh.getFileStatusesForUnpartitionedTable(db, tbl);
+        params.put(StatsSetupConst.NUM_FILES, Integer.toString(fileStatus.length));
+        long tableSize = 0L;
+        for (FileStatus status : fileStatus) {
+          tableSize += status.getLen();
+        }
+        params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(tableSize));
+        LOG.info("Updated size of table " + tbl.getTableName() +" to "+ Long.toString(tableSize));
+        if (params.containsKey(StatsSetupConst.ROW_COUNT) ||
+            params.containsKey(StatsSetupConst.RAW_DATA_SIZE)) {
+          // TODO: Add a MetaStore flag indicating accuracy of these stats and update it here.
+        }
+      }
+      tbl.setParameters(params);
+      updated = true;
+    }
+    return updated;
+  }
+
+  // check if stats need to be (re)calculated
+  public static boolean requireCalStats(Configuration hiveConf, Partition oldPart,
+    Partition newPart, Table tbl) {
+
+    if (!HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+      return false;
+    }
+
+    if (MetaStoreUtils.isView(tbl)) {
+      return false;
+    }
+
+    if  (oldPart == null && newPart == null) {
+      return true;
+    }
+
+    // stats need to be recalculated if the new partition doesn't have them
+    if ((newPart == null) || (newPart.getParameters() == null)
+        || !containsAllFastStats(newPart.getParameters())) {
+      return true;
+    }
+
+    // stats need to be recalculated if the new and old partitions have different fast stats
+    if ((oldPart != null) && (oldPart.getParameters() != null)) {
+      for (String stat : StatsSetupConst.getStatsFastCollection()) {
+        if (oldPart.getParameters().containsKey(stat)) {
+          long oldStat = Long.parseLong(oldPart.getParameters().get(stat));
+          long newStat = Long.parseLong(newPart.getParameters().get(stat));
+          if (oldStat != newStat) {
+            return true;
+          }
+        }
+      }
+    }
+    return false;
+  }
+
+  public static boolean updatePartitionStatsFast(Partition part, Warehouse wh)
+      throws MetaException {
+    return updatePartitionStatsFast(part, wh, false, false);
+  }
+
+  public static boolean updatePartitionStatsFast(Partition part, Warehouse wh, boolean madeDir)
+      throws MetaException {
+    return updatePartitionStatsFast(part, wh, madeDir, false);
+  }
+
+  /**
+   * Updates the numFiles and totalSize parameters for the passed Partition by querying
+   *  the warehouse if the passed Partition does not already have values for these parameters.
+   * @param part
+   * @param wh
+   * @param madeDir if true, the directory was just created and can be assumed to be empty
+   * @param forceRecompute Recompute stats even if the passed Partition already has
+   * these parameters set
+   * @return true if the stats were updated, false otherwise
+   */
+  public static boolean updatePartitionStatsFast(Partition part, Warehouse wh,
+      boolean madeDir, boolean forceRecompute) throws MetaException {
+    Map<String,String> params = part.getParameters();
+    boolean updated = false;
+    if (forceRecompute ||
+        params == null ||
+        !containsAllFastStats(params)) {
+      if (params == null) {
+        params = new HashMap<String,String>();
+      }
+      if (!madeDir) {
+        // The partition location already existed and may contain data. Let's try to
+        // populate those statistics that don't require a full scan of the data.
+        LOG.warn("Updating partition stats fast for: " + part.getTableName());
+        FileStatus[] fileStatus = wh.getFileStatusesForPartition(part);
+        params.put(StatsSetupConst.NUM_FILES, Integer.toString(fileStatus.length));
+        long partSize = 0L;
+        for (int i = 0; i < fileStatus.length; i++) {
+          partSize += fileStatus[i].getLen();
+        }
+        params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(partSize));
+        LOG.warn("Updated size to " + Long.toString(partSize));
+        if (params.containsKey(StatsSetupConst.ROW_COUNT) ||
+            params.containsKey(StatsSetupConst.RAW_DATA_SIZE)) {
+          // The accuracy of these "collectable" stats at this point is suspect unless we know that
+          // StatsTask was just run before this MetaStore call and populated them.
+          // TODO: Add a MetaStore flag indicating accuracy of these stats and update it here.
+        }
+      }
+      part.setParameters(params);
+      updated = true;
+    }
+    return updated;
+  }
+
+  /**
    * getDeserializer
    *
    * Get the Deserializer for a table given its name and properties.
@@ -1122,6 +1286,13 @@ public class MetaStoreUtils {
     return filter.toString();
   }
 
+  public static boolean isView(Table table) {
+    if (table == null) {
+      return false;
+    }
+    return TableType.VIRTUAL_VIEW.toString().equals(table.getTableType());
+  }
+
   /**
    * create listener instances as per the configuration.
    *
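
A hedged sketch of driving the new fast-stats path directly (illustrative only: the partition and its location are made up, and the call assumes the location exists on a reachable warehouse filesystem; the real callers are the metastore handlers shown above):

    import org.apache.hadoop.hive.common.StatsSetupConst;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.MetaStoreUtils;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

    public class UpdateStatsSketch {
      public static void main(String[] args) throws Exception {
        Warehouse wh = new Warehouse(new HiveConf());

        // Hypothetical partition pointing at an existing directory.
        Partition part = new Partition();
        StorageDescriptor sd = new StorageDescriptor();
        sd.setLocation("/user/hive/warehouse/example_table/ds=2013-11-01");
        part.setSd(sd);

        // madeDir = false (directory pre-existed), forceRecompute = true.
        boolean updated = MetaStoreUtils.updatePartitionStatsFast(part, wh, false, true);
        if (updated) {
          System.out.println(part.getParameters().get(StatsSetupConst.NUM_FILES));
          System.out.println(part.getParameters().get(StatsSetupConst.TOTAL_SIZE));
        }
      }
    }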

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java Fri Nov  1 14:35:17 2013
@@ -45,11 +45,15 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -496,6 +500,63 @@ public class Warehouse {
   }
 
   /**
+   * @param partn
+   * @return array of FileStatus objects corresponding to the files making up the passed partition
+   */
+  public FileStatus[] getFileStatusesForPartition(Partition partn)
+      throws MetaException {
+    try {
+      Path path = new Path(partn.getSd().getLocation());
+      FileSystem fileSys = path.getFileSystem(conf);
+      /* consider sub-directory created from list bucketing. */
+      int listBucketingDepth = calculateListBucketingDMLDepth(partn);
+      return HiveStatsUtils.getFileStatusRecurse(path, (1 + listBucketingDepth), fileSys);
+    } catch (IOException ioe) {
+      MetaStoreUtils.logAndThrowMetaException(ioe);
+    }
+    return null;
+  }
+
+  /**
+   * List bucketing will introduce sub-directories.
+   * Calculate the depth here in order to reach the leaf directories
+   * so that we can count the right number of files.
+   * @param partn
+   * @return
+   */
+  private static int calculateListBucketingDMLDepth(Partition partn) {
+    // list bucketing will introduce more files
+    int listBucketingDepth = 0;
+    SkewedInfo skewedInfo = partn.getSd().getSkewedInfo();
+    if ((skewedInfo != null) && (skewedInfo.getSkewedColNames() != null)
+        && (skewedInfo.getSkewedColNames().size() > 0)
+        && (skewedInfo.getSkewedColValues() != null)
+        && (skewedInfo.getSkewedColValues().size() > 0)
+        && (skewedInfo.getSkewedColValueLocationMaps() != null)
+        && (skewedInfo.getSkewedColValueLocationMaps().size() > 0)) {
+      listBucketingDepth = skewedInfo.getSkewedColNames().size();
+    }
+    return listBucketingDepth;
+  }
+
+  /**
+   * @param table
+   * @return array of FileStatus objects corresponding to the files making up the passed
+   * unpartitioned table
+   */
+  public FileStatus[] getFileStatusesForUnpartitionedTable(Database db, Table table)
+      throws MetaException {
+    Path tablePath = getTablePath(db, table.getTableName());
+    try {
+      FileSystem fileSys = tablePath.getFileSystem(conf);
+      return HiveStatsUtils.getFileStatusRecurse(tablePath, 1, fileSys);
+    } catch (IOException ioe) {
+      MetaStoreUtils.logAndThrowMetaException(ioe);
+    }
+    return null;
+  }
+
+  /**
    * Makes a valid partition name.
    * @param partCols The partition columns
    * @param vals The partition values
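
The depth calculation matters because list-bucketed partitions nest their files one sub-directory level per skewed column. A small illustrative sketch (column names and values are made up) of how fully populated skew metadata feeds the glob level used above:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.api.SkewedInfo;

    public class ListBucketingDepthSketch {
      public static void main(String[] args) {
        // A partition skewed on (c1, c2) stores files two directory levels
        // below the partition location: <partLoc>/c1=.../c2=.../data_file
        SkewedInfo skew = new SkewedInfo();
        skew.setSkewedColNames(Arrays.asList("c1", "c2"));
        List<String> vals = Arrays.asList("v1", "v2");
        skew.setSkewedColValues(Collections.singletonList(vals));
        skew.setSkewedColValueLocationMaps(
            Collections.singletonMap(vals, "/part/c1=v1/c2=v2"));

        // Same rule as calculateListBucketingDMLDepth: one level per skewed
        // column when names, values, and location maps are all present.
        int depth = skew.getSkewedColNames().size();

        // getFileStatusesForPartition then globs at level 1 + depth, i.e.
        // <partLoc>/*/*/* here, to reach the actual data files.
        System.out.println("glob level = " + (1 + depth));
      }
    }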

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java Fri Nov  1 14:35:17 2013
@@ -34,6 +34,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.FSRecordWriter.StatsProvidingRecordWriter;
@@ -51,7 +53,6 @@ import org.apache.hadoop.hive.ql.plan.Pl
 import org.apache.hadoop.hive.ql.plan.SkewedColumnPositionPair;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
-import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.SerDeStats;
 import org.apache.hadoop.hive.serde2.Serializer;
@@ -836,7 +837,7 @@ public class FileSinkOperator extends Te
       if (conf.isLinkedFileSink()) {
         level++;
       }
-      FileStatus[] status = Utilities.getFileStatusRecurse(tmpPath, level, fs);
+      FileStatus[] status = HiveStatsUtils.getFileStatusRecurse(tmpPath, level, fs);
       sb.append("Sample of ")
         .append(Math.min(status.length, 100))
         .append(" partitions created under ")

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Fri Nov  1 14:35:17 2013
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
@@ -194,11 +195,11 @@ public class MoveTask extends Task<MoveW
     }
   }
 
-
   @Override
   public int execute(DriverContext driverContext) {
 
     try {
+
       // Do any hive related operations like moving tables and files
       // to appropriate locations
       LoadFileDesc lfd = work.getLoadFileWork();
@@ -460,7 +461,7 @@ public class MoveTask extends Task<MoveW
     boolean updateBucketCols = false;
     if (bucketCols != null) {
       FileSystem fileSys = partn.getPartitionPath().getFileSystem(conf);
-      FileStatus[] fileStatus = Utilities.getFileStatusRecurse(
+      FileStatus[] fileStatus = HiveStatsUtils.getFileStatusRecurse(
           partn.getPartitionPath(), 1, fileSys);
       // Verify the number of buckets equals the number of files
       // This will not hold for dynamic partitions where not every reducer produced a file for

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java Fri Nov  1 14:35:17 2013
@@ -19,23 +19,19 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
 import java.io.Serializable;
-import java.net.URI;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.DriverContext;
@@ -43,7 +39,6 @@ import org.apache.hadoop.hive.ql.ErrorMs
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
@@ -52,12 +47,16 @@ import org.apache.hadoop.hive.ql.plan.ap
 import org.apache.hadoop.hive.ql.stats.StatsAggregator;
 import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
-import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.util.StringUtils;
 
 /**
- * StatsTask implementation.
+ * StatsTask implementation. StatsTask mainly deals with "collectable" stats. These are
+ * stats that require data scanning and are collected during query execution (unless the user
+ * explicitly requests data scanning just for the purpose of stats computation using the "ANALYZE"
+ * command). All other stats are computed directly by the MetaStore. The rationale is that the
+ * MetaStore layer covers all Thrift calls and provides better guarantees about the accuracy of
+ * those stats.
  **/
 public class StatsTask extends Task<StatsWork> implements Serializable {
 
@@ -67,25 +66,8 @@ public class StatsTask extends Task<Stat
   private Table table;
   private List<LinkedHashMap<String, String>> dpPartSpecs;
 
-  private static final List<String> supportedStats = new ArrayList<String>();
-  private static final List<String> collectableStats = new ArrayList<String>();
-  private static final Map<String, String> nameMapping = new HashMap<String, String>();
-  static {
-    // supported statistics
-    supportedStats.add(StatsSetupConst.NUM_FILES);
-    supportedStats.add(StatsSetupConst.ROW_COUNT);
-    supportedStats.add(StatsSetupConst.TOTAL_SIZE);
-    supportedStats.add(StatsSetupConst.RAW_DATA_SIZE);
-
-    // statistics that need to be collected throughout the execution
-    collectableStats.add(StatsSetupConst.ROW_COUNT);
-    collectableStats.add(StatsSetupConst.RAW_DATA_SIZE);
-
-    nameMapping.put(StatsSetupConst.NUM_FILES, "num_files");
-    nameMapping.put(StatsSetupConst.ROW_COUNT, "num_rows");
-    nameMapping.put(StatsSetupConst.TOTAL_SIZE, "total_size");
-    nameMapping.put(StatsSetupConst.RAW_DATA_SIZE, "raw_data_size");
-  }
+  private static final List<String> collectableStats = StatsSetupConst.getStatsToBeCollected();
+  private static final List<String> supportedStats = StatsSetupConst.getSupportedStats();
 
   public StatsTask() {
     super();
@@ -94,20 +76,20 @@ public class StatsTask extends Task<Stat
 
   /**
    *
-   * Partition Level Statistics.
+   * Statistics for a partition or an unpartitioned table.
    *
    */
-  class PartitionStatistics {
+  class Statistics {
     Map<String, LongWritable> stats;
 
-    public PartitionStatistics() {
+    public Statistics() {
       stats = new HashMap<String, LongWritable>();
       for (String statType : supportedStats) {
         stats.put(statType, new LongWritable(0L));
       }
     }
 
-    public PartitionStatistics(Map<String, Long> st) {
+    public Statistics(Map<String, Long> st) {
       stats = new HashMap<String, LongWritable>();
       for (String statType : st.keySet()) {
         Long stValue = st.get(statType) == null ? 0L : st.get(statType);
@@ -126,86 +108,7 @@ public class StatsTask extends Task<Stat
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder();
-      for (String statType : supportedStats) {
-        sb.append(nameMapping.get(statType)).append(": ").append(stats.get(statType)).append(", ");
-      }
-      sb.delete(sb.length() - 2, sb.length());
-      return sb.toString();
-    }
-  }
-
-  /**
-   * Table Level Statistics.
-   */
-  class TableStatistics extends PartitionStatistics {
-    int numPartitions; // number of partitions
-
-    public TableStatistics() {
-      super();
-      numPartitions = 0;
-    }
-
-    public void setNumPartitions(int np) {
-      numPartitions = np;
-    }
-
-    public int getNumPartitions() {
-      return numPartitions;
-    }
-
-    /**
-     * Incrementally update the table statistics according to the old and new
-     * partition level statistics.
-     *
-     * @param oldStats
-     *          The old statistics of a partition.
-     * @param newStats
-     *          The new statistics of a partition.
-     */
-    public void updateStats(PartitionStatistics oldStats, PartitionStatistics newStats) {
-      deletePartitionStats(oldStats);
-      addPartitionStats(newStats);
-    }
-
-    /**
-     * Update the table level statistics when a new partition is added.
-     *
-     * @param newStats
-     *          the new partition statistics.
-     */
-    public void addPartitionStats(PartitionStatistics newStats) {
-      for (String statType : supportedStats) {
-        LongWritable value = stats.get(statType);
-        if (value == null) {
-          stats.put(statType, new LongWritable(newStats.getStat(statType)));
-        } else {
-          value.set(value.get() + newStats.getStat(statType));
-        }
-      }
-      this.numPartitions++;
-    }
-
-    /**
-     * Update the table level statistics when an old partition is dropped.
-     *
-     * @param oldStats
-     *          the old partition statistics.
-     */
-    public void deletePartitionStats(PartitionStatistics oldStats) {
-      for (String statType : supportedStats) {
-        LongWritable value = stats.get(statType);
-        value.set(value.get() - oldStats.getStat(statType));
-      }
-      this.numPartitions--;
-    }
-
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder();
-      sb.append("num_partitions: ").append(numPartitions).append(", ");
-      sb.append(super.toString());
-      return sb.toString();
+      return org.apache.commons.lang.StringUtils.join(stats.entrySet(), ", ");
     }
   }
 
@@ -297,7 +200,7 @@ public class StatsTask extends Task<Stat
         }
       }
 
-      TableStatistics tblStats = new TableStatistics();
+      Statistics tblStats = new Statistics();
 
       org.apache.hadoop.hive.metastore.api.Table tTable = table.getTTable();
       Map<String, String> parameters = tTable.getParameters();
@@ -310,10 +213,6 @@ public class StatsTask extends Task<Stat
         }
       }
 
-      if (parameters.containsKey(StatsSetupConst.NUM_PARTITIONS)) {
-        tblStats.setNumPartitions(Integer.parseInt(parameters.get(StatsSetupConst.NUM_PARTITIONS)));
-      }
-
       List<Partition> partitions = getPartitionsList();
       boolean atomic = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_ATOMIC);
       int maxPrefixLength = HiveConf.getIntVar(conf,
@@ -324,10 +223,6 @@ public class StatsTask extends Task<Stat
         if (!tableStatsExist && atomic) {
           return 0;
         }
-        long[] summary = summary(conf, table);
-        tblStats.setStat(StatsSetupConst.NUM_FILES, summary[0]);
-        tblStats.setStat(StatsSetupConst.TOTAL_SIZE, summary[1]);
-
         // In case of a non-partitioned table, the key for stats temporary store is "rootDir"
         if (statsAggregator != null) {
           String aggKey = Utilities.getHashedStatsPrefix(work.getAggKey(), maxPrefixLength);
@@ -344,6 +239,19 @@ public class StatsTask extends Task<Stat
             }
           }
         }
+
+        // write table stats to metastore
+        parameters = tTable.getParameters();
+        for (String statType : collectableStats) {
+          parameters.put(statType, Long.toString(tblStats.getStat(statType)));
+        }
+        tTable.setParameters(parameters);
+
+        String tableFullName = table.getDbName() + "." + table.getTableName();
+
+        db.alterTable(tableFullName, new Table(tTable));
+
+        console.printInfo("Table " + tableFullName + " stats: [" + tblStats.toString() + ']');
       } else {
         // Partitioned table:
         // Need to get the old stats of the partition
@@ -370,7 +278,7 @@ public class StatsTask extends Task<Stat
           //
           // get the new partition stats
           //
-          PartitionStatistics newPartStats = new PartitionStatistics();
+          Statistics newPartStats = new Statistics();
 
           // In that case of a partition, the key for stats temporary store is
           // "rootDir/[dynamic_partition_specs/]%"
@@ -398,16 +306,16 @@ public class StatsTask extends Task<Stat
             }
           }
 
-          long[] summary = summary(conf, partn);
-          newPartStats.setStat(StatsSetupConst.NUM_FILES, summary[0]);
-          newPartStats.setStat(StatsSetupConst.TOTAL_SIZE, summary[1]);
-
-          if (hasStats) {
-            PartitionStatistics oldPartStats = new PartitionStatistics(currentValues);
-            tblStats.updateStats(oldPartStats, newPartStats);
-          } else {
-            tblStats.addPartitionStats(newPartStats);
+          //
+          // calculate fast statistics: number of files and total size
+          //
+          FileStatus[] partfileStatus = wh.getFileStatusesForPartition(tPart);
+          newPartStats.setStat(StatsSetupConst.NUM_FILES, partfileStatus.length);
+          long partSize = 0L;
+          for (int i = 0; i < partfileStatus.length; i++) {
+            partSize += partfileStatus[i].getLen();
           }
+          newPartStats.setStat(StatsSetupConst.TOTAL_SIZE, partSize);
 
           //
           // update the metastore
@@ -429,22 +337,6 @@ public class StatsTask extends Task<Stat
 
       }
 
-      //
-      // write table stats to metastore
-      //
-      parameters = tTable.getParameters();
-      for (String statType : supportedStats) {
-        parameters.put(statType, Long.toString(tblStats.getStat(statType)));
-      }
-      parameters.put(StatsSetupConst.NUM_PARTITIONS, Integer.toString(tblStats.getNumPartitions()));
-      tTable.setParameters(parameters);
-
-      String tableFullName = table.getDbName() + "." + table.getTableName();
-
-      db.alterTable(tableFullName, new Table(tTable));
-
-      console.printInfo("Table " + tableFullName + " stats: [" + tblStats.toString() + ']');
-
     } catch (Exception e) {
       console.printInfo("[Warning] could not update stats.",
           "Failed with exception " + e.getMessage() + "\n"
@@ -464,105 +356,6 @@ public class StatsTask extends Task<Stat
     return ret;
   }
 
-  private long[] summary(HiveConf conf, Partition partn) throws IOException {
-    Path path = partn.getPartitionPath();
-    FileSystem fs = path.getFileSystem(conf);
-    List<String> skewedColNames = partn.getSkewedColNames();
-    if (skewedColNames == null || skewedColNames.isEmpty()) {
-      return summary(fs, path);
-    }
-    List<List<String>> skewColValues = table.getSkewedColValues();
-    if (skewColValues == null || skewColValues.isEmpty()) {
-      return summary(fs, toDefaultLBPath(path));
-    }
-    return summary(fs, path, skewedColNames);
-  }
-
-  private long[] summary(HiveConf conf, Table table) throws IOException {
-    Path path = table.getPath();
-    FileSystem fs = path.getFileSystem(conf);
-    List<String> skewedColNames = table.getSkewedColNames();
-    if (skewedColNames == null || skewedColNames.isEmpty()) {
-      return summary(fs, path);
-    }
-    List<List<String>> skewColValues = table.getSkewedColValues();
-    if (skewColValues == null || skewColValues.isEmpty()) {
-      return summary(fs, toDefaultLBPath(path));
-    }
-    return summary(fs, path, table.getSkewedColNames());
-  }
-
-  private Path toDefaultLBPath(Path path) {
-    return new Path(path, ListBucketingPrunerUtils.HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME);
-  }
-
-  private long[] summary(FileSystem fs, Path path) throws IOException {
-    try {
-      FileStatus status = fs.getFileStatus(path);
-      if (!status.isDir()) {
-        return new long[] {1, status.getLen()};
-      }
-    } catch (FileNotFoundException e) {
-      return new long[] {0, 0};
-    }
-    FileStatus[] children = fs.listStatus(path);  // can be null
-    if (children == null) {
-      return new long[] {0, 0};
-    }
-    long numFiles = 0L;
-    long tableSize = 0L;
-    for (FileStatus child : children) {
-      if (!child.isDir()) {
-        tableSize += child.getLen();
-        numFiles++;
-      }
-    }
-    return new long[] {numFiles, tableSize};
-  }
-
-  private Pattern toPattern(List<String> skewCols) {
-    StringBuilder builder = new StringBuilder();
-    for (String skewCol : skewCols) {
-      if (builder.length() > 0) {
-        builder.append(Path.SEPARATOR_CHAR);
-      }
-      builder.append(skewCol).append('=');
-      builder.append("[^").append(Path.SEPARATOR_CHAR).append("]*");
-    }
-    builder.append(Path.SEPARATOR_CHAR);
-    builder.append("[^").append(Path.SEPARATOR_CHAR).append("]*$");
-    return Pattern.compile(builder.toString());
-  }
-
-  private long[] summary(FileSystem fs, Path path, List<String> skewCols) throws IOException {
-    long numFiles = 0L;
-    long tableSize = 0L;
-    Pattern pattern = toPattern(skewCols);
-    for (FileStatus status : Utilities.getFileStatusRecurse(path, skewCols.size() + 1, fs)) {
-      if (status.isDir()) {
-        continue;
-      }
-      String relative = toRelativePath(path, status.getPath());
-      if (relative == null) {
-        continue;
-      }
-      if (relative.startsWith(ListBucketingPrunerUtils.HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME) ||
-        pattern.matcher(relative).matches()) {
-        tableSize += status.getLen();
-        numFiles++;
-      }
-    }
-    return new long[] {numFiles, tableSize};
-  }
-
-  private String toRelativePath(Path path1, Path path2) {
-    URI relative = path1.toUri().relativize(path2.toUri());
-    if (relative == path2.toUri()) {
-      return null;
-    }
-    return relative.getPath();
-  }
-
   private boolean existStats(Map<String, String> parameters) {
     return parameters.containsKey(StatsSetupConst.ROW_COUNT)
         || parameters.containsKey(StatsSetupConst.NUM_FILES)
@@ -571,7 +364,7 @@ public class StatsTask extends Task<Stat
         || parameters.containsKey(StatsSetupConst.NUM_PARTITIONS);
   }
 
-  private void updateStats(List<String> statsList, PartitionStatistics stats,
+  private void updateStats(List<String> statsList, Statistics stats,
       StatsAggregator statsAggregator, Map<String, String> parameters,
       String aggKey, boolean atomic) throws HiveException {
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java Fri Nov  1 14:35:17 2013
@@ -27,6 +27,7 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
@@ -35,7 +36,6 @@ import org.apache.hadoop.hive.ql.plan.Ta
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
-import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Fri Nov  1 14:35:17 2013
@@ -95,6 +95,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.HiveInterruptCallback;
 import org.apache.hadoop.hive.common.HiveInterruptUtils;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -1611,30 +1612,6 @@ public final class Utilities {
     }
   }
 
-  /**
-   * Get all file status from a root path and recursively go deep into certain levels.
-   *
-   * @param path
-   *          the root path
-   * @param level
-   *          the depth of directory should explore
-   * @param fs
-   *          the file system
-   * @return array of FileStatus
-   * @throws IOException
-   */
-  public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem fs)
-      throws IOException {
-
-    // construct a path pattern (e.g., /*/*) to find all dynamically generated paths
-    StringBuilder sb = new StringBuilder(path.toUri().getPath());
-    for (int i = 0; i < level; ++i) {
-      sb.append(Path.SEPARATOR).append("*");
-    }
-    Path pathPattern = new Path(path, sb.toString());
-    return fs.globStatus(pathPattern);
-  }
-
   public static void mvFileToFinalPath(String specPath, Configuration hconf,
       boolean success, Log log, DynamicPartitionCtx dpCtx, FileSinkDesc conf,
       Reporter reporter) throws IOException,
@@ -1740,7 +1717,7 @@ public final class Utilities {
 
     ArrayList<String> result = new ArrayList<String>();
     if (dpCtx != null) {
-      FileStatus parts[] = getFileStatusRecurse(path, dpCtx.getNumDPCols(), fs);
+      FileStatus parts[] = HiveStatsUtils.getFileStatusRecurse(path, dpCtx.getNumDPCols(), fs);
       HashMap<String, FileStatus> taskIDToFile = null;
 
       for (int i = 0; i < parts.length; ++i) {
@@ -2241,7 +2218,7 @@ public final class Utilities {
       Path loadPath = new Path(dpCtx.getRootPath());
       FileSystem fs = loadPath.getFileSystem(conf);
       int numDPCols = dpCtx.getNumDPCols();
-      FileStatus[] status = Utilities.getFileStatusRecurse(loadPath, numDPCols, fs);
+      FileStatus[] status = HiveStatsUtils.getFileStatusRecurse(loadPath, numDPCols, fs);
 
       if (status.length == 0) {
         LOG.warn("No partition is generated by dynamic partitioning");

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java Fri Nov  1 14:35:17 2013
@@ -25,7 +25,7 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.SerDeStats;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/MergeWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/MergeWork.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/MergeWork.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/MergeWork.java Fri Nov  1 14:35:17 2013
@@ -27,8 +27,8 @@ import java.util.List;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
 import org.apache.hadoop.hive.ql.plan.Explain;
@@ -152,7 +152,7 @@ public class MergeWork extends MapWork i
       Path dirPath = new Path(dirName);
       try {
         FileSystem inpFs = dirPath.getFileSystem(conf);
-        FileStatus[] status = Utilities.getFileStatusRecurse(dirPath, listBucketingCtx
+        FileStatus[] status = HiveStatsUtils.getFileStatusRecurse(dirPath, listBucketingCtx
             .getSkewedColNames().size(), inpFs);
         List<String> newInputPath = new ArrayList<String>();
         boolean succeed = true;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java Fri Nov  1 14:35:17 2013
@@ -24,6 +24,7 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -33,7 +34,6 @@ import org.apache.hadoop.hive.ql.io.rcfi
 import org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileValueBufferWrapper;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
-import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
 import org.apache.hadoop.hive.shims.CombineHiveKey;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MapReduceBase;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Fri Nov  1 14:35:17 2013
@@ -48,6 +48,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -1367,7 +1369,7 @@ private void constructOneLBLocationMap(F
         new ArrayList<LinkedHashMap<String, String>>();
 
       FileSystem fs = loadPath.getFileSystem(conf);
-      FileStatus[] leafStatus = Utilities.getFileStatusRecurse(loadPath, numDP+1, fs);
+      FileStatus[] leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP+1, fs);
       // Check for empty partitions
       for (FileStatus s : leafStatus) {
         // Check if the hadoop version supports sub-directories for tables/partitions
@@ -1560,6 +1562,17 @@ private void constructOneLBLocationMap(F
     return getPartition(tbl, partSpec, forceCreate, null, true);
   }
 
+  private static void clearPartitionStats(org.apache.hadoop.hive.metastore.api.Partition tpart) {
+    Map<String,String> tpartParams = tpart.getParameters();
+    if (tpartParams == null) {
+      return;
+    }
+    List<String> statTypes = StatsSetupConst.getSupportedStats();
+    for (String statType : statTypes) {
+      tpartParams.remove(statType);
+    }
+  }
+
   /**
    * Returns partition metadata
    *
@@ -1629,6 +1642,7 @@ private void constructOneLBLocationMap(F
             throw new HiveException("new partition path should not be null or empty.");
           }
           tpart.getSd().setLocation(partPath);
+          clearPartitionStats(tpart);
           String fullName = tbl.getTableName();
           if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
             fullName = tbl.getDbName() + "." + tbl.getTableName();
@@ -2559,4 +2573,5 @@ private void constructOneLBLocationMap(F
   private static String[] getQualifiedNames(String qualifiedName) {
     return qualifiedName.split("\\.");
   }
+
 };
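
The new clearPartitionStats() above is invoked when getPartition() repoints an
existing partition at a fresh location. Since every supported stat describes
the old data, the parameters are stripped before the partition is altered;
presumably the metastore layer then recomputes the fast stats for the new
location. The call order, condensed from the hunk above:

    // Inside getPartition(), when an existing partition gets a new path:
    tpart.getSd().setLocation(partPath);  // partition now points at new data
    clearPartitionStats(tpart);           // drop numFiles/numRows/totalSize/rawDataSize
    // ... the subsequent alter call (not shown in this hunk) persists the
    // cleared parameters, so no stale stats survive the move.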

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java Fri Nov  1 14:35:17 2013
@@ -8,6 +8,7 @@ import java.util.Stack;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.Description;
@@ -37,7 +38,6 @@ import org.apache.hadoop.hive.ql.plan.Ex
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
-import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCount;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMax;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMin;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java Fri Nov  1 14:35:17 2013
@@ -28,10 +28,10 @@ import java.util.Map;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 
 /**
  * Conditional task resolution interface. This is invoked at run time to get the
@@ -231,7 +231,7 @@ public class ConditionalResolverMergeFil
       throws IOException {
     DynamicPartitionCtx dpCtx = ctx.getDPCtx();
     // get list of dynamic partitions
-    FileStatus[] status = Utilities.getFileStatusRecurse(dirPath, dpLbLevel, inpFs);
+    FileStatus[] status = HiveStatsUtils.getFileStatusRecurse(dirPath, dpLbLevel, inpFs);
 
     // cleanup pathToPartitionInfo
     Map<String, PartitionDesc> ptpi = work.getPathToPartitionInfo();

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java Fri Nov  1 14:35:17 2013
@@ -24,6 +24,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.util.ReflectionUtils;
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsSetupConst.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsSetupConst.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsSetupConst.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsSetupConst.java Fri Nov  1 14:35:17 2013
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.stats;
-
-/**
- * A class that defines the constant strings used by the statistics implementation.
- */
-
-public class StatsSetupConst {
-
-  /**
-   * The value of the user variable "hive.stats.dbclass" to use HBase implementation.
-   */
-  public static final String HBASE_IMPL_CLASS_VAL = "hbase";
-
-  /**
-   * The value of the user variable "hive.stats.dbclass" to use JDBC implementation.
-   */
-  public static final String JDBC_IMPL_CLASS_VAL = "jdbc";
-
-  /**
-   * The name of the statistic Num Files to be published or gathered.
-   */
-  public static final String NUM_FILES = "numFiles";
-
-  /**
-   * The name of the statistic Num Partitions to be published or gathered.
-   */
-  public static final String NUM_PARTITIONS = "numPartitions";
-
-  /**
-   * The name of the statistic Total Size to be published or gathered.
-   */
-  public static final String TOTAL_SIZE = "totalSize";
-
-
-  // statistics stored in metastore
-
-  /**
-   * The name of the statistic Row Count to be published or gathered.
-   */
-  public static final String ROW_COUNT = "numRows";
-
-  /**
-   * The name of the statistic Raw Data Size to be published or gathered.
-   */
-  public static final String RAW_DATA_SIZE = "rawDataSize";
-
-}
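
StatsSetupConst is deleted from ql here and re-created under
org.apache.hadoop.hive.common; the new file is added in another part of this
commit, so its body does not appear in this message. Judging from the import
and call sites above (StatsTask, Hive, StatsFactory), the relocated class
keeps the same constants and gains the two list accessors that replace
StatsTask's static initializer. A sketch of the expected shape (an inference
from usage, not the actual new file):

    package org.apache.hadoop.hive.common;

    import java.util.Arrays;
    import java.util.List;

    public class StatsSetupConst {

      // values of hive.stats.dbclass selecting a stats store implementation
      public static final String HBASE_IMPL_CLASS_VAL = "hbase";
      public static final String JDBC_IMPL_CLASS_VAL = "jdbc";

      // statistic names as stored in table/partition parameters
      public static final String NUM_FILES = "numFiles";
      public static final String NUM_PARTITIONS = "numPartitions";
      public static final String TOTAL_SIZE = "totalSize";
      public static final String ROW_COUNT = "numRows";
      public static final String RAW_DATA_SIZE = "rawDataSize";

      // stats tracked for every table and partition
      public static List<String> getSupportedStats() {
        return Arrays.asList(NUM_FILES, ROW_COUNT, TOTAL_SIZE, RAW_DATA_SIZE);
      }

      // stats that require scanning the data during query execution
      public static List<String> getStatsToBeCollected() {
        return Arrays.asList(ROW_COUNT, RAW_DATA_SIZE);
      }
    }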

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsUtils.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsUtils.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsUtils.java Fri Nov  1 14:35:17 2013
@@ -23,7 +23,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 
 public class JDBCStatsUtils {
 

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestStatsPublisherEnhanced.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestStatsPublisherEnhanced.java?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestStatsPublisherEnhanced.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestStatsPublisherEnhanced.java Fri Nov  1 14:35:17 2013
@@ -24,11 +24,11 @@ import java.util.Map;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.stats.StatsAggregator;
 import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
-import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
 import org.apache.hadoop.mapred.JobConf;
 
 /**

Modified: hive/trunk/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out (original)
+++ hive/trunk/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out Fri Nov  1 14:35:17 2013
@@ -71,6 +71,8 @@ Table:              	analyze_srcpart_par
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	numFiles            	1                   
+	totalSize           	5293                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

Modified: hive/trunk/ql/src/test/results/clientnegative/unset_table_property.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/unset_table_property.q.out?rev=1537949&r1=1537948&r2=1537949&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/unset_table_property.q.out (original)
+++ hive/trunk/ql/src/test/results/clientnegative/unset_table_property.q.out Fri Nov  1 14:35:17 2013
@@ -16,11 +16,13 @@ PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: SHOW TBLPROPERTIES testTable
 POSTHOOK: type: SHOW_TBLPROPERTIES
 	 
+numFiles	0
 #### A masked pattern was here ####
 c	3
 #### A masked pattern was here ####
 a	1
 #### A masked pattern was here ####
+totalSize	0
 FAILED: SemanticException [Error 10215]: Please use the following syntax if not sure whether the property existed or not:
 ALTER TABLE tableName UNSET TBLPROPERTIES IF EXISTS (key1, key2, ...)
  The following property z does not exist in testtable


