carbondata-commits mailing list archives

From ravipes...@apache.org
Subject carbondata git commit: [CARBONDATA-2371] Add Profiler output in EXPLAIN command
Date Sun, 29 Apr 2018 08:03:44 GMT
Repository: carbondata
Updated Branches:
  refs/heads/master 9d9415101 -> 03a735bf7


[CARBONDATA-2371] Add Profiler output in EXPLAIN command

Add support in the EXPLAIN command to show the effectiveness of datamaps, including query rewrite and blocklet pruning. With this feature, users get more information about how to tune a datamap for better query performance.

For example:
explain select name,sum(age) from mainTable where city = 'shenzhen' group by name
will print:

== CarbonData Profiler ==
Query rewrite based on DataMap:
 - agg1 (preaggregate)
Table Scan on maintable_agg1
 - total blocklets: 5
 - filter: (maintable_city <> null and maintable_city = shenzhen)
 - pruned by Main DataMap
    - skipped blocklets: 3
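
For reference, a minimal way to reproduce this output from a Spark shell (a
sketch assuming a CarbonSession named `spark` and that mainTable with its agg1
pre-aggregate datamap already exists; the profiler block is prepended to the
normal plan rows):

    // row 0 of the EXPLAIN result starts with "== CarbonData Profiler =="
    val rows = spark.sql(
      "explain select name, sum(age) from mainTable " +
      "where city = 'shenzhen' group by name").collect()
    println(rows(0).getString(0))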

This closes #2197


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/03a735bf
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/03a735bf
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/03a735bf

Branch: refs/heads/master
Commit: 03a735bf7ad140cd908883f60369632e0ed2e8b1
Parents: 9d94151
Author: Jacky Li <jacky.likun@qq.com>
Authored: Fri Apr 20 16:02:32 2018 +0800
Committer: ravipesala <ravi.pesala@gmail.com>
Committed: Sun Apr 29 13:33:31 2018 +0530

----------------------------------------------------------------------
 .../carbondata/core/datamap/DataMapChooser.java |  15 +-
 .../datamap/dev/expr/AndDataMapExprWrapper.java |  10 +-
 .../datamap/dev/expr/DataMapExprWrapper.java    |  11 +-
 .../dev/expr/DataMapExprWrapperImpl.java        |   8 +-
 .../datamap/dev/expr/OrDataMapExprWrapper.java  |   9 +-
 .../blockletindex/BlockletDataMap.java          |  11 +-
 .../schema/table/AggregationDataMapSchema.java  |   6 +
 .../core/profiler/ExplainCollector.java         | 166 +++++++++++++++++++
 .../core/profiler/TablePruningInfo.java         |  99 +++++++++++
 .../core/scan/expression/MatchExpression.java   |   2 +-
 .../visitor/RangeNoDictionaryTypeVisitor.java   |   2 +-
 .../hadoop/api/CarbonInputFormat.java           |  23 ++-
 .../lucene/LuceneFineGrainDataMapSuite.scala    |  45 ++++-
 .../TestPreAggregateTableSelection.scala        |  66 ++++++++
 .../carbondata/spark/rdd/CarbonScanRDD.scala    |   3 +-
 .../command/table/CarbonExplainCommand.scala    |  61 +++++++
 .../datasources/SparkCarbonFileFormat.scala     |   2 -
 .../sql/hive/CarbonPreAggregateRules.scala      |  21 ++-
 .../spark/sql/hive/CarbonSessionState.scala     |   8 +-
 .../spark/sql/hive/CarbonSessionState.scala     |   8 +
 .../spark/sql/hive/CarbonSqlAstBuilder.scala    |   7 +-
 .../store/worker/SearchRequestHandler.java      |   2 +-
 22 files changed, 548 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/core/src/main/java/org/apache/carbondata/core/datamap/DataMapChooser.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapChooser.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapChooser.java
index c3da9c6..57069b8 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapChooser.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapChooser.java
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Objects;
 import java.util.Set;
 
 import org.apache.carbondata.common.annotations.InterfaceAudience;
@@ -69,19 +70,20 @@ public class DataMapChooser {
   /**
    * Return a chosen datamap based on input filter. See {@link DataMapChooser}
    */
-  public DataMapExprWrapper choose(CarbonTable carbonTable, FilterResolverIntf resolverIntf)
+  public DataMapExprWrapper choose(CarbonTable carbonTable, FilterResolverIntf filter)
       throws IOException {
-    if (resolverIntf != null) {
-      Expression expression = resolverIntf.getFilterExpression();
+    Objects.requireNonNull(carbonTable);
+    if (filter != null) {
+      Expression expression = filter.getFilterExpression();
       // First check for FG datamaps if any exist
       List<TableDataMap> allDataMapFG =
           DataMapStoreManager.getInstance().getAllVisibleDataMap(carbonTable, DataMapLevel.FG);
-      ExpressionTuple tuple = selectDataMap(expression, allDataMapFG, resolverIntf);
+      ExpressionTuple tuple = selectDataMap(expression, allDataMapFG, filter);
       if (tuple.dataMapExprWrapper == null) {
         // Check for CG datamap
         List<TableDataMap> allDataMapCG =
             DataMapStoreManager.getInstance().getAllVisibleDataMap(carbonTable, DataMapLevel.CG);
-        tuple = selectDataMap(expression, allDataMapCG, resolverIntf);
+        tuple = selectDataMap(expression, allDataMapCG, filter);
       }
       if (tuple.dataMapExprWrapper != null) {
         return tuple.dataMapExprWrapper;
@@ -89,8 +91,7 @@ public class DataMapChooser {
     }
     // Return the default datamap if no other datamap exists.
     return new DataMapExprWrapperImpl(
-        DataMapStoreManager.getInstance().getDefaultDataMap(carbonTable),
-        resolverIntf);
+        DataMapStoreManager.getInstance().getDefaultDataMap(carbonTable), filter);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/AndDataMapExprWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/AndDataMapExprWrapper.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/AndDataMapExprWrapper.java
index c573dcb..199f993 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/AndDataMapExprWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/AndDataMapExprWrapper.java
@@ -24,6 +24,7 @@ import org.apache.carbondata.core.datamap.DataMapLevel;
 import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.indexstore.ExtendedBlocklet;
 import org.apache.carbondata.core.indexstore.PartitionSpec;
+import org.apache.carbondata.core.metadata.schema.table.DataMapSchema;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 
 /**
@@ -94,7 +95,12 @@ public class AndDataMapExprWrapper implements DataMapExprWrapper {
     return wrappers;
   }
 
-  @Override public DataMapLevel getDataMapType() {
-    return left.getDataMapType();
+  @Override public DataMapLevel getDataMapLevel() {
+    return left.getDataMapLevel();
   }
+
+  @Override public DataMapSchema getDataMapSchema() {
+    return left.getDataMapSchema();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapper.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapper.java
index 14cfc33..5a04529 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapper.java
@@ -24,6 +24,7 @@ import org.apache.carbondata.core.datamap.DataMapLevel;
 import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.indexstore.ExtendedBlocklet;
 import org.apache.carbondata.core.indexstore.PartitionSpec;
+import org.apache.carbondata.core.metadata.schema.table.DataMapSchema;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 
 /**
@@ -71,9 +72,13 @@ public interface DataMapExprWrapper extends Serializable {
   FilterResolverIntf getFilterResolverIntf(String uniqueId);
 
   /**
-   * Get the datamap type.
-   * @return
+   * Get the datamap level.
+   */
+  DataMapLevel getDataMapLevel();
+
+  /**
+   * Get the datamap schema
    */
-  DataMapLevel getDataMapType();
+  DataMapSchema getDataMapSchema();
 
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapperImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapperImpl.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapperImpl.java
index f9518ba..eff6b4d 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapperImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapperImpl.java
@@ -27,6 +27,7 @@ import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.datamap.TableDataMap;
 import org.apache.carbondata.core.indexstore.ExtendedBlocklet;
 import org.apache.carbondata.core.indexstore.PartitionSpec;
+import org.apache.carbondata.core.metadata.schema.table.DataMapSchema;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 
 public class DataMapExprWrapperImpl implements DataMapExprWrapper {
@@ -83,7 +84,12 @@ public class DataMapExprWrapperImpl implements DataMapExprWrapper {
     return wrappers;
   }
 
-  @Override public DataMapLevel getDataMapType() {
+  @Override public DataMapLevel getDataMapLevel() {
     return dataMap.getDataMapFactory().getDataMapType();
   }
+
+  @Override public DataMapSchema getDataMapSchema() {
+    return dataMap.getDataMapSchema();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/OrDataMapExprWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/OrDataMapExprWrapper.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/OrDataMapExprWrapper.java
index 93dd242..4988903 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/OrDataMapExprWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/OrDataMapExprWrapper.java
@@ -26,6 +26,7 @@ import org.apache.carbondata.core.datamap.DataMapLevel;
 import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.indexstore.ExtendedBlocklet;
 import org.apache.carbondata.core.indexstore.PartitionSpec;
+import org.apache.carbondata.core.metadata.schema.table.DataMapSchema;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 
 /**
@@ -91,7 +92,11 @@ public class OrDataMapExprWrapper implements DataMapExprWrapper {
   }
 
 
-  @Override public DataMapLevel getDataMapType() {
-    return left.getDataMapType();
+  @Override public DataMapLevel getDataMapLevel() {
+    return left.getDataMapLevel();
+  }
+
+  @Override public DataMapSchema getDataMapSchema() {
+    return left.getDataMapSchema();
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMap.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMap.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMap.java
index 3ca9c5a..906291a 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMap.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMap.java
@@ -57,6 +57,7 @@ import org.apache.carbondata.core.metadata.datatype.DataType;
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.profiler.ExplainCollector;
 import org.apache.carbondata.core.scan.filter.FilterExpressionProcessor;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
 import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
@@ -627,9 +628,10 @@ public class BlockletDataMap extends CoarseGrainDataMap implements Cacheable {
       return new ArrayList<>();
     }
     List<Blocklet> blocklets = new ArrayList<>();
+    int numBlocklets = 0;
     if (filterExp == null) {
-      int rowCount = unsafeMemoryDMStore.getRowCount();
-      for (int i = 0; i < rowCount; i++) {
+      numBlocklets = unsafeMemoryDMStore.getRowCount();
+      for (int i = 0; i < numBlocklets; i++) {
         DataMapRow safeRow = unsafeMemoryDMStore.getUnsafeRow(i).convertToSafeRow();
         blocklets.add(createBlocklet(safeRow, safeRow.getShort(BLOCKLET_ID_INDEX)));
       }
@@ -637,10 +639,10 @@ public class BlockletDataMap extends CoarseGrainDataMap implements Cacheable {
       // Remove B-tree jump logic as start and end key prepared is not
       // correct for old store scenarios
       int startIndex = 0;
-      int endIndex = unsafeMemoryDMStore.getRowCount();
+      numBlocklets = unsafeMemoryDMStore.getRowCount();
       FilterExecuter filterExecuter =
           FilterUtil.getFilterExecuterTree(filterExp, segmentProperties, null);
-      while (startIndex < endIndex) {
+      while (startIndex < numBlocklets) {
         DataMapRow safeRow = unsafeMemoryDMStore.getUnsafeRow(startIndex).convertToSafeRow();
         int blockletId = safeRow.getShort(BLOCKLET_ID_INDEX);
         String filePath = new String(safeRow.getByteArray(FILE_PATH_INDEX),
@@ -654,6 +656,7 @@ public class BlockletDataMap extends CoarseGrainDataMap implements Cacheable {
         startIndex++;
       }
     }
+    ExplainCollector.addTotalBlocklets(numBlocklets);
     return blocklets;
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/AggregationDataMapSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/AggregationDataMapSchema.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/AggregationDataMapSchema.java
index 673a7ab..2bb6d18 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/AggregationDataMapSchema.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/AggregationDataMapSchema.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.metadata.schema.table.column.ParentColumnTableRelation;
 import org.apache.carbondata.core.preagg.TimeSeriesFunctionEnum;
@@ -360,6 +361,11 @@ public class AggregationDataMapSchema extends DataMapSchema {
     }
   }
 
+  public DataMapClassProvider getProvider() {
+    return isTimeseriesDataMap ?
+        DataMapClassProvider.TIMESERIES : DataMapClassProvider.PREAGGREGATE;
+  }
+
   @Override public boolean equals(Object o) {
     if (this == o) return true;
     if (o == null || getClass() != o.getClass()) return false;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/core/src/main/java/org/apache/carbondata/core/profiler/ExplainCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/profiler/ExplainCollector.java b/core/src/main/java/org/apache/carbondata/core/profiler/ExplainCollector.java
new file mode 100644
index 0000000..5d69bb9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/profiler/ExplainCollector.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.profiler;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.core.metadata.schema.table.DataMapSchema;
+
+/**
+ * An information collector used by the EXPLAIN command to print out
+ * SQL rewrite and pruning information.
+ * This class is a singleton and does not support concurrent EXPLAIN commands.
+ */
+@InterfaceAudience.Internal
+public class ExplainCollector {
+
+  private static ExplainCollector INSTANCE = null;
+
+  private List<String> olapDataMapProviders = new ArrayList<>();
+  private List<String> olapDataMapNames = new ArrayList<>();
+
+  // mapping of thread name to map of table name to pruning info
+  private Map<String, Map<String, TablePruningInfo>> scans = new ConcurrentHashMap<>();
+
+  private ExplainCollector() {
+  }
+
+  public static boolean enabled() {
+    return INSTANCE != null;
+  }
+
+  public static void setup() {
+    INSTANCE = new ExplainCollector();
+  }
+
+  public static void remove() {
+    if (enabled()) {
+      INSTANCE = null;
+    }
+  }
+
+  public static ExplainCollector get() {
+    return INSTANCE;
+  }
+
+  public static void recordMatchedOlapDataMap(String dataMapProvider, String dataMapName) {
+    if (enabled()) {
+      Objects.requireNonNull(dataMapProvider);
+      Objects.requireNonNull(dataMapName);
+      ExplainCollector profiler = get();
+      profiler.olapDataMapProviders.add(dataMapProvider);
+      profiler.olapDataMapNames.add(dataMapName);
+    }
+  }
+
+  public static void addPruningInfo(String tableName) {
+    if (enabled()) {
+      ExplainCollector profiler = get();
+      String threadName = Thread.currentThread().getName();
+      if (!profiler.scans.containsKey(threadName)) {
+        Map<String, TablePruningInfo> map = new HashMap<>();
+        map.put(tableName, new TablePruningInfo());
+        profiler.scans.put(threadName, map);
+      }
+    }
+  }
+
+  public static void setFilterStatement(String filterStatement) {
+    if (enabled()) {
+      TablePruningInfo scan = getCurrentTablePruningInfo();
+      scan.setFilterStatement(filterStatement);
+    }
+  }
+
+  public static void recordDefaultDataMapPruning(DataMapSchema dataMapSchema, int numBlocklets) {
+    if (enabled()) {
+      TablePruningInfo scan = getCurrentTablePruningInfo();
+      scan.setNumBlockletsAfterDefaultPruning(dataMapSchema, numBlocklets);
+    }
+  }
+
+  public static void recordCGDataMapPruning(DataMapSchema dataMapSchema, int numBlocklets) {
+    if (enabled()) {
+      TablePruningInfo scan = getCurrentTablePruningInfo();
+      scan.setNumBlockletsAfterCGPruning(dataMapSchema, numBlocklets);
+    }
+  }
+
+  public static void recordFGDataMapPruning(DataMapSchema dataMapSchema, int numBlocklets) {
+    if (enabled()) {
+      TablePruningInfo scan = getCurrentTablePruningInfo();
+      scan.setNumBlockletsAfterFGPruning(dataMapSchema, numBlocklets);
+    }
+  }
+
+  public static void addTotalBlocklets(int numBlocklets) {
+    if (enabled()) {
+      TablePruningInfo scan = getCurrentTablePruningInfo();
+      scan.addTotalBlocklets(numBlocklets);
+    }
+  }
+
+  /**
+   * Return the current TablePruningInfo (the last one in the map, since
+   * one EXPLAIN command runs in a single thread)
+   */
+  private static TablePruningInfo getCurrentTablePruningInfo() {
+    String threadName = Thread.currentThread().getName();
+    if (!get().scans.containsKey(threadName)) {
+      throw new IllegalStateException();
+    }
+
+    Iterator<TablePruningInfo> iterator = get().scans.get(threadName).values().iterator();
+    TablePruningInfo output = null;
+    while (iterator.hasNext()) {
+      output = iterator.next();
+    }
+    return output;
+  }
+
+  public static String getFormatedOutput() {
+    return get().toString();
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    for (int i = 0; i < olapDataMapProviders.size(); i++) {
+      if (i == 0) {
+        builder.append("Query rewrite based on DataMap:").append("\n");
+      }
+      builder.append(" - ").append(olapDataMapNames.get(i)).append(" (")
+          .append(olapDataMapProviders.get(i)).append(")").append("\n");
+    }
+    for (Map.Entry<String, Map<String, TablePruningInfo>> allThreads : scans.entrySet()) {
+      for (Map.Entry<String, TablePruningInfo> entry : allThreads.getValue().entrySet()) {
+        builder.append("Table Scan on ").append(entry.getKey()).append("\n")
+            .append(entry.getValue().toString());
+      }
+    }
+    return builder.toString();
+  }
+
+}
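
For reference, a minimal sketch of the collector lifecycle introduced above (in
this commit CarbonExplainCommand calls setup()/remove() and the pruning code
records the numbers; the table name and counts here are illustrative only):

    import org.apache.carbondata.core.profiler.ExplainCollector

    ExplainCollector.setup()                       // record/add calls are no-ops unless enabled
    try {
      ExplainCollector.addPruningInfo("maintable") // one scan entry per table
      ExplainCollector.setFilterStatement("none")
      ExplainCollector.addTotalBlocklets(5)
      // prints "Table Scan on maintable" followed by the blocklet statistics
      println(ExplainCollector.getFormatedOutput)
    } finally {
      ExplainCollector.remove()                    // singleton state must always be cleared
    }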

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/core/src/main/java/org/apache/carbondata/core/profiler/TablePruningInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/profiler/TablePruningInfo.java b/core/src/main/java/org/apache/carbondata/core/profiler/TablePruningInfo.java
new file mode 100644
index 0000000..cec8827
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/profiler/TablePruningInfo.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.profiler;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.core.metadata.schema.table.DataMapSchema;
+
+/**
+ * Used for EXPLAIN command
+ */
+@InterfaceAudience.Internal
+public class TablePruningInfo {
+
+  private int totalBlocklets;
+  private String filterStatement;
+
+  private DataMapSchema defaultDataMap;
+  private int numBlockletsAfterDefaultPruning;
+
+  private DataMapSchema cgDataMap;
+  private int numBlockletsAfterCGPruning;
+
+  private DataMapSchema fgDataMap;
+  private int numBlockletsAfterFGPruning;
+
+  void addTotalBlocklets(int numBlocklets) {
+    this.totalBlocklets += numBlocklets;
+  }
+
+  void setFilterStatement(String filterStatement) {
+    this.filterStatement = filterStatement;
+  }
+
+  void setNumBlockletsAfterDefaultPruning(DataMapSchema dataMapSchema, int numBlocklets) {
+    this.defaultDataMap = dataMapSchema;
+    this.numBlockletsAfterDefaultPruning = numBlocklets;
+  }
+
+  void setNumBlockletsAfterCGPruning(DataMapSchema dataMapSchema, int numBlocklets) {
+    this.cgDataMap = dataMapSchema;
+    this.numBlockletsAfterCGPruning = numBlocklets;
+  }
+
+  void setNumBlockletsAfterFGPruning(DataMapSchema dataMapSchema, int numBlocklets) {
+    this.fgDataMap = dataMapSchema;
+    this.numBlockletsAfterFGPruning = numBlocklets;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder
+        .append(" - total blocklets: ").append(totalBlocklets).append("\n")
+        .append(" - filter: ").append(filterStatement).append("\n");
+    if (defaultDataMap != null) {
+      int skipBlocklets = totalBlocklets - numBlockletsAfterDefaultPruning;
+      builder
+          .append(" - pruned by Main DataMap").append("\n")
+          .append("    - skipped blocklets: ").append(skipBlocklets).append("\n");
+    }
+    if (cgDataMap != null) {
+      int skipBlocklets = numBlockletsAfterDefaultPruning - numBlockletsAfterCGPruning;
+      builder
+          .append(" - pruned by CG DataMap").append("\n")
+          .append("    - name: ").append(cgDataMap.getDataMapName()).append("\n")
+          .append("    - provider: ").append(cgDataMap.getProviderName()).append("\n")
+          .append("    - skipped blocklets: ").append(skipBlocklets).append("\n");
+    }
+    if (fgDataMap != null) {
+      int skipBlocklets;
+      if (numBlockletsAfterCGPruning != 0) {
+        skipBlocklets = numBlockletsAfterCGPruning - numBlockletsAfterFGPruning;
+      } else {
+        skipBlocklets = numBlockletsAfterDefaultPruning - numBlockletsAfterFGPruning;
+      }
+      builder
+          .append(" - pruned by FG DataMap").append("\n")
+          .append("    - name: ").append(fgDataMap.getDataMapName()).append("\n")
+          .append("    - provider: ").append(fgDataMap.getProviderName()).append("\n")
+          .append("    - skipped blocklets: ").append(skipBlocklets).append("\n");
+    }
+    return builder.toString();
+  }
+}
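
The skipped counts printed by toString() above are deltas between successive
pruning stages. As a worked example: with 100 total blocklets, 60 remaining
after the default (Main) datamap and 15 remaining after a CG datamap, the
output reports 40 blocklets skipped by the Main DataMap and 45 skipped by the
CG DataMap; an FG datamap is measured against the CG result when a CG datamap
ran (numBlockletsAfterCGPruning != 0), otherwise against the default result.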

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/core/src/main/java/org/apache/carbondata/core/scan/expression/MatchExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/MatchExpression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/MatchExpression.java
index 573100e..db806a7 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/MatchExpression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/MatchExpression.java
@@ -63,6 +63,6 @@ public class MatchExpression extends Expression {
 
   @Override
   public String getStatement() {
-    return queryString;
+    return "TEXT_MATCH('" + queryString + "')";
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/RangeNoDictionaryTypeVisitor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/RangeNoDictionaryTypeVisitor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/RangeNoDictionaryTypeVisitor.java
index d4c44ba..2205694 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/RangeNoDictionaryTypeVisitor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/RangeNoDictionaryTypeVisitor.java
@@ -65,7 +65,7 @@ public class RangeNoDictionaryTypeVisitor extends NoDictionaryTypeVisitor
           }
           evaluateResultListFinal.add(result.getString());
         }
-        // evaluateResultListFinal.add(metadata.getExpression().evaluate().getListAsString());
+        // evaluateResultListFinal.add(metadata.getFilterExpression().evaluate().getListAsString());
         if (!metadata.isIncludeFilter() && !evaluateResultListFinal
             .contains(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
           evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
index 5cca96c..7c0da5e 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
@@ -42,6 +42,7 @@ import org.apache.carbondata.core.metadata.schema.partition.PartitionType;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.TableInfo;
 import org.apache.carbondata.core.mutate.UpdateVO;
+import org.apache.carbondata.core.profiler.ExplainCollector;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.scan.model.QueryModel;
@@ -191,7 +192,7 @@ public abstract class CarbonInputFormat<T> extends FileInputFormat<Void, T> {
    * It sets unresolved filter expression.
    *
    * @param configuration
-   * @param filterExpression
+   * @param filterExpression unresolved filter expression of the query
    */
   public static void setFilterPredicates(Configuration configuration, Expression filterExpression) {
     if (filterExpression == null) {
@@ -405,6 +407,13 @@ public abstract class CarbonInputFormat<T> extends FileInputFormat<Void, T> {
    */
   private List<ExtendedBlocklet> getPrunedBlocklets(JobContext job, CarbonTable carbonTable,
       FilterResolverIntf resolver, List<Segment> segmentIds) throws IOException {
+    ExplainCollector.addPruningInfo(carbonTable.getTableName());
+    if (resolver != null) {
+      ExplainCollector.setFilterStatement(resolver.getFilterExpression().getStatement());
+    } else {
+      ExplainCollector.setFilterStatement("none");
+    }
+
     boolean distributedCG = Boolean.parseBoolean(CarbonProperties.getInstance()
         .getProperty(CarbonCommonConstants.USE_DISTRIBUTED_DATAMAP,
             CarbonCommonConstants.USE_DISTRIBUTED_DATAMAP_DEFAULT));
@@ -415,6 +424,10 @@ public abstract class CarbonInputFormat<T> extends FileInputFormat<Void, T> {
         .getDefaultDataMap(getOrCreateCarbonTable(job.getConfiguration()), resolver);
     List<ExtendedBlocklet> prunedBlocklets =
         dataMapExprWrapper.prune(segmentIds, partitionsToPrune);
+
+    ExplainCollector.recordDefaultDataMapPruning(
+        dataMapExprWrapper.getDataMapSchema(), prunedBlocklets.size());
+
     // Get the available CG datamaps and prune further.
     DataMapExprWrapper cgDataMapExprWrapper = DataMapChooser.get()
         .chooseCGDataMap(getOrCreateCarbonTable(job.getConfiguration()), resolver);
@@ -429,17 +442,23 @@ public abstract class CarbonInputFormat<T> extends FileInputFormat<Void, T> {
       } else {
         prunedBlocklets = cgDataMapExprWrapper.prune(segmentIds, partitionsToPrune);
       }
+
+      ExplainCollector.recordCGDataMapPruning(
+          cgDataMapExprWrapper.getDataMapSchema(), prunedBlocklets.size());
     }
     // Now try to prune with FG DataMap.
     dataMapExprWrapper = DataMapChooser.get()
         .chooseFGDataMap(getOrCreateCarbonTable(job.getConfiguration()), resolver);
-    if (dataMapExprWrapper != null && dataMapExprWrapper.getDataMapType() == DataMapLevel.FG
+    if (dataMapExprWrapper != null && dataMapExprWrapper.getDataMapLevel() == DataMapLevel.FG
         && isFgDataMapPruningEnable(job.getConfiguration()) && dataMapJob != null) {
       // Prune segments from already pruned blocklets
       pruneSegments(segmentIds, prunedBlocklets);
       prunedBlocklets =
           executeDataMapJob(carbonTable, resolver, segmentIds, dataMapExprWrapper, dataMapJob,
               partitionsToPrune);
+
+      ExplainCollector.recordFGDataMapPruning(
+          dataMapExprWrapper.getDataMapSchema(), prunedBlocklets.size());
     }
     return prunedBlocklets;
   }
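
The ordering above drives the deltas recorded in TablePruningInfo: the default
(Main) datamap prunes first and records the surviving blocklet count, an
optional CG datamap then prunes that result, and an optional FG datamap prunes
last, so each record*DataMapPruning call captures the count left after its own
stage.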

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
index 3286f79..bd957dc 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
@@ -35,8 +35,6 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
   val file2 = resourcesPath + "/datamap_input.csv"
 
   override protected def beforeAll(): Unit = {
-    //n should be about 5000000 of reset if size is default 1024
-    val n = 15000
     LuceneFineGrainDataMapSuite.createFile(file2)
     sql("create database if not exists lucene")
     CarbonProperties.getInstance()
@@ -580,6 +578,49 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
     sql("drop datamap dm on table datamap_test")
   }
 
+  test("explain query with lucene datamap") {
+    sql("drop table if exists main")
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.BLOCKLET_SIZE, "8")
+    sql(
+      """
+        | CREATE TABLE main(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'carbondata'
+        | TBLPROPERTIES('SORT_COLUMNS'='city,name')
+      """.stripMargin)
+    sql(
+      s"""
+         | CREATE DATAMAP dm ON TABLE main
+         | USING 'lucene'
+         | DMProperties('TEXT_COLUMNS'='name , city')
+      """.stripMargin)
+
+    val file1 = resourcesPath + "/main.csv"
+    LuceneFineGrainDataMapSuite.createFile(file1, 1000000)
+
+    sql(s"LOAD DATA LOCAL INPATH '$file1' INTO TABLE main OPTIONS('header'='false')")
+
+    sql("EXPLAIN SELECT * FROM main WHERE TEXT_MATCH('name:bob')").show(false)
+    val rows = sql("EXPLAIN SELECT * FROM main WHERE TEXT_MATCH('name:bob')").collect()
+
+    assertResult(
+      """== CarbonData Profiler ==
+        |Table Scan on main
+        | - total blocklets: 1
+        | - filter: TEXT_MATCH('name:bob')
+        | - pruned by Main DataMap
+        |    - skipped blocklets: 0
+        | - pruned by FG DataMap
+        |    - name: dm
+        |    - provider: lucene
+        |    - skipped blocklets: 1
+        |""".stripMargin)(rows(0).getString(0))
+
+    LuceneFineGrainDataMapSuite.deleteFile(file1)
+    sql("drop datamap dm on table main")
+    CarbonProperties.getInstance().addProperty(
+      CarbonCommonConstants.BLOCKLET_SIZE, CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL)
+  }
+
   override protected def afterAll(): Unit = {
     LuceneFineGrainDataMapSuite.deleteFile(file2)
     sql("DROP TABLE IF EXISTS normal_test")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateTableSelection.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateTableSelection.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateTableSelection.scala
index 95a524d..ebb2491 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateTableSelection.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateTableSelection.scala
@@ -16,6 +16,8 @@
  */
 package org.apache.carbondata.integration.spark.testsuite.preaggregate
 
+import java.io.File
+
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.execution.datasources.LogicalRelation
 import org.apache.spark.sql.hive.CarbonRelation
@@ -379,6 +381,70 @@ class TestPreAggregateTableSelection extends SparkQueryTest with BeforeAndAfterA
     checkAnswer(df, Seq(Row(10,10.0)))
   }
 
+  test("explain projection query") {
+    val rows = sql("explain select name, age from mainTable").collect()
+    assertResult(
+      """== CarbonData Profiler ==
+        |Table Scan on maintable
+        | - total blocklets: 1
+        | - filter: none
+        | - pruned by Main DataMap
+        |    - skipped blocklets: 0
+        |""".stripMargin)(rows(0).getString(0))
+  }
+
+  test("explain projection query hit datamap") {
+    val rows = sql("explain select name,sum(age) from mainTable group by name").collect()
+    assertResult(
+      """== CarbonData Profiler ==
+        |Query rewrite based on DataMap:
+        | - agg1 (preaggregate)
+        |Table Scan on maintable_agg1
+        | - total blocklets: 1
+        | - filter: none
+        | - pruned by Main DataMap
+        |    - skipped blocklets: 0
+        |""".stripMargin)(rows(0).getString(0))
+  }
+
+  test("explain filter query") {
+    sql("explain select name,sum(age) from mainTable where name = 'a' group by name").show(false)
+    val rows = sql("explain select name,sum(age) from mainTable where name = 'a' group by name").collect()
+    assertResult(
+      """== CarbonData Profiler ==
+        |Query rewrite based on DataMap:
+        | - agg1 (preaggregate)
+        |Table Scan on maintable_agg1
+        | - total blocklets: 1
+        | - filter: (maintable_name <> null and maintable_name = a)
+        | - pruned by Main DataMap
+        |    - skipped blocklets: 1
+        |""".stripMargin)(rows(0).getString(0))
+
+  }
+
+  test("explain query with multiple table") {
+    val query = "explain select t1.city,sum(t1.age) from mainTable t1, mainTableavg t2 " +
+                "where t1.name = t2.name and t1.id < 3 group by t1.city"
+    sql(query).show(false)
+    val rows = sql(query).collect()
+    assert(rows(0).getString(0).contains(
+      """
+        |Table Scan on maintable
+        | - total blocklets: 1
+        | - filter: ((id <> null and id < 3) and name <> null)
+        | - pruned by Main DataMap
+        |    - skipped blocklets: 0""".stripMargin))
+    assert(rows(0).getString(0).contains(
+      """
+        |Table Scan on maintableavg
+        | - total blocklets: 1
+        | - filter: name <> null
+        | - pruned by Main DataMap
+        |    - skipped blocklets: 0""".stripMargin))
+
+  }
+
   override def afterAll: Unit = {
     sql("drop table if exists mainTable")
     sql("drop table if exists mainTable_avg")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
index 954eefc..c668f7d 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
@@ -44,7 +44,8 @@ import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonCommon
 import org.apache.carbondata.core.datastore.block.Distributable
 import org.apache.carbondata.core.indexstore.PartitionSpec
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
-import org.apache.carbondata.core.metadata.schema.table.TableInfo
+import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, TableInfo}
+import org.apache.carbondata.core.profiler.ExplainCollector
 import org.apache.carbondata.core.scan.expression.Expression
 import org.apache.carbondata.core.scan.filter.FilterUtil
 import org.apache.carbondata.core.scan.model.QueryModel

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala
new file mode 100644
index 0000000..0c2567a
--- /dev/null
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.command.table
+
+import org.apache.spark.sql.{Row, SparkSession}
+import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
+import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan, Union}
+import org.apache.spark.sql.execution.command.{ExplainCommand, MetadataCommand}
+import org.apache.spark.sql.types.StringType
+
+import org.apache.carbondata.core.profiler.ExplainCollector
+
+case class CarbonExplainCommand(
+    child: LogicalPlan,
+    override val output: Seq[Attribute] =
+    Seq(AttributeReference("plan", StringType, nullable = true)())
+) extends MetadataCommand {
+  override def processMetadata(sparkSession: SparkSession): Seq[Row] = {
+    val explainCommand = child.asInstanceOf[ExplainCommand]
+
+    val isCommand = explainCommand.logicalPlan match {
+      case _: Command => true
+      case Union(children) if children.forall(_.isInstanceOf[Command]) => true
+      case _ => false
+    }
+
+    if (explainCommand.logicalPlan.isStreaming || isCommand) {
+      explainCommand.run(sparkSession)
+    } else {
+      collectProfiler(sparkSession) ++ explainCommand.run(sparkSession)
+    }
+  }
+
+  private def collectProfiler(sparkSession: SparkSession): Seq[Row] = {
+    val queryExecution =
+      sparkSession.sessionState.executePlan(child.asInstanceOf[ExplainCommand].logicalPlan)
+    try {
+      ExplainCollector.setup()
+      queryExecution.toRdd.partitions
+      Seq(Row("== CarbonData Profiler ==\n" + ExplainCollector.getFormatedOutput))
+    } finally {
+      ExplainCollector.remove()
+    }
+  }
+}
+
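
Note the trick in collectProfiler() above: touching queryExecution.toRdd.partitions
forces Spark to plan the input partitions, which invokes CarbonInputFormat.getSplits()
and thus runs the datamap pruning while the collector is enabled, without executing
the query. A minimal sketch of the same pattern outside the command (assuming `df`
is any DataFrame over a carbon table):

    import org.apache.carbondata.core.profiler.ExplainCollector

    ExplainCollector.setup()
    try {
      df.queryExecution.toRdd.partitions   // planning only; no Spark job is launched
      println(ExplainCollector.getFormatedOutput)
    } finally {
      ExplainCollector.remove()
    }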

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
index 942a21b..1bb7d7c 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
@@ -216,8 +216,6 @@ class SparkCarbonFileFormat extends FileFormat
 
         val model = format.createQueryModel(split, attemptContext)
 
-        var partition : java.util.List[PartitionSpec] = new java.util.ArrayList[PartitionSpec]()
-
         val segmentPath = CarbonTablePath.getSegmentPath(identifier.getTablePath(), "null")
         val readCommittedScope = new LatestFilesReadCommittedScope(
           identifier.getTablePath + "/Fact/Part0/Segment_null/")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
index 9cf3d68..1bb328c 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
@@ -36,9 +36,11 @@ import org.apache.spark.sql.types._
 import org.apache.spark.util.CarbonReflectionUtils
 
 import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonCommonConstantsInternal}
+import org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider
 import org.apache.carbondata.core.metadata.schema.table.{AggregationDataMapSchema, CarbonTable, DataMapSchema}
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema
 import org.apache.carbondata.core.preagg.{AggregateQueryPlan, AggregateTableSelector, QueryColumn}
+import org.apache.carbondata.core.profiler.ExplainCollector
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager
 import org.apache.carbondata.core.util.{CarbonUtil, ThreadLocalSessionInfo}
 
@@ -359,7 +361,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
            l.relation.asInstanceOf[CarbonDatasourceHadoopRelation].carbonRelation.
              metaData.hasAggregateDataMapSchema && !isPlanUpdated =>
         val carbonTable = getCarbonTable(l)
-        if(isSpecificSegmentNotPresent(carbonTable)) {
+        if (isSpecificSegmentNotPresent(carbonTable)) {
           val list = scala.collection.mutable.HashSet.empty[QueryColumn]
           val aggregateExpressions = scala.collection.mutable.HashSet.empty[AggregateExpression]
           val isValidPlan = extractQueryColumnsFromAggExpression(
@@ -386,6 +388,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
                   carbonTable,
                   agg)
               isPlanUpdated = true
+              setExplain(aggDataMapSchema)
               val updateAggPlan =
                 Aggregate(
                 updatedGroupExp,
@@ -451,6 +454,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
                   carbonTable,
                   agg)
               isPlanUpdated = true
+              setExplain(aggDataMapSchema)
               val updateAggPlan =
                 Aggregate(
                 updatedGroupExp,
@@ -519,6 +523,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
                   carbonTable,
                   agg)
               isPlanUpdated = true
+              setExplain(aggDataMapSchema)
               val updateAggPlan =
                 Aggregate(
                 updatedGroupExp,
@@ -590,6 +595,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
                   carbonTable,
                   agg)
               isPlanUpdated = true
+              setExplain(aggDataMapSchema)
               val updateAggPlan =
                 Aggregate(
                   updatedGroupExp,
@@ -624,12 +630,17 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
 
     }
     if(isPlanUpdated) {
-      CarbonSession.threadSet(CarbonCommonConstants.SUPPORT_DIRECT_QUERY_ON_DATAMAP,
-        "true")
+      CarbonSession.threadSet(CarbonCommonConstants.SUPPORT_DIRECT_QUERY_ON_DATAMAP, "true")
     }
     updatedPlan
   }
 
+  // set datamap match information for EXPLAIN command
+  private def setExplain(dataMapSchema: AggregationDataMapSchema): Unit = {
+    ExplainCollector.recordMatchedOlapDataMap(
+      dataMapSchema.getProvider.getShortName, dataMapSchema.getDataMapName)
+  }
+
   /**
    * Method to get the aggregate query plan
    * @param aggPlan
@@ -1004,7 +1015,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
     // if it does not match with any pre aggregate table return the same plan
     if (!selectedAggMaps.isEmpty) {
       // filter the selected child schema based on size to select the pre-aggregate tables
-      // that are nonEmpty
+      // that are enabled
       val catalog = CarbonEnv.getInstance(sparkSession).carbonMetastore
       val relationBuffer = selectedAggMaps.asScala.map { selectedDataMapSchema =>
         val identifier = TableIdentifier(
@@ -1041,7 +1052,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
             .apply(logicalPlan))
           case None => (null, null)
         }
-        // If the relationBuffer is nonEmpty then find the table with the minimum size.
+        // If the relationBuffer is non-empty, find the table with the minimum size.
       }
     } else {
       (null, null)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/integration/spark2/src/main/spark2.1/org/apache/spark/sql/hive/CarbonSessionState.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/spark2.1/org/apache/spark/sql/hive/CarbonSessionState.scala b/integration/spark2/src/main/spark2.1/org/apache/spark/sql/hive/CarbonSessionState.scala
index 9fe7241..b361e36 100644
--- a/integration/spark2/src/main/spark2.1/org/apache/spark/sql/hive/CarbonSessionState.scala
+++ b/integration/spark2/src/main/spark2.1/org/apache/spark/sql/hive/CarbonSessionState.scala
@@ -22,13 +22,13 @@ import org.apache.spark.sql.catalyst.analysis.{Analyzer, FunctionRegistry}
 import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTablePartition, FunctionResourceLoader, GlobalTempViewManager, SessionCatalog}
 import org.apache.spark.sql.catalyst.expressions.{And, AttributeReference, BoundReference, Expression, InterpretedPredicate, PredicateSubquery, ScalarSubquery}
 import org.apache.spark.sql.catalyst.optimizer.Optimizer
-import org.apache.spark.sql.catalyst.parser.ParserInterface
+import org.apache.spark.sql.catalyst.parser.{ParserInterface, SqlBaseParser}
 import org.apache.spark.sql.catalyst.parser.ParserUtils.{string, _}
 import org.apache.spark.sql.catalyst.parser.SqlBaseParser.{CreateTableContext, ShowTablesContext}
 import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, SubqueryAlias}
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.catalyst.{CatalystConf, TableIdentifier}
-import org.apache.spark.sql.execution.command.table.CarbonShowTablesCommand
+import org.apache.spark.sql.execution.command.table.{CarbonExplainCommand, CarbonShowTablesCommand}
 import org.apache.spark.sql.execution.datasources._
 import org.apache.spark.sql.execution.strategy.{CarbonLateDecodeStrategy, DDLStrategy, StreamingTableStrategy}
 import org.apache.spark.sql.execution.{SparkOptimizer, SparkSqlAstBuilder}
@@ -404,4 +404,8 @@ class CarbonSqlAstBuilder(conf: SQLConf, parser: CarbonSpark2SqlParser, sparkSes
       }
     }
   }
+
+  override def visitExplain(ctx: SqlBaseParser.ExplainContext): LogicalPlan = {
+    CarbonExplainCommand(super.visitExplain(ctx))
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonSessionState.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonSessionState.scala b/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonSessionState.scala
index de37a35..a7a255e 100644
--- a/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonSessionState.scala
+++ b/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonSessionState.scala
@@ -26,8 +26,16 @@ import org.apache.spark.sql.catalyst.expressions.Expression
 import org.apache.spark.sql.catalyst.optimizer.Optimizer
 import org.apache.spark.sql.catalyst.parser.ParserInterface
 import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan}
+import org.apache.spark.sql.catalyst.parser.{ParserInterface, SqlBaseParser}
+import org.apache.spark.sql.catalyst.parser.ParserUtils.{string, withOrigin}
+import org.apache.spark.sql.catalyst.parser.SqlBaseParser.{AddTableColumnsContext, ChangeColumnContext, CreateHiveTableContext, CreateTableContext, ShowTablesContext}
+import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, SubqueryAlias}
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.execution.datasources.{FindDataSourceTable, PreWriteCheck, ResolveSQLOnFile, _}
+import org.apache.spark.sql.execution.command._
+import org.apache.spark.sql.execution.command.schema.{CarbonAlterTableAddColumnCommand, CarbonAlterTableDataTypeChangeCommand}
+import org.apache.spark.sql.execution.command.table.{CarbonExplainCommand, CarbonShowTablesCommand}
+import org.apache.spark.sql.execution.datasources.{FindDataSourceTable, LogicalRelation, PreWriteCheck, ResolveSQLOnFile, _}
 import org.apache.spark.sql.execution.strategy.{CarbonLateDecodeStrategy, DDLStrategy, StreamingTableStrategy}
 import org.apache.spark.sql.hive.client.HiveClient
 import org.apache.spark.sql.internal.{SQLConf, SessionState}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonSqlAstBuilder.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonSqlAstBuilder.scala b/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonSqlAstBuilder.scala
index b0702ae..a533db0 100644
--- a/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonSqlAstBuilder.scala
+++ b/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonSqlAstBuilder.scala
@@ -19,12 +19,13 @@ package org.apache.spark.sql.hive
 
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.catalyst.parser.ParserUtils.{string, withOrigin}
+import org.apache.spark.sql.catalyst.parser.SqlBaseParser
 import org.apache.spark.sql.catalyst.parser.SqlBaseParser.{AddTableColumnsContext, ChangeColumnContext, CreateHiveTableContext, CreateTableContext, ShowTablesContext}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.execution.SparkSqlAstBuilder
 import org.apache.spark.sql.execution.command.{AlterTableAddColumnsModel, AlterTableDataTypeChangeModel}
 import org.apache.spark.sql.execution.command.schema.{CarbonAlterTableAddColumnCommand, CarbonAlterTableDataTypeChangeCommand}
-import org.apache.spark.sql.execution.command.table.CarbonShowTablesCommand
+import org.apache.spark.sql.execution.command.table.{CarbonExplainCommand, CarbonShowTablesCommand}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.parser.{CarbonHelperSqlAstBuilder, CarbonSpark2SqlParser}
 import org.apache.spark.sql.types.DecimalType
@@ -121,4 +122,8 @@ class CarbonSqlAstBuilder(conf: SQLConf, parser: CarbonSpark2SqlParser, sparkSes
       }
     }
   }
+
+  override def visitExplain(ctx: SqlBaseParser.ExplainContext): LogicalPlan = {
+    CarbonExplainCommand(super.visitExplain(ctx))
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/03a735bf/store/search/src/main/java/org/apache/carbondata/store/worker/SearchRequestHandler.java
----------------------------------------------------------------------
diff --git a/store/search/src/main/java/org/apache/carbondata/store/worker/SearchRequestHandler.java b/store/search/src/main/java/org/apache/carbondata/store/worker/SearchRequestHandler.java
index 8e31395..957e9f8 100644
--- a/store/search/src/main/java/org/apache/carbondata/store/worker/SearchRequestHandler.java
+++ b/store/search/src/main/java/org/apache/carbondata/store/worker/SearchRequestHandler.java
@@ -140,7 +140,7 @@ public class SearchRequestHandler {
     DataMapExprWrapper wrapper =
         DataMapChooser.get().choose(table, queryModel.getFilterExpressionResolverTree());
 
-    if (wrapper.getDataMapType() == DataMapLevel.FG) {
+    if (wrapper.getDataMapLevel() == DataMapLevel.FG) {
       List<Segment> segments = new LinkedList<>();
       for (CarbonInputSplit split : mbSplit.getAllSplits()) {
         segments.add(Segment.toSegment(

