phoenix-commits mailing list archives

From sama...@apache.org
Subject [2/2] phoenix git commit: PHOENIX-4007 Surface time at which byte/row estimate information was computed in explain plan output
Date Wed, 27 Sep 2017 06:14:26 GMT
PHOENIX-4007 Surface time at which byte/row estimate information was computed in explain plan output


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fd2b064a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fd2b064a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fd2b064a

Branch: refs/heads/4.x-HBase-1.1
Commit: fd2b064a01f2ef91eca5f0b4fddb3be6b898277c
Parents: 3d2f269
Author: Samarth Jain <samarth@apache.org>
Authored: Tue Sep 26 23:14:18 2017 -0700
Committer: Samarth Jain <samarth@apache.org>
Committed: Tue Sep 26 23:14:18 2017 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/ArrayIT.java     |   3 +-
 .../end2end/BaseUniqueNamesOwnClusterIT.java    |   1 -
 .../end2end/ExplainPlanWithStatsDisabledIT.java |  64 ++-
 .../end2end/ExplainPlanWithStatsEnabledIT.java  | 413 ++++++++++++++++---
 .../phoenix/end2end/ParallelStatsEnabledIT.java |   1 +
 .../phoenix/end2end/QueryWithOffsetIT.java      |  10 +-
 .../phoenix/end2end/StatsCollectorIT.java       |  45 +-
 .../phoenix/compile/BaseMutationPlan.java       |   5 +
 .../phoenix/compile/DelegateMutationPlan.java   |   5 +
 .../apache/phoenix/compile/DeleteCompiler.java  |  48 ++-
 .../phoenix/compile/ListJarsQueryPlan.java      |   5 +
 .../apache/phoenix/compile/StatementPlan.java   |   6 +
 .../apache/phoenix/compile/TraceQueryPlan.java  |   5 +
 .../apache/phoenix/compile/UpsertCompiler.java  |  15 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   1 -
 .../UngroupedAggregateRegionObserver.java       |  14 +-
 .../apache/phoenix/execute/AggregatePlan.java   |   1 +
 .../apache/phoenix/execute/BaseQueryPlan.java   |  12 +
 .../phoenix/execute/DelegateQueryPlan.java      |   5 +
 .../apache/phoenix/execute/HashJoinPlan.java    |  30 +-
 .../execute/LiteralResultIterationPlan.java     |   5 +
 .../org/apache/phoenix/execute/ScanPlan.java    |  12 +-
 .../phoenix/execute/SortMergeJoinPlan.java      |  34 +-
 .../org/apache/phoenix/execute/UnionPlan.java   |  27 +-
 .../phoenix/iterate/BaseResultIterators.java    |  75 +++-
 .../apache/phoenix/jdbc/PhoenixStatement.java   |  53 ++-
 .../stats/DefaultStatisticsCollector.java       |  52 ++-
 .../phoenix/schema/stats/GuidePostsInfo.java    |  59 ++-
 .../schema/stats/GuidePostsInfoBuilder.java     |  48 ++-
 .../phoenix/schema/stats/StatisticsScanner.java |   2 +-
 .../phoenix/schema/stats/StatisticsUtil.java    |  62 +--
 .../phoenix/schema/stats/StatisticsWriter.java  |  60 ++-
 .../schema/tuple/MultiKeyValueTuple.java        |   7 +-
 .../org/apache/phoenix/util/NumberUtil.java     |  18 +
 .../org/apache/phoenix/util/PhoenixRuntime.java |   7 +
 .../phoenix/filter/SkipScanBigFilterTest.java   |   3 +-
 .../query/ParallelIteratorsSplitTest.java       |   5 +
 .../schema/stats/StatisticsScannerTest.java     |   4 +-
 38 files changed, 961 insertions(+), 261 deletions(-)
----------------------------------------------------------------------
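
This commit surfaces a third column in EXPLAIN output next to the existing byte and row estimates: the time at which the underlying guideposts backing those estimates were collected. A minimal client-side sketch (not part of this commit) of reading all three columns through the PhoenixRuntime constants used by the tests below; the JDBC URL and table name are placeholders:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    import org.apache.phoenix.util.PhoenixRuntime;

    public class ReadExplainEstimates {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                    Statement stmt = conn.createStatement();
                    ResultSet rs = stmt.executeQuery("EXPLAIN SELECT * FROM MY_TABLE")) {
                rs.next();
                // All three columns are null when no estimate information is available.
                Long bytes = (Long) rs.getObject(PhoenixRuntime.EXPLAIN_PLAN_ESTIMATED_BYTES_READ_COLUMN);
                Long rows = (Long) rs.getObject(PhoenixRuntime.EXPLAIN_PLAN_ESTIMATED_ROWS_READ_COLUMN);
                // New in this change: timestamp at which the estimate information was computed.
                Long ts = (Long) rs.getObject(PhoenixRuntime.EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN);
                System.out.println("estimated bytes=" + bytes + ", rows=" + rows + ", estimate ts=" + ts);
            }
        }
    }
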


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
index d05a200..cf86614 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
@@ -131,8 +131,7 @@ public class ArrayIT extends ParallelStatsDisabledIT {
 		String query = "SELECT a_double_array, /* comment ok? */ b_string, a_float FROM " + tableName + " WHERE ?=organization_id and ?=a_float";
 		Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 		Connection conn = DriverManager.getConnection(getUrl(), props);
-        //TODO: samarth do we need this
-		analyzeTable(conn, tableName);
+        analyzeTable(conn, tableName);
 		try {
 		    PreparedStatement statement = conn.prepareStatement(query);
 			statement.setString(1, tenantId);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseUniqueNamesOwnClusterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseUniqueNamesOwnClusterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseUniqueNamesOwnClusterIT.java
index 9401b2c..7ccbaaf 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseUniqueNamesOwnClusterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseUniqueNamesOwnClusterIT.java
@@ -18,7 +18,6 @@
 package org.apache.phoenix.end2end;
 
 import org.apache.phoenix.query.BaseTest;
-import org.junit.AfterClass;
 import org.junit.experimental.categories.Category;
 
 /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsDisabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsDisabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsDisabledIT.java
index ff4127d..c2d9b52 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsDisabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsDisabledIT.java
@@ -20,12 +20,13 @@ package org.apache.phoenix.end2end;
 import static org.apache.phoenix.end2end.ExplainPlanWithStatsEnabledIT.getByteRowEstimates;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.util.List;
 
-import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.end2end.ExplainPlanWithStatsEnabledIT.Estimate;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -54,7 +55,7 @@ public class ExplainPlanWithStatsDisabledIT extends ParallelStatsDisabledIT {
     private static void initData(String tableName) throws Exception {
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.createStatement().execute("CREATE TABLE " + tableName
-                    + " ( k INTEGER, c1.a bigint,c2.b bigint CONSTRAINT pk PRIMARY KEY (k))");
+                    + " ( k INTEGER, c1.a bigint,c2.b bigint CONSTRAINT pk PRIMARY KEY (k)) GUIDE_POSTS_WIDTH = 0");
             conn.createStatement().execute("upsert into " + tableName + " values (100,1,3)");
             conn.createStatement().execute("upsert into " + tableName + " values (101,2,4)");
             conn.createStatement().execute("upsert into " + tableName + " values (102,2,4)");
@@ -66,6 +67,9 @@ public class ExplainPlanWithStatsDisabledIT extends ParallelStatsDisabledIT {
             conn.createStatement().execute("upsert into " + tableName + " values (108,2,4)");
             conn.createStatement().execute("upsert into " + tableName + " values (109,2,4)");
             conn.commit();
+            // Because the guide post width is set to 0, no guide post will be collected
+            // effectively disabling stats collection.
+            conn.createStatement().execute("UPDATE STATISTICS " + tableName);
         }
     }
 
@@ -167,9 +171,10 @@ public class ExplainPlanWithStatsDisabledIT extends ParallelStatsDisabledIT {
         binds.add(99);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.setAutoCommit(false);
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-            assertEquals((Long) 200l, info.getSecond());
-            assertEquals((Long) 2l, info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 200l, info.estimatedBytes);
+            assertEquals((Long) 2l, info.estimatedRows);
+            assertTrue(info.estimatedRows > 0);
         }
     }
 
@@ -183,30 +188,57 @@ public class ExplainPlanWithStatsDisabledIT extends ParallelStatsDisabledIT {
             assertEstimatesAreZero(sql, binds, conn);
         }
     }
-    
+
     @Test
     public void testBytesRowsForSelectExecutedSerially() throws Exception {
         String sql = "SELECT * FROM " + tableA + " LIMIT 2";
         List<Object> binds = Lists.newArrayList();
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.setAutoCommit(false);
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-            assertEquals((Long) 200l, info.getSecond());
-            assertEquals((Long) 2l, info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 200l, info.estimatedBytes);
+            assertEquals((Long) 2l, info.estimatedRows);
+            assertTrue(info.estimatedRows > 0);
+        }
+    }
+
+    @Test
+    public void testEstimatesForUnionWithTablesWithNullAndLargeGpWidth() throws Exception {
+        String tableWithLargeGPWidth = generateUniqueName();
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            // create a table with 1 MB guidepost width
+            long guidePostWidth = 1000000;
+            conn.createStatement()
+                    .execute("CREATE TABLE " + tableWithLargeGPWidth
+                            + " ( k INTEGER, c1.a bigint,c2.b bigint CONSTRAINT pk PRIMARY KEY (k)) GUIDE_POSTS_WIDTH="
+                            + guidePostWidth);
+            conn.createStatement()
+                    .execute("upsert into " + tableWithLargeGPWidth + " values (100,1,3)");
+            conn.createStatement()
+                    .execute("upsert into " + tableWithLargeGPWidth + " values (101,2,4)");
+            conn.commit();
+            conn.createStatement().execute("UPDATE STATISTICS " + tableWithLargeGPWidth);
+        }
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            String sql =
+                    "SELECT * FROM " + tableA + " UNION ALL SELECT * FROM " + tableWithLargeGPWidth;
+            assertEstimatesAreNull(sql, Lists.newArrayList(), conn);
         }
     }
 
-    private void assertEstimatesAreNull(String sql, List<Object> binds, Connection conn)
+    public static void assertEstimatesAreNull(String sql, List<Object> binds, Connection conn)
             throws Exception {
-        Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-        assertNull(info.getSecond());
-        assertNull(info.getFirst());
+        Estimate info = getByteRowEstimates(conn, sql, binds);
+        assertNull(info.estimatedBytes);
+        assertNull(info.estimatedRows);
+        assertNull(info.estimateInfoTs);
     }
 
     private void assertEstimatesAreZero(String sql, List<Object> binds, Connection conn)
             throws Exception {
-        Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-        assertEquals((Long) 0l, info.getSecond());
-        assertEquals((Long) 0l, info.getFirst());
+        Estimate info = getByteRowEstimates(conn, sql, binds);
+        assertEquals((Long) 0l, info.estimatedBytes);
+        assertEquals((Long) 0l, info.estimatedRows);
+        assertEquals((Long) 0l, info.estimateInfoTs);
     }
 }
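
The assertEstimatesAreNull/assertEstimatesAreZero helpers above encode the three states these tests distinguish for the new timestamp: null (no guidepost information is available for some part of the plan, e.g. a union including a stats-less table), 0 (the estimate was produced without consulting guideposts, e.g. a point lookup), and a positive value (the time at which the guideposts backing the estimate were collected; note the byte/row estimates may still be 0 in that case, as in the out-of-range-key test in the stats-enabled IT). A small illustrative classifier, not part of the commit, that follows the same convention:

    // Illustrative only: interprets the EST_INFO_TS value per the conventions asserted in these tests.
    final class EstimateState {
        static String classify(Long estimateInfoTs) {
            if (estimateInfoTs == null) {
                // No guidepost information available for part of the plan; bytes/rows are null too.
                return "UNKNOWN";
            }
            if (estimateInfoTs == 0L) {
                // Estimate produced without consulting stats, e.g. a point lookup.
                return "NO_STATS_NEEDED";
            }
            // Estimates computed from guideposts collected at estimateInfoTs;
            // bytes/rows may still be 0 when the scanned key range is empty.
            return "FROM_GUIDEPOSTS";
        }
    }
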

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
index 49a0485..cd4555c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
@@ -18,29 +18,29 @@
 package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_USE_STATS_FOR_PARALLELIZATION;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.Collections;
 import java.util.List;
-import java.util.Map;
 
-import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.EnvironmentEdge;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
 /**
  * This class has tests for asserting the bytes and rows information exposed in the explain plan
@@ -50,22 +50,37 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
 
     private static String tableA;
     private static String tableB;
+    private static String tableWithLargeGPWidth;
+    private static String indexOnA;
+    private static final long largeGpWidth = 2 * 1000 * 1000l;
 
     @BeforeClass
-    public static void doSetup() throws Exception {
-        Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
-        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+    public static void createTables() throws Exception {
         tableA = generateUniqueName();
-        initDataAndStats(tableA);
+        initDataAndStats(tableA, 20l);
         tableB = generateUniqueName();
-        initDataAndStats(tableB);
+        initDataAndStats(tableB, 20l);
+        tableWithLargeGPWidth = generateUniqueName();
+        initDataAndStats(tableWithLargeGPWidth, largeGpWidth);
+        indexOnA = generateUniqueName();
+        createIndex(indexOnA, tableA, 20);
     }
 
-    private static void initDataAndStats(String tableName) throws Exception {
+    private static void createIndex(String indexName, String table, long guidePostWidth)
+            throws Exception {
         try (Connection conn = DriverManager.getConnection(getUrl())) {
-            conn.createStatement().execute("CREATE TABLE " + tableName
-                    + " ( k INTEGER, c1.a bigint,c2.b bigint CONSTRAINT pk PRIMARY KEY (k))");
+            conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + table
+                    + " (c1.a) INCLUDE (c2.b) GUIDE_POSTS_WIDTH = " + guidePostWidth);
+            conn.createStatement().execute("UPDATE STATISTICS " + indexName);
+        }
+    }
+
+    private static void initDataAndStats(String tableName, Long guidePostWidth) throws Exception {
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            conn.createStatement()
+                    .execute("CREATE TABLE " + tableName
+                            + " (k INTEGER PRIMARY KEY, c1.a bigint, c2.b bigint)"
+                            + " GUIDE_POSTS_WIDTH=" + guidePostWidth);
             conn.createStatement().execute("upsert into " + tableName + " values (100,1,3)");
             conn.createStatement().execute("upsert into " + tableName + " values (101,2,4)");
             conn.createStatement().execute("upsert into " + tableName + " values (102,2,4)");
@@ -81,25 +96,73 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
         }
     }
 
+    private static Connection getTenantConnection(String tenantId) throws SQLException {
+        String url = getUrl() + ';' + TENANT_ID_ATTRIB + '=' + tenantId;
+        return DriverManager.getConnection(url);
+    }
+
     @Test
-    public void testBytesRowsForSelect() throws Exception {
+    public void testBytesRowsForSelectWhenKeyOutOfRange() throws Exception {
         String sql = "SELECT * FROM " + tableA + " where k >= ?";
         List<Object> binds = Lists.newArrayList();
+        binds.add(200);
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 0l, info.estimatedBytes);
+            assertEquals((Long) 0l, info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
+        }
+    }
+
+    @Test
+    public void testBytesRowsForSelectWhenKeyInRange() throws Exception {
+        String sql = "SELECT * FROM " + tableB + " where k >= ?";
+        List<Object> binds = Lists.newArrayList();
         binds.add(99);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-            assertEquals((Long) 634l, info.getSecond());
-            assertEquals((Long) 10l, info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 634l, info.estimatedBytes);
+            assertEquals((Long) 10l, info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
+        }
+    }
+
+    @Test
+    public void testBytesRowsForSelectOnIndex() throws Exception {
+        String sql = "SELECT * FROM " + tableA + " where c1.a >= ?";
+        List<Object> binds = Lists.newArrayList();
+        binds.add(0);
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 691l, info.estimatedBytes);
+            assertEquals((Long) 10l, info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
         }
     }
 
     @Test
     public void testBytesRowsForUnion() throws Exception {
-        String sql = "SELECT * FROM " + tableA + " UNION ALL SELECT * FROM " + tableB;
+        String sql =
+                "SELECT /*+ NO_INDEX */ * FROM " + tableA + " UNION ALL SELECT * FROM " + tableB;
         try (Connection conn = DriverManager.getConnection(getUrl())) {
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, Lists.newArrayList());
-            assertEquals((Long) (2 * 634l), info.getSecond());
-            assertEquals((Long) (2 * 10l), info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, Lists.newArrayList());
+            assertEquals((Long) (2 * 634l), info.estimatedBytes);
+            assertEquals((Long) (2 * 10l), info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
+        }
+    }
+
+    @Test
+    public void testEstimatesForUnionWithTableWithLargeGpWidth() throws Exception {
+        // For table with largeGpWidth, a guide post is generated that has the
+        // byte size estimate of guide post width.
+        String sql =
+                "SELECT /*+ NO_INDEX */ * FROM " + tableA + " UNION ALL SELECT * FROM " + tableB
+                        + " UNION ALL SELECT * FROM " + tableWithLargeGPWidth;
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            Estimate info = getByteRowEstimates(conn, sql, Lists.newArrayList());
+            assertEquals((Long) (2 * 634 + largeGpWidth), info.estimatedBytes);
+            assertTrue(info.estimateInfoTs > 0);
         }
     }
 
@@ -109,21 +172,23 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
                 "SELECT ta.c1.a, ta.c2.b FROM " + tableA + " ta JOIN " + tableB
                         + " tb ON ta.k = tb.k";
         try (Connection conn = DriverManager.getConnection(getUrl())) {
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, Lists.newArrayList());
-            assertEquals((Long) (634l), info.getSecond());
-            assertEquals((Long) (10l), info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, Lists.newArrayList());
+            assertEquals((Long) (634l), info.estimatedBytes);
+            assertEquals((Long) (10l), info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
         }
     }
 
     @Test
     public void testBytesRowsForSortMergeJoin() throws Exception {
         String sql =
-                "SELECT /*+ USE_SORT_MERGE_JOIN */ ta.c1.a, ta.c2.b FROM " + tableA + " ta JOIN "
-                        + tableB + " tb ON ta.k = tb.k";
+                "SELECT /*+ NO_INDEX USE_SORT_MERGE_JOIN */ ta.c1.a, ta.c2.b FROM " + tableA
+                        + " ta JOIN " + tableB + " tb ON ta.k = tb.k";
         try (Connection conn = DriverManager.getConnection(getUrl())) {
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, Lists.newArrayList());
-            assertEquals((Long) (2 * 634l), info.getSecond());
-            assertEquals((Long) (2 * 10l), info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, Lists.newArrayList());
+            assertEquals((Long) (2 * 634l), info.estimatedBytes);
+            assertEquals((Long) (2 * 10l), info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
         }
     }
 
@@ -133,33 +198,36 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
         List<Object> binds = Lists.newArrayList();
         binds.add(99);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-            assertEquals((Long) 634l, info.getSecond());
-            assertEquals((Long) 10l, info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 634l, info.estimatedBytes);
+            assertEquals((Long) 10l, info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
         }
     }
 
     @Test
     public void testBytesRowsForUpsertSelectServerSide() throws Exception {
-        String sql = "UPSERT INTO " + tableA + " SELECT * FROM " + tableA;
+        String sql = "UPSERT INTO " + tableA + " SELECT * FROM " + tableB;
         List<Object> binds = Lists.newArrayList();
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.setAutoCommit(true);
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-            assertEquals((Long) 634l, info.getSecond());
-            assertEquals((Long) 10l, info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 634l, info.estimatedBytes);
+            assertEquals((Long) 10l, info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
         }
     }
 
     @Test
     public void testBytesRowsForUpsertSelectClientSide() throws Exception {
-        String sql = "UPSERT INTO " + tableA + " SELECT * FROM " + tableA;
+        String sql = "UPSERT INTO " + tableB + " SELECT * FROM " + tableB;
         List<Object> binds = Lists.newArrayList();
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.setAutoCommit(false);
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-            assertEquals((Long) 634l, info.getSecond());
-            assertEquals((Long) 10l, info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 634l, info.estimatedBytes);
+            assertEquals((Long) 10l, info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
         }
     }
 
@@ -171,9 +239,10 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
         binds.add(99);
         binds.add(99);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-            assertEquals((Long) 0l, info.getSecond());
-            assertEquals((Long) 0l, info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 0l, info.estimatedBytes);
+            assertEquals((Long) 0l, info.estimatedRows);
+            assertEquals((Long) 0l, info.estimateInfoTs);
         }
     }
 
@@ -184,9 +253,10 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
         binds.add(99);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.setAutoCommit(true);
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-            assertEquals((Long) 634l, info.getSecond());
-            assertEquals((Long) 10l, info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 634l, info.estimatedBytes);
+            assertEquals((Long) 10l, info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
         }
     }
 
@@ -197,9 +267,10 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
         binds.add(99);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.setAutoCommit(false);
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-            assertEquals((Long) 200l, info.getSecond());
-            assertEquals((Long) 2l, info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 200l, info.estimatedBytes);
+            assertEquals((Long) 2l, info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
         }
     }
 
@@ -210,9 +281,10 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
         binds.add(100);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.setAutoCommit(false);
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-            assertEquals((Long) 0l, info.getSecond());
-            assertEquals((Long) 0l, info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 0l, info.estimatedBytes);
+            assertEquals((Long) 0l, info.estimatedRows);
+            assertEquals((Long) 0l, info.estimateInfoTs);
         }
     }
 
@@ -222,17 +294,31 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
         List<Object> binds = Lists.newArrayList();
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.setAutoCommit(false);
-            Pair<Long, Long> info = getByteRowEstimates(conn, sql, binds);
-            assertEquals((Long) 200l, info.getSecond());
-            assertEquals((Long) 2l, info.getFirst());
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 200l, info.estimatedBytes);
+            assertEquals((Long) 2l, info.estimatedRows);
+            assertTrue(info.estimateInfoTs > 0);
         }
     }
 
-    public static Pair<Long, Long> getByteRowEstimates(Connection conn, String sql,
-            List<Object> bindValues) throws Exception {
+    public static class Estimate {
+        final Long estimatedBytes;
+        final Long estimatedRows;
+        final Long estimateInfoTs;
+
+        Estimate(Long rows, Long bytes, Long ts) {
+            this.estimatedBytes = bytes;
+            this.estimatedRows = rows;
+            this.estimateInfoTs = ts;
+        }
+    }
+
+    public static Estimate getByteRowEstimates(Connection conn, String sql, List<Object> bindValues)
+            throws Exception {
         String explainSql = "EXPLAIN " + sql;
         Long estimatedBytes = null;
         Long estimatedRows = null;
+        Long estimateInfoTs = null;
         try (PreparedStatement statement = conn.prepareStatement(explainSql)) {
             int paramIdx = 1;
             for (Object bind : bindValues) {
@@ -244,8 +330,10 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
                     (Long) rs.getObject(PhoenixRuntime.EXPLAIN_PLAN_ESTIMATED_BYTES_READ_COLUMN);
             estimatedRows =
                     (Long) rs.getObject(PhoenixRuntime.EXPLAIN_PLAN_ESTIMATED_ROWS_READ_COLUMN);
+            estimateInfoTs =
+                    (Long) rs.getObject(PhoenixRuntime.EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN);
         }
-        return new Pair<>(estimatedRows, estimatedBytes);
+        return new Estimate(estimatedRows, estimatedBytes, estimateInfoTs);
     }
 
     @Test
@@ -263,14 +351,17 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
             table = generateUniqueName();
             ddl = "CREATE TABLE " + table + " (PK1 INTEGER NOT NULL PRIMARY KEY, KV1 VARCHAR)";
             conn.createStatement().execute(ddl);
-            assertUseStatsForQueryFlag(table, conn.unwrap(PhoenixConnection.class), DEFAULT_USE_STATS_FOR_PARALLELIZATION);
+            assertUseStatsForQueryFlag(table, conn.unwrap(PhoenixConnection.class),
+                DEFAULT_USE_STATS_FOR_PARALLELIZATION);
         }
     }
 
     private static void assertUseStatsForQueryFlag(String tableName, PhoenixConnection conn,
             boolean flag) throws TableNotFoundException, SQLException {
-        assertEquals(flag, conn.unwrap(PhoenixConnection.class).getMetaDataCache()
-                .getTableRef(new PTableKey(null, tableName)).getTable().useStatsForParallelization());
+        assertEquals(flag,
+            conn.unwrap(PhoenixConnection.class).getMetaDataCache()
+                    .getTableRef(new PTableKey(null, tableName)).getTable()
+                    .useStatsForParallelization());
         String query =
                 "SELECT USE_STATS_FOR_PARALLELIZATION FROM SYSTEM.CATALOG WHERE TABLE_NAME = ? AND COLUMN_NAME IS NULL AND COLUMN_FAMILY IS NULL AND TENANT_ID IS NULL";
         PreparedStatement stmt = conn.prepareStatement(query);
@@ -279,4 +370,200 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
         rs.next();
         assertEquals(flag, rs.getBoolean(1));
     }
+
+    @Test
+    public void testBytesRowsForSelectOnTenantViews() throws Exception {
+        String tenant1View = generateUniqueName();
+        ;
+        String tenant2View = generateUniqueName();
+        ;
+        String tenant3View = generateUniqueName();
+        ;
+        String multiTenantBaseTable = generateUniqueName();
+        String tenant1 = "tenant1";
+        String tenant2 = "tenant2";
+        String tenant3 = "tenant3";
+        MyClock clock = new MyClock(1000);
+        createMultitenantTableAndViews(tenant1View, tenant2View, tenant3View, tenant1, tenant2,
+            tenant3, multiTenantBaseTable, clock);
+
+        // query the entire multitenant table
+        String sql = "SELECT * FROM " + multiTenantBaseTable + " WHERE ORGID >= ?";
+        List<Object> binds = Lists.newArrayList();
+        binds.add("tenant0");
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 817l, info.estimatedBytes);
+            assertEquals((Long) 10l, info.estimatedRows);
+            assertEquals((Long) clock.currentTime(), info.estimateInfoTs);
+        }
+        binds.clear();
+        // query tenant1 view
+        try (Connection conn = getTenantConnection(tenant1)) {
+            sql = "SELECT * FROM " + tenant1View;
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 143l, info.estimatedBytes);
+            assertEquals((Long) 2l, info.estimatedRows);
+            assertEquals((Long) clock.currentTime(), info.estimateInfoTs);
+        }
+        // query tenant2 view
+        try (Connection conn = getTenantConnection(tenant2)) {
+            sql = "SELECT * FROM " + tenant2View;
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 143l, info.estimatedBytes);
+            assertEquals((Long) 2l, info.estimatedRows);
+            assertEquals((Long) clock.currentTime(), info.estimateInfoTs);
+        }
+        // query tenant3 view
+        try (Connection conn = getTenantConnection(tenant3)) {
+            sql = "SELECT * FROM " + tenant3View;
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 531l, info.estimatedBytes);
+            assertEquals((Long) 6l, info.estimatedRows);
+            assertEquals((Long) clock.currentTime(), info.estimateInfoTs);
+        }
+        /*
+         * Now we will add some rows to tenant1view an run update stats on it. We will do this after
+         * advancing our clock by 1000 seconds. This way we can check that only the region for
+         * tenant1 will have updated guidepost with the new timestamp.
+         */
+        long prevTenant1Bytes = 143l;
+        long prevGuidePostTimestamp = clock.currentTime();
+        clock.advanceTime(1000);
+        try {
+            EnvironmentEdgeManager.injectEdge(clock);
+            // Update tenant1 view
+            try (Connection conn = getTenantConnection(tenant1)) {
+                // upsert a few rows for tenantView
+                conn.createStatement()
+                        .executeUpdate("UPSERT INTO " + tenant1View + " VALUES (11, 11, 11)");
+                conn.createStatement()
+                        .executeUpdate("UPSERT INTO " + tenant1View + " VALUES (12, 12, 12)");
+                conn.createStatement()
+                        .executeUpdate("UPSERT INTO " + tenant1View + " VALUES (13, 13, 13)");
+                conn.createStatement()
+                        .executeUpdate("UPSERT INTO " + tenant1View + " VALUES (14, 14, 14)");
+                conn.createStatement()
+                        .executeUpdate("UPSERT INTO " + tenant1View + " VALUES (15, 15, 15)");
+                conn.createStatement()
+                        .executeUpdate("UPSERT INTO " + tenant1View + " VALUES (16, 16, 16)");
+                conn.commit();
+                // run update stats on the tenantView
+                conn.createStatement().executeUpdate("UPDATE STATISTICS " + tenant1View);
+                // get estimates now and check if they were updated as expected
+                sql = "SELECT * FROM " + tenant1View;
+                Estimate info = getByteRowEstimates(conn, sql, Collections.emptyList());
+                assertTrue(info.estimatedBytes > prevTenant1Bytes);
+                assertEquals((Long) 8l, info.estimatedRows);
+                assertEquals((Long) clock.currentTime(), info.estimateInfoTs);
+            }
+        } finally {
+            EnvironmentEdgeManager.reset();
+        }
+        // Now check estimates again for tenantView2 and tenantView3. They should stay the same.
+        try (Connection conn = getTenantConnection(tenant2)) {
+            sql = "SELECT * FROM " + tenant2View;
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 143l, info.estimatedBytes);
+            assertEquals((Long) 2l, info.estimatedRows);
+            assertEquals((Long) prevGuidePostTimestamp, info.estimateInfoTs);
+        }
+        try (Connection conn = getTenantConnection(tenant3)) {
+            sql = "SELECT * FROM " + tenant3View;
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 531l, info.estimatedBytes);
+            assertEquals((Long) 6l, info.estimatedRows);
+            assertEquals((Long) prevGuidePostTimestamp, info.estimateInfoTs);
+        }
+        /*
+         * Now let's query the base table and see estimates. Because we use the minimum timestamp
+         * for all guideposts that we will be scanning, the timestamp for the estimate info for this
+         * query should be prevGuidePostTimestamp.
+         */
+        binds.clear();
+        binds.add("tenant0");
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            sql = "SELECT * FROM " + multiTenantBaseTable + " WHERE ORGID >= ?";
+            Estimate info = getByteRowEstimates(conn, sql, binds);
+            assertEquals((Long) 1399l, info.estimatedBytes);
+            assertEquals((Long) 16l, info.estimatedRows);
+            assertEquals((Long) prevGuidePostTimestamp, info.estimateInfoTs);
+        }
+    }
+
+    private static void createMultitenantTableAndViews(String tenant1View, String tenant2View,
+            String tenant3View, String tenant1, String tenant2, String tenant3,
+            String multiTenantTable, MyClock clock) throws SQLException {
+        byte[][] splits =
+                new byte[][] { Bytes.toBytes(tenant1), Bytes.toBytes(tenant2),
+                        Bytes.toBytes(tenant3) };
+        String ddl =
+                "CREATE TABLE " + multiTenantTable
+                        + " (orgId CHAR(15) NOT NULL, pk2 integer NOT NULL, c1.a bigint, c2.b bigint CONSTRAINT PK PRIMARY KEY "
+                        + "(ORGID, PK2)) MULTI_TENANT=true, GUIDE_POSTS_WIDTH=2";
+        // Use our own clock to get rows created with our controlled timestamp
+        try {
+            EnvironmentEdgeManager.injectEdge(clock);
+            createTestTable(getUrl(), ddl, splits, null);
+            clock.advanceTime(1000);
+            try (Connection conn = DriverManager.getConnection(getUrl())) {
+                /**
+                 * Insert 2 rows each for tenant1 and tenant2 and 6 rows for tenant3
+                 */
+                conn.createStatement().execute(
+                    "upsert into " + multiTenantTable + " values ('" + tenant1 + "',1,1,1)");
+                conn.createStatement().execute(
+                    "upsert into " + multiTenantTable + " values ('" + tenant1 + "',2,2,2)");
+                conn.createStatement().execute(
+                    "upsert into " + multiTenantTable + " values ('" + tenant2 + "',3,3,3)");
+                conn.createStatement().execute(
+                    "upsert into " + multiTenantTable + " values ('" + tenant2 + "',4,4,4)");
+                conn.createStatement().execute(
+                    "upsert into " + multiTenantTable + " values ('" + tenant3 + "',5,5,5)");
+                conn.createStatement().execute(
+                    "upsert into " + multiTenantTable + " values ('" + tenant3 + "',6,6,6)");
+                conn.createStatement().execute(
+                    "upsert into " + multiTenantTable + " values ('" + tenant3 + "',7,7,7)");
+                conn.createStatement().execute(
+                    "upsert into " + multiTenantTable + " values ('" + tenant3 + "',8,8,8)");
+                conn.createStatement().execute(
+                    "upsert into " + multiTenantTable + " values ('" + tenant3 + "',9,9,9)");
+                conn.createStatement().execute(
+                    "upsert into " + multiTenantTable + " values ('" + tenant3 + "',10,10,10)");
+                conn.commit();
+                conn.createStatement().execute("UPDATE STATISTICS " + multiTenantTable);
+            }
+            try (Connection conn = getTenantConnection(tenant1)) {
+                conn.createStatement().execute(
+                    "CREATE VIEW " + tenant1View + " AS SELECT * FROM " + multiTenantTable);
+            }
+            try (Connection conn = getTenantConnection(tenant2)) {
+                conn.createStatement().execute(
+                    "CREATE VIEW " + tenant2View + " AS SELECT * FROM " + multiTenantTable);
+            }
+            try (Connection conn = getTenantConnection(tenant3)) {
+                conn.createStatement().execute(
+                    "CREATE VIEW " + tenant3View + " AS SELECT * FROM " + multiTenantTable);
+            }
+        } finally {
+            EnvironmentEdgeManager.reset();
+        }
+    }
+
+    private static class MyClock extends EnvironmentEdge {
+        public volatile long time;
+
+        public MyClock(long time) {
+            this.time = time;
+        }
+
+        @Override
+        public long currentTime() {
+            return time;
+        }
+
+        public void advanceTime(long t) {
+            this.time += t;
+        }
+    }
 }
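
The tenant-view test above makes guidepost timestamps deterministic by injecting a test clock: EnvironmentEdgeManager.injectEdge(clock) causes stats collection to read time from the MyClock edge defined at the end of the class, and EnvironmentEdgeManager.reset() in a finally block restores the real clock. A stripped-down sketch of that pattern, assuming it lives in the same test class so MyClock is visible:

    // Sketch of the clock-injection pattern used above; assumes access to MyClock.
    private static void runWithControlledClock(MyClock clock, Runnable work) {
        try {
            EnvironmentEdgeManager.injectEdge(clock); // time now comes from the test clock
            work.run(); // e.g. upserts followed by UPDATE STATISTICS
        } finally {
            EnvironmentEdgeManager.reset(); // always restore the real clock
        }
    }
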

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
index a62d50d..d6b3924 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
@@ -41,6 +41,7 @@ public abstract class ParallelStatsEnabledIT extends BaseTest {
     public static void doSetup() throws Exception {
         Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
         props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
+        props.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Long.toString(5));
         props.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(true));
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
index cab75b7..aff22af 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
@@ -34,7 +34,11 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.stats.GuidePostsInfo;
+import org.apache.phoenix.schema.stats.GuidePostsKey;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.junit.Before;
@@ -162,14 +166,14 @@ public class QueryWithOffsetIT extends ParallelStatsDisabledIT {
         initTableValues(conn);
         updateStatistics(conn);
         ResultSet rs;
-        rs = conn.createStatement()
-                .executeQuery("SELECT t_id from " + tableName + " order by t_id offset " + offset + " row");
         int i = 0;
+        rs =
+                conn.createStatement().executeQuery(
+                    "SELECT t_id from " + tableName + " order by t_id offset " + offset + " row");
         while (i++ < STRINGS.length - offset) {
             assertTrue(rs.next());
             assertEquals(STRINGS[offset + i - 1], rs.getString(1));
         }
-
         rs = conn.createStatement().executeQuery(
                 "SELECT k3, count(*) from " + tableName + " group by k3 order by k3 desc offset " + offset + " row");
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index d2c8e6f..19b5275 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -43,7 +43,6 @@ import java.util.Random;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -58,8 +57,9 @@ import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.stats.GuidePostsInfo;
 import org.apache.phoenix.schema.stats.GuidePostsKey;
+import org.apache.phoenix.schema.stats.StatisticsUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -177,7 +177,7 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
         ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
         String explainPlan = QueryUtil.getExplainPlan(rs);
         assertEquals(
-                "CLIENT 1-CHUNK 0 ROWS 0 BYTES PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName + "\n" + 
+                "CLIENT 1-CHUNK 0 ROWS 20 BYTES PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName + "\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY",
                 explainPlan);
         conn.close();
@@ -197,7 +197,7 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
         rs = conn.createStatement().executeQuery("EXPLAIN SELECT v2 FROM " + fullTableName + " WHERE v2='foo'");
         explainPlan = QueryUtil.getExplainPlan(rs);
         // if we are using the ONE_CELL_PER_COLUMN_FAMILY storage scheme, we will have the single kv even though there are no values for col family v2 
-        String stats = columnEncoded && !mutable  ? "4-CHUNK 1 ROWS 38 BYTES" : "3-CHUNK 0 ROWS 0 BYTES";
+        String stats = columnEncoded && !mutable  ? "4-CHUNK 1 ROWS 38 BYTES" : "3-CHUNK 0 ROWS 20 BYTES";
         assertEquals(
                 "CLIENT " + stats + " PARALLEL 3-WAY FULL SCAN OVER " + physicalTableName + "\n" +
                 "    SERVER FILTER BY B.V2 = 'foo'\n" + 
@@ -707,5 +707,42 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
             assertEquals("Number of expected rows in stats table after major compaction didn't match", numRows, rs.getInt(1));
         }
     }
+    
+    @Test
+    public void testEmptyGuidePostGeneratedWhenDataSizeLessThanGPWidth() throws Exception {
+        String tableName = generateUniqueName();
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            long guidePostWidth = 20000000;
+            conn.createStatement()
+                    .execute("CREATE TABLE " + tableName
+                            + " ( k INTEGER, c1.a bigint,c2.b bigint CONSTRAINT pk PRIMARY KEY (k)) GUIDE_POSTS_WIDTH="
+                            + guidePostWidth + ", SALT_BUCKETS = 4");
+            conn.createStatement().execute("upsert into " + tableName + " values (100,1,3)");
+            conn.createStatement().execute("upsert into " + tableName + " values (101,2,4)");
+            conn.commit();
+            conn.createStatement().execute("UPDATE STATISTICS " + tableName);
+            ConnectionQueryServices queryServices =
+                    conn.unwrap(PhoenixConnection.class).getQueryServices();
+            try (HTableInterface statsHTable =
+                    queryServices.getTable(
+                        SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES,
+                            queryServices.getProps()).getName())) {
+                GuidePostsInfo gps =
+                        StatisticsUtil.readStatistics(statsHTable,
+                            new GuidePostsKey(Bytes.toBytes(tableName), Bytes.toBytes("C1")),
+                            HConstants.LATEST_TIMESTAMP);
+                assertTrue(gps.isEmptyGuidePost());
+                assertEquals(guidePostWidth, gps.getByteCounts()[0]);
+                assertTrue(gps.getGuidePostTimestamps()[0] > 0);
+                gps =
+                        StatisticsUtil.readStatistics(statsHTable,
+                            new GuidePostsKey(Bytes.toBytes(tableName), Bytes.toBytes("C2")),
+                            HConstants.LATEST_TIMESTAMP);
+                assertTrue(gps.isEmptyGuidePost());
+                assertEquals(guidePostWidth, gps.getByteCounts()[0]);
+                assertTrue(gps.getGuidePostTimestamps()[0] > 0);
+            }
+        }
+    }
 
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java
index 276dc9b..0e45682 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java
@@ -74,4 +74,9 @@ public abstract class BaseMutationPlan implements MutationPlan {
         return 0l;
     }
 
+    @Override
+    public Long getEstimateInfoTimestamp() throws SQLException {
+        return 0l;
+    }
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java
index 005ae1f..343ec32 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java
@@ -77,4 +77,9 @@ public class DelegateMutationPlan implements MutationPlan {
         return plan.getEstimatedBytesToScan();
     }
 
+    @Override
+    public Long getEstimateInfoTimestamp() throws SQLException {
+        return plan.getEstimateInfoTimestamp();
+    }
+
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index b2fd17c..be07cf4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -309,6 +309,13 @@ public class DeleteCompiler {
         public Long getEstimatedRowsToScan() throws SQLException {
             Long estRows = null;
             for (MutationPlan plan : plans) {
+                /*
+                 * If any of the plan doesn't have estimate information available, then we cannot
+                 * provide estimate for the overall plan.
+                 */
+                if (plan.getEstimatedRowsToScan() == null) {
+                    return null;
+                }
                 estRows = add(estRows, plan.getEstimatedRowsToScan());
             }
             return estRows;
@@ -318,12 +325,36 @@ public class DeleteCompiler {
         public Long getEstimatedBytesToScan() throws SQLException {
             Long estBytes = null;
             for (MutationPlan plan : plans) {
+                /*
+                 * If any of the plan doesn't have estimate information available, then we cannot
+                 * provide estimate for the overall plan.
+                 */
+                if (plan.getEstimatedBytesToScan() == null) {
+                    return null;
+                }
                 estBytes = add(estBytes, plan.getEstimatedBytesToScan());
             }
             return estBytes;
         }
+
+        @Override
+        public Long getEstimateInfoTimestamp() throws SQLException {
+            Long estInfoTimestamp = Long.MAX_VALUE;
+            for (MutationPlan plan : plans) {
+                Long timestamp = plan.getEstimateInfoTimestamp();
+                /*
+                 * If any of the plan doesn't have estimate information available, then we cannot
+                 * provide estimate for the overall plan.
+                 */
+                if (timestamp == null) {
+                    return timestamp;
+                }
+                estInfoTimestamp = Math.min(estInfoTimestamp, timestamp);
+            }
+            return estInfoTimestamp;
+        }
     }
-    
+
     private static boolean hasNonPKIndexedColumns(Collection<PTable> immutableIndexes) {
         for (PTable index : immutableIndexes) {
             for (PColumn column : index.getPKColumns()) {
@@ -562,6 +593,11 @@ public class DeleteCompiler {
                     public Long getEstimatedBytesToScan() throws SQLException {
                         return 0l;
                     }
+
+                    @Override
+                    public Long getEstimateInfoTimestamp() throws SQLException {
+                        return 0l;
+                    }
                 });
             } else if (runOnServer) {
                 // TODO: better abstraction
@@ -659,6 +695,11 @@ public class DeleteCompiler {
                     public Long getEstimatedBytesToScan() throws SQLException {
                         return aggPlan.getEstimatedBytesToScan();
                     }
+
+                    @Override
+                    public Long getEstimateInfoTimestamp() throws SQLException {
+                        return aggPlan.getEstimateInfoTimestamp();
+                    }
                 });
             } else {
                 List<TableRef> immutableIndexRefsToBe = Lists.newArrayListWithExpectedSize(dataPlan.getTableRef().getTable().getIndexes().size());
@@ -746,6 +787,11 @@ public class DeleteCompiler {
                     public Long getEstimatedBytesToScan() throws SQLException {
                         return plan.getEstimatedBytesToScan();
                     }
+
+                    @Override
+                    public Long getEstimateInfoTimestamp() throws SQLException {
+                        return plan.getEstimateInfoTimestamp();
+                    }
                 });
             }
         }
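
DeleteCompiler above (and, per the file list, the hash join, sort-merge join and union plans touched by this commit) combines estimate timestamps from multiple sub-plans by returning null as soon as any sub-plan has no estimate information, and otherwise taking the minimum timestamp, so the combined estimate is only as fresh as its stalest input. A standalone sketch of that aggregation rule; the class and method names are illustrative, not from the commit:

    import java.util.Arrays;
    import java.util.List;

    final class EstimateTimestamps {
        // Mirrors the combination rule used in this commit:
        // null if any input timestamp is null, otherwise the minimum.
        static Long combine(List<Long> timestamps) {
            Long result = Long.MAX_VALUE;
            for (Long ts : timestamps) {
                if (ts == null) {
                    return null; // one sub-plan has no estimate info => no combined estimate
                }
                result = Math.min(result, ts);
            }
            return result;
        }

        public static void main(String[] args) {
            // Two sub-plans whose guideposts were collected at t=1000 and t=2000.
            System.out.println(combine(Arrays.asList(1000L, 2000L))); // 1000
            // A sub-plan without stats makes the combined timestamp unavailable.
            System.out.println(combine(Arrays.asList(1000L, null)));  // null
        }
    }
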

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
index c6d03c1..839e7c9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
@@ -264,4 +264,9 @@ public class ListJarsQueryPlan implements QueryPlan {
     public Long getEstimatedBytesToScan() {
         return 0l;
     }
+
+    @Override
+    public Long getEstimateInfoTimestamp() throws SQLException {
+        return 0l;
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementPlan.java
index 6d381d9..c74b1c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementPlan.java
@@ -48,4 +48,10 @@ public interface StatementPlan {
      *         Returns null if the estimate cannot be provided.
      */
     public Long getEstimatedBytesToScan() throws SQLException;
+
+    /**
+     * @return timestamp at which the estimate information (estimated bytes and estimated rows) was
+     *         computed. Returns null if the information cannot be provided.
+     */
+    public Long getEstimateInfoTimestamp() throws SQLException;
 }
\ No newline at end of file
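
For context, a minimal sketch of how a caller might consume the new StatementPlan method once it has a compiled plan in hand (how the plan is obtained is out of scope here); the class and helper names below are hypothetical and not part of this patch:

    import java.sql.SQLException;
    import org.apache.phoenix.compile.StatementPlan;

    // Hypothetical helper, not part of this patch: reports how stale a plan's estimates are.
    public class EstimateInfoExample {
        static void logEstimateStaleness(StatementPlan plan) throws SQLException {
            Long bytes = plan.getEstimatedBytesToScan();
            Long ts = plan.getEstimateInfoTimestamp();
            if (bytes == null || ts == null) {
                // Estimates are unavailable, e.g. stats were never collected for the table.
                System.out.println("No estimate information available");
                return;
            }
            long ageMs = System.currentTimeMillis() - ts;
            System.out.println("Estimated " + bytes + " bytes to scan; estimate computed "
                    + ageMs + " ms ago");
        }
    }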

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
index c061ec6..62e6991 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
@@ -272,4 +272,9 @@ public class TraceQueryPlan implements QueryPlan {
     public Long getEstimatedBytesToScan() {
         return 0l;
     }
+
+    @Override
+    public Long getEstimateInfoTimestamp() throws SQLException {
+        return 0l;
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index c384292..6f45e28 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -764,6 +764,11 @@ public class UpsertCompiler {
                         public Long getEstimatedBytesToScan() throws SQLException {
                             return aggPlan.getEstimatedBytesToScan();
                         }
+
+                        @Override
+                        public Long getEstimateInfoTimestamp() throws SQLException {
+                            return aggPlan.getEstimateInfoTimestamp();
+                        }
                     };
                 }
             }
@@ -846,7 +851,11 @@ public class UpsertCompiler {
                 public Long getEstimatedBytesToScan() throws SQLException {
                     return queryPlan.getEstimatedBytesToScan();
                 }
-                
+
+                @Override
+                public Long getEstimateInfoTimestamp() throws SQLException {
+                    return queryPlan.getEstimateInfoTimestamp();
+                }
             };
         }
 
@@ -1086,6 +1095,10 @@ public class UpsertCompiler {
                 return 0l;
             }
 
+            @Override
+            public Long getEstimateInfoTimestamp() throws SQLException {
+                return 0l;
+            }
         };
     }
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 4b6e88f..8239069 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.coprocessor;
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.DoNotRetryIOException;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 0773ebc..30f89cb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -902,7 +902,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                 InternalScanner internalScanner = scanner;
                 if (scanType.equals(ScanType.COMPACT_DROP_DELETES)) {
                     try {
-                        long clientTimeStamp = TimeKeeper.SYSTEM.getCurrentTime();
+                        long clientTimeStamp = EnvironmentEdgeManager.currentTimeMillis();
                         StatisticsCollector stats = StatisticsCollectorFactory.createStatisticsCollector(
                             c.getEnvironment(), table.getNameAsString(), clientTimeStamp,
                             store.getFamily().getName());
@@ -1160,7 +1160,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
      * Package private for tests.
      */
     static class StatsCollectionCallable implements Callable<Long> {
-        private final StatisticsCollector stats;
+        private final StatisticsCollector statsCollector;
         private final Region region;
         private final RegionScanner innerScanner;
         private final Configuration config;
@@ -1168,7 +1168,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 
         StatsCollectionCallable(StatisticsCollector s, Region r, RegionScanner rs,
                 Configuration config, Scan scan) {
-            this.stats = s;
+            this.statsCollector = s;
             this.region = r;
             this.innerScanner = rs;
             this.config = config;
@@ -1194,12 +1194,12 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             long rowCount = 0;
             try {
                 if (!compactionRunning) {
-                    stats.init();
+                    statsCollector.init();
                     synchronized (innerScanner) {
                         do {
                             List<Cell> results = new ArrayList<Cell>();
                             hasMore = innerScanner.nextRaw(results);
-                            stats.collectStatistics(results);
+                            statsCollector.collectStatistics(results);
                             rowCount++;
                             compactionRunning = areStatsBeingCollectedViaCompaction();
                         } while (hasMore && !compactionRunning);
@@ -1213,7 +1213,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             } finally {
                 try {
                     if (noErrors && !compactionRunning) {
-                        stats.updateStatistic(region, scan);
+                        statsCollector.updateStatistic(region, scan);
                         logger.info("UPDATE STATISTICS finished successfully for scanner: "
                                 + innerScanner + ". Number of rows scanned: " + rowCount
                                 + ". Time: " + (System.currentTimeMillis() - startTime));
@@ -1225,7 +1225,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                 } finally {
                     try {
                         StatisticsCollectionRunTracker.getInstance(config).removeUpdateStatsCommandRegion(region.getRegionInfo());
-                        stats.close();
+                        statsCollector.close();
                     } finally {
                         try {
                             innerScanner.close();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
index 74c8d39..4c29abe 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
@@ -227,6 +227,7 @@ public class AggregatePlan extends BaseQueryPlan {
                 : new ParallelIterators(this, null, wrapParallelIteratorFactory(), scan, false, caches);
         estimatedRows = iterators.getEstimatedRowCount();
         estimatedSize = iterators.getEstimatedByteCount();
+        estimateInfoTimestamp = iterators.getEstimateInfoTimestamp();
         splits = iterators.getSplits();
         scans = iterators.getScans();
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 238a537..c1ddd44 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -114,6 +114,7 @@ public abstract class BaseQueryPlan implements QueryPlan {
     protected final Expression dynamicFilter;
     protected Long estimatedRows;
     protected Long estimatedSize;
+    protected Long estimateInfoTimestamp;
     private boolean explainPlanCalled;
     
 
@@ -503,6 +504,9 @@ public abstract class BaseQueryPlan implements QueryPlan {
         // Optimize here when getting explain plan, as queries don't get optimized until after compilation
         QueryPlan plan = context.getConnection().getQueryServices().getOptimizer().optimize(context.getStatement(), this);
         ExplainPlan exp = plan instanceof BaseQueryPlan ? new ExplainPlan(getPlanSteps(plan.iterator())) : plan.getExplainPlan();
+        this.estimatedRows = plan.getEstimatedRowsToScan();
+        this.estimatedSize = plan.getEstimatedBytesToScan();
+        this.estimateInfoTimestamp = plan.getEstimateInfoTimestamp();
         return exp;
     }
 
@@ -533,4 +537,12 @@ public abstract class BaseQueryPlan implements QueryPlan {
         return estimatedSize;
     }
 
+    @Override
+    public Long getEstimateInfoTimestamp() throws SQLException {
+        if (!explainPlanCalled) {
+            getExplainPlan();
+        }
+        return estimateInfoTimestamp;
+    }
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java
index cde1410..3c62c5b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java
@@ -151,4 +151,9 @@ public abstract class DelegateQueryPlan implements QueryPlan {
     public Long getEstimatedBytesToScan() throws SQLException {
         return delegate.getEstimatedBytesToScan();
     }
+
+    @Override
+    public Long getEstimateInfoTimestamp() throws SQLException {
+        return delegate.getEstimateInfoTimestamp();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index 879aa61..2b90dcb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.execute;
 import static org.apache.phoenix.monitoring.TaskExecutionMetricsHolder.NO_OP_INSTANCE;
 import static org.apache.phoenix.util.LogUtil.addCustomAnnotations;
 import static org.apache.phoenix.util.NumberUtil.add;
+import static org.apache.phoenix.util.NumberUtil.getMin;
 
 import java.sql.SQLException;
 import java.util.Collections;
@@ -96,6 +97,7 @@ public class HashJoinPlan extends DelegateQueryPlan {
     private List<Expression> keyRangeExpressions;
     private Long estimatedRows;
     private Long estimatedBytes;
+    private Long estimateInfoTs;
     private boolean explainPlanCalled;
     
     public static HashJoinPlan create(SelectStatement statement, 
@@ -261,8 +263,24 @@ public class HashJoinPlan extends DelegateQueryPlan {
             planSteps.add("    JOIN-SCANNER " + joinInfo.getLimit() + " ROW LIMIT");
         }
         for (SubPlan subPlan : subPlans) {
-            estimatedBytes = add(estimatedBytes, subPlan.getInnerPlan().getEstimatedBytesToScan());
-            estimatedRows = add(estimatedRows, subPlan.getInnerPlan().getEstimatedRowsToScan());
+            if (subPlan.getInnerPlan().getEstimatedBytesToScan() == null
+                    || subPlan.getInnerPlan().getEstimatedRowsToScan() == null
+                    || subPlan.getInnerPlan().getEstimateInfoTimestamp() == null) {
+                /*
+                 * If any of the sub-plans doesn't have the estimate info available, then we don't
+                 * provide an estimate for the overall plan.
+                 */
+                estimatedBytes = null;
+                estimatedRows = null;
+                estimateInfoTs = null;
+                break;
+            } else {
+                estimatedBytes =
+                        add(estimatedBytes, subPlan.getInnerPlan().getEstimatedBytesToScan());
+                estimatedRows = add(estimatedRows, subPlan.getInnerPlan().getEstimatedRowsToScan());
+                estimateInfoTs =
+                        getMin(estimateInfoTs, subPlan.getInnerPlan().getEstimateInfoTimestamp());
+            }
         }
         return new ExplainPlan(planSteps);
     }
@@ -486,6 +504,14 @@ public class HashJoinPlan extends DelegateQueryPlan {
         }
         return estimatedBytes;
     }
+
+    @Override
+    public Long getEstimateInfoTimestamp() throws SQLException {
+        if (!explainPlanCalled) {
+            getExplainPlan();
+        }
+        return estimateInfoTs;
+    }
 }
 
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/execute/LiteralResultIterationPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/LiteralResultIterationPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/LiteralResultIterationPlan.java
index 781c07e..86f59c5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/LiteralResultIterationPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/LiteralResultIterationPlan.java
@@ -125,4 +125,9 @@ public class LiteralResultIterationPlan extends BaseQueryPlan {
 	public Long getEstimatedBytesToScan() {
 		return 0l;
 	}
+
+    @Override
+    public Long getEstimateInfoTimestamp() throws SQLException {
+        return 0l;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
index f5b1af0..1e1cb0d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
@@ -64,6 +64,7 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
@@ -88,6 +89,7 @@ public class ScanPlan extends BaseQueryPlan {
     private boolean isDataToScanWithinThreshold;
     private Long serialRowsEstimate;
     private Long serialBytesEstimate;
+    private Long serialEstimateInfoTs;
 
     public ScanPlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector, Integer limit, Integer offset, OrderBy orderBy, ParallelIteratorFactory parallelIteratorFactory, boolean allowPageFilter) throws SQLException {
         this(context, statement, table, projector, limit, offset, orderBy, parallelIteratorFactory, allowPageFilter, null);
@@ -114,6 +116,7 @@ public class ScanPlan extends BaseQueryPlan {
         if (isSerial) {
             serialBytesEstimate = estimate.getFirst();
             serialRowsEstimate = estimate.getSecond();
+            serialEstimateInfoTs = EnvironmentEdgeManager.currentTimeMillis();
         }
     }
 
@@ -240,6 +243,7 @@ public class ScanPlan extends BaseQueryPlan {
         }
         estimatedRows = iterators.getEstimatedRowCount();
         estimatedSize = iterators.getEstimatedByteCount();
+        estimateInfoTimestamp = iterators.getEstimateInfoTimestamp();
         splits = iterators.getSplits();
         scans = iterators.getScans();
         if (isOffsetOnServer) {
@@ -302,5 +306,11 @@ public class ScanPlan extends BaseQueryPlan {
         return super.getEstimatedBytesToScan();
     }
 
-
+    @Override
+    public Long getEstimateInfoTimestamp() throws SQLException {
+        if (isSerial) {
+            return serialEstimateInfoTs;
+        }
+        return super.getEstimateInfoTimestamp();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
index 568094a..fab7c59 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.execute;
 
 import static org.apache.phoenix.util.NumberUtil.add;
+import static org.apache.phoenix.util.NumberUtil.getMin;
 
 import java.io.IOException;
 import java.nio.MappedByteBuffer;
@@ -92,6 +93,7 @@ public class SortMergeJoinPlan implements QueryPlan {
     private final int thresholdBytes;
     private Long estimatedBytes;
     private Long estimatedRows;
+    private Long estimateInfoTs;
     private boolean explainPlanCalled;
 
     public SortMergeJoinPlan(StatementContext context, FilterableStatement statement, TableRef table, 
@@ -164,8 +166,28 @@ public class SortMergeJoinPlan implements QueryPlan {
         for (String step : rhsPlan.getExplainPlan().getPlanSteps()) {
             steps.add("    " + step);            
         }
-        estimatedBytes = add(add(estimatedBytes, lhsPlan.getEstimatedBytesToScan()), rhsPlan.getEstimatedBytesToScan());
-        estimatedRows = add(add(estimatedRows, lhsPlan.getEstimatedRowsToScan()), rhsPlan.getEstimatedRowsToScan());
+        if ((lhsPlan.getEstimatedBytesToScan() == null || rhsPlan.getEstimatedBytesToScan() == null)
+                || (lhsPlan.getEstimatedRowsToScan() == null
+                        || rhsPlan.getEstimatedRowsToScan() == null)
+                || (lhsPlan.getEstimateInfoTimestamp() == null
+                        || rhsPlan.getEstimateInfoTimestamp() == null)) {
+            /*
+             * If any of the sub-plans doesn't have the estimate info available, then we don't
+             * provide an estimate for the overall plan.
+             */
+            estimatedBytes = null;
+            estimatedRows = null;
+            estimateInfoTs = null;
+        } else {
+            estimatedBytes =
+                    add(add(estimatedBytes, lhsPlan.getEstimatedBytesToScan()),
+                        rhsPlan.getEstimatedBytesToScan());
+            estimatedRows =
+                    add(add(estimatedRows, lhsPlan.getEstimatedRowsToScan()),
+                        rhsPlan.getEstimatedRowsToScan());
+            estimateInfoTs =
+                    getMin(lhsPlan.getEstimateInfoTimestamp(), rhsPlan.getEstimateInfoTimestamp());
+        }
         return new ExplainPlan(steps);
     }
 
@@ -727,4 +749,12 @@ public class SortMergeJoinPlan implements QueryPlan {
         }
         return estimatedBytes;
     }
+
+    @Override
+    public Long getEstimateInfoTimestamp() throws SQLException {
+        if (!explainPlanCalled) {
+            getExplainPlan();
+        }
+        return estimateInfoTs;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd2b064a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
index fd50a83..e06522f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.execute;
 
 import static org.apache.phoenix.util.NumberUtil.add;
+import static org.apache.phoenix.util.NumberUtil.getMin;
 
 import java.sql.ParameterMetaData;
 import java.sql.SQLException;
@@ -66,6 +67,7 @@ public class UnionPlan implements QueryPlan {
     private UnionResultIterators iterators;
     private Long estimatedRows;
     private Long estimatedBytes;
+    private Long estimateInfoTs;
     private boolean explainPlanCalled;
 
     public UnionPlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector,
@@ -182,8 +184,21 @@ public class UnionPlan implements QueryPlan {
             steps.set(i, "    " + steps.get(i));
         }
         for (QueryPlan plan : plans) {
-            estimatedRows = add(estimatedRows, plan.getEstimatedRowsToScan());
-            estimatedBytes = add(estimatedBytes, plan.getEstimatedBytesToScan());
+            if (plan.getEstimatedBytesToScan() == null || plan.getEstimatedRowsToScan() == null
+                    || plan.getEstimateInfoTimestamp() == null) {
+                /*
+                 * If any of the sub-plans doesn't have the estimate info available, then we don't
+                 * provide an estimate for the overall plan.
+                 */
+                estimatedBytes = null;
+                estimatedRows = null;
+                estimateInfoTs = null;
+                break;
+            } else {
+                estimatedBytes = add(estimatedBytes, plan.getEstimatedBytesToScan());
+                estimatedRows = add(estimatedRows, plan.getEstimatedRowsToScan());
+                estimateInfoTs = getMin(estimateInfoTs, plan.getEstimateInfoTimestamp());
+            }
         }
         return new ExplainPlan(steps);
     }
@@ -253,4 +268,12 @@ public class UnionPlan implements QueryPlan {
         }
         return estimatedBytes;
     }
+
+    @Override
+    public Long getEstimateInfoTimestamp() throws SQLException {
+        if (!explainPlanCalled) {
+            getExplainPlan();
+        }
+        return estimateInfoTs;
+    }
 }
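
HashJoinPlan, SortMergeJoinPlan and UnionPlan above all fold the sub-plan timestamps together with NumberUtil.getMin, so the overall plan surfaces the oldest (stalest) estimate. getMin's body is not part of this excerpt, but the call sites only work if it is a null-safe minimum (a null accumulator yields the other argument unchanged); a minimal sketch under that assumption, with an illustrative class name:

    // A sketch assuming NumberUtil.getMin is a null-safe minimum; the real implementation
    // ships with this patch but is not shown in this excerpt.
    public class NullSafeMinExample {
        static Long getMin(Long a, Long b) {
            if (a == null) {
                return b; // a null accumulator yields the other value unchanged
            }
            if (b == null) {
                return a;
            }
            return Math.min(a, b);
        }

        public static void main(String[] args) {
            // Composite plans report the oldest sub-plan timestamp, i.e. the stalest
            // estimate: stats collected at t=1000 and t=2000 surface as 1000 overall.
            System.out.println(getMin(null, 2000L));  // 2000
            System.out.println(getMin(1000L, 2000L)); // 1000
        }
    }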

