phoenix-commits mailing list archives

From jamestay...@apache.org
Subject phoenix git commit: PHOENIX-4105 Fix tests broken due to PHOENIX-4089
Date Fri, 18 Aug 2017 22:51:29 GMT
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 371d23740 -> 8be8f9494


PHOENIX-4105 Fix tests broken due to PHOENIX-4089


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8be8f949
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8be8f949
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8be8f949

Branch: refs/heads/4.x-HBase-0.98
Commit: 8be8f949467a4d29c6cdda6e16c9663f9bfef8f5
Parents: 371d237
Author: James Taylor <jamestaylor@apache.org>
Authored: Fri Aug 18 15:49:54 2017 -0700
Committer: James Taylor <jamestaylor@apache.org>
Committed: Fri Aug 18 15:51:20 2017 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/ConcurrentMutationsIT.java  |   2 +-
 .../end2end/IndexToolForPartialBuildIT.java     |   6 +-
 .../UpdateCacheAcrossDifferentClientsIT.java    |  47 ++++
 .../phoenix/end2end/index/ImmutableIndexIT.java |   2 +-
 .../end2end/index/MutableIndexFailureIT.java    |   2 +-
 .../end2end/index/PartialIndexRebuilderIT.java  | 234 +++++++++++--------
 .../coprocessor/MetaDataRegionObserver.java     |   7 +-
 .../phoenix/util/DefaultEnvironmentEdge.java    |   3 +-
 .../apache/phoenix/util/EnvironmentEdge.java    |   9 +-
 .../phoenix/util/EnvironmentEdgeManager.java    |   1 +
 .../java/org/apache/phoenix/util/TestUtil.java  |  39 ++--
 11 files changed, 229 insertions(+), 123 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8be8f949/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsIT.java
index e674d8f..a690541 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsIT.java
@@ -230,7 +230,7 @@ public class ConcurrentMutationsIT extends BaseUniqueNamesOwnClusterIT {
     }
     
     @Test
-    @Repeat(25)
+    @Repeat(10)
     public void testConcurrentUpserts() throws Exception {
         int nThreads = 8;
         final int batchSize = 200;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8be8f949/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
index 0bba36a..8c5da69 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
@@ -99,6 +99,7 @@ public class IndexToolForPartialBuildIT extends BaseOwnClusterIT {
         serverProps.put("hbase.client.pause", "5000");
         serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB, Boolean.FALSE.toString());
         serverProps.put(QueryServices.INDEX_FAILURE_DISABLE_INDEX, Boolean.TRUE.toString());
+        serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB, Long.toString(2000));
         return serverProps;
     }
     
@@ -200,6 +201,10 @@ public class IndexToolForPartialBuildIT extends BaseOwnClusterIT {
                 assertTrue(rs.next());
                 assertEquals("xxUNAME" + i*1000 + "_xyz", rs.getString(1));
             }
+            for (int i = 6; i <= 7; i++) {
+                assertTrue(rs.next());
+                assertEquals("xxUNAME" + i*1000 + "_xyz", rs.getString(1));
+            }
             assertFalse(rs.next());
             // run the index MR job.
             final IndexTool indexingTool = new IndexTool();
@@ -227,7 +232,6 @@ public class IndexToolForPartialBuildIT extends BaseOwnClusterIT {
                 assertTrue(rs.next());
                 assertEquals("xxUNAME" + i*1000 + "_xyz", rs.getString(1));
             }
-
             assertFalse(rs.next());
 
            // conn.createStatement().execute(String.format("DROP INDEX  %s ON %s", indxTable, fullTableName));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8be8f949/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
index 08dbf32..ddeebc0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
@@ -21,12 +21,21 @@ import java.sql.ResultSet;
 import java.util.Map;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PMetaData;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -100,6 +109,44 @@ public class UpdateCacheAcrossDifferentClientsIT extends BaseUniqueNamesOwnClusterIT {
     }
 
     @Test
+    public void testTableSentWhenIndexStateChanges() throws Throwable {
+        // Create connections 1 and 2
+        Properties longRunningProps = new Properties(); // Must update config before starting server
+        longRunningProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
+            QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
+        longRunningProps.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
+        Connection conn1 = DriverManager.getConnection(getUrl(), longRunningProps);
+        String url2 = getUrl() + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + "LongRunningQueries";
+        Connection conn2 = DriverManager.getConnection(url2, longRunningProps);
+        conn1.setAutoCommit(true);
+        conn2.setAutoCommit(true);
+        try {
+            String schemaName = generateUniqueName();
+            String tableName = generateUniqueName();
+            String indexName = generateUniqueName();
+            final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+            String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+            conn1.createStatement().execute("CREATE TABLE " + fullTableName + "(k INTEGER PRIMARY KEY, v1 INTEGER, v2 INTEGER) COLUMN_ENCODED_BYTES = 0, STORE_NULLS=true");
+            conn1.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2)");
+            HTableInterface metaTable = conn2.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+            IndexUtil.updateIndexState(fullIndexName, 0, metaTable, PIndexState.DISABLE);
+            conn2.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(1,2,3)");
+            conn2.commit();
+            conn1.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(4,5,6)");
+            conn1.commit();
+            PTableKey key = new PTableKey(null,fullTableName);
+            PMetaData metaCache = conn1.unwrap(PhoenixConnection.class).getMetaDataCache();
+            PTable table = metaCache.getTableRef(key).getTable();
+            for (PTable index : table.getIndexes()) {
+                assertEquals(PIndexState.DISABLE, index.getIndexState());
+            }
+        } finally {
+            conn1.close();
+            conn2.close();
+        }
+    }
+
+    @Test
     public void testUpdateCacheFrequencyWithAddColumn() throws Exception {
         // Create connections 1 and 2
         Properties longRunningProps = new Properties(); // Must update config before starting server

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8be8f949/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
index bf38c78..a933ccb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
@@ -184,7 +184,7 @@ public class ImmutableIndexIT extends BaseUniqueNamesOwnClusterIT {
             String upsertSelect = "UPSERT INTO " + TABLE_NAME + "(varchar_pk, char_pk, int_pk, long_pk, decimal_pk, date_pk) " +
                     "SELECT varchar_pk||'_upsert_select', char_pk, int_pk, long_pk, decimal_pk, date_pk FROM "+ TABLE_NAME;
             conn.createStatement().execute(upsertSelect);
-            TestUtil.waitForIndexRebuild(conn, indexName, PIndexState.ACTIVE);
+            TestUtil.waitForIndexState(conn, indexName, PIndexState.ACTIVE);
             ResultSet rs;
             rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ COUNT(*) FROM " + TABLE_NAME);
             assertTrue(rs.next());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8be8f949/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index bc5237a..4b12de1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -329,7 +329,7 @@ public class MutableIndexFailureIT extends BaseTest {
     private void waitForIndexRebuild(Connection conn, String index, PIndexState expectedIndexState) throws InterruptedException, SQLException {
         if (!transactional) {
             String fullIndexName = SchemaUtil.getTableName(schema, index);
-            TestUtil.waitForIndexRebuild(conn, fullIndexName, expectedIndexState);
+            TestUtil.waitForIndexState(conn, fullIndexName, expectedIndexState);
         }
     }
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8be8f949/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index bc0dda8..77f32ac 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -24,12 +24,12 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.util.Map;
-import java.util.Properties;
 import java.util.Random;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -39,11 +39,10 @@ import org.apache.phoenix.schema.PMetaData;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.EnvironmentEdge;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.IndexScrutiny;
 import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.Repeat;
 import org.apache.phoenix.util.RunUntilFailure;
@@ -58,72 +57,69 @@ import com.google.common.collect.Maps;
 @RunWith(RunUntilFailure.class)
 public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
     private static final Random RAND = new Random(5);
+    private static final int WAIT_AFTER_DISABLED = 10000;
 
     @BeforeClass
     public static void doSetup() throws Exception {
         Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(10);
         serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB, Boolean.TRUE.toString());
         serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB, "1000");
-        serverProps.put(QueryServices.INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD, "30000"); // give up rebuilding after 30 seconds
-        serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB, Long.toString(2000));
+        serverProps.put(QueryServices.INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD, "120000"); // give up rebuilding after 2 minutes
+        serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB, Long.toString(WAIT_AFTER_DISABLED));
         setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), ReadOnlyProps.EMPTY_PROPS);
     }
 
     private static boolean mutateRandomly(Connection conn, String fullTableName, int nRows) throws Exception {
-        return mutateRandomly(conn, fullTableName, nRows, false);
+        return mutateRandomly(conn, fullTableName, nRows, false, null);
     }
     
     private static boolean hasInactiveIndex(PMetaData metaCache, PTableKey key) throws TableNotFoundException {
         PTable table = metaCache.getTableRef(key).getTable();
         for (PTable index : table.getIndexes()) {
-            if (index.getIndexState() == PIndexState.INACTIVE) {
+            if (index.getIndexState() == PIndexState.ACTIVE) {
                 return true;
             }
         }
         return false;
     }
     
-    private static boolean isAllActiveIndex(PMetaData metaCache, PTableKey key) throws TableNotFoundException {
-        PTable table = metaCache.getTableRef(key).getTable();
-        for (PTable index : table.getIndexes()) {
-            if (index.getIndexState() != PIndexState.ACTIVE) {
-                return false;
-            }
-        }
-        return true;
-    }
-    
-    private static boolean mutateRandomly(Connection conn, String fullTableName, int nRows, boolean checkForInactive) throws SQLException, InterruptedException {
+    private static boolean mutateRandomly(Connection conn, String fullTableName, int nRows, boolean checkForInactive, String fullIndexName) throws SQLException, InterruptedException {
         PTableKey key = new PTableKey(null,fullTableName);
         PMetaData metaCache = conn.unwrap(PhoenixConnection.class).getMetaDataCache();
         boolean hasInactiveIndex = false;
-        int batchSize = checkForInactive && !isAllActiveIndex(metaCache, key) ? 1 : 200;
+        int batchSize = 200;
+        if (checkForInactive) {
+            batchSize = 3;
+        }
         for (int i = 0; i < 10000; i++) {
             int pk = Math.abs(RAND.nextInt()) % nRows;
             int v1 = Math.abs(RAND.nextInt()) % nRows;
             int v2 = Math.abs(RAND.nextInt()) % nRows;
-            if (checkForInactive && hasInactiveIndex(metaCache, key)) {
-                checkForInactive = false;
-                hasInactiveIndex = true;
-                batchSize = 200;
-            }
             conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(" + pk + "," + v1 + "," + v2 + ")");
             if (i % batchSize == 0) {
                 conn.commit();
-                if (checkForInactive) Thread.sleep(100);
+                if (checkForInactive) {
+                    if (hasInactiveIndex(metaCache, key)) {
+                        checkForInactive = false;
+                        hasInactiveIndex = true;
+                        batchSize = 200;
+                    }
+                }
             }
         }
         conn.commit();
         for (int i = 0; i < 10000; i++) {
             int pk = Math.abs(RAND.nextInt()) % nRows;
-            if (checkForInactive && hasInactiveIndex(metaCache, key)) {
-                checkForInactive = false;
-                hasInactiveIndex = true;
-                batchSize = 200;
-            }
             conn.createStatement().execute("DELETE FROM " + fullTableName + " WHERE k= " + pk);
             if (i % batchSize == 0) {
                 conn.commit();
+                if (checkForInactive) {
+                    if (hasInactiveIndex(metaCache, key)) {
+                        checkForInactive = false;
+                        hasInactiveIndex = true;
+                        batchSize = 200;
+                    }
+                }
             }
         }
         conn.commit();
@@ -131,14 +127,16 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
             int pk = Math.abs(RAND.nextInt()) % nRows;
             int v1 = Math.abs(RAND.nextInt()) % nRows;
             int v2 = Math.abs(RAND.nextInt()) % nRows;
-            if (checkForInactive && hasInactiveIndex(metaCache, key)) {
-                checkForInactive = false;
-                hasInactiveIndex = true;
-                batchSize = 200;
-            }
             conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(" + pk + "," + v1 + "," + v2 + ")");
             if (i % batchSize == 0) {
                 conn.commit();
+                if (checkForInactive) {
+                    if (hasInactiveIndex(metaCache, key)) {
+                        checkForInactive = false;
+                        hasInactiveIndex = true;
+                        batchSize = 200;
+                    }
+                }
             }
         }
         conn.commit();
@@ -146,7 +144,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
     }
     
     @Test
-    @Repeat(20)
+    @Repeat(10)
     public void testDeleteAndUpsertAfterFailure() throws Throwable {
         final int nRows = 10;
         String schemaName = generateUniqueName();
@@ -162,7 +160,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
             HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
             IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
             mutateRandomly(conn, fullTableName, nRows);
-            TestUtil.waitForIndexRebuild(conn, fullIndexName, PIndexState.ACTIVE);
+            TestUtil.waitForIndexState(conn, fullIndexName, PIndexState.ACTIVE);
             
             long actualRowCount = IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
             assertEquals(nRows,actualRowCount);
@@ -176,24 +174,20 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
         String tableName = generateUniqueName();
         String indexName = generateUniqueName();
         final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
-        String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+        final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k INTEGER PRIMARY KEY, v1 INTEGER, v2 INTEGER) COLUMN_ENCODED_BYTES = 0, STORE_NULLS=true");
             conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2)");
             mutateRandomly(conn, fullTableName, nRows);
-            long disableTS = EnvironmentEdgeManager.currentTimeMillis();
             HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
-            IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
-            mutateRandomly(conn, fullTableName, nRows);
             final boolean[] hasInactiveIndex = new boolean[1];
             final CountDownLatch doneSignal = new CountDownLatch(1);
             Runnable r = new Runnable() {
 
                 @Override
                 public void run() {
-                    try {
-                        Connection conn = DriverManager.getConnection(getUrl());
-                        hasInactiveIndex[0] = mutateRandomly(conn, fullTableName, nRows, true);
+                    try (Connection conn = DriverManager.getConnection(getUrl())) {
+                        hasInactiveIndex[0] = mutateRandomly(conn, fullTableName, nRows, true, fullIndexName);
                     } catch (Exception e) {
                         throw new RuntimeException(e);
                     } finally {
@@ -205,8 +199,10 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
             Thread t = new Thread(r);
             t.setDaemon(true);
             t.start();
-            TestUtil.waitForIndexRebuild(conn, fullIndexName, PIndexState.ACTIVE);
-            doneSignal.await(120, TimeUnit.SECONDS);
+            long disableTS = EnvironmentEdgeManager.currentTimeMillis();
+            IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
+            doneSignal.await(60, TimeUnit.SECONDS);
+            TestUtil.waitForIndexState(conn, fullIndexName, PIndexState.ACTIVE);
             assertTrue(hasInactiveIndex[0]);
             
             long actualRowCount = IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
@@ -214,7 +210,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
        }
     }
 
-      @Test
+    @Test
     public void testMultiVersionsAfterFailure() throws Throwable {
         String schemaName = generateUniqueName();
         String tableName = generateUniqueName();
@@ -235,7 +231,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
             conn.commit();
             conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','eeeee')");
             conn.commit();
-            TestUtil.waitForIndexRebuild(conn, fullIndexName, PIndexState.ACTIVE);
+            TestUtil.waitForIndexState(conn, fullIndexName, PIndexState.ACTIVE);
 
             IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
         }
@@ -262,7 +258,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
             conn.commit();
             conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','ccc')");
             conn.commit();
-            TestUtil.waitForIndexRebuild(conn, fullIndexName, PIndexState.ACTIVE);
+            TestUtil.waitForIndexState(conn, fullIndexName, PIndexState.ACTIVE);
 
             IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
         }
@@ -289,7 +285,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
             conn.commit();
             conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a',null)");
             conn.commit();
-            TestUtil.waitForIndexRebuild(conn, fullIndexName, PIndexState.ACTIVE);
+            TestUtil.waitForIndexState(conn, fullIndexName, PIndexState.ACTIVE);
 
             IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
         }
@@ -314,7 +310,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
             conn.commit();
             conn.createStatement().execute("DELETE FROM " + fullTableName);
             conn.commit();
-            TestUtil.waitForIndexRebuild(conn, fullIndexName, PIndexState.ACTIVE);
+            TestUtil.waitForIndexState(conn, fullIndexName, PIndexState.ACTIVE);
 
             IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
        }
@@ -339,38 +335,57 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
             IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
             conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','b')");
             conn.commit();
-            TestUtil.waitForIndexRebuild(conn, fullIndexName, PIndexState.ACTIVE);
+            TestUtil.waitForIndexState(conn, fullIndexName, PIndexState.ACTIVE);
 
             IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
         }
     }
     
+    private static class MyClock extends EnvironmentEdge {
+        public volatile long time;
+        
+        public MyClock (long time) {
+            this.time = time;
+        }
+        
+        @Override
+        public long currentTime() {
+            return time;
+        }
+    }
+    
     @Test
     public void testMultiValuesAtSameTS() throws Throwable {
         String schemaName = generateUniqueName();
         String tableName = generateUniqueName();
         String indexName = generateUniqueName();
-        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
-        String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+        final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+        final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+        final MyClock clock = new MyClock(1000);
+        EnvironmentEdgeManager.injectEdge(clock);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k VARCHAR PRIMARY KEY, v VARCHAR) COLUMN_ENCODED_BYTES = 0, STORE_NULLS=true");
+            clock.time += 100;
             conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v)");
+            clock.time += 100;
             conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a')");
             conn.commit();
-            long disableTS = EnvironmentEdgeManager.currentTimeMillis();
+            clock.time += 100;
             HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
-            IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
-            Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
-            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(disableTS));
-            try (Connection conn2 = DriverManager.getConnection(getUrl(), props)) {
-                conn2.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','bb')");
-                conn2.commit();
-                conn2.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','ccc')");
-                conn2.commit();
-            }
-            TestUtil.waitForIndexRebuild(conn, fullIndexName, PIndexState.ACTIVE);
-
+            IndexUtil.updateIndexState(fullIndexName, clock.currentTime(), metaTable, PIndexState.DISABLE);
+            conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','bb')");
+            conn.commit();
+            conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','ccc')");
+            conn.commit();
+            clock.time += 1000;
+            advanceClockUntilPartialRebuildStarts(fullIndexName, clock);
+            TestUtil.waitForIndexState(conn, fullIndexName, PIndexState.ACTIVE);
+            clock.time += 100;
             IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
+            TestUtil.dumpTable(conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullTableName)));
+            TestUtil.dumpTable(conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullIndexName)));
+        } finally {
+            EnvironmentEdgeManager.injectEdge(null);
         }
     }
     
@@ -379,27 +394,31 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
         String schemaName = generateUniqueName();
         String tableName = generateUniqueName();
         String indexName = generateUniqueName();
-        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
-        String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+        final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+        final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+        final MyClock clock = new MyClock(1000);
+        EnvironmentEdgeManager.injectEdge(clock);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k VARCHAR PRIMARY KEY, v VARCHAR) COLUMN_ENCODED_BYTES = 0, STORE_NULLS=true");
+            clock.time += 100;
             conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v)");
+            clock.time += 100;
             conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a')");
             conn.commit();
-            long disableTS = EnvironmentEdgeManager.currentTimeMillis();
+            clock.time += 100;
             HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
-            IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
-            Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
-            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(disableTS));
-            try (Connection conn2 = DriverManager.getConnection(getUrl(), props)) {
-                conn2.createStatement().execute("DELETE FROM " + fullTableName + " WHERE k='a'");
-                conn2.commit();
-                conn2.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','ccc')");
-                conn2.commit();
-            }
-            TestUtil.waitForIndexRebuild(conn, fullIndexName, PIndexState.ACTIVE);
-
+            IndexUtil.updateIndexState(fullIndexName, clock.currentTime(), metaTable, PIndexState.DISABLE);
+            conn.createStatement().execute("DELETE FROM " + fullTableName + " WHERE k='a'");
+            conn.commit();
+            conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','ccc')");
+            conn.commit();
+            clock.time += 1000;
+            advanceClockUntilPartialRebuildStarts(fullIndexName, clock);
+            TestUtil.waitForIndexState(conn, fullIndexName, PIndexState.ACTIVE);
+            clock.time += 100;
             IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
+        } finally {
+            EnvironmentEdgeManager.injectEdge(null);
         }
     }
     
@@ -408,27 +427,54 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
         String schemaName = generateUniqueName();
         String tableName = generateUniqueName();
         String indexName = generateUniqueName();
-        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
-        String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+        final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+        final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+        final MyClock clock = new MyClock(1000);
+        EnvironmentEdgeManager.injectEdge(clock);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k VARCHAR PRIMARY KEY, v VARCHAR) COLUMN_ENCODED_BYTES = 0, STORE_NULLS=true");
+            clock.time += 100;
             conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v)");
+            clock.time += 100;
             conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a')");
             conn.commit();
-            long disableTS = EnvironmentEdgeManager.currentTimeMillis();
+            clock.time += 100;
             HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
-            IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
-            Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
-            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(disableTS));
-            try (Connection conn2 = DriverManager.getConnection(getUrl(), props)) {
-                conn2.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','ccc')");
-                conn2.commit();
-                conn2.createStatement().execute("DELETE FROM " + fullTableName + " WHERE k='a'");
-                conn2.commit();
-            }
-            TestUtil.waitForIndexRebuild(conn, fullIndexName, PIndexState.ACTIVE);
-
+            IndexUtil.updateIndexState(fullIndexName, clock.currentTime(), metaTable, PIndexState.DISABLE);
+            conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','ccc')");
+            conn.commit();
+            conn.createStatement().execute("DELETE FROM " + fullTableName + " WHERE k='a'");
+            conn.commit();
+            clock.time += 1000;
+            advanceClockUntilPartialRebuildStarts(fullIndexName, clock);
+            TestUtil.waitForIndexState(conn, fullIndexName, PIndexState.ACTIVE);
+            clock.time += 100;
+            TestUtil.dumpTable(conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullTableName)));
+            TestUtil.dumpTable(conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullIndexName)));
             IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
+        } finally {
+            EnvironmentEdgeManager.injectEdge(null);
         }
     }
+
+    private static void advanceClockUntilPartialRebuildStarts(final String fullIndexName, final MyClock clock) {
+        Runnable r = new Runnable() {
+            @Override
+            public void run() {
+                try (Connection conn = DriverManager.getConnection(getUrl())) {
+                  int nTries = 10;
+                    while (--nTries >0 && !TestUtil.checkIndexState(conn, fullIndexName, PIndexState.INACTIVE)) {
+                        Thread.sleep(1000);
+                        clock.time += 1000;
+                    }
+                    clock.time += WAIT_AFTER_DISABLED + 1000;
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            }
+        };
+        Thread t = new Thread(r);
+        t.setDaemon(true);
+        t.start();
+    }
 }
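
For reference, the clock-injection pattern the tests above now rely on reduces to the following minimal sketch. It uses only the Phoenix classes touched in this commit (EnvironmentEdge, EnvironmentEdgeManager); the names TestClock and InjectedClockExample and the literal times are illustrative and not part of the commit.

import org.apache.phoenix.util.EnvironmentEdge;
import org.apache.phoenix.util.EnvironmentEdgeManager;

public class InjectedClockExample {
    // Same shape as the MyClock inner class added above: only currentTime() is overridden.
    private static class TestClock extends EnvironmentEdge {
        volatile long time;
        TestClock(long time) { this.time = time; }
        @Override
        public long currentTime() { return time; }
    }

    public static void main(String[] args) {
        TestClock clock = new TestClock(1000L);
        EnvironmentEdgeManager.injectEdge(clock);
        try {
            long t0 = EnvironmentEdgeManager.currentTimeMillis(); // 1000
            clock.time += 10000;                                  // advance time deterministically
            long t1 = EnvironmentEdgeManager.currentTimeMillis(); // 11000
            System.out.println(t0 + " -> " + t1);
        } finally {
            EnvironmentEdgeManager.injectEdge(null); // restore the default system-clock edge
        }
    }
}

The tests advance clock.time explicitly instead of sleeping past wall-clock thresholds, which keeps the rebuilder's timing checks deterministic.
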

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8be8f949/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 063689f..350f4cd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -78,6 +78,7 @@ import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -313,7 +314,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                                 + indexPTable.getName() + " are online.");
                         continue;
                     }
-                    if (System.currentTimeMillis() - Math.abs(indexDisableTimestamp) > indexDisableTimestampThreshold) {
+                    if (EnvironmentEdgeManager.currentTimeMillis() - Math.abs(indexDisableTimestamp) > indexDisableTimestampThreshold) {
                         /*
                          * It has been too long since the index has been disabled and any future
                          * attempts to reenable it likely will fail. So we are going to mark the
@@ -339,7 +340,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.INACTIVE, null);
                         continue; // Must wait until clients start to do index maintenance again
                     }
-                    long currentTime = System.currentTimeMillis();
+                    long currentTime = EnvironmentEdgeManager.currentTimeMillis();
                     long forwardOverlapDurationMs = env.getConfiguration().getLong(
                             QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB,

                                     QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME);
@@ -513,7 +514,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
             if (disabledTimeStamp < 0 || rebuildIndexBatchSize > (HConstants.LATEST_TIMESTAMP
                     - disabledTimeStamp)) { return HConstants.LATEST_TIMESTAMP; }
             long timestampForNextBatch = disabledTimeStamp + rebuildIndexBatchSize;
-			if (timestampForNextBatch < 0 || timestampForNextBatch > System.currentTimeMillis()
+			if (timestampForNextBatch < 0 || timestampForNextBatch > EnvironmentEdgeManager.currentTimeMillis()
 					|| (noOfBatches != null && noOfBatches > configuredBatches)) {
 				// if timestampForNextBatch cross current time , then we should
 				// build the complete index

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8be8f949/phoenix-core/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java b/phoenix-core/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java
index 6f917e3..8dc63b1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java
@@ -21,7 +21,7 @@ package org.apache.phoenix.util;
 /**
  * Default implementation of an environment edge.
  */
-public class DefaultEnvironmentEdge implements EnvironmentEdge {
+public class DefaultEnvironmentEdge extends EnvironmentEdge {
   /**
    * {@inheritDoc}
    * <p/>
@@ -31,4 +31,5 @@ public class DefaultEnvironmentEdge implements EnvironmentEdge {
   public long currentTime() {
     return System.currentTimeMillis();
   }
+
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8be8f949/phoenix-core/src/main/java/org/apache/phoenix/util/EnvironmentEdge.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/EnvironmentEdge.java b/phoenix-core/src/main/java/org/apache/phoenix/util/EnvironmentEdge.java
index 2610c5c..91a5ed3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/EnvironmentEdge.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/EnvironmentEdge.java
@@ -24,11 +24,16 @@ package org.apache.phoenix.util;
  *
  * @see EnvironmentEdgeManager
  */
-public interface EnvironmentEdge {
+public abstract class EnvironmentEdge implements org.apache.hadoop.hbase.util.EnvironmentEdge {
   /**
    * Returns the currentTime.
    *
    * @return Current time.
    */
-  long currentTime();
+  abstract public long currentTime();
+  
+  @Override
+  public final long currentTimeMillis() {
+      return currentTime();
+  }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8be8f949/phoenix-core/src/main/java/org/apache/phoenix/util/EnvironmentEdgeManager.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/EnvironmentEdgeManager.java b/phoenix-core/src/main/java/org/apache/phoenix/util/EnvironmentEdgeManager.java
index 04d3bd3..775dd8c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/EnvironmentEdgeManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/EnvironmentEdgeManager.java
@@ -55,6 +55,7 @@ public class EnvironmentEdgeManager {
    * @param edge the new edge.
    */
   public static void injectEdge(EnvironmentEdge edge) {
+    org.apache.hadoop.hbase.util.EnvironmentEdgeManager.injectEdge(edge);
     if (edge == null) {
       reset();
     } else {
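
Because injectEdge now forwards to the HBase EnvironmentEdgeManager as well, a single injected edge drives both the Phoenix and the HBase time lookups. A hedged sketch of that effect is below; it assumes the HBase 0.98-era static accessor currentTimeMillis() on the HBase manager, and the fixed value 42 is purely illustrative.

import org.apache.phoenix.util.EnvironmentEdge;
import org.apache.phoenix.util.EnvironmentEdgeManager;

public class DualManagerClockExample {
    public static void main(String[] args) {
        EnvironmentEdge fixed = new EnvironmentEdge() {
            @Override
            public long currentTime() { return 42L; } // every caller sees the same instant
        };
        EnvironmentEdgeManager.injectEdge(fixed);
        try {
            // Both managers now report the injected value.
            System.out.println(EnvironmentEdgeManager.currentTimeMillis());                              // 42
            System.out.println(org.apache.hadoop.hbase.util.EnvironmentEdgeManager.currentTimeMillis()); // 42
        } finally {
            EnvironmentEdgeManager.injectEdge(null); // restore the default edge
        }
    }
}
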

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8be8f949/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 6001f59..35d24f8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -855,29 +855,30 @@ public class TestUtil {
         System.out.println("-----------------------------------------------");
     }
 
-    public static void waitForIndexRebuild(Connection conn, String fullIndexName, PIndexState expectedIndexState) throws InterruptedException, SQLException {
-        boolean isActive = false;
-        String schema = SchemaUtil.getSchemaNameFromFullName(fullIndexName);
-        String index = SchemaUtil.getTableNameFromFullName(fullIndexName);
-        int maxTries = 12, nTries = 0;
+    public static void waitForIndexState(Connection conn, String fullIndexName, PIndexState expectedIndexState) throws InterruptedException, SQLException {
+        int maxTries = 300, nTries = 0;
         do {
-            Thread.sleep(5 * 1000); // sleep 5 secs
-            String query = "SELECT CAST(" + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " AS BIGINT) FROM " +
-                    PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " WHERE (" + PhoenixDatabaseMetaData.TABLE_SCHEM + "," + PhoenixDatabaseMetaData.TABLE_NAME
-                    + ") = (" + "'" + schema + "','" + index + "') "
-                    + "AND " + PhoenixDatabaseMetaData.COLUMN_FAMILY + " IS NULL AND " + PhoenixDatabaseMetaData.COLUMN_NAME + " IS NULL"
-                    + " AND " + PhoenixDatabaseMetaData.INDEX_STATE + " = '" + expectedIndexState.getSerializedValue() + "'";
-            ResultSet rs = conn.createStatement().executeQuery(query);
-            if (rs.next() && expectedIndexState == PIndexState.ACTIVE) {
-                if (rs.getLong(1) == 0 && !rs.wasNull()) {
-                    isActive = true;
-                    break;
-                }
+            Thread.sleep(1000); // sleep 1 sec
+            if (checkIndexState(conn, fullIndexName, expectedIndexState)) {
+                return;
             }
         } while (++nTries < maxTries);
-        if (expectedIndexState == PIndexState.ACTIVE) {
-            assertTrue(isActive);
+        fail("Ran out of time waiting for index state to become " + expectedIndexState);
+    }
+
+    public static boolean checkIndexState(Connection conn, String fullIndexName, PIndexState expectedIndexState) throws InterruptedException, SQLException {
+        String schema = SchemaUtil.getSchemaNameFromFullName(fullIndexName);
+        String index = SchemaUtil.getTableNameFromFullName(fullIndexName);
+        String query = "SELECT CAST(" + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " AS BIGINT) FROM " +
+                PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " WHERE (" + PhoenixDatabaseMetaData.TABLE_SCHEM + "," + PhoenixDatabaseMetaData.TABLE_NAME
+                + ") = (" + "'" + schema + "','" + index + "') "
+                + "AND " + PhoenixDatabaseMetaData.COLUMN_FAMILY + " IS NULL AND " + PhoenixDatabaseMetaData.COLUMN_NAME + " IS NULL"
+                + " AND " + PhoenixDatabaseMetaData.INDEX_STATE + " = '" + expectedIndexState.getSerializedValue() + "'";
+        ResultSet rs = conn.createStatement().executeQuery(query);
+        if (rs.next()) {
+            return expectedIndexState != PIndexState.ACTIVE || (rs.getLong(1) == 0 && !rs.wasNull());
         }
+        return false;
     }
 
     public static long getRowCount(Connection conn, String tableName) throws SQLException {
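
A hedged usage sketch of the renamed helpers follows; the JDBC URL and the index name "S.I1" are placeholders, not values from the commit.

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.phoenix.schema.PIndexState;
import org.apache.phoenix.util.TestUtil;

public class WaitForIndexStateExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // One-shot check against SYSTEM.CATALOG; for ACTIVE it also requires the
            // INDEX_DISABLE_TIMESTAMP to be cleared (0).
            boolean active = TestUtil.checkIndexState(conn, "S.I1", PIndexState.ACTIVE);
            System.out.println("active now? " + active);
            // Polling wait: one-second sleeps, up to 300 tries, then the test fails.
            TestUtil.waitForIndexState(conn, "S.I1", PIndexState.ACTIVE);
        }
    }
}
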

