phoenix-commits mailing list archives

From jeffr...@apache.org
Subject git commit: Phoenix-950: Improve Secondary Index Update Failure Handling
Date Wed, 30 Jul 2014 21:39:19 GMT
Repository: phoenix
Updated Branches:
  refs/heads/master 63648fa5b -> 3d69fa211


Phoenix-950: Improve Secondary Index Update Failure Handling


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3d69fa21
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3d69fa21
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3d69fa21

Branch: refs/heads/master
Commit: 3d69fa21123d182577a58bbc517d40ea9dc5a2cd
Parents: 63648fa
Author: Jeffrey Zhong <jeffreyz@apache.org>
Authored: Mon Jul 21 18:32:56 2014 -0700
Committer: Jeffrey Zhong <jeffreyz@apache.org>
Committed: Wed Jul 30 14:21:49 2014 -0700

----------------------------------------------------------------------
 phoenix-core/pom.xml                            |  27 ++-
 phoenix-core/src/build/phoenix-core.xml         |  53 +++++
 .../end2end/index/MutableIndexFailureIT.java    |  71 ++++--
 .../phoenix/compile/StatementContext.java       |  11 +
 .../coprocessor/MetaDataEndpointImpl.java       |  66 ++++--
 .../coprocessor/MetaDataRegionObserver.java     | 224 ++++++++++++++++++-
 .../apache/phoenix/execute/BasicQueryPlan.java  |  18 +-
 .../org/apache/phoenix/hbase/index/Indexer.java |   2 +-
 .../index/PhoenixIndexFailurePolicy.java        |  28 ++-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   3 +
 .../phoenix/mapreduce/CsvBulkLoadTool.java      |   2 +-
 .../query/ConnectionQueryServicesImpl.java      |  13 +-
 .../apache/phoenix/query/QueryConstants.java    |   4 +-
 .../org/apache/phoenix/query/QueryServices.java |  11 +
 .../phoenix/query/QueryServicesOptions.java     |   3 +
 .../apache/phoenix/schema/MetaDataClient.java   |  71 +++++-
 .../org/apache/phoenix/schema/TableRef.java     |  23 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |  67 ++++++
 pom.xml                                         |   5 -
 19 files changed, 615 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 46125b6..73b27bf 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -168,9 +168,30 @@
       <plugin>
-        <!--Make it so assembly:single does nothing in here -->
+        <!-- Build the stand-alone phoenix-core jar via assembly:single -->
         <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <skipAssembly>true</skipAssembly>
-        </configuration>
+        <executions>
+          <execution>
+            <id>core</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+            <configuration>
+              <attach>false</attach>
+              <finalName>phoenix</finalName>
+              <archive>
+                <index>true</index>
+                <manifest>
+                  <addClasspath>true</addClasspath>
+                  <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
+                  <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
+                </manifest>
+              </archive>
+              <descriptors>
+                <descriptor>src/build/phoenix-core.xml</descriptor>
+              </descriptors>
+            </configuration>
+          </execution>
+        </executions>      
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/build/phoenix-core.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/src/build/phoenix-core.xml b/phoenix-core/src/build/phoenix-core.xml
new file mode 100644
index 0000000..7b8df1e
--- /dev/null
+++ b/phoenix-core/src/build/phoenix-core.xml
@@ -0,0 +1,53 @@
+<?xml version='1.0'?>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  <id>core-${parent.version}</id>
+  <!-- All the dependencies (unpacked) necessary to run phoenix from a single, stand-alone jar -->
+  <formats>
+    <format>jar</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  
+  <dependencySets>
+    <dependencySet>
+      <!-- Unpack all the dependencies to class files, since Java can't run 
+        nested (jar-of-jars) archives -->
+      <unpack>true</unpack>
+      <!-- save these dependencies to the top-level -->
+      <outputDirectory>/</outputDirectory>
+      <includes>
+        <include>org.antlr:antlr-runtime</include>
+      </includes>
+    </dependencySet>
+
+    <dependencySet>
+      <outputDirectory>/</outputDirectory>
+      <unpack>true</unpack>
+      <includes>
+        <include>org.apache.phoenix:phoenix-core*</include>
+      </includes>
+    </dependencySet>
+  </dependencySets>
+</assembly>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 0601c2b..47e62d3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -26,7 +26,6 @@ import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.PreparedStatement;
@@ -37,13 +36,13 @@ import java.util.Properties;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableType;
@@ -81,6 +80,7 @@ public class MutableIndexFailureIT extends BaseTest {
         setUpConfigForMiniCluster(conf);
         conf.setInt("hbase.client.retries.number", 2);
         conf.setInt("hbase.client.pause", 5000);
+        conf.setLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB, 0);
         util = new HBaseTestingUtility(conf);
         util.startMiniCluster();
         String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
@@ -98,21 +98,7 @@ public class MutableIndexFailureIT extends BaseTest {
         }
     }
 
-    private static void destroyIndexTable() throws Exception {
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        Connection conn = driver.connect(url, props);
-        ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
-        HBaseAdmin admin = services.getAdmin();
-        try {
-            admin.disableTable(INDEX_TABLE_FULL_NAME);
-            admin.deleteTable(INDEX_TABLE_FULL_NAME);
-        } catch (TableNotFoundException e) {} finally {
-            conn.close();
-            admin.close();
-        }
-    }
-
-    @Test
+    @Test(timeout=300000)
     public void testWriteFailureDisablesIndex() throws Exception {
         String query;
         ResultSet rs;
@@ -139,17 +125,29 @@ public class MutableIndexFailureIT extends BaseTest {
         assertEquals(INDEX_TABLE_NAME, rs.getString(3));
         assertEquals(PIndexState.ACTIVE.toString(), rs.getString("INDEX_STATE"));
         assertFalse(rs.next());
-
-        destroyIndexTable();
-
+        
         PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
         stmt.setString(1, "a");
         stmt.setString(2, "x");
         stmt.setString(3, "1");
         stmt.execute();
+        conn.commit();
+
+        TableName indexTable = TableName.valueOf(INDEX_TABLE_NAME);
+        HBaseAdmin admin = this.util.getHBaseAdmin();
+        HTableDescriptor indexTableDesc = admin.getTableDescriptor(indexTable);
+        try{
+          admin.disableTable(indexTable);
+          admin.deleteTable(indexTable);
+        } catch (TableNotFoundException ignore) {}
+
+        stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
+        stmt.setString(1, "a2");
+        stmt.setString(2, "x2");
+        stmt.setString(3, "2");
+        stmt.execute();
         try {
             conn.commit();
-            fail();
         } catch (SQLException e) {}
 
         // Verify the metadata for index is correct.
@@ -159,5 +157,32 @@ public class MutableIndexFailureIT extends BaseTest {
         assertEquals(INDEX_TABLE_NAME, rs.getString(3));
         assertEquals(PIndexState.DISABLE.toString(), rs.getString("INDEX_STATE"));
         assertFalse(rs.next());
+        
+        stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
+        stmt.setString(1, "a3");
+        stmt.setString(2, "x3");
+        stmt.setString(3, "3");
+        stmt.execute();
+        conn.commit();
+        
+        // recreate index table
+        admin.createTable(indexTableDesc);
+        do {
+          Thread.sleep(15 * 1000); // sleep 15 secs
+          rs = conn.getMetaData().getTables(null, "", INDEX_TABLE_NAME, new String[] {PTableType.INDEX.toString()});
+          assertTrue(rs.next());
+          if(PIndexState.ACTIVE.toString().equals(rs.getString("INDEX_STATE"))){
+              break;
+          }
+        } while(true);
+        
+        // verify index table has data
+        query = "SELECT count(1) FROM " + INDEX_TABLE_FULL_NAME;
+        rs = conn.createStatement().executeQuery(query);
+        assertTrue(rs.next());
+        
+        // using 2 here because we only partially rebuild the index from where we failed, and the oldest 
+        // index row was deleted when we dropped the index table during the test.
+        assertEquals(2, rs.getInt(1));
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
index b27447c..5bebfd8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
@@ -27,6 +27,7 @@ import java.util.Set;
 
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
@@ -75,6 +76,7 @@ public class StatementContext {
     private TableRef currentTable;
     private List<Pair<byte[], byte[]>> whereConditionColumns;
     private TupleProjector clientTupleProjector;
+    private TimeRange scanTimeRange = null;
     
     public StatementContext(PhoenixStatement statement) {
         this(statement, FromCompiler.EMPTY_TABLE_RESOLVER, new Scan(), new SequenceManager(statement));
@@ -303,4 +305,13 @@ public class StatementContext {
     public void setClientTupleProjector(TupleProjector projector) {
         this.clientTupleProjector = projector;
     }
+
+    public void setScanTimeRange(TimeRange value) {
+        this.scanTimeRange = value;
+    }
+
+    public TimeRange getScanTimeRange() {
+        return this.scanTimeRange;
+    }
+
 }

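The new scanTimeRange hook lets a plan pin the scan's time window explicitly instead of deriving it from the connection SCN (see the BasicQueryPlan change below). A minimal stand-alone sketch of applying such a range, using only the HBase client API; the class and method names are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.TimeRange;

    public class ScanTimeRangeSketch {
        // Illustrative helper: scan everything written at or after fromTs.
        // TimeRange min is inclusive and max is exclusive.
        static void applyRebuildRange(Scan scan, long fromTs) throws IOException {
            TimeRange range = new TimeRange(fromTs, Long.MAX_VALUE);
            scan.setTimeRange(range.getMin(), range.getMax());
        }
    }
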
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 3f4892b..060b641 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -50,6 +50,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT_BYTE
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES;
 import static org.apache.phoenix.schema.PTableType.INDEX;
 import static org.apache.phoenix.util.SchemaUtil.getVarCharLength;
 import static org.apache.phoenix.util.SchemaUtil.getVarChars;
@@ -266,18 +267,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 
     private RegionCoprocessorEnvironment env;
 
-    private static MetaDataMutationResult checkTableKeyInRegion(byte[] key, HRegion region) {
-        byte[] startKey = region.getStartKey();
-        byte[] endKey = region.getEndKey();
-        if (Bytes.compareTo(startKey, key) <= 0
-                && (Bytes.compareTo(HConstants.LAST_ROW, endKey) == 0 || Bytes.compareTo(key,
-                    endKey) < 0)) {
-            return null; // normal case;
-        }
-        return new MetaDataMutationResult(MutationCode.TABLE_NOT_IN_REGION,
-                EnvironmentEdgeManager.currentTimeMillis(), null);
-    }
-
     /**
      * Stores a reference to the coprocessor environment provided by the
      * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this
@@ -1413,7 +1402,19 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             long timeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
             ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
             List<Cell> newKVs = tableMetadata.get(0).getFamilyCellMap().get(TABLE_FAMILY_BYTES);
-            Cell newKV = newKVs.get(0);
+            Cell newKV = null;
+            int disableTimeStampKVIndex = -1;
+            int index = 0;
+            for(Cell cell : newKVs){
+                if(Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), 
+                      INDEX_STATE_BYTES, 0, INDEX_STATE_BYTES.length) == 0){
+                  newKV = cell;
+                } else if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), 
+                  INDEX_DISABLE_TIMESTAMP_BYTES, 0, INDEX_DISABLE_TIMESTAMP_BYTES.length) == 0){
+                  disableTimeStampKVIndex = index;
+                }
+                index++;
+            }
             PIndexState newState =
                     PIndexState.fromSerializedValue(newKV.getValueArray()[newKV.getValueOffset()]);
             RowLock rowLock = region.getRowLock(key);
@@ -1424,6 +1425,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 Get get = new Get(key);
                 get.setTimeRange(PTable.INITIAL_SEQ_NUM, timeStamp);
                 get.addColumn(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
+                get.addColumn(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
                 Result currentResult = region.get(get);
                 if (currentResult.rawCells().length == 0) {
                     builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
@@ -1431,10 +1433,30 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     done.run(builder.build());
                     return;
                 }
-                Cell currentStateKV = currentResult.rawCells()[0];
+                Cell currentStateKV = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
+                Cell currentDisableTimeStamp = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
+               
                 PIndexState currentState =
                         PIndexState.fromSerializedValue(currentStateKV.getValueArray()[currentStateKV
                                 .getValueOffset()]);
+                
+                // check if we need to reset the disable timestamp
+                if( (newState == PIndexState.DISABLE) && 
+                    (currentState == PIndexState.DISABLE || currentState == PIndexState.INACTIVE) && 
+                    (currentDisableTimeStamp != null && currentDisableTimeStamp.getValueLength() > 0) &&
+                    (disableTimeStampKVIndex >= 0)) {
+                    Long curTimeStampVal = (Long)PDataType.LONG.toObject(currentDisableTimeStamp.getValueArray(), 
+                      currentDisableTimeStamp.getValueOffset(), currentDisableTimeStamp.getValueLength());
+                    // new DisableTimeStamp is passed in
+                    Cell newDisableTimeStampCell = newKVs.get(disableTimeStampKVIndex);
+                    Long newDisableTimeStamp = (Long)PDataType.LONG.toObject(newDisableTimeStampCell.getValueArray(),
+                      newDisableTimeStampCell.getValueOffset(), newDisableTimeStampCell.getValueLength());
+                    if(curTimeStampVal > 0 && curTimeStampVal < newDisableTimeStamp){
+                        // do not reset the disable timestamp
+                        newKVs.remove(disableTimeStampKVIndex);
+                    }
+                }
+                
                 // Detect invalid transitions
                 if (currentState == PIndexState.BUILDING) {
                     if (newState == PIndexState.USABLE) {
@@ -1444,7 +1466,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         return;
                     }
                 } else if (currentState == PIndexState.DISABLE) {
-                    if (newState != PIndexState.BUILDING && newState != PIndexState.DISABLE) {
+                    if (newState != PIndexState.BUILDING && newState != PIndexState.DISABLE &&
+                        newState != PIndexState.INACTIVE) {
                         builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
                         builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                         done.run(builder.build());
@@ -1469,6 +1492,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     newKVs.set(0, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES,
                         INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
                 }
+                
                 if (currentState != newState) {
                     region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet());
                     // Invalidate from cache
@@ -1492,6 +1516,18 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
     }
     
+    private static MetaDataMutationResult checkTableKeyInRegion(byte[] key, HRegion region) {
+        byte[] startKey = region.getStartKey();
+        byte[] endKey = region.getEndKey();
+        if (Bytes.compareTo(startKey, key) <= 0
+                && (Bytes.compareTo(HConstants.LAST_ROW, endKey) == 0 || Bytes.compareTo(key,
+                    endKey) < 0)) {
+            return null; // normal case;
+        }
+        return new MetaDataMutationResult(MutationCode.TABLE_NOT_IN_REGION,
+                EnvironmentEdgeManager.currentTimeMillis(), null);
+    }
+
     /**
      * 
      * Matches rows that end with a given byte array suffix

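One way to read the disable-timestamp handling above: a later failure must never advance an already-recorded INDEX_DISABLE_TIMESTAMP, otherwise the partial rebuild would start too late and miss mutations. A tiny sketch of the same comparison, with hypothetical values:

    // hypothetical values: first failure recorded at 100, second failure at 250
    long curTimeStampVal = 100L;
    long newDisableTimeStamp = 250L;
    // same test as in updateIndexState above: when true, the new cell is dropped
    // so the stored (earlier) timestamp is kept
    boolean dropNewCell = curTimeStampVal > 0 && curTimeStampVal < newDisableTimeStamp; // true
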
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 2ef8b3a..0e236da 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -19,13 +19,44 @@ package org.apache.phoenix.coprocessor;
 
 import java.io.IOException;
 
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.phoenix.jdbc.PhoenixDriver;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.cache.GlobalCache;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.MetaDataClient;
+import org.apache.phoenix.schema.PDataType;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.SchemaUtil;
 
 
 /**
@@ -33,26 +64,195 @@ import org.apache.phoenix.query.QueryServicesOptions;
  * to SYSTEM.TABLE.
  */
 public class MetaDataRegionObserver extends BaseRegionObserver {
-
+    public static final Log LOG = LogFactory.getLog(MetaDataRegionObserver.class);
+    protected Timer scheduleTimer = new Timer(true);
+    private boolean enableRebuildIndex = QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD;
+    private long rebuildIndexTimeInterval = QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL;
+  
     @Override
     public void preClose(final ObserverContext<RegionCoprocessorEnvironment> c,
             boolean abortRequested) {
+        scheduleTimer.cancel();
         GlobalCache.getInstance(c.getEnvironment()).getMetaDataCache().invalidateAll();
     }
     
     @Override
     public void start(CoprocessorEnvironment env) throws IOException {
-      // sleep a little bit to compensate time clock skew when SYSTEM.CATALOG moves 
-      // among region servers because we relies on server time of RS which is hosting
-      // SYSTEM.CATALOG
-      long sleepTime = env.getConfiguration().getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB, 
-          QueryServicesOptions.DEFAULT_CLOCK_SKEW_INTERVAL);
-      try {
-          if(sleepTime > 0) {
-              Thread.sleep(sleepTime);
-          }
-      } catch (InterruptedException ie) {
-          Thread.currentThread().interrupt();
+        // sleep a little bit to compensate for clock skew when SYSTEM.CATALOG moves 
+        // among region servers, because we rely on the server time of the RS hosting
+        // SYSTEM.CATALOG
+        long sleepTime = env.getConfiguration().getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB, 
+            QueryServicesOptions.DEFAULT_CLOCK_SKEW_INTERVAL);
+        try {
+            if(sleepTime > 0) {
+                Thread.sleep(sleepTime);
+            }
+        } catch (InterruptedException ie) {
+            Thread.currentThread().interrupt();
+        }
+        enableRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB, 
+            QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD);
+        rebuildIndexTimeInterval = env.getConfiguration().getLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB, 
+            QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL);
+    }
+    
+
+    @Override
+    public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
+        if (!enableRebuildIndex) {
+            LOG.info("Failure Index Rebuild is skipped by configuration.");
+            return;
+        }
+        // turn off verbose deprecation logging
+        Logger deprecationLogger = Logger.getLogger("org.apache.hadoop.conf.Configuration.deprecation");
+        if (deprecationLogger != null) {
+            deprecationLogger.setLevel(Level.WARN);
+        }
+        try {
+            Class.forName(PhoenixDriver.class.getName());
+            // starts index rebuild schedule work
+            BuildIndexScheduleTask task = new BuildIndexScheduleTask(e.getEnvironment());
+            // run scheduled task every 10 secs
+            scheduleTimer.schedule(task, 10000, rebuildIndexTimeInterval);
+        } catch (ClassNotFoundException ex) {
+            LOG.error("BuildIndexScheduleTask cannot start!", ex);
+        }
+    }
+    
+    /**
+     * Task that runs periodically to rebuild indexes flagged by a non-zero INDEX_DISABLE_TIMESTAMP
+     *
+     */
+    public static class BuildIndexScheduleTask extends TimerTask {
+      // inProgress prevents the timer from starting a new task while the previous one is still running
+      private final static AtomicInteger inProgress = new AtomicInteger(0);
+      RegionCoprocessorEnvironment env;
+      public BuildIndexScheduleTask(RegionCoprocessorEnvironment env) {
+        this.env = env;
       }
+      
+        private String getJdbcUrl() {
+            String zkQuorum = this.env.getConfiguration().get(HConstants.ZOOKEEPER_QUORUM);
+            String zkClientPort = this.env.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT,
+                Integer.toString(HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT));
+            String zkParentNode = this.env.getConfiguration().get(HConstants.ZOOKEEPER_ZNODE_PARENT,
+                HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
+            return PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum
+                + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkClientPort
+                + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkParentNode;
+        }
+      
+        public void run() {
+            RegionScanner scanner = null;
+            PhoenixConnection conn = null;
+            if (inProgress.get() > 0) {
+                LOG.debug("New ScheduledBuildIndexTask skipped as there is already one running");
+                return;
+            }
+            try {
+                inProgress.incrementAndGet();
+                Scan scan = new Scan();
+                SingleColumnValueFilter filter = new SingleColumnValueFilter(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                    PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES,
+                    CompareFilter.CompareOp.NOT_EQUAL, PDataType.LONG.toBytes(0L));
+                filter.setFilterIfMissing(true);
+                scan.setFilter(filter);
+                scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                    PhoenixDatabaseMetaData.TABLE_NAME_BYTES);
+                scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                    PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
+                scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                    PhoenixDatabaseMetaData.INDEX_STATE_BYTES);
+                scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                    PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES);
+
+                boolean hasMore = false;
+                List<Cell> results = new ArrayList<Cell>();
+                scanner = this.env.getRegion().getScanner(scan);
+
+                do {
+                    results.clear();
+                    hasMore = scanner.next(results);
+                    if (results.isEmpty()) break;
+
+                    Result r = Result.create(results);
+                    byte[] disabledTimeStamp = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                        PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES);
+
+                    Long disabledTimeStampVal = 0L;
+                    if (disabledTimeStamp == null || disabledTimeStamp.length == 0) {
+                        continue;
+                    }
+
+                    // disableTimeStamp has to be a positive value
+                    disabledTimeStampVal = (Long) PDataType.LONG.toObject(disabledTimeStamp);
+                    if (disabledTimeStampVal <= 0) {
+                        continue;
+                    }
+
+                    byte[] dataTable = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                        PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
+                    byte[] indexStat = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                        PhoenixDatabaseMetaData.INDEX_STATE_BYTES);
+                    if ((dataTable == null || dataTable.length == 0)
+                            || (indexStat == null || indexStat.length == 0)
+                            || ((Bytes.compareTo(PIndexState.DISABLE.getSerializedBytes(), indexStat) != 0) 
+                                    && (Bytes.compareTo(PIndexState.INACTIVE.getSerializedBytes(), indexStat) != 0))) {
+                        // index has to be either in disable or inactive state
+                        // data table name can't be empty
+                        continue;
+                    }
+
+                    byte[][] rowKeyMetaData = new byte[3][];
+                    SchemaUtil.getVarChars(r.getRow(), 3, rowKeyMetaData);
+                    byte[] indexTable = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
+
+                    // validity check
+                    if (indexTable == null || indexTable.length == 0) {
+                        LOG.debug("Index rebuild has been skipped for row=" + r);
+                        continue;
+                    }
+
+                    if (conn == null) {
+                        conn = DriverManager.getConnection(getJdbcUrl()).unwrap(PhoenixConnection.class);
+                    }
+                    PTable dataPTable = PhoenixRuntime.getTable(conn, Bytes.toString(dataTable));
+                    PTable indexPTable = PhoenixRuntime.getTable(conn, Bytes.toString(indexTable));
+                    if (!MetaDataUtil.tableRegionsOnline(this.env.getConfiguration(), indexPTable)) {
+                        LOG.debug("Index rebuild has been skipped because not all regions of index table="
+                                + indexPTable.getName() + " are online.");
+                        continue;
+                    }
+
+                    MetaDataClient client = new MetaDataClient(conn);
+                    long overlapTime = env.getConfiguration().getLong(
+                        QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB,
+                        QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME);
+                    long timeStamp = Math.max(0, disabledTimeStampVal - overlapTime);
+
+                    LOG.info("Starting to build index=" + indexPTable.getName() + " from timestamp=" + timeStamp);
+                    client.buildPartialIndexFromTimeStamp(indexPTable, new TableRef(dataPTable, Long.MAX_VALUE, timeStamp));
+
+                } while (hasMore);
+            } catch (Throwable t) {
+                LOG.warn("ScheduledBuildIndexTask failed!", t);
+            } finally {
+                inProgress.decrementAndGet();
+                if (scanner != null) {
+                    try {
+                        scanner.close();
+                    } catch (IOException ignored) {
+                        LOG.debug("ScheduledBuildIndexTask can't close scanner.", ignored);
+                    }
+                }
+                if (conn != null) {
+                    try {
+                        conn.close();
+                    } catch (SQLException ignored) {
+                        LOG.debug("ScheduledBuildIndexTask can't close connection", ignored);
+                    }
+                }
+            }
+        }
     }
 }

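For reference, the start point of a partial rebuild is not the disable timestamp itself; the task backs up by a configurable overlap to absorb clock skew between region servers. A sketch of the same arithmetic, using the default overlap and a hypothetical disable timestamp:

    // hypothetical value read from the INDEX_DISABLE_TIMESTAMP column
    long disabledTimeStampVal = 1400000000000L;
    // phoenix.index.failure.handling.rebuild.overlap.time (default 5 mins)
    long overlapTime = 300000L;
    // same computation the task performs before calling buildPartialIndexFromTimeStamp
    long timeStamp = Math.max(0, disabledTimeStampVal - overlapTime);
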
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/execute/BasicQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BasicQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BasicQueryPlan.java
index 8a270e3..78d07e5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BasicQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BasicQueryPlan.java
@@ -165,16 +165,24 @@ public abstract class BasicQueryPlan implements QueryPlan {
         // is resolved.
         // TODO: include time range in explain plan?
         PhoenixConnection connection = context.getConnection();
-        Long scn = connection.getSCN();
-        if(scn == null) {
+        if (context.getScanTimeRange() == null) {
+          Long scn = connection.getSCN();
+          if (scn == null) {
             scn = context.getCurrentTime();
             // Add one to server time since max of time range is exclusive
             // and we need to account of OSs with lower resolution clocks.
-            if(scn < HConstants.LATEST_TIMESTAMP) {
-                scn++;
+            if (scn < HConstants.LATEST_TIMESTAMP) {
+              scn++;
             }
+          }
+          ScanUtil.setTimeRange(scan, scn);
+        } else {
+          try {
+            scan.setTimeRange(context.getScanTimeRange().getMin(), context.getScanTimeRange().getMax());
+          } catch (IOException e) {
+            throw new SQLException(e);
+          }
         }
-        ScanUtil.setTimeRange(scan, scn);
         ScanUtil.setTenantId(scan, connection.getTenantId() == null ? null : connection.getTenantId().getBytes());
         // Set local index related scan attributes. 
         if (context.getCurrentTable().getTable().getIndexType() == IndexType.LOCAL) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 35eb10d..975621c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -599,7 +599,7 @@ public class Indexer extends BaseRegionObserver {
      * hopes they come up before the primary table finishes.
      */
     Collection<Pair<Mutation, byte[]>> indexUpdates = extractIndexUpdate(logEdit);
-    recoveryWriter.writeAndKillYourselfOnFailure(indexUpdates);
+    recoveryWriter.write(indexUpdates);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index 9e2cc7a..351b12f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.index;
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -25,6 +26,7 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
@@ -38,6 +40,7 @@ import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 
 import com.google.common.collect.Multimap;
+
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
@@ -47,6 +50,7 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.UpdateIndexStateRequest;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.protobuf.ProtobufUtil;
+import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.util.SchemaUtil;
 
@@ -77,9 +81,22 @@ public class PhoenixIndexFailurePolicy extends  KillServerOnFailurePolicy {
     @Override
     public void handleFailure(Multimap<HTableInterfaceReference, Mutation> attempted, Exception cause) throws IOException {
         Set<HTableInterfaceReference> refs = attempted.asMap().keySet();
-        StringBuilder buf = new StringBuilder("Disabled index" + (refs.size() > 1 ? "es " : " "));
         try {
             for (HTableInterfaceReference ref : refs) {
+                long minTimeStamp = 0;
+                Collection<Mutation> mutations = attempted.get(ref);
+                if (mutations != null) {
+                  for (Mutation m : mutations) {
+                    for (List<Cell> kvs : m.getFamilyCellMap().values()) {
+                      for (Cell kv : kvs) {
+                        if (minTimeStamp == 0 || (kv.getTimestamp() >= 0 && kv.getTimestamp() < minTimeStamp)) {
+                          minTimeStamp = kv.getTimestamp();
+                        }
+                      }
+                    }
+                  }
+                }
+                
                 // Disable the index by using the updateIndexState method of MetaDataProtocol end point coprocessor.
                 String indexTableName = ref.getTableName();
                 byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName);
@@ -87,6 +104,7 @@ public class PhoenixIndexFailurePolicy extends  KillServerOnFailurePolicy {
                 // Mimic the Put that gets generated by the client on an update of the index state
                 Put put = new Put(indexTableKey);
                 put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, PIndexState.DISABLE.getSerializedBytes());
+                put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, PDataType.LONG.toBytes(minTimeStamp));
                 final List<Mutation> tableMetadata = Collections.<Mutation>singletonList(put);
                 
                 final Map<byte[], MetaDataResponse> results = 
@@ -118,17 +136,13 @@ public class PhoenixIndexFailurePolicy extends  KillServerOnFailurePolicy {
                 if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                     LOG.warn("Attempt to disable index " + indexTableName + " failed with code = " + result.getMutationCode() + ". Will use default failure policy instead.");
                     super.handleFailure(attempted, cause);
+                    throw new DoNotRetryIOException("Attemp to writes to " + indexTableName + " failed.", cause);
                 }
-                LOG.info("Successfully disabled index " + indexTableName);
-                buf.append(indexTableName);
-                buf.append(',');
+                LOG.info("Successfully disabled index " + indexTableName + " due to an exception while writing updates.", cause);
             }
-            buf.setLength(buf.length()-1);
-            buf.append(" due to an exception while writing updates");
         } catch (Throwable t) {
             super.handleFailure(attempted, cause);
         }
-        throw new DoNotRetryIOException(buf.toString(), cause);
     }
 
 }

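The timestamp captured in handleFailure is intended to be the earliest cell timestamp among the failed index updates, so the rebuild task can replay every data-table change from that point forward. A self-contained sketch of that scan; the class and method names are illustrative:

    import java.util.Collection;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Mutation;

    public class MinTimestampSketch {
        // earliest timestamp across all cells of the failed mutations; 0 if none seen
        static long earliestTimestamp(Collection<Mutation> mutations) {
            long minTimeStamp = 0;
            for (Mutation m : mutations) {
                for (List<Cell> cells : m.getFamilyCellMap().values()) {
                    for (Cell cell : cells) {
                        if (minTimeStamp == 0 || cell.getTimestamp() < minTimeStamp) {
                            minTimeStamp = cell.getTimestamp();
                        }
                    }
                }
            }
            return minTimeStamp;
        }
    }
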
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 93ada7b..99b5910 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -110,6 +110,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     public static final String SYSTEM_CATALOG_ALIAS = "\"SYSTEM.TABLE\"";
 
     public static final String TABLE_NAME = "TABLE_NAME";
+    public static final byte[] TABLE_NAME_BYTES = Bytes.toBytes(TABLE_NAME);
     public static final String TABLE_TYPE = "TABLE_TYPE";
     public static final byte[] TABLE_TYPE_BYTES = Bytes.toBytes(TABLE_TYPE);
     
@@ -220,6 +221,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     public static final String SUPERTABLE_NAME = "SUPERTABLE_NAME";
     
     public static final String TYPE_ID = "TYPE_ID";
+    public static final String INDEX_DISABLE_TIMESTAMP = "INDEX_DISABLE_TIMESTAMP";
+    public static final byte[] INDEX_DISABLE_TIMESTAMP_BYTES = Bytes.toBytes(INDEX_DISABLE_TIMESTAMP);
     
     private final PhoenixConnection connection;
     private final ResultSet emptyResultSet;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
index 08a43fe..661a222 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
@@ -189,7 +189,7 @@ public class CsvBulkLoadTool extends Configured implements Tool {
 	private int loadData(Configuration conf, CommandLine cmdLine,
 			Connection conn) throws SQLException, InterruptedException,
 			ExecutionException {
-		String tableName = cmdLine.getOptionValue(TABLE_NAME_OPT.getOpt());
+		    String tableName = cmdLine.getOptionValue(TABLE_NAME_OPT.getOpt());
         String schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPT.getOpt());
         String indexTableName = cmdLine.getOptionValue(INDEX_TABLE_NAME_OPT.getOpt());
         String qualifiedTableName = getQualifiedTableName(schemaName, tableName);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 8e9e578..93cb241 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -26,6 +26,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAM
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
 
 import java.io.IOException;
+import java.sql.Connection;
 import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
@@ -144,7 +145,6 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     private static final Logger logger = LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
     private static final int INITIAL_CHILD_SERVICES_CAPACITY = 100;
     private static final int DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS = 1000;
-    
     protected final Configuration config;
     // Copy of config.getProps(), but read-only to prevent synchronization that we
     // don't need.
@@ -1398,7 +1398,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     // Keeping this to use for further upgrades
-    protected PhoenixConnection addColumnsIfNotExists(PhoenixConnection oldMetaConnection, String tableName, long timestamp, String columns) throws SQLException {
+    protected PhoenixConnection addColumnsIfNotExists(PhoenixConnection oldMetaConnection, 
+        String tableName, long timestamp, String columns) throws SQLException {
+
         Properties props = new Properties(oldMetaConnection.getClientInfo());
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp));
         // Cannot go through DriverManager or you end up in an infinite loop because it'll call init again
@@ -1407,6 +1409,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             metaConnection.createStatement().executeUpdate("ALTER TABLE " + tableName + " ADD IF NOT EXISTS " + columns );
         } catch (SQLException e) {
+            logger.warn("addColumnsIfNotExists failed due to:" + e);
             sqlE = e;
         } finally {
             try {
@@ -1469,7 +1472,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             } catch (TableAlreadyExistsException ignore) {
                                 // This will occur if we have an older SYSTEM.CATALOG and we need to update it to include
                                 // any new columns we've added.
-                                metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, PhoenixDatabaseMetaData.INDEX_TYPE + " " + PDataType.UNSIGNED_TINYINT.getSqlTypeName());
+                                metaConnection = addColumnsIfNotExists(metaConnection, 
+                                  PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
+                                  MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, 
+                                  PhoenixDatabaseMetaData.INDEX_TYPE + " " + PDataType.UNSIGNED_TINYINT.getSqlTypeName() + 
+                                  ", " + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " " + PDataType.LONG.getSqlTypeName());
                             }
                             try {
                                 metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_SEQUENCE_METADATA);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 07f6612..da2d487 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -78,7 +78,8 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE;
-
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP;
+
 import java.math.BigDecimal;
 
 import org.apache.hadoop.hbase.HConstants;
@@ -213,6 +214,7 @@ public interface QueryConstants {
             SOURCE_DATA_TYPE + " SMALLINT," +
             IS_AUTOINCREMENT + " VARCHAR," +
             INDEX_TYPE + " UNSIGNED_TINYINT," +
+            INDEX_DISABLE_TIMESTAMP + " BIGINT," +
             "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + ","
             + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" +
             HConstants.VERSIONS + "=" + MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS + ",\n" +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 639eaa5..3e0e461 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -113,6 +113,17 @@ public interface QueryServices extends SQLCloseable {
     // The following config settings is to deal with SYSTEM.CATALOG moves(PHOENIX-916) among region servers
     public static final String CLOCK_SKEW_INTERVAL_ATTRIB = "phoenix.clock.skew.interval";
     
+    // Master switch controlling whether an index that previously failed to be updated is rebuilt automatically
+    public static final String INDEX_FAILURE_HANDLING_REBUILD_ATTRIB = "phoenix.index.failure.handling.rebuild";
+    
+    // Time interval at which to check whether any index needs to be rebuilt
+    public static final String INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB = 
+        "phoenix.index.failure.handling.rebuild.interval";
+    
+    // Index will be partially rebuilt starting from (index disable timestamp - the following overlap time)
+    public static final String INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB = 
+        "phoenix.index.failure.handling.rebuild.overlap.time";
+    
     /**
      * Get executor service used for parallel scans
      */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 95269a1..e0be74d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -124,6 +124,9 @@ public class QueryServicesOptions {
     public static final long DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE =  1024L*1024L*10L; // 10 Mb
     public static final int DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES = 1000;
     public static final int DEFAULT_CLOCK_SKEW_INTERVAL = 2000;
+    public static final boolean DEFAULT_INDEX_FAILURE_HANDLING_REBUILD = true; // auto rebuild on
+    public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL = 10000; // 10 secs
+    public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME = 300000; // 5 mins
     
     private final Configuration config;
     

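Taken together, the three settings above control the rebuild feature; a minimal sketch of tuning them through the HBase Configuration API (property names and defaults as declared in QueryServices and QueryServicesOptions above; the values shown are simply the defaults):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RebuildConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // master switch for automatic rebuild of failed indexes (default: true)
            conf.setBoolean("phoenix.index.failure.handling.rebuild", true);
            // how often the rebuild task checks for work, in ms (default: 10 secs)
            conf.setLong("phoenix.index.failure.handling.rebuild.interval", 10000L);
            // rebuild starts this many ms before the disable timestamp (default: 5 mins)
            conf.setLong("phoenix.index.failure.handling.rebuild.overlap.time", 300000L);
        }
    }
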
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 12cc15a..beda106 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -54,10 +54,12 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP;
 import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
 import static org.apache.phoenix.schema.PDataType.VARCHAR;
 
+import java.io.IOException;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSetMetaData;
@@ -85,6 +87,7 @@ import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.ColumnResolver;
@@ -211,12 +214,20 @@ public class MetaDataClient {
             IMMUTABLE_ROWS + 
             ") VALUES (?, ?, ?, ?)";
     private static final String UPDATE_INDEX_STATE =
+        "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + 
+        TENANT_ID + "," +
+        TABLE_SCHEM + "," +
+        TABLE_NAME + "," +
+        INDEX_STATE + 
+        ") VALUES (?, ?, ?, ?)";
+    private static final String UPDATE_INDEX_STATE_TO_ACTIVE =
             "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + 
             TENANT_ID + "," +
             TABLE_SCHEM + "," +
             TABLE_NAME + "," +
-            INDEX_STATE +
-            ") VALUES (?, ?, ?, ?)";
+            INDEX_STATE + "," +
+            INDEX_DISABLE_TIMESTAMP +
+            ") VALUES (?, ?, ?, ?, ?)";
     private static final String INSERT_COLUMN =
         "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + 
         TENANT_ID + "," +
@@ -493,11 +504,13 @@ public class MetaDataClient {
     }
     
     private MutationState buildIndex(PTable index, TableRef dataTableRef) throws SQLException {
+        AlterIndexStatement indexStatement = null;
         boolean wasAutoCommit = connection.getAutoCommit();
         connection.rollback();
         try {
             connection.setAutoCommit(true);
             MutationState state;
+            
             // For local indexes, we optimize the initial index population by *not* sending Puts over
             // the wire for the index rows, as we don't need to do that. Instead, we tap into our
             // region observer to generate the index rows based on the data rows as we scan
@@ -513,6 +526,11 @@ public class MetaDataClient {
                 // index maintainers.
                 // Define the LOCAL_INDEX_BUILD as a new static in BaseScannerRegionObserver
                 Scan scan = plan.getContext().getScan();
+                try {
+                    scan.setTimeRange(dataTableRef.getLowerBoundTimeStamp(), Long.MAX_VALUE);
+                } catch (IOException e) {
+                    throw new SQLException(e);
+                }
                 ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                 PTable dataTable = tableRef.getTable();
                 List<PTable> indexes = Lists.newArrayListWithExpectedSize(1);
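The setTimeRange call added above is what makes the rebuild incremental: only data-table cells written at or after the lower bound are scanned and replayed into the index. The same idea in isolation, as a sketch against a raw HBase Scan (the helper and the bound are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Scan;

    public class TimeRangeScanSketch {
        // Build a scan that replays only cells written at or after lowerBoundTs.
        public static Scan incrementalScan(long lowerBoundTs) throws IOException {
            Scan scan = new Scan();
            // Upper bound Long.MAX_VALUE leaves the range open-ended,
            // matching the patch above.
            scan.setTimeRange(lowerBoundTs, Long.MAX_VALUE);
            return scan;
        }
    }
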
@@ -537,12 +555,18 @@ public class MetaDataClient {
             } else {
                 PostIndexDDLCompiler compiler = new PostIndexDDLCompiler(connection, dataTableRef);
                 MutationPlan plan = compiler.compile(index);
+                try {
+                    plan.getContext().setScanTimeRange(new TimeRange(dataTableRef.getLowerBoundTimeStamp(), Long.MAX_VALUE));
+                } catch (IOException e) {
+                    throw new SQLException(e);
+                }
                 state = connection.getQueryServices().updateData(plan);
-            }
-            AlterIndexStatement indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null, 
-                    TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
-                    dataTableRef.getTable().getTableName().getString(), false, PIndexState.ACTIVE);
+            }            
+            indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null, 
+                TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
+                dataTableRef.getTable().getTableName().getString(), false, PIndexState.ACTIVE);
             alterIndex(indexStatement);
+            
             return state;
         } finally {
             connection.setAutoCommit(wasAutoCommit);
@@ -559,6 +583,32 @@ public class MetaDataClient {
     }
 
     /**
+     * Rebuilds an index starting from a given timestamp, which is taken from the HBase row timestamp field of the data table.
+     */
+    public void buildPartialIndexFromTimeStamp(PTable index, TableRef dataTableRef) throws SQLException {
+        boolean needRestoreIndexState = false;
+        // The index state must change from DISABLE to INACTIVE before the partial rebuild so that
+        // new changes arriving while the index is rebuilding are still indexed.
+        AlterIndexStatement indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null,
+            TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
+            dataTableRef.getTable().getTableName().getString(), false, PIndexState.INACTIVE);
+        alterIndex(indexStatement);
+        needRestoreIndexState = true;
+        try {
+            buildIndex(index, dataTableRef);
+            needRestoreIndexState = false;
+        } finally {
+            if(needRestoreIndexState) {
+                // the rebuild failed, so restore the index state to DISABLE
+                indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null,
+                    TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
+                    dataTableRef.getTable().getTableName().getString(), false, PIndexState.DISABLE);
+                alterIndex(indexStatement);
+            }
+        }
+    }
+
+    /**
      * Create an index table by morphing the CreateIndexStatement into a CreateTableStatement and calling
      * MetaDataClient.createTable. In doing so, we perform the following translations:
      * 1) Change the type of any columns being indexed to types that support null if the column is nullable.
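buildPartialIndexFromTimeStamp follows a restore-on-failure pattern: move the index to INACTIVE so concurrent writes keep being indexed, attempt the rebuild, and fall back to DISABLE from the finally block if the rebuild throws; on success, buildIndex itself ends by flipping the state to ACTIVE. The same protocol in isolation, as a sketch (StateStore and Rebuild are hypothetical stand-ins for the catalog upsert and buildIndex):

    public class RestoreOnFailureSketch {
        enum IndexState { DISABLE, INACTIVE, ACTIVE }

        interface StateStore { void setState(IndexState s) throws Exception; }

        interface Rebuild { void run() throws Exception; }

        // Hypothetical driver mirroring buildPartialIndexFromTimeStamp: flip to
        // INACTIVE, rebuild, and restore DISABLE if the rebuild fails.
        static void rebuildWithRestore(StateStore catalog, Rebuild rebuild) throws Exception {
            catalog.setState(IndexState.INACTIVE);
            boolean restore = true;
            try {
                rebuild.run();   // on success, buildIndex ends in ACTIVE
                restore = false;
            } finally {
                if (restore) {
                    catalog.setState(IndexState.DISABLE);
                }
            }
        }
    }
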
@@ -2078,11 +2128,18 @@ public class MetaDataClient {
             TableRef indexRef = FromCompiler.getResolverForMutation(statement, connection).getTables().get(0);
             PreparedStatement tableUpsert = null;
             try {
-                tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE);
+                if(newIndexState == PIndexState.ACTIVE){
+                    tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE_TO_ACTIVE);
+                } else {
+                    tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE);
+                }
                 tableUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
                 tableUpsert.setString(2, schemaName);
                 tableUpsert.setString(3, indexName);
                 tableUpsert.setString(4, newIndexState.getSerializedValue());
+                if(newIndexState == PIndexState.ACTIVE){
+                    tableUpsert.setLong(5, 0);
+                }
                 tableUpsert.execute();
             } finally {
                 if(tableUpsert != null) {

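The alterIndex change above binds a fifth parameter only on the ACTIVE path, zeroing INDEX_DISABLE_TIMESTAMP so the rebuild watermark is cleared once the index is live again. A standalone JDBC sketch of the same branching, assuming the SYSTEM."CATALOG" names rendered by the constants:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class IndexStateUpsertSketch {
        // Placeholder SQL mirroring UPDATE_INDEX_STATE / UPDATE_INDEX_STATE_TO_ACTIVE.
        static final String UPSERT_STATE =
            "UPSERT INTO SYSTEM.\"CATALOG\"(TENANT_ID, TABLE_SCHEM, TABLE_NAME, INDEX_STATE)"
            + " VALUES (?, ?, ?, ?)";
        static final String UPSERT_STATE_ACTIVE =
            "UPSERT INTO SYSTEM.\"CATALOG\"(TENANT_ID, TABLE_SCHEM, TABLE_NAME, INDEX_STATE,"
            + " INDEX_DISABLE_TIMESTAMP) VALUES (?, ?, ?, ?, ?)";

        static void updateIndexState(Connection conn, String tenantId, String schema,
                String index, String serializedState, boolean toActive) throws SQLException {
            try (PreparedStatement ps =
                    conn.prepareStatement(toActive ? UPSERT_STATE_ACTIVE : UPSERT_STATE)) {
                ps.setString(1, tenantId);
                ps.setString(2, schema);
                ps.setString(3, index);
                ps.setString(4, serializedState);
                if (toActive) {
                    ps.setLong(5, 0L); // clear the disable timestamp once the index is live
                }
                ps.execute();
            }
        }
    }
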
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
index 6019eb8..e820bb3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
@@ -24,7 +24,8 @@ import org.apache.hadoop.hbase.HConstants;
 public final class TableRef {
     private final PTable table;
     private final String alias;
-    private final long timeStamp;
+    private final long upperBoundTimeStamp;
+    private final long lowerBoundTimeStamp;
     private final boolean hasDynamicCols;
 
     public TableRef(TableRef tableRef, long timeStamp) {
@@ -34,11 +35,21 @@ public final class TableRef {
     public TableRef(PTable table) {
         this(null, table, HConstants.LATEST_TIMESTAMP, false);
     }
+    
+    public TableRef(PTable table, long upperBoundTimeStamp, long lowerBoundTimeStamp) {
+        this(null, table, upperBoundTimeStamp, lowerBoundTimeStamp, false);
+    }
 
-    public TableRef(String alias, PTable table, long timeStamp, boolean hasDynamicCols) {
+    public TableRef(String alias, PTable table, long upperBoundTimeStamp, boolean hasDynamicCols) {
+        this(alias, table, upperBoundTimeStamp, 0, hasDynamicCols);
+    }
+    
+    public TableRef(String alias, PTable table, long upperBoundTimeStamp, long lowerBoundTimeStamp, 
+        boolean hasDynamicCols) {
         this.alias = alias;
         this.table = table;
-        this.timeStamp = timeStamp;
+        this.upperBoundTimeStamp = upperBoundTimeStamp;
+        this.lowerBoundTimeStamp = lowerBoundTimeStamp;
         this.hasDynamicCols = hasDynamicCols;
     }
     
@@ -70,7 +81,11 @@ public final class TableRef {
     }
 
     public long getTimeStamp() {
-        return timeStamp;
+        return this.upperBoundTimeStamp;
+    }
+    
+    public long getLowerBoundTimeStamp() {
+        return this.lowerBoundTimeStamp;
     }
 
     public boolean hasDynamicCols() {

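With both bounds on TableRef, a scan window is effectively the half-open interval [lowerBoundTimeStamp, upperBoundTimeStamp); the retained single-timestamp constructors default the lower bound to 0, so existing callers still scan full history. A small sketch of how the two bounds map onto an HBase TimeRange (the values are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.io.TimeRange;

    public class TableRefWindowSketch {
        public static void main(String[] args) throws IOException {
            long lowerBound = 0L;                          // default: scan all history
            long upperBound = HConstants.LATEST_TIMESTAMP; // default upper bound
            // TimeRange is half-open: [lowerBound, upperBound)
            TimeRange range = new TimeRange(lowerBound, upperBound);
            System.out.println(range);
        }
    }
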
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index b98ebf0..215a2b1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -19,17 +19,28 @@ package org.apache.phoenix.util;
 
 import static org.apache.phoenix.util.SchemaUtil.getVarChars;
 
+import java.io.IOException;
 import java.sql.SQLException;
 import java.util.Collection;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
@@ -39,13 +50,20 @@ import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SequenceKey;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableNotFoundException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.protobuf.ServiceException;
 
 
 public class MetaDataUtil {
+    private static final Logger logger = LoggerFactory.getLogger(MetaDataUtil.class);
+  
     public static final String VIEW_INDEX_TABLE_PREFIX = "_IDX_";
     public static final byte[] VIEW_INDEX_TABLE_PREFIX_BYTES = Bytes.toBytes(VIEW_INDEX_TABLE_PREFIX);
     public static final String LOCAL_INDEX_TABLE_PREFIX = "_LOCAL_IDX_";
@@ -331,6 +349,55 @@ public class MetaDataUtil {
                 PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + " = '" + key.getSchemaName() + "'");
         
     }
+    
+    /**
+     * Checks whether all regions of a table are online.
+     * @param conf configuration used to obtain an HBase connection
+     * @param table the table whose regions are checked
+     * @return true when all regions of the table are online
+     */
+    public static boolean tableRegionsOnline(Configuration conf, PTable table) {
+        HConnection hcon = null;
+
+        try {
+            hcon = HConnectionManager.getConnection(conf);
+            List<HRegionLocation> locations = hcon.locateRegions(
+                org.apache.hadoop.hbase.TableName.valueOf(table.getTableName().getBytes()));
+
+            for (HRegionLocation loc : locations) {
+                try {
+                    ServerName sn = loc.getServerName();
+                    if (sn == null) continue;
+
+                    AdminService.BlockingInterface admin = hcon.getAdmin(sn);
+                    GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
+                        loc.getRegionInfo().getRegionName());
+
+                    admin.getRegionInfo(null, request);
+                } catch (ServiceException e) {
+                    IOException ie = ProtobufUtil.getRemoteException(e);
+                    logger.debug("Region " + loc.getRegionInfo().getEncodedName() + " isn't online due to:" + ie);
+                    return false;
+                } catch (RemoteException e) {
+                    logger.debug("Cannot get region " + loc.getRegionInfo().getEncodedName() + " info due to error:" + e);
+                    return false;
+                }
+            }
+        } catch (IOException ex) {
+            logger.warn("tableRegionsOnline failed due to:" + ex);
+            return false;
+        } finally {
+            if (hcon != null) {
+                try {
+                    hcon.close();
+                } catch (IOException ignored) {
+                }
+            }
+        }
+
+        return true;
+    }
 
     public static final String IS_VIEW_INDEX_TABLE_PROP_NAME = "IS_VIEW_INDEX_TABLE";
     public static final byte[] IS_VIEW_INDEX_TABLE_PROP_BYTES = Bytes.toBytes(IS_VIEW_INDEX_TABLE_PROP_NAME);

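tableRegionsOnline lets the rebuild machinery probe each region server for the data table before kicking off a partial rebuild, rather than failing mid-scan. A sketch of a caller-side guard, assuming the rebuild loop simply skips a round while any region is offline (the guard itself is not part of this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.phoenix.schema.PTable;
    import org.apache.phoenix.util.MetaDataUtil;

    public class RegionGateSketch {
        // Hypothetical guard: defer the rebuild round while any region of the
        // data table is unreachable, instead of failing partway through.
        static boolean readyForRebuild(Configuration conf, PTable dataTable) {
            return MetaDataUtil.tableRegionsOnline(conf, dataTable);
        }
    }
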
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d69fa21/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 2001b82..4612d70 100644
--- a/pom.xml
+++ b/pom.xml
@@ -690,11 +690,6 @@
           </dependency>
           <dependency>
             <groupId>org.apache.hbase</groupId>
-            <artifactId>hbase-common</artifactId>
-            <version>${hbase-hadoop2.version}</version>
-          </dependency>
-          <dependency>
-            <groupId>org.apache.hbase</groupId>
             <artifactId>hbase-protocol</artifactId>
             <version>${hbase-hadoop2.version}</version>
           </dependency>

