phoenix-commits mailing list archives

From jamestay...@apache.org
Subject phoenix git commit: PHOENIX-2597 Local Indexes are not used to query views
Date Sun, 17 Jan 2016 01:57:41 GMT
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 ee6d4f410 -> 0af732292


PHOENIX-2597 Local Indexes are not used to query views


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0af73229
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0af73229
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0af73229

Branch: refs/heads/4.x-HBase-0.98
Commit: 0af73229234793c8274e37f9a61d15479d116385
Parents: ee6d4f4
Author: James Taylor <jtaylor@salesforce.com>
Authored: Thu Jan 14 18:15:52 2016 -0800
Committer: James Taylor <jtaylor@salesforce.com>
Committed: Sat Jan 16 17:56:10 2016 -0800

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/BaseViewIT.java  |  60 ++-
 .../java/org/apache/phoenix/end2end/ViewIT.java |  36 +-
 .../apache/phoenix/compile/PostDDLCompiler.java |   2 +-
 .../apache/phoenix/schema/MetaDataClient.java   | 389 +++++++++----------
 4 files changed, 269 insertions(+), 218 deletions(-)
----------------------------------------------------------------------
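For context, the scenario this change addresses can be sketched as follows. This is an illustrative sketch, not part of the commit: the table, view, and index names, the JDBC URL, and the query predicates are placeholders patterned on the ViewIT test in the diff below. The point is that a query against a view should be able to use a LOCAL index defined on the view's physical table, with the plan scanning the shared _LOCAL_IDX_ table rather than the data table.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    import org.apache.phoenix.util.QueryUtil;

    public class LocalIndexOnViewSketch {
        public static void main(String[] args) throws Exception {
            // The JDBC URL is an assumption; point it at your Phoenix cluster.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute(
                        "CREATE TABLE T (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, k3 DECIMAL, "
                        + "s1 VARCHAR, s2 VARCHAR CONSTRAINT pk PRIMARY KEY (k1, k2, k3))");
                // A LOCAL index on the physical table, as in ViewIT.testViewUsesTableLocalIndex()
                conn.createStatement().execute(
                        "CREATE LOCAL INDEX I1 ON T(k3, k2) INCLUDE(s1, s2)");
                // A view over the physical table
                conn.createStatement().execute(
                        "CREATE VIEW V AS SELECT * FROM T WHERE s1 = 'foo'");
                // With this fix, a view query the index can serve is planned against
                // the shared local index table (_LOCAL_IDX_T) instead of T.
                ResultSet rs = conn.createStatement().executeQuery(
                        "EXPLAIN SELECT k1, k2, s2 FROM V WHERE k3 = 1 AND k2 = 100");
                System.out.println(QueryUtil.getExplainPlan(rs));
            }
        }
    }
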


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0af73229/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
index c713df4..7e7175f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
@@ -27,11 +27,20 @@ import java.math.BigDecimal;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.QueryUtil;
@@ -81,20 +90,40 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
     }
     
     protected void testUpdatableViewWithIndex(Integer saltBuckets, boolean localIndex) throws Exception {
-        testUpdatableView(saltBuckets);
-        testUpdatableViewIndex(saltBuckets, localIndex);
+        String viewName = testUpdatableView(saltBuckets);
+        Pair<String,Scan> pair = testUpdatableViewIndex(saltBuckets, localIndex);
+        Scan scan = pair.getSecond();
+        String tableName = pair.getFirst();
+        // Confirm that dropping the view also deletes the rows in the index
+        if (saltBuckets == null) {
+            try (Connection conn = DriverManager.getConnection(getUrl())) {
+                HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
+                ResultScanner scanner = htable.getScanner(scan);
+                Result result = scanner.next();
+                // Confirm index has rows
+                assertTrue(result != null && !result.isEmpty());
+                
+                conn.createStatement().execute("DROP VIEW " + viewName);
+                
+                // Confirm index has no rows after view is dropped
+                scanner = htable.getScanner(scan);
+                result = scanner.next();
+                assertTrue(result == null || result.isEmpty());
+            }
+        }
     }
 
-    protected void testUpdatableView(Integer saltBuckets) throws Exception {
+    protected String testUpdatableView(Integer saltBuckets) throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());
 		if (saltBuckets!=null) {
 			if (tableDDLOptions.length()!=0)
 				tableDDLOptions+=",";
 			tableDDLOptions+=(" SALT_BUCKETS="+saltBuckets);
 		}
+		String viewName = "V";
         String ddl = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, k3 DECIMAL, s VARCHAR CONSTRAINT pk PRIMARY KEY (k1, k2, k3))" + tableDDLOptions;
         conn.createStatement().execute(ddl);
-        ddl = "CREATE VIEW v AS SELECT * FROM " + fullTableName + " WHERE k1 = 1";
+        ddl = "CREATE VIEW " + viewName + " AS SELECT * FROM " + fullTableName + " WHERE k1 = 1";
         conn.createStatement().execute(ddl);
         for (int i = 0; i < 10; i++) {
             conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(" + (i % 4) + "," + (i+100) + "," + (i > 5 ? 2 : 1) + ")");
@@ -135,13 +164,14 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
         assertEquals(121, rs.getInt(2));
         assertFalse(rs.next());
         conn.close();
+        return viewName;
     }
 
-    protected void testUpdatableViewIndex(Integer saltBuckets) throws Exception {
-        testUpdatableViewIndex(saltBuckets, false);
+    protected Pair<String,Scan> testUpdatableViewIndex(Integer saltBuckets) throws Exception {
+        return testUpdatableViewIndex(saltBuckets, false);
     }
 
-    protected void testUpdatableViewIndex(Integer saltBuckets, boolean localIndex) throws Exception {
+    protected Pair<String,Scan> testUpdatableViewIndex(Integer saltBuckets, boolean localIndex) throws Exception {
         ResultSet rs;
         Connection conn = DriverManager.getConnection(getUrl());
         if (localIndex) {
@@ -196,28 +226,32 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
 
         
         query = "SELECT k1, k2, s FROM v WHERE s = 'foo'";
-        rs = conn.createStatement().executeQuery(query);
+        Statement statement = conn.createStatement();
+        rs = statement.executeQuery(query);
+        Scan scan = statement.unwrap(PhoenixStatement.class).getQueryPlan().getContext().getScan();
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
         assertEquals(120, rs.getInt(2));
         assertEquals("foo", rs.getString(3));
         assertFalse(rs.next());
+        String htableName;
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         if (localIndex) {
-            assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : saltBuckets)  +"-WAY RANGE SCAN OVER _LOCAL_IDX_" + tableName +" [" + (Short.MIN_VALUE+1) + ",'foo']\n"
+            htableName = "_LOCAL_IDX_" + tableName;
+            assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : saltBuckets)  +"-WAY RANGE SCAN OVER " + htableName +" [" + (Short.MIN_VALUE+1) + ",'foo']\n"
                     + "    SERVER FILTER BY FIRST KEY ONLY\n"
                     + "CLIENT MERGE SORT",QueryUtil.getExplainPlan(rs));
         } else {
+            htableName = "_IDX_" + tableName;
             assertEquals(saltBuckets == null
-                    ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName +" [" + (Short.MIN_VALUE+1) + ",'foo']\n"
+                    ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + htableName +" [" + (Short.MIN_VALUE+1) + ",'foo']\n"
                             + "    SERVER FILTER BY FIRST KEY ONLY"
-                            : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER _IDX_T" + (transactional ? "_TXN" : "") + " [0," + (Short.MIN_VALUE+1) + ",'foo']\n"
+                            : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + htableName + " [0," + (Short.MIN_VALUE+1) + ",'foo']\n"
                                     + "    SERVER FILTER BY FIRST KEY ONLY\n"
                                     + "CLIENT MERGE SORT",
                             QueryUtil.getExplainPlan(rs));
         }
         conn.close();
+        return new Pair<>(htableName,scan);
     }
-
-
 }
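
The drop-view verification added to BaseViewIT above can be condensed into a helper along these lines. This is a sketch, not the committed code: viewName, htableName, and viewQuery are assumed to be supplied by the surrounding test, assertTrue comes from JUnit, and the imports match the ones added to BaseViewIT in this commit.

    // Re-running the compiled Scan against the physical index table tells us
    // whether index rows survive the DROP VIEW (they should not).
    private void assertIndexRowsDroppedWithView(Connection conn, String viewName,
            String htableName, String viewQuery) throws Exception {
        Statement stmt = conn.createStatement();
        stmt.executeQuery(viewQuery);
        // Capture the HBase Scan that the query plan compiled to.
        Scan scan = stmt.unwrap(PhoenixStatement.class).getQueryPlan().getContext().getScan();
        HTableInterface htable = conn.unwrap(PhoenixConnection.class)
                .getQueryServices().getTable(Bytes.toBytes(htableName));
        // Index has rows while the view exists...
        Result before = htable.getScanner(scan).next();
        assertTrue(before != null && !before.isEmpty());

        conn.createStatement().execute("DROP VIEW " + viewName);

        // ...and none once the view (and hence its index data) is dropped.
        Result after = htable.getScanner(scan).next();
        assertTrue(after == null || after.isEmpty());
    }
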

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0af73229/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 55f26cd..146fb1f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -450,13 +450,26 @@ public class ViewIT extends BaseViewIT {
         }
 	}
 	
-	@Test
-    public void testViewUsesTableIndex() throws Exception {
+    @Test
+    public void testViewUsesTableGlobalIndex() throws Exception {
+        testViewUsesTableIndex(false);
+    }
+    
+    @Test
+    public void testViewUsesTableLocalIndex() throws Exception {
+        testViewUsesTableIndex(true);
+    }
+
+    
+    private void testViewUsesTableIndex(boolean localIndex) throws Exception {
         ResultSet rs;
+        // Use unique name for table with local index as otherwise we run into issues
+        // when we attempt to drop the table (with the drop metadata option set to false)
+        String fullTableName = this.fullTableName + (localIndex ? "_WITH_LI" : "_WITHOUT_LI");
         Connection conn = DriverManager.getConnection(getUrl());
         String ddl = "CREATE TABLE " + fullTableName + "  (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, k3 DECIMAL, s1 VARCHAR, s2 VARCHAR CONSTRAINT pk PRIMARY KEY (k1, k2, k3))" + tableDDLOptions;
         conn.createStatement().execute(ddl);
-        conn.createStatement().execute("CREATE INDEX i1 ON " + fullTableName + "(k3, k2) INCLUDE(s1, s2)");
+        conn.createStatement().execute("CREATE " + (localIndex ? "LOCAL " : "") + " INDEX i1 ON " + fullTableName + "(k3, k2) INCLUDE(s1, s2)");
         conn.createStatement().execute("CREATE INDEX i2 ON " + fullTableName + "(k3, k2, s2)");
         
         ddl = "CREATE VIEW v AS SELECT * FROM " + fullTableName + " WHERE s1 = 'foo'";
@@ -481,11 +494,18 @@ public class ViewIT extends BaseViewIT {
         assertFalse(rs.next());
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         String queryPlan = QueryUtil.getExplainPlan(rs);
-        assertEquals(
-                "CLIENT PARALLEL 1-WAY SKIP SCAN ON 4 KEYS OVER I1 [1,100] - [2,109]\n" + 
-                "    SERVER FILTER BY (\"S2\" = 'bas' AND \"S1\" = 'foo')", queryPlan);
-    }
-	
+        // Assert that in either case (local & global) the index from the physical table is used for the query on the view.
+        if (localIndex) {
+            assertEquals("CLIENT PARALLEL 1-WAY SKIP SCAN ON 4 KEYS OVER _LOCAL_IDX_" + fullTableName + " [-32768,1,100] - [-32768,2,109]\n" + 
+                    "    SERVER FILTER BY (\"S2\" = 'bas' AND \"S1\" = 'foo')\n" + 
+                    "CLIENT MERGE SORT", queryPlan);
+        } else {
+            assertEquals(
+                    "CLIENT PARALLEL 1-WAY SKIP SCAN ON 4 KEYS OVER I1 [1,100] - [2,109]\n" + 
+                    "    SERVER FILTER BY (\"S2\" = 'bas' AND \"S1\" = 'foo')", queryPlan);
+        }
+    }    
+
     @Test
     public void testCreateViewDefinesPKColumn() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0af73229/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
index 192a031..2659b3f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
@@ -127,7 +127,7 @@ public class PostDDLCompiler {
             @Override
             public MutationState execute() throws SQLException {
                 if (tableRefs.isEmpty()) {
-                    return null;
+                    return new MutationState(0, connection);
                 }
                 boolean wasAutoCommit = connection.getAutoCommit();
                 try {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0af73229/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index f1bb582..ee8fba3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -113,8 +113,6 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 
-import co.cask.tephra.TxConstants;
-
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -217,6 +215,8 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.primitives.Ints;
 
+import co.cask.tephra.TxConstants;
+
 public class MetaDataClient {
     private static final Logger logger = LoggerFactory.getLogger(MetaDataClient.class);
 
@@ -441,7 +441,7 @@ public class MetaDataClient {
     }
     
     private long getCurrentScn() {
-    	Long scn = connection.getSCN();
+        Long scn = connection.getSCN();
         long currentScn = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
         return currentScn;
     }
@@ -465,20 +465,20 @@ public class MetaDataClient {
         }
         
         boolean defaultTransactional = connection.getQueryServices().getProps().getBoolean(
-							                QueryServices.DEFAULT_TABLE_ISTRANSACTIONAL_ATTRIB,
-							                QueryServicesOptions.DEFAULT_TRANSACTIONAL);
+                                            QueryServices.DEFAULT_TABLE_ISTRANSACTIONAL_ATTRIB,
+                                            QueryServicesOptions.DEFAULT_TRANSACTIONAL);
         // start a txn if all table are transactional by default or if we found the table in the cache and it is transactional
         // TODO if system tables become transactional remove the check 
         boolean isTransactional = defaultTransactional || (table!=null && table.isTransactional());
         if (!systemTable && isTransactional && !connection.getMutationState().isTransactionStarted()) {
-        	connection.getMutationState().startTransaction();
+            connection.getMutationState().startTransaction();
         }
         resolvedTimestamp = resolvedTimestamp==null ? TransactionUtil.getResolvedTimestamp(connection, isTransactional, HConstants.LATEST_TIMESTAMP) : resolvedTimestamp;
         // Do not make rpc to getTable if 
         // 1. table is a system table
         // 2. table was already resolved as of that timestamp
-		if (table != null && !alwaysHitServer
-				&& (systemTable || resolvedTimestamp == tableResolvedTimestamp)) {
+        if (table != null && !alwaysHitServer
+                && (systemTable || resolvedTimestamp == tableResolvedTimestamp)) {
             return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, QueryConstants.UNSET_TIMESTAMP, table);
         }
 
@@ -490,11 +490,11 @@ public class MetaDataClient {
             final byte[] schemaBytes = PVarchar.INSTANCE.toBytes(schemaName);
             final byte[] tableBytes = PVarchar.INSTANCE.toBytes(tableName);
             ConnectionQueryServices queryServices = connection.getQueryServices();
-			result = queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, resolvedTimestamp);
-			// if the table was assumed to be transactional, but is actually not transactional then re-resolve as of the right timestamp (and vice versa) 
-			if (table==null && result.getTable()!=null && result.getTable().isTransactional()!=isTransactional) {
-				result = queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, TransactionUtil.getResolvedTimestamp(connection, result.getTable().isTransactional(), HConstants.LATEST_TIMESTAMP));
-			}
+            result = queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, resolvedTimestamp);
+            // if the table was assumed to be transactional, but is actually not transactional then re-resolve as of the right timestamp (and vice versa) 
+            if (table==null && result.getTable()!=null && result.getTable().isTransactional()!=isTransactional) {
+                result = queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, TransactionUtil.getResolvedTimestamp(connection, result.getTable().isTransactional(), HConstants.LATEST_TIMESTAMP));
+            }
 
             if (SYSTEM_CATALOG_SCHEMA.equals(schemaName)) {
                 return result;
@@ -520,18 +520,18 @@ public class MetaDataClient {
                 if (table != null) {
                     // Ensures that table in result is set to table found in our cache.
                     if (code == MutationCode.TABLE_ALREADY_EXISTS) {
-                    	result.setTable(table);
-                    	// Although this table is up-to-date, the parent table may not be.
-                    	// In this case, we update the parent table which may in turn pull
-                    	// in indexes to add to this table.
-                    	long resolvedTime = TransactionUtil.getResolvedTime(connection, result);
-						if (addIndexesFromPhysicalTable(result, resolvedTimestamp)) {
-                    	    connection.addTable(result.getTable(), resolvedTime);
-                    	}
-                    	else {
-                        	// if we aren't adding the table, we still need to update the resolved time of the table 
-                        	connection.updateResolvedTimestamp(table, resolvedTime);
-                    	}
+                        result.setTable(table);
+                        // Although this table is up-to-date, the parent table may not be.
+                        // In this case, we update the parent table which may in turn pull
+                        // in indexes to add to this table.
+                        long resolvedTime = TransactionUtil.getResolvedTime(connection, result);
+                        if (addIndexesFromPhysicalTable(result, resolvedTimestamp)) {
+                            connection.addTable(result.getTable(), resolvedTime);
+                        }
+                        else {
+                            // if we aren't adding the table, we still need to update the resolved time of the table 
+                            connection.updateResolvedTimestamp(table, resolvedTime);
+                        }
                         return result;
                     }
                     // If table was not found at the current time stamp and we have one cached, remove it.
@@ -628,12 +628,12 @@ public class MetaDataClient {
      * @throws SQLException if the physical table cannot be found
      */
     private boolean addIndexesFromPhysicalTable(MetaDataMutationResult result, Long resolvedTimestamp) throws SQLException {
-        PTable table = result.getTable();
+        PTable view = result.getTable();
         // If not a view or if a view directly over an HBase table, there's nothing to do
-        if (table.getType() != PTableType.VIEW || table.getViewType() == ViewType.MAPPED) {
+        if (view.getType() != PTableType.VIEW || view.getViewType() == ViewType.MAPPED) {
             return false;
         }
-        String physicalName = table.getPhysicalName().getString();
+        String physicalName = view.getPhysicalName().getString();
         String schemaName = SchemaUtil.getSchemaNameFromFullName(physicalName);
         String tableName = SchemaUtil.getTableNameFromFullName(physicalName);
         MetaDataMutationResult parentResult = updateCache(null, schemaName, tableName, false, resolvedTimestamp);
@@ -644,84 +644,72 @@ public class MetaDataClient {
         if (!result.wasUpdated() && !parentResult.wasUpdated()) {
             return false;
         }
-        List<PTable> indexes = physicalTable.getIndexes();
-        if (indexes.isEmpty()) {
+        List<PTable> physicalTableIndexes = physicalTable.getIndexes();
+        if (physicalTableIndexes.isEmpty()) {
             return false;
         }
         // Filter out indexes if column doesn't exist in view
-        List<PTable> allIndexes = Lists.newArrayListWithExpectedSize(indexes.size() + table.getIndexes().size());
+        List<PTable> indexesToAdd = Lists.newArrayListWithExpectedSize(physicalTableIndexes.size() + view.getIndexes().size());
         if (result.wasUpdated()) { // Table from server never contains inherited indexes
-            allIndexes.addAll(table.getIndexes());
+            indexesToAdd.addAll(view.getIndexes());
         } else { // Only add original ones, as inherited ones may have changed
-            for (PTable index : indexes) {
-                if (index.getViewIndexId() != null) {
-                    allIndexes.add(index);
+            for (PTable index : view.getIndexes()) {
+                // Original indexes will not have a view statement while inherited ones will
+                if (index.getViewStatement() == null) {
+                    indexesToAdd.add(index);
                 }
             }
         }
-        for (PTable index : indexes) {
-            if (index.getViewIndexId() == null) {
-                boolean containsAllReqdCols = true;
-                // Ensure that all columns required to create index
-                // exist in the view too (since view columns may be removed)
-                IndexMaintainer indexMaintainer = index.getIndexMaintainer(physicalTable, connection);
-                // check that the columns required for the index pk (not including the pk columns of the data table)
-                // are present in the view
-                Set<ColumnReference> indexColRefs = indexMaintainer.getIndexedColumns();
-                for (ColumnReference colRef : indexColRefs) {
-                    try {
-                        byte[] cf= colRef.getFamily();
-                        byte[] cq= colRef.getQualifier();
-                        if (cf!=null) {
-                            table.getColumnFamily(cf).getColumn(cq);
-                        }
-                        else {
-                            table.getColumn( Bytes.toString(cq));
-                        }
-                    } catch (ColumnNotFoundException e) { // Ignore this index and continue with others
-                        containsAllReqdCols = false;
-                        break;
+        for (PTable index : physicalTableIndexes) {
+            boolean containsAllReqdCols = true;
+            // Ensure that all columns required to create index exist in the view too,
+            // since view columns may be removed.
+            IndexMaintainer indexMaintainer = index.getIndexMaintainer(physicalTable, connection);
+            // Check that the columns required for the index pk are present in the view
+            Set<ColumnReference> indexColRefs = indexMaintainer.getIndexedColumns();
+            for (ColumnReference colRef : indexColRefs) {
+                try {
+                    byte[] cf= colRef.getFamily();
+                    byte[] cq= colRef.getQualifier();
+                    if (cf!=null) {
+                        view.getColumnFamily(cf).getColumn(cq);
+                    }
+                    else {
+                        view.getColumn( Bytes.toString(cq));
                     }
+                } catch (ColumnNotFoundException e) { // Ignore this index and continue with others
+                    containsAllReqdCols = false;
+                    break;
                 }
-                // check that pk columns of the data table (which are also present in the index pk) are present in the view
-                List<PColumn> pkColumns = physicalTable.getPKColumns();
-                for (int i = physicalTable.getBucketNum() == null ? 0 : 1; i < pkColumns.size(); i++) {
+            }
+            // Ensure that constant columns (i.e. columns matched in the view WHERE clause)
+            // all exist in the index on the physical table.
+            for (PColumn col : view.getColumns()) {
+                if (col.getViewConstant() != null) {
                     try {
-                        PColumn pkColumn = pkColumns.get(i);
-                        table.getColumn(pkColumn.getName().getString());
+                        // It'd be possible to use a local index that doesn't have all view constants,
+                        // but the WHERE clause for the view statement (which is added to the index below)
+                        // would fail to compile.
+                        String indexColumnName = IndexUtil.getIndexColumnName(col);
+                        index.getColumn(indexColumnName);
                     } catch (ColumnNotFoundException e) { // Ignore this index and continue with others
                         containsAllReqdCols = false;
                         break;
                     }
                 }
-                // Ensure that constant columns (i.e. columns matched in the view WHERE clause)
-                // all exist in the index on the physical table.
-                for (PColumn col : table.getColumns()) {
-                    if (col.getViewConstant() != null) {
-                        try {
-                            // TODO: it'd be possible to use a local index that doesn't have all view constants
-                            String indexColumnName = IndexUtil.getIndexColumnName(col);
-                            index.getColumn(indexColumnName);
-                        } catch (ColumnNotFoundException e) { // Ignore this index and continue with others
-                            containsAllReqdCols = false;
-                            break;
-                        }
-                    }
-                }
-                if (containsAllReqdCols) {
-                    // Tack on view statement to index to get proper filtering for view
-                    String viewStatement = IndexUtil.rewriteViewStatement(connection, index, physicalTable, table.getViewStatement());
-                    index = PTableImpl.makePTable(index, viewStatement);
-                    allIndexes.add(index);
-                }
+            }
+            if (containsAllReqdCols) {
+                // Tack on view statement to index to get proper filtering for view
+                String viewStatement = IndexUtil.rewriteViewStatement(connection, index, physicalTable, view.getViewStatement());
+                index = PTableImpl.makePTable(index, viewStatement);
+                indexesToAdd.add(index);
             }
         }
-        PTable allIndexesTable = PTableImpl.makePTable(table, table.getTimeStamp(), allIndexes);
+        PTable allIndexesTable = PTableImpl.makePTable(view, view.getTimeStamp(), indexesToAdd);
         result.setTable(allIndexesTable);
         return true;
     }
 
-
     private void addColumnMutation(String schemaName, String tableName, PColumn column, PreparedStatement colUpsert, String parentTableName, String pkName, Short keySeq, boolean isSalted) throws SQLException {
         colUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
         colUpsert.setString(2, schemaName);
@@ -1224,14 +1212,14 @@ public class MetaDataClient {
                 List<PColumn> pkColumns = dataTable.getPKColumns();
                 Set<RowKeyColumnExpression> unusedPkColumns;
                 if (dataTable.getBucketNum() != null) { // Ignore SALT column
-                	unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size()-1);
-                	posOffset++;
+                    unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size()-1);
+                    posOffset++;
                 } else {
-                	unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size());
+                    unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size());
                 }
                 for (int i = posOffset; i < pkColumns.size(); i++) {
                     PColumn column = pkColumns.get(i);
-					unusedPkColumns.add(new RowKeyColumnExpression(column, new RowKeyValueAccessor(pkColumns, i), "\""+column.getName().getString()+"\""));
+                    unusedPkColumns.add(new RowKeyColumnExpression(column, new RowKeyValueAccessor(pkColumns, i), "\""+column.getName().getString()+"\""));
                 }
                 List<ColumnDefInPkConstraint> allPkColumns = Lists.newArrayListWithExpectedSize(unusedPkColumns.size());
                 List<ColumnDef> columnDefs = Lists.newArrayListWithExpectedSize(includedColumns.size() + indexParseNodeAndSortOrderList.size());
@@ -1240,7 +1228,7 @@ public class MetaDataClient {
                     // Add tenant ID column as first column in index
                     PColumn col = dataTable.getPKColumns().get(posOffset);
                     RowKeyColumnExpression columnExpression = new RowKeyColumnExpression(col, new RowKeyValueAccessor(pkColumns, posOffset), col.getName().getString());
-					unusedPkColumns.remove(columnExpression);
+                    unusedPkColumns.remove(columnExpression);
                     PDataType dataType = IndexUtil.getIndexColumnDataType(col);
                     ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
                     allPkColumns.add(new ColumnDefInPkConstraint(colName, col.getSortOrder(), false));
@@ -1265,7 +1253,7 @@ public class MetaDataClient {
                 IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context);
                 Set<ColumnName> indexedColumnNames = Sets.newHashSetWithExpectedSize(indexParseNodeAndSortOrderList.size());
                 for (Pair<ParseNode, SortOrder> pair : indexParseNodeAndSortOrderList) {
-                	ParseNode parseNode = pair.getFirst();
+                    ParseNode parseNode = pair.getFirst();
                     // normalize the parse node
                     parseNode = StatementNormalizer.normalize(parseNode, resolver);
                     // compile the parseNode to get an expression
@@ -1291,22 +1279,22 @@ public class MetaDataClient {
                     
                     ColumnName colName = null;
                     ColumnRef colRef = expressionIndexCompiler.getColumnRef();
-					boolean isRowTimestamp = false;
+                    boolean isRowTimestamp = false;
                     if (colRef!=null) { 
-						// if this is a regular column
-					    PColumn column = colRef.getColumn();
-					    String columnFamilyName = column.getFamilyName()!=null ? column.getFamilyName().getString() : null;
-					    colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(columnFamilyName, column.getName().getString()));
-					    isRowTimestamp = column.isRowTimestamp();
-					}
-					else { 
-						// if this is an expression
-					    // TODO column names cannot have double quotes, remove this once this PHOENIX-1621 is fixed
-						String name = expressionStr.replaceAll("\"", "'");
+                        // if this is a regular column
+                        PColumn column = colRef.getColumn();
+                        String columnFamilyName = column.getFamilyName()!=null ? column.getFamilyName().getString() : null;
+                        colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(columnFamilyName, column.getName().getString()));
+                        isRowTimestamp = column.isRowTimestamp();
+                    }
+                    else { 
+                        // if this is an expression
+                        // TODO column names cannot have double quotes, remove this once this PHOENIX-1621 is fixed
+                        String name = expressionStr.replaceAll("\"", "'");
                         colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, name));
-					}
-					indexedColumnNames.add(colName);
-                	PDataType dataType = IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType());
+                    }
+                    indexedColumnNames.add(colName);
+                    PDataType dataType = IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType());
                     allPkColumns.add(new ColumnDefInPkConstraint(colName, pair.getSecond(), isRowTimestamp));
                     columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), expression.isNullable(), expression.getMaxLength(), expression.getScale(), false, pair.getSecond(), expressionStr, isRowTimestamp));
                 }
@@ -1593,7 +1581,7 @@ public class MetaDataClient {
                 // table instead of only a view? We don't have anywhere to put the link
                 // from the table to the index, though.
                 if (indexType == IndexType.LOCAL || (parent.getType() == PTableType.VIEW && parent.getViewType() != ViewType.MAPPED)) {
-                	PName physicalName = parent.getPhysicalName();
+                    PName physicalName = parent.getPhysicalName();
                     saltBucketNum = parent.getBucketNum();
                     addSaltColumn = (saltBucketNum != null && indexType != IndexType.LOCAL);
                     defaultFamilyName = parent.getDefaultFamilyName() == null ? null : parent.getDefaultFamilyName().getString();
@@ -1728,8 +1716,8 @@ public class MetaDataClient {
                 }
             }
             boolean transactionsEnabled = connection.getQueryServices().getProps().getBoolean(
-											QueryServices.TRANSACTIONS_ENABLED,
-											QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
+                                            QueryServices.TRANSACTIONS_ENABLED,
+                                            QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
             // can't create a transactional table if transactions are not enabled
             if (!transactionsEnabled && transactional) {
                 throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TXN_TABLE_IF_TXNS_DISABLED)
@@ -1738,7 +1726,7 @@ public class MetaDataClient {
             }
             // can't create a transactional table if it has a row timestamp column
             if (pkConstraint.getNumColumnsWithRowTimestamp()>0 && transactional) {
-            	throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TXN_TABLE_WITH_ROW_TIMESTAMP)
+                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TXN_TABLE_WITH_ROW_TIMESTAMP)
                 .setSchemaName(schemaName).setTableName(tableName)
                 .build().buildException();
             }
@@ -1762,16 +1750,16 @@ public class MetaDataClient {
                 // on the server, but this can mess up our secondary index code as the changes get
                 // committed prior to the
                 // maintenance code being able to see the prior state to update the rows correctly.
-            	if (Boolean.FALSE.equals(storeNullsProp)) {
-            		throw new SQLExceptionInfo.Builder(SQLExceptionCode.STORE_NULLS_MUST_BE_TRUE_FOR_TRANSACTIONAL)
-            		.setSchemaName(schemaName).setTableName(tableName)
-            		.build().buildException();
-            	}
-            	// Force STORE_NULLS to true when transactional as Tephra cannot deal with column deletes
-            	storeNulls = true;
-            	tableProps.put(PhoenixDatabaseMetaData.STORE_NULLS, Boolean.TRUE);
-            	
-            	if (!sharedTable) {
+                if (Boolean.FALSE.equals(storeNullsProp)) {
+                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.STORE_NULLS_MUST_BE_TRUE_FOR_TRANSACTIONAL)
+                    .setSchemaName(schemaName).setTableName(tableName)
+                    .build().buildException();
+                }
+                // Force STORE_NULLS to true when transactional as Tephra cannot deal with column deletes
+                storeNulls = true;
+                tableProps.put(PhoenixDatabaseMetaData.STORE_NULLS, Boolean.TRUE);
+                
+                if (!sharedTable) {
                     Integer maxVersionsProp = (Integer) commonFamilyProps.get(HConstants.VERSIONS);
                     if (maxVersionsProp == null) {
                         if (parent != null) {
@@ -1787,7 +1775,7 @@ public class MetaDataClient {
                         }
                         commonFamilyProps.put(HConstants.VERSIONS, maxVersionsProp);
                     }
-            	}
+                }
             }
             timestamp = timestamp==null ? TransactionUtil.getTableTimestamp(connection, transactional) : timestamp;
 
@@ -2016,8 +2004,8 @@ public class MetaDataClient {
             }
 
             if (familyNames.isEmpty()) {
-            	//if there are no family names, use the default column family name. This also takes care of the case when
-            	//the table ddl has only PK cols present (which means familyNames is empty).
+                //if there are no family names, use the default column family name. This also takes care of the case when
+                //the table ddl has only PK cols present (which means familyNames is empty).
                 byte[] cf = defaultFamilyName == null ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : Bytes.toBytes(defaultFamilyName);
                 familyPropList.add(new Pair<byte[],Map<String,Object>>(cf, commonFamilyProps));
             }
@@ -2353,7 +2341,6 @@ public class MetaDataClient {
             MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, tableType, cascade);
             MutationCode code = result.getMutationCode();
             PTable table = result.getTable();
-			boolean transactional = table!=null && table.isTransactional();
             switch (code) {
             case TABLE_NOT_FOUND:
                 if (!ifExists) { throw new TableNotFoundException(schemaName, tableName); }
@@ -2365,54 +2352,64 @@ public class MetaDataClient {
 
                 .setSchemaName(schemaName).setTableName(tableName).build().buildException();
             default:
-				connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentTableName, result.getMutationTime());
+                connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentTableName, result.getMutationTime());
 
-                if (result.getTable() != null && tableType != PTableType.VIEW) {
-                    connection.setAutoCommit(true);
-                    boolean dropMetaData = result.getTable().getViewIndexId() == null &&
-                            connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
+                if (result.getTable() != null) {
+                    boolean dropMetaData = false;
                     long ts = (scn == null ? result.getMutationTime() : scn);
-                    // Create empty table and schema - they're only used to get the name from
-                    // PName name, PTableType type, long timeStamp, long sequenceNumber, List<PColumn> columns
                     List<TableRef> tableRefs = Lists.newArrayListWithExpectedSize(2 + table.getIndexes().size());
-                    // All multi-tenant tables have a view index table, so no need to check in that case
-                    if (tableType == PTableType.TABLE
-                            && (table.isMultiTenant() || hasViewIndexTable || hasLocalIndexTable)) {
-
-                        MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName());
-                        if (hasViewIndexTable) {
-                            String viewIndexSchemaName = null;
-                            String viewIndexTableName = null;
-                            if (schemaName != null) {
-                                viewIndexSchemaName = MetaDataUtil.getViewIndexTableName(schemaName);
-                                viewIndexTableName = tableName;
-                            } else {
-                                viewIndexTableName = MetaDataUtil.getViewIndexTableName(tableName);
+                    connection.setAutoCommit(true);
+                    if (tableType == PTableType.VIEW) {
+                        for (PTable index : table.getIndexes()) {
+                            // Don't drop index data from indexes on physical table
+                            if (index.getViewStatement() == null) {
+                                tableRefs.add(new TableRef(null, index, ts, false));
                             }
-                            PTable viewIndexTable = new PTableImpl(null, viewIndexSchemaName, viewIndexTableName, ts,
-                                    table.getColumnFamilies());
-                            tableRefs.add(new TableRef(null, viewIndexTable, ts, false));
                         }
-                        if (hasLocalIndexTable) {
-                            String localIndexSchemaName = null;
-                            String localIndexTableName = null;
-                            if (schemaName != null) {
-                                localIndexSchemaName = MetaDataUtil.getLocalIndexTableName(schemaName);
-                                localIndexTableName = tableName;
-                            } else {
-                                localIndexTableName = MetaDataUtil.getLocalIndexTableName(tableName);
+                    } else {
+                        dropMetaData = result.getTable().getViewIndexId() == null &&
+                                connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
+                        // Create empty table and schema - they're only used to get the name from
+                        // PName name, PTableType type, long timeStamp, long sequenceNumber, List<PColumn> columns
+                        // All multi-tenant tables have a view index table, so no need to check in that case
+                        if (tableType == PTableType.TABLE
+                                && (table.isMultiTenant() || hasViewIndexTable || hasLocalIndexTable)) {
+    
+                            MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName());
+                            if (hasViewIndexTable) {
+                                String viewIndexSchemaName = null;
+                                String viewIndexTableName = null;
+                                if (schemaName != null) {
+                                    viewIndexSchemaName = MetaDataUtil.getViewIndexTableName(schemaName);
+                                    viewIndexTableName = tableName;
+                                } else {
+                                    viewIndexTableName = MetaDataUtil.getViewIndexTableName(tableName);
+                                }
+                                PTable viewIndexTable = new PTableImpl(null, viewIndexSchemaName, viewIndexTableName, ts,
+                                        table.getColumnFamilies());
+                                tableRefs.add(new TableRef(null, viewIndexTable, ts, false));
+                            }
+                            if (hasLocalIndexTable) {
+                                String localIndexSchemaName = null;
+                                String localIndexTableName = null;
+                                if (schemaName != null) {
+                                    localIndexSchemaName = MetaDataUtil.getLocalIndexTableName(schemaName);
+                                    localIndexTableName = tableName;
+                                } else {
+                                    localIndexTableName = MetaDataUtil.getLocalIndexTableName(tableName);
+                                }
+                                PTable localIndexTable = new PTableImpl(null, localIndexSchemaName, localIndexTableName,
+                                        ts, Collections.<PColumnFamily> emptyList());
+                                tableRefs.add(new TableRef(null, localIndexTable, ts, false));
                             }
-                            PTable localIndexTable = new PTableImpl(null, localIndexSchemaName, localIndexTableName,
-                                    ts, Collections.<PColumnFamily> emptyList());
-                            tableRefs.add(new TableRef(null, localIndexTable, ts, false));
                         }
+                        tableRefs.add(new TableRef(null, table, ts, false));
+                        // TODO: Let the standard mutable secondary index maintenance handle this?
+                        for (PTable index : table.getIndexes()) {
+                            tableRefs.add(new TableRef(null, index, ts, false));
+                        }
+                        deleteFromStatsTable(tableRefs, ts);
                     }
-                    tableRefs.add(new TableRef(null, table, ts, false));
-                    // TODO: Let the standard mutable secondary index maintenance handle this?
-                    for (PTable index : table.getIndexes()) {
-                        tableRefs.add(new TableRef(null, index, ts, false));
-                    }
-                    deleteFromStatsTable(tableRefs, ts);
                     if (!dropMetaData) {
                         MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null,
                                 Collections.<PColumn> emptyList(), ts);
@@ -2589,7 +2586,7 @@ public class MetaDataClient {
             ListMultimap<String,Pair<String,Object>> stmtProperties = statement.getProps();
             Map<String, List<Pair<String, Object>>> properties = new HashMap<>(stmtProperties.size());
             TableRef tableRef = FromCompiler.getResolver(statement, connection).getTables().get(0);
-			PTable table = tableRef.getTable();
+            PTable table = tableRef.getTable();
             List<ColumnDef> columnDefs = statement.getColumnDefs();
             if (columnDefs == null) {
                 columnDefs = Collections.emptyList();
@@ -2686,15 +2683,15 @@ public class MetaDataClient {
                         }
                         // cannot create a transactional table if transactions are disabled
                         boolean transactionsEnabled = connection.getQueryServices().getProps().getBoolean(
-								QueryServices.TRANSACTIONS_ENABLED,
-								QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
+                                QueryServices.TRANSACTIONS_ENABLED,
+                                QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
                         if (!transactionsEnabled) {
-                        	throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_IF_TXNS_DISABLED)
+                            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_IF_TXNS_DISABLED)
                             .setSchemaName(schemaName).setTableName(tableName).build().buildException();
                         }
                         // cannot make a table transactional if it has a row timestamp column
                         if (SchemaUtil.hasRowTimestampColumn(table)) {
-                        	throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_WITH_ROW_TIMESTAMP)
+                            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_WITH_ROW_TIMESTAMP)
                             .setSchemaName(schemaName).setTableName(tableName)
                             .build().buildException();
                         }
@@ -2748,20 +2745,20 @@ public class MetaDataClient {
 
                     // Add any new PK columns to end of index PK
                     if (numPkColumnsAdded>0) {
-                    	// create PK column list that includes the newly created columns
-                    	List<PColumn> pkColumns = Lists.newArrayListWithExpectedSize(table.getPKColumns().size()+numPkColumnsAdded);
-                    	pkColumns.addAll(table.getPKColumns());
-                    	for (int i=0; i<columnDefs.size(); ++i) {
-                    		if (columnDefs.get(i).isPK()) {
-                    			pkColumns.add(columns.get(i));
-                    		}
-                    	}
-                    	int pkSlotPosition = table.getPKColumns().size()-1;
+                        // create PK column list that includes the newly created columns
+                        List<PColumn> pkColumns = Lists.newArrayListWithExpectedSize(table.getPKColumns().size()+numPkColumnsAdded);
+                        pkColumns.addAll(table.getPKColumns());
+                        for (int i=0; i<columnDefs.size(); ++i) {
+                            if (columnDefs.get(i).isPK()) {
+                                pkColumns.add(columns.get(i));
+                            }
+                        }
+                        int pkSlotPosition = table.getPKColumns().size()-1;
                         for (PTable index : table.getIndexes()) {
                             short nextIndexKeySeq = SchemaUtil.getMaxKeySeq(index);
                             int indexPosition = index.getColumns().size();
                             for (int i=0; i<columnDefs.size(); ++i) {
-                            	ColumnDef colDef = columnDefs.get(i);
+                                ColumnDef colDef = columnDefs.get(i);
                                 if (colDef.isPK()) {
                                     PDataType indexColDataType = IndexUtil.getIndexColumnDataType(colDef.isNull(), colDef.getDataType());
                                     ColumnName indexColName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, colDef.getColumnDefName().getColumnName()));
@@ -2848,18 +2845,18 @@ public class MetaDataClient {
                     // transitioning a table from non transactional to transactional.
                     // We could update the cache manually then too, it'd just be a pain.
                     if (table.getIndexes().isEmpty() || (numPkColumnsAdded==0 && !nonTxToTx)) {
-						connection.addColumn(
-								tenantId,
-								SchemaUtil.getTableName(schemaName, tableName),
-								columns,
-								result.getMutationTime(),
-								seqNum,
-								isImmutableRows == null ? table.isImmutableRows() : isImmutableRows,
-								disableWAL == null ? table.isWALDisabled() : disableWAL,
-								multiTenant == null ? table.isMultiTenant() : multiTenant,
-								storeNulls == null ? table.getStoreNulls() : storeNulls, 
-								isTransactional == null ? table.isTransactional() : isTransactional,
-								TransactionUtil.getResolvedTime(connection, result));
+                        connection.addColumn(
+                                tenantId,
+                                SchemaUtil.getTableName(schemaName, tableName),
+                                columns,
+                                result.getMutationTime(),
+                                seqNum,
+                                isImmutableRows == null ? table.isImmutableRows() : isImmutableRows,
+                                disableWAL == null ? table.isWALDisabled() : disableWAL,
+                                multiTenant == null ? table.isMultiTenant() : multiTenant,
+                                storeNulls == null ? table.getStoreNulls() : storeNulls, 
+                                isTransactional == null ? table.isTransactional() : isTransactional,
+                                TransactionUtil.getResolvedTime(connection, result));
                     }
                     // Delete rows in view index if we haven't dropped it already
                     // We only need to do this if the multiTenant transitioned to false
@@ -2991,7 +2988,7 @@ public class MetaDataClient {
             while (true) {
                 final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
                 TableRef tableRef = resolver.getTables().get(0);
-				PTable table = tableRef.getTable();
+                PTable table = tableRef.getTable();
                 List<ColumnName> columnRefs = statement.getColumnRefs();
                 if(columnRefs == null) {
                     columnRefs = Lists.newArrayListWithCapacity(0);
@@ -3022,7 +3019,7 @@ public class MetaDataClient {
 
                 dropColumnMutations(table, tableColumnsToDrop, tableMetaData);
                 for (PTable index : table.getIndexes()) {
-                	IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
+                    IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
                     // get the columns required for the index pk
                     Set<ColumnReference> indexColumns = indexMaintainer.getIndexedColumns();
                     // get the covered columns 
@@ -3030,11 +3027,11 @@ public class MetaDataClient {
                     List<PColumn> indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
                     for(PColumn columnToDrop : tableColumnsToDrop) {
                         ColumnReference columnToDropRef = new ColumnReference(columnToDrop.getFamilyName().getBytes(), columnToDrop.getName().getBytes());
-						if (indexColumns.contains(columnToDropRef)) {
+                        if (indexColumns.contains(columnToDropRef)) {
                             indexesToDrop.add(new TableRef(index));
                         } 
                         else if (coveredColumns.contains(columnToDropRef)) {
-                        	String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
+                            String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
                             indexColumnsToDrop.add(index.getColumn(indexColumnName));
                         }
                     }
@@ -3186,7 +3183,7 @@ public class MetaDataClient {
                 }
             }
             Long timeStamp = indexRef.getTable().isTransactional() ? indexRef.getTimeStamp() : null;
-			List<Mutation> tableMetadata = connection.getMutationState().toMutations(timeStamp).next().getSecond();
+            List<Mutation> tableMetadata = connection.getMutationState().toMutations(timeStamp).next().getSecond();
             connection.rollback();
 
             MetaDataMutationResult result = connection.getQueryServices().updateIndexState(tableMetadata, dataTableName);
@@ -3256,7 +3253,7 @@ public class MetaDataClient {
          */
         boolean isSharedIndex = table.getViewIndexId() != null;
         if (isSharedIndex) {
-        	// we are assuming the stats table is not transactional
+            // we are assuming the stats table is not transactional
             return connection.getQueryServices().getTableStats(table.getPhysicalName().getBytes(), getCurrentScn());
         }
         boolean isView = table.getType() == PTableType.VIEW;
@@ -3297,7 +3294,7 @@ public class MetaDataClient {
     }
     
     private PTable getParentOfView(PTable view) throws SQLException {
-    	//TODO just use view.getParentName().getString() after implementing https://issues.apache.org/jira/browse/PHOENIX-2114 
+        //TODO just use view.getParentName().getString() after implementing https://issues.apache.org/jira/browse/PHOENIX-2114 
         SelectStatement select = new SQLParser(view.getViewStatement()).parseQuery();
         String parentName = SchemaUtil.normalizeFullTableName(select.getFrom().toString().trim());
         return connection.getTable(new PTableKey(view.getTenantId(), parentName));

