phoenix-commits mailing list archives

From tdsi...@apache.org
Subject phoenix git commit: PHOENIX-2051 Link record is in the format CHILD-PARENT for phoenix views and it has to scan the entire table to find the parent suffix
Date Wed, 15 Mar 2017 02:15:42 GMT
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 8a2c74244 -> 2f8153bd1


PHOENIX-2051 Link record is in the format CHILD-PARENT for phoenix views and it has to scan
the entire table to find the parent suffix


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2f8153bd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2f8153bd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2f8153bd

Branch: refs/heads/4.x-HBase-1.1
Commit: 2f8153bd15c8875c992e79dd97f1372321fa9c39
Parents: 8a2c742
Author: Thomas D'Silva <tdsilva@salesforce.com>
Authored: Fri Feb 17 15:50:24 2017 -0800
Committer: Thomas D'Silva <tdsilva@apache.org>
Committed: Tue Mar 14 19:15:27 2017 -0700

----------------------------------------------------------------------
 .../coprocessor/MetaDataEndpointImpl.java       | 213 +++++++++----------
 .../phoenix/coprocessor/MetaDataProtocol.java   |   4 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   4 +-
 .../query/ConnectionQueryServicesImpl.java      |   4 +
 .../apache/phoenix/schema/MetaDataClient.java   |  24 ++-
 .../java/org/apache/phoenix/schema/PTable.java  |  10 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |  17 ++
 .../org/apache/phoenix/util/UpgradeUtil.java    |  73 +++++++
 8 files changed, 228 insertions(+), 121 deletions(-)
----------------------------------------------------------------------
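
Editorial note, not part of the patch: with this change a CREATE VIEW also writes a parent->child link row (the new LinkType.CHILD_TABLE, serialized value 4) keyed under the parent table in SYSTEM.CATALOG, so a parent's child views can be found with a bounded range scan instead of a suffix match over the entire catalog. A minimal sketch, with a hypothetical JDBC URL and hypothetical table/view names:

    // Sketch only -- the URL, schema and table names are assumptions, not taken from the patch.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class CreateViewChildLink {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE IF NOT EXISTS MY_SCHEMA.MY_TABLE (PK VARCHAR PRIMARY KEY, V1 VARCHAR)");
                stmt.execute("CREATE VIEW IF NOT EXISTS MY_SCHEMA.MY_VIEW AS SELECT * FROM MY_SCHEMA.MY_TABLE");
                // Besides the existing view -> parent link rows stored under the view's own key,
                // SYSTEM.CATALOG now also holds a row keyed under MY_SCHEMA.MY_TABLE with
                // LINK_TYPE = 4, COLUMN_NAME = the view's tenant id (null here) and
                // COLUMN_FAMILY = "MY_SCHEMA.MY_VIEW".
            }
        }
    }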


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f8153bd/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index e939436..800b8a1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -55,6 +55,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT_BYTES
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NULLABLE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_ARGS_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PARENT_TENANT_ID_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PK_NAME_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.RETURN_TYPE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES;
@@ -115,7 +116,6 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -972,36 +972,32 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 (!EncodedColumnsUtil.usesEncodedColumnNames(encodingScheme) || tableType == PTableType.VIEW) ? PTable.EncodedCQCounter.NULL_COUNTER
                         : new EncodedCQCounter();
         while (true) {
-            results.clear();
-            scanner.next(results);
-            if (results.isEmpty()) {
-                break;
-            }
-            Cell colKv = results.get(LINK_TYPE_INDEX);
-            if (colKv != null) {
-                int colKeyLength = colKv.getRowLength();
-                PName colName = newPName(colKv.getRowArray(), colKv.getRowOffset() + offset, colKeyLength-offset);
-                int colKeyOffset = offset + colName.getBytes().length + 1;
-                PName famName = newPName(colKv.getRowArray(), colKv.getRowOffset() + colKeyOffset, colKeyLength-colKeyOffset);
-                if (isQualifierCounterKV(colKv)) {
-                    Integer value = PInteger.INSTANCE.getCodec().decodeInt(colKv.getValueArray(), colKv.getValueOffset(), SortOrder.ASC);
-                    cqCounter.setValue(famName.getString(), value);
-                } else {
-                    if (colName.getString().isEmpty() && famName != null) {
-                        LinkType linkType = LinkType.fromSerializedValue(colKv.getValueArray()[colKv.getValueOffset()]);
-                        if (linkType == LinkType.INDEX_TABLE) {
-                            addIndexToTable(tenantId, schemaName, famName, tableName, clientTimeStamp, indexes);
-                        } else if (linkType == LinkType.PHYSICAL_TABLE) {
-                            physicalTables.add(famName);
-                        } else if (linkType == LinkType.PARENT_TABLE) {
-                            parentTableName = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(famName.getBytes()));
-                            parentSchemaName = PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName(famName.getBytes()));
-                        }
-                    } else {
-                        addColumnToTable(results, colName, famName, colKeyValues, columns, saltBucketNum != null);
-                    }
-                } 
-            }
+          results.clear();
+          scanner.next(results);
+          if (results.isEmpty()) {
+              break;
+          }
+          Cell colKv = results.get(LINK_TYPE_INDEX);
+          int colKeyLength = colKv.getRowLength();
+          PName colName = newPName(colKv.getRowArray(), colKv.getRowOffset() + offset, colKeyLength-offset);
+          int colKeyOffset = offset + colName.getBytes().length + 1;
+          PName famName = newPName(colKv.getRowArray(), colKv.getRowOffset() + colKeyOffset, colKeyLength-colKeyOffset);
+          if (isQualifierCounterKV(colKv)) {
+              Integer value = PInteger.INSTANCE.getCodec().decodeInt(colKv.getValueArray(), colKv.getValueOffset(), SortOrder.ASC);
+              cqCounter.setValue(famName.getString(), value);
+          } else if (Bytes.compareTo(LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length, colKv.getQualifierArray(), colKv.getQualifierOffset(), colKv.getQualifierLength())==0) {
+              LinkType linkType = LinkType.fromSerializedValue(colKv.getValueArray()[colKv.getValueOffset()]);
+              if (linkType == LinkType.INDEX_TABLE) {
+                  addIndexToTable(tenantId, schemaName, famName, tableName, clientTimeStamp, indexes);
+              } else if (linkType == LinkType.PHYSICAL_TABLE) {
+                  physicalTables.add(famName);
+              } else if (linkType == LinkType.PARENT_TABLE) {
+                  parentTableName = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(famName.getBytes()));
+                  parentSchemaName = PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName(famName.getBytes()));
+              }
+          } else {
+              addColumnToTable(results, colName, famName, colKeyValues, columns, saltBucketNum != null);
+          }
         }
         // Avoid querying the stats table because we're holding the rowLock here. Issuing an RPC to a remote
         // server while holding this lock is a bad idea and likely to cause contention.
@@ -1648,41 +1644,37 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         return rowLock;
     }
 
-    private static final byte[] PHYSICAL_TABLE_BYTES = new byte[] {PTable.LinkType.PHYSICAL_TABLE.getSerializedValue()};
+    private static final byte[] CHILD_TABLE_BYTES = new byte[] {PTable.LinkType.CHILD_TABLE.getSerializedValue()};
+
+    private void findAllChildViews(Region region, byte[] tenantId, byte[] schemaName, byte[] tableName, TableViewFinderResult result) throws IOException {
+        TableViewFinderResult currResult = findChildViews(region, tenantId, schemaName, tableName);
+        result.addResult(currResult);
+        for (Result viewResult : currResult.getResults()) {
+            byte[][] rowViewKeyMetaData = new byte[5][];
+            getVarChars(viewResult.getRow(), 5, rowViewKeyMetaData);
+            byte[] viewtenantId = rowViewKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
+            byte[] viewSchema = SchemaUtil.getSchemaNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            byte[] viewTable = SchemaUtil.getTableNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            findAllChildViews(region, viewtenantId, viewSchema, viewTable, result);
+        }
+    }
 
     /**
      * @param tableName parent table's name
      * Looks for whether child views exist for the table specified by table.
      * TODO: should we pass a timestamp here?
      */
-    private TableViewFinderResult findChildViews(Region region, byte[] tenantId, PTable table, byte[] linkTypeBytes) throws IOException {
-        byte[] schemaName = table.getSchemaName().getBytes();
-        byte[] tableName = table.getTableName().getBytes();
-        boolean isMultiTenant = table.isMultiTenant();
+    private TableViewFinderResult findChildViews(Region region, byte[] tenantId, byte[] schemaName, byte[] tableName) throws IOException {
         Scan scan = new Scan();
-        // If the table is multi-tenant, we need to check across all tenant_ids,
-        // so we can't constrain the row key. Otherwise, any views would have
-        // the same tenantId.
-        if (!isMultiTenant) {
-            byte[] startRow = ByteUtil.concat(tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY);
-            byte[] stopRow = ByteUtil.nextKey(startRow);
-            scan.setStartRow(startRow);
-            scan.setStopRow(stopRow);
-        }
-        SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, linkTypeBytes);
-        SingleColumnValueFilter tableTypeFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES,
-                CompareOp.EQUAL, PTableType.VIEW.getSerializedValue().getBytes());
-        tableTypeFilter.setFilterIfMissing(false);
+        byte[] startRow = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
+        byte[] stopRow = ByteUtil.nextKey(startRow);
+        scan.setStartRow(startRow);
+        scan.setStopRow(stopRow);
+        SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, CHILD_TABLE_BYTES);
         linkFilter.setFilterIfMissing(true);
-        byte[] suffix = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, SchemaUtil
-                .getPhysicalTableName(SchemaUtil.getTableNameAsBytes(schemaName, tableName), table.isNamespaceMapped())
-                .getName());
-        SuffixFilter rowFilter = new SuffixFilter(suffix);
-        FilterList filter = new FilterList(linkFilter,tableTypeFilter,rowFilter);
-        scan.setFilter(filter);
+        scan.setFilter(linkFilter);
         scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
-        scan.addColumn(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
-        scan.addColumn(TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
+        scan.addColumn(TABLE_FAMILY_BYTES, PARENT_TENANT_ID_BYTES);
         
         // Original region-only scanner modified due to PHOENIX-1208
         // RegionScanner scanner = region.getScanner(scan);
@@ -1709,7 +1701,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     }
                     results.add(result);
                 }
-                TableViewFinderResult tableViewFinderResult = new TableViewFinderResult(results, table);
+                TableViewFinderResult tableViewFinderResult = new TableViewFinderResult(results);
                 if (numOfChildViews > 0 && !allViewsInCurrentRegion) {
                     tableViewFinderResult.setAllViewsNotInSingleRegion();
                 }
@@ -1851,12 +1843,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         EnvironmentEdgeManager.currentTimeMillis(), null);
             }
 
-            // Only tables may have views, so prevent the running of this potentially
-            // expensive full table scan over the SYSTEM.CATALOG table unless it's needed.
             if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) {
                 // Handle any child views that exist
-                TableViewFinderResult tableViewFinderResult = findChildViews(region, tenantId, table,
-                        PHYSICAL_TABLE_BYTES);
+                TableViewFinderResult tableViewFinderResult = findChildViews(region, tenantId, table.getSchemaName().getBytes(), table.getTableName().getBytes());
                 if (tableViewFinderResult.hasViews()) {
                     if (isCascade) {
                         if (tableViewFinderResult.allViewsInMultipleRegions()) {
@@ -1867,11 +1856,11 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         } else if (tableViewFinderResult.allViewsInSingleRegion()) {
                             // Recursively delete views - safe as all the views as all in the same region
                             for (Result viewResult : tableViewFinderResult.getResults()) {
-                                byte[][] rowKeyMetaData = new byte[3][];
-                                getVarChars(viewResult.getRow(), 3, rowKeyMetaData);
-                                byte[] viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
-                                byte[] viewSchemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
-                                byte[] viewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
+                                byte[][] rowViewKeyMetaData = new byte[5][];
+                                getVarChars(viewResult.getRow(), 5, rowViewKeyMetaData);
+                                byte[] viewTenantId = rowViewKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
+                                byte[] viewSchemaName = SchemaUtil.getSchemaNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+                                byte[] viewName = SchemaUtil.getTableNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
                                 byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
                                 Delete delete = new Delete(viewKey, clientTimeStamp);
                                 rowsToDelete.add(delete);
@@ -1903,12 +1892,20 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 Cell kv = results.get(LINK_TYPE_INDEX);
                 int nColumns = getVarChars(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), 0, rowKeyMetaData);
                 if (nColumns == 5
-                        && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0
-                        && rowKeyMetaData[PhoenixDatabaseMetaData.INDEX_NAME_INDEX].length > 0
+                        && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0
                         && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(),
-                                LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0
-                        && LinkType.fromSerializedValue(kv.getValueArray()[kv.getValueOffset()]) == LinkType.INDEX_TABLE) {
-                    indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.INDEX_NAME_INDEX]);
+                                LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0) {
+                        LinkType linkType = LinkType.fromSerializedValue(kv.getValueArray()[kv.getValueOffset()]);
+                        if (rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0 && linkType == LinkType.INDEX_TABLE) {
+                            indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]);
+                        } else if (linkType == LinkType.PARENT_TABLE || linkType == LinkType.PHYSICAL_TABLE) {
+                            // delete parent->child link for views
+                            Cell parentTenantIdCell = MetaDataUtil.getCell(results, PhoenixDatabaseMetaData.PARENT_TENANT_ID_BYTES);
+                            PName parentTenantId = parentTenantIdCell!=null ? PNameFactory.newName(parentTenantIdCell.getValueArray(), parentTenantIdCell.getValueOffset(), parentTenantIdCell.getValueLength()) : null;
+                            byte[] linkKey = MetaDataUtil.getChildLinkKey(parentTenantId, table.getParentSchemaName(), table.getParentTableName(), table.getTenantId(), table.getName());
+                            Delete linkDelete = new Delete(linkKey, clientTimeStamp);
+                            rowsToDelete.add(linkDelete);
+                        }
                 }
                 // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
                 // FIXME: the version of the Delete constructor without the lock args was introduced
@@ -1920,7 +1917,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 scanner.next(results);
             } while (!results.isEmpty());
         }
-
+        
         // Recursively delete indexes
         for (byte[] indexName : indexNames) {
             byte[] indexKey = SchemaUtil.getTableKey(tenantId, schemaName, indexName);
@@ -2255,11 +2252,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             short deltaNumPkColsSoFar = 0;
             short columnsAddedToView = 0;
             short columnsAddedToBaseTable = 0;
-            byte[][] rowViewKeyMetaData = new byte[3][];
-            getVarChars(viewResult.getRow(), 3, rowViewKeyMetaData);
-            byte[] viewKey = SchemaUtil.getTableKey(rowViewKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX],
-                    rowViewKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX],
-                    rowViewKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
+            byte[][] rowViewKeyMetaData = new byte[5][];
+            getVarChars(viewResult.getRow(), 5, rowViewKeyMetaData);
+            byte[] tenantId = rowViewKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
+            byte[] schema = SchemaUtil.getSchemaNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            byte[] table = SchemaUtil.getTableNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            byte[] viewKey = SchemaUtil.getTableKey(tenantId, schema, table);
             
             // lock the rows corresponding to views so that no other thread can modify the view meta-data
             RowLock viewRowLock = acquireLock(region, viewKey, locks);
@@ -2580,13 +2578,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
         for (Result viewResult : childViewsResult.getResults()) {
             short numColsDeleted = 0;
-            byte[][] rowViewKeyMetaData = new byte[3][];
-            getVarChars(viewResult.getRow(), 3, rowViewKeyMetaData);
-            byte[] viewKey =
-                    SchemaUtil.getTableKey(
-                        rowViewKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX],
-                        rowViewKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX],
-                        rowViewKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
+            byte[][] rowViewKeyMetaData = new byte[5][];
+            getVarChars(viewResult.getRow(), 5, rowViewKeyMetaData);
+            byte[] viewTenantId = rowViewKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
+            byte[] viewSchemaName = SchemaUtil.getSchemaNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            byte[] viewName = SchemaUtil.getTableNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
 
             // lock the rows corresponding to views so that no other thread can modify the view
             // meta-data
@@ -2882,17 +2879,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                             schemaName, tableName);
                     // Size for worst case - all new columns are PK column
                     List<Mutation> mutationsForAddingColumnsToViews = Lists.newArrayListWithExpectedSize(tableMetaData.size() * ( 1 + table.getIndexes().size()));
-                    /*
-                     * If adding a column to a view, we don't want to propagate those meta-data changes to the child
-                     * view hierarchy. This is because our check of finding child views is expensive and we want making
-                     * meta-data changes to views to be light-weight. The side-effect of this change is that a child
-                     * won't have it's parent views columns i.e. it would have diverged itself from the parent view. See
-                     * https://issues.apache.org/jira/browse/PHOENIX-2051 for a proper way to fix the performance issue
-                     * and https://issues.apache.org/jira/browse/PHOENIX-2054 for enabling meta-data changes to a view
-                     * to be propagated to its view hierarchy.
-                     */
+                    // TODO propagate to grandchild views as well
                     if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
-                        TableViewFinderResult childViewsResult = findChildViews(region, tenantId, table, PHYSICAL_TABLE_BYTES);
+                        TableViewFinderResult childViewsResult = new TableViewFinderResult();
+                        findAllChildViews(region, tenantId, table.getSchemaName().getBytes(), table.getTableName().getBytes(), childViewsResult);
                         if (childViewsResult.hasViews()) {
                             /* 
                              * Dis-allow if:
@@ -3166,13 +3156,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     List<Mutation> additionalTableMetaData = Lists.newArrayList();
                     
                     PTableType type = table.getType();
-                    // Only tables may have views, so prevent the running of this potentially
-                    // expensive full table scan over the SYSTEM.CATALOG table unless it's needed.
-                    // In the case of a view, we allow a column to be dropped without checking for
-                    // child views, but in the future we'll allow it and propagate it as necessary.
+                    // TODO propagate to grandchild views as well
                     if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
-                        TableViewFinderResult childViewsResult = 
-                                findChildViews(region, tenantId, table, PHYSICAL_TABLE_BYTES);
+                        TableViewFinderResult childViewsResult = new TableViewFinderResult();
+                        findAllChildViews(region, tenantId, table.getSchemaName().getBytes(), table.getTableName().getBytes(), childViewsResult);
                         if (childViewsResult.hasViews()) {
                             MetaDataMutationResult mutationResult =
                                     dropColumnsFromChildViews(region, table,
@@ -3560,21 +3547,16 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 
         private List<Result> results = Lists.newArrayList();
         private boolean allViewsNotInSingleRegion = false;
-        private PTable table;
 
-        private TableViewFinderResult(List<Result> results, PTable table) {
+        private TableViewFinderResult() {
+        }
+        
+        private TableViewFinderResult(List<Result> results) {
             this.results = results;
-            this.table = table;
         }
-
+        
         public boolean hasViews() {
-            int localIndexesCount = 0;
-            for(PTable index : table.getIndexes()) {
-                if(index.getIndexType().equals(IndexType.LOCAL)) {
-                    localIndexesCount++;
-                }
-            }
-            return results.size()-localIndexesCount > 0;
+            return !results.isEmpty();
         }
 
         private void setAllViewsNotInSingleRegion() {
@@ -3598,6 +3580,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         private boolean allViewsInMultipleRegions() {
             return results.size() > 0 && allViewsNotInSingleRegion;
         }
+        
+        private void addResult(TableViewFinderResult result) {
+            this.results.addAll(result.getResults());
+            if (result.allViewsInMultipleRegions()) {
+                this.setAllViewsNotInSingleRegion();
+            }
+        }
     }
 
     @Override
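
Editorial note, not part of the patch: findAllChildViews() above has to recurse because a view's own children are linked under that view's key, not under the base table. A client-side sketch of the same walk over SQL, with a hypothetical helper name and ignoring tenant-specific and schema-less corner cases:

    // Sketch only: recursively collect the view hierarchy below a parent, following the
    // new CHILD_TABLE (LINK_TYPE = 4) rows. COLUMN_FAMILY holds the child's full name.
    static void collectChildViews(java.sql.Connection conn, String schema, String table,
            java.util.List<String> out) throws java.sql.SQLException {
        try (java.sql.PreparedStatement stmt = conn.prepareStatement(
                "SELECT COLUMN_FAMILY FROM SYSTEM.CATALOG " +
                "WHERE TABLE_SCHEM = ? AND TABLE_NAME = ? AND LINK_TYPE = 4")) {
            stmt.setString(1, schema);
            stmt.setString(2, table);
            try (java.sql.ResultSet rs = stmt.executeQuery()) {
                while (rs.next()) {
                    String childFullName = rs.getString(1);      // e.g. "MY_SCHEMA.MY_VIEW"
                    out.add(childFullName);
                    int dot = childFullName.indexOf('.');
                    String childSchema = dot < 0 ? null : childFullName.substring(0, dot);
                    String childTable = dot < 0 ? childFullName : childFullName.substring(dot + 1);
                    // A child view can itself have child views (grandchildren), so recurse.
                    collectChildViews(conn, childSchema, childTable, out);
                }
            }
        }
    }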

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f8153bd/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index f2abbdb..93bba74 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -86,8 +86,9 @@ public abstract class MetaDataProtocol extends MetaDataService {
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_8_1 = MIN_TABLE_TIMESTAMP + 18;
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0 = MIN_TABLE_TIMESTAMP + 20;
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 = MIN_TABLE_TIMESTAMP + 25;
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0 = MIN_TABLE_TIMESTAMP + 26;
     // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the MIN_SYSTEM_TABLE_TIMESTAMP_* constants
-    public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0;
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
     
     // ALWAYS update this map whenever rolling out a new release (major, minor or patch release).

     // Key is the SYSTEM.CATALOG timestamp for the version and value is the version string.
@@ -103,6 +104,7 @@ public abstract class MetaDataProtocol extends MetaDataService {
         TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0, "4.8.x");
         TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0, "4.9.x");
         TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0, "4.10.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0, "4.11.x");
     }
     
     public static final String CURRENT_CLIENT_VERSION = PHOENIX_MAJOR_VERSION + "." + PHOENIX_MINOR_VERSION + "." + PHOENIX_PATCH_NUMBER; 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f8153bd/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 0e62164..e3a206c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -78,7 +78,6 @@ import com.google.common.collect.Lists;
  * 
  */
 public class PhoenixDatabaseMetaData implements DatabaseMetaData {
-    public static final int INDEX_NAME_INDEX = 4; // Shared with FAMILY_NAME_INDEX
     public static final int FAMILY_NAME_INDEX = 4;
     public static final int COLUMN_NAME_INDEX = 3;
     public static final int TABLE_NAME_INDEX = 2;
@@ -124,6 +123,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
     public static final byte[] PHYSICAL_NAME_BYTES = Bytes.toBytes(PHYSICAL_NAME);
 
     public static final String COLUMN_FAMILY = "COLUMN_FAMILY";
+    public static final byte[] COLUMN_FAMILY_BYTES = Bytes.toBytes(COLUMN_FAMILY);
     public static final String TABLE_CAT = "TABLE_CAT";
     public static final String TABLE_CATALOG = "TABLE_CATALOG";
     public static final String TABLE_SCHEM = "TABLE_SCHEM";
@@ -514,6 +514,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
         if (colPattern == null || colPattern.length() == 0) {
             appendConjunction(where);
             where.append(COLUMN_NAME + " is not null" );
+            appendConjunction(where);
+            where.append(LINK_TYPE + " is null" );
         }
         boolean isTenantSpecificConnection = connection.getTenantId() != null;
         if (isTenantSpecificConnection) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f8153bd/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 3e4f3b9..1d0dd9f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -58,6 +58,7 @@ import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS;
 import static org.apache.phoenix.util.UpgradeUtil.getSysCatalogSnapshotName;
+import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_11_0;
 import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 
 import java.io.IOException;
@@ -2768,6 +2769,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0);
                     clearCache();
                 }
+                if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0) {
+                    upgradeTo4_11_0(metaConnection);
+                }
             }
 
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f8153bd/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index d09284f..50ff64b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -313,7 +313,16 @@ public class MetaDataClient {
             COLUMN_QUALIFIER_COUNTER + 
             ") VALUES (?, ?, ?, ?, ?)";
 
-    public static final String INCREMENT_SEQ_NUM =
+    private static final String CREATE_CHILD_LINK =
+            "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
+                    TENANT_ID + "," +
+                    TABLE_SCHEM + "," +
+                    TABLE_NAME + "," +
+                    COLUMN_NAME + "," +
+                    COLUMN_FAMILY + "," +
+                    LINK_TYPE + 
+                    ") VALUES (?, ?, ?, ?, ?, ?)";
+    private static final String INCREMENT_SEQ_NUM =
             "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"(
" +
                     TENANT_ID + "," +
                     TABLE_SCHEM + "," +
@@ -2063,9 +2072,7 @@ public class MetaDataClient {
                     }
                     pkColumns = newLinkedHashSet(parent.getPKColumns());
 
-                    // Add row linking from view to its parent table
-                    // FIXME: not currently used, but see PHOENIX-1367
-                    // as fixing that will require it's usage.
+                    // Add row linking view to its parent 
                     PreparedStatement linkStatement = connection.prepareStatement(CREATE_VIEW_LINK);
                     linkStatement.setString(1, tenantIdStr);
                     linkStatement.setString(2, schemaName);
@@ -2074,6 +2081,15 @@ public class MetaDataClient {
                     linkStatement.setByte(5, LinkType.PARENT_TABLE.getSerializedValue());
                     linkStatement.setString(6, parent.getTenantId() == null ? null : parent.getTenantId().getString());
                     linkStatement.execute();
+                    // Add row linking parent to view
+                    linkStatement = connection.prepareStatement(CREATE_CHILD_LINK);
+                    linkStatement.setString(1, parent.getTenantId() == null ? null : parent.getTenantId().getString());
+                    linkStatement.setString(2, parent.getSchemaName() == null ? null : parent.getSchemaName().getString());
+                    linkStatement.setString(3, parent.getTableName().getString());
+                    linkStatement.setString(4, tenantIdStr);
+                    linkStatement.setString(5, SchemaUtil.getTableName(schemaName, tableName));
+                    linkStatement.setByte(6, LinkType.CHILD_TABLE.getSerializedValue());
+                    linkStatement.execute();
                 }
             } else {
                 columns = new LinkedHashMap<PColumn,PColumn>(colDefs.size());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f8153bd/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index 91a41a3..24a494f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -141,14 +141,18 @@ public interface PTable extends PMetaDataEntity {
          */
         INDEX_TABLE((byte)1),
         /**
-         * Link from a view to its physical table
+         * Link from a view or index to its physical table
          */
         PHYSICAL_TABLE((byte)2),
         /**
          * Link from a view to its parent table
          */
-        PARENT_TABLE((byte)3);
-        
+        PARENT_TABLE((byte)3),
+        /**
+         * Link from a parent table to its child view
+         */
+        CHILD_TABLE((byte)4);
+
         private final byte[] byteValue;
         private final byte serializedValue;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f8153bd/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 456f9d4..72567a7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -306,6 +306,23 @@ public class MetaDataUtil {
         return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY, schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : schemaName, QueryConstants.SEPARATOR_BYTE_ARRAY, tableName, QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY, indexName);
     }
     
+    public static byte[] getChildLinkKey(PName parentTenantId, PName parentSchemaName, PName parentTableName, PName viewTenantId, PName viewName) {
+        return ByteUtil.concat(parentTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentTenantId.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, 
+                        parentSchemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentSchemaName.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, 
+                        parentTableName.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY,
+                        viewTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : viewTenantId.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, 
+                        viewName.getBytes());
+    }
+    
+    public static Cell getCell(List<Cell> cells, byte[] cq) {
+        for (Cell cell : cells) {
+            if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cq, 0, cq.length) == 0) {
+                return cell;
+            }
+        }
+        return null;
+    }
+    
     public static boolean isMultiTenant(Mutation m, KeyValueBuilder builder, ImmutableBytesWritable ptr) {
         if (getMutationValue(m, PhoenixDatabaseMetaData.MULTI_TENANT_BYTES, builder, ptr)) {
             return Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr));
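
Editorial note, not part of the patch: getChildLinkKey() above concatenates the parent table's key with the child view's identity, so every child link for a given parent sorts directly after that parent's own rows in SYSTEM.CATALOG. A minimal sketch with hypothetical names:

    // Sketch only -- names are assumptions. The produced key starts with the parent's
    // table key (tenant \0 schema \0 table), which is exactly the prefix findChildViews()
    // bounds its scan with (startRow = parent key, stopRow = ByteUtil.nextKey(startRow)).
    byte[] childLinkKey = MetaDataUtil.getChildLinkKey(
            null,                                        // parent tenant id (global table)
            PNameFactory.newName("MY_SCHEMA"),           // parent schema
            PNameFactory.newName("MY_TABLE"),            // parent table
            PNameFactory.newName("TENANT_1"),            // child view's tenant id
            PNameFactory.newName("MY_SCHEMA.MY_VIEW"));  // child view's full name
    // childLinkKey = \x00 MY_SCHEMA \x00 MY_TABLE \x00 TENANT_1 \x00 MY_SCHEMA.MY_VIEW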

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f8153bd/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index 62fefc7..02e4ec2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -903,6 +903,7 @@ public class UpgradeUtil {
                         "TABLE_SCHEM %s " +
                         "AND TABLE_NAME = ? " +
                         "AND COLUMN_NAME IS NOT NULL " +
+                        "AND LINK_TYPE IS NULL " +
                         "ORDER BY " + 
                         ORDINAL_POSITION;
 
@@ -1073,6 +1074,78 @@ public class UpgradeUtil {
         }
     }
     
+    /**
+     * Upgrade the metadata in the catalog table to enable adding columns to tables with views
+     * @param oldMetaConnection caller should take care of closing the passed connection appropriately
+     * @throws SQLException
+     */
+    public static void upgradeTo4_11_0(PhoenixConnection oldMetaConnection) throws SQLException {
+        PhoenixConnection metaConnection = null;
+        try {
+            // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG
+            metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
+            logger.info("Upgrading metadata to add parent to child links for views");
+            metaConnection.commit();
+            //     physical table 
+            //         |  
+            //     child view    
+            //         |
+            //     grand child view
+            // Create parent table to child view CHILD link. As the PARENT link from child view to physical table is not there (it gets overwritten with the PHYSICAL link) use the PHYSICAL link instead.
+            // We need to filter out grand child views PHYSICAL links while running this query
+            String createChildLink = "UPSERT INTO SYSTEM.CATALOG(TENANT_ID,TABLE_SCHEM,TABLE_NAME,COLUMN_NAME,COLUMN_FAMILY,LINK_TYPE)" +
+                                        "SELECT PARENT_TENANT_ID," + 
+                                        "       CASE INSTR(COLUMN_FAMILY,'.')" +
+                                        "              WHEN 0 THEN NULL" + 
+                                        "              ELSE REGEXP_SUBSTR(COLUMN_FAMILY,'[^\\.]+')" + 
+                                        "       END AS PARENT_SCHEMA," + 
+                                        "       CASE INSTR(COLUMN_FAMILY,'.')" + 
+                                        "              WHEN 0 THEN COLUMN_FAMILY" + 
+                                        "              ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1)" + 
+                                        "       END AS PARENT_TABLE," + 
+                                        "       TENANT_ID," + 
+                                        "       CASE WHEN TABLE_SCHEM IS NULL THEN TABLE_NAME" + 
+                                        "            ELSE TABLE_SCHEM||'.'||TABLE_NAME" +
+                                        "       END AS VIEW_NAME," + 
+                                        "       4 AS LINK_TYPE " + 
+                                        "FROM SYSTEM.CATALOG(PARENT_TENANT_ID VARCHAR)" +
+                                        "WHERE LINK_TYPE = 2 " +
+                                        "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) NOT IN (   " +
+                                        "       SELECT TENANT_ID, " +
+                                        "              TABLE_SCHEM, " +
+                                        "              TABLE_NAME " +
+                                        "       FROM   SYSTEM.CATALOG " +
+                                        "       WHERE  LINK_TYPE = 3 )";
+            metaConnection.createStatement().execute(createChildLink);
+            metaConnection.commit();
+            // Create child view to grand child view CHILD link using grand child view to child view PARENT link.
+            String createGrandChildLink = "UPSERT INTO SYSTEM.CATALOG(TENANT_ID,TABLE_SCHEM,TABLE_NAME,COLUMN_NAME,COLUMN_FAMILY,LINK_TYPE)" +
+                                        "SELECT PARENT_TENANT_ID," + 
+                                        "       CASE INSTR(COLUMN_FAMILY,'.')" +
+                                        "              WHEN 0 THEN NULL" + 
+                                        "              ELSE REGEXP_SUBSTR(COLUMN_FAMILY,'[^\\.]+')" + 
+                                        "       END AS PARENT_SCHEMA," + 
+                                        "       CASE INSTR(COLUMN_FAMILY,'.')" + 
+                                        "              WHEN 0 THEN COLUMN_FAMILY" + 
+                                        "              ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1)" + 
+                                        "       END AS PARENT_TABLE," + 
+                                        "       TENANT_ID," + 
+                                        "       CASE WHEN TABLE_SCHEM IS NULL THEN TABLE_NAME" + 
+                                        "            ELSE TABLE_SCHEM||'.'||TABLE_NAME" +
+                                        "       END AS VIEW_NAME," + 
+                                        "       4 AS LINK_TYPE " + 
+                                        "FROM SYSTEM.CATALOG(PARENT_TENANT_ID VARCHAR)" +
+                                        "WHERE LINK_TYPE = 3 ";
+            metaConnection.createStatement().execute(createGrandChildLink);
+            metaConnection.commit();
+            metaConnection.getQueryServices().clearCache();
+        } finally {
+            if (metaConnection != null) {
+                metaConnection.close();
+            }
+        }
+    }
+    
     private static void upsertBaseColumnCountInHeaderRow(PhoenixConnection metaConnection,
             String tenantId, String schemaName, String viewOrTableName, int baseColumnCount)
             throws SQLException {

