phoenix-commits mailing list archives

From: jeffr...@apache.org
Subject: [05/50] [abbrv] git commit: PHOENIX-91 Use LRU size-based cache on ConnectionQueryServicesImpl (JamesTaylor)
Date: Wed, 05 Mar 2014 22:54:51 GMT
PHOENIX-91 Use LRU size-based cache on ConnectionQueryServicesImpl (JamesTaylor)
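
For context, this commit bounds the client-side metadata cache: PMetaDataImpl is now constructed with an initial table capacity and a maximum size in bytes (see newEmptyMetaData() in the ConnectionQueryServicesImpl hunk below), and PhoenixConnection reaches it through the renamed getMetaDataCache(). Below is a minimal sketch of the general size-bounded LRU technique, built on a LinkedHashMap in access order; it is not Phoenix's actual PMetaDataImpl, which estimates entry sizes with SizedUtil and also exposes a Pruner callback.

    import java.util.Iterator;
    import java.util.LinkedHashMap;
    import java.util.Map;

    // Sketch of a size-bounded LRU cache. Entries carry a caller-supplied byte
    // estimate; once the running total exceeds maxSizeBytes, least-recently-used
    // entries are evicted first.
    class SizeBoundedLruCache<K, V> {
        private static final class Sized<V> {
            final V value;
            final long bytes;
            Sized(V value, long bytes) { this.value = value; this.bytes = bytes; }
        }

        private final long maxSizeBytes;
        private long totalBytes;
        // accessOrder=true: iteration order runs from least- to most-recently-used
        private final LinkedHashMap<K, Sized<V>> map =
                new LinkedHashMap<K, Sized<V>>(16, 0.75f, true);

        SizeBoundedLruCache(long maxSizeBytes) { this.maxSizeBytes = maxSizeBytes; }

        synchronized V get(K key) {
            Sized<V> s = map.get(key); // get() refreshes the entry's LRU position
            return s == null ? null : s.value;
        }

        synchronized void put(K key, V value, long bytes) {
            Sized<V> old = map.put(key, new Sized<V>(value, bytes));
            totalBytes += bytes - (old == null ? 0 : old.bytes);
            // Evict from the LRU end until the cache is back under budget.
            Iterator<Map.Entry<K, Sized<V>>> it = map.entrySet().iterator();
            while (totalBytes > maxSizeBytes && it.hasNext()) {
                Map.Entry<K, Sized<V>> eldest = it.next();
                totalBytes -= eldest.getValue().bytes;
                it.remove();
            }
        }
    }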


Project: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/commit/c00f6f25
Tree: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/tree/c00f6f25
Diff: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/diff/c00f6f25

Branch: refs/heads/4.0
Commit: c00f6f2506cd61df4f728827520ed879f9cfd5eb
Parents: 48bc5ba
Author: James Taylor <jamestaylor@apache.org>
Authored: Sat Mar 1 14:13:20 2014 -0800
Committer: James Taylor <jamestaylor@apache.org>
Committed: Sat Mar 1 14:13:20 2014 -0800

----------------------------------------------------------------------
 .../phoenix/compile/CreateIndexCompiler.java    |   2 +-
 .../phoenix/compile/CreateTableCompiler.java    |   2 +-
 .../apache/phoenix/compile/DeleteCompiler.java  |   2 +-
 .../apache/phoenix/compile/FromCompiler.java    | 160 ++++++++---------
 .../phoenix/compile/ProjectionCompiler.java     |   4 +-
 .../apache/phoenix/compile/QueryCompiler.java   |   4 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |   4 +-
 .../apache/phoenix/execute/AggregatePlan.java   |   2 +-
 .../apache/phoenix/execute/BasicQueryPlan.java  |  10 --
 .../apache/phoenix/execute/MutationState.java   |   2 +-
 .../org/apache/phoenix/execute/ScanPlan.java    |   2 +-
 .../apache/phoenix/jdbc/PhoenixConnection.java  |  47 +++--
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  |   1 +
 .../apache/phoenix/jdbc/PhoenixStatement.java   |   2 +-
 .../apache/phoenix/optimize/QueryOptimizer.java |   4 +-
 .../phoenix/parse/AlterIndexStatement.java      |   2 +-
 .../phoenix/parse/AlterTableStatement.java      |   2 +-
 .../phoenix/parse/CreateIndexStatement.java     |   2 +-
 .../apache/phoenix/parse/DeleteStatement.java   |   2 +-
 .../phoenix/parse/SingleTableSQLStatement.java  |  37 ----
 .../phoenix/parse/SingleTableStatement.java     |  37 ++++
 .../apache/phoenix/parse/UpsertStatement.java   |   2 +-
 .../phoenix/query/ConnectionQueryServices.java  |   2 +
 .../query/ConnectionQueryServicesImpl.java      |  27 +--
 .../query/ConnectionlessQueryServicesImpl.java  |  10 +-
 .../apache/phoenix/query/QueryConstants.java    |   5 +-
 .../org/apache/phoenix/query/QueryServices.java |   3 +-
 .../phoenix/query/QueryServicesOptions.java     |  19 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  12 +-
 .../org/apache/phoenix/schema/PMetaData.java    |  15 +-
 .../apache/phoenix/schema/PMetaDataImpl.java    | 174 ++++++++++++-------
 .../org/apache/phoenix/schema/PTableKey.java    |   5 +
 .../org/apache/phoenix/util/PhoenixRuntime.java |   2 +-
 .../org/apache/phoenix/util/SchemaUtil.java     |   2 +-
 .../java/org/apache/phoenix/util/SizedUtil.java |   4 +-
 .../phoenix/compile/JoinQueryCompilerTest.java  |   2 +-
 .../phoenix/compile/QueryCompilerTest.java      |   4 +-
 .../phoenix/compile/ViewCompilerTest.java       |   5 +-
 .../apache/phoenix/end2end/AlterTableTest.java  |   2 +-
 ...aultParallelIteratorsRegionSplitterTest.java |   2 +-
 ...RangeParallelIteratorRegionSplitterTest.java |   2 +-
 .../end2end/index/ImmutableIndexTest.java       |   2 +-
 .../end2end/index/IndexMetadataTest.java        |   4 +-
 .../phoenix/end2end/index/SaltedIndexTest.java  |   2 +-
 .../phoenix/index/IndexMaintainerTest.java      |   4 +-
 .../query/BaseConnectionlessQueryTest.java      |   7 +-
 .../phoenix/query/QueryServicesTestImpl.java    |   7 +-
 .../phoenix/schema/PMetaDataImplTest.java       | 115 ++++++++++++
 .../apache/phoenix/schema/RowKeySchemaTest.java |   2 +-
 .../phoenix/schema/RowKeyValueAccessorTest.java |   2 +-
 50 files changed, 487 insertions(+), 287 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java
index 5701e28..0c6b808 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java
@@ -42,7 +42,7 @@ public class CreateIndexCompiler {
 
     public MutationPlan compile(final CreateIndexStatement create) throws SQLException {
         final PhoenixConnection connection = statement.getConnection();
-        final ColumnResolver resolver = FromCompiler.getResolver(create, connection);
+        final ColumnResolver resolver = FromCompiler.getResolverForMutation(create, connection);
         Scan scan = new Scan();
         final StatementContext context = new StatementContext(statement, resolver, scan);
         ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
index b6f695e..51af286 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
@@ -73,7 +73,7 @@ public class CreateTableCompiler {
 
     public MutationPlan compile(final CreateTableStatement create) throws SQLException {
         final PhoenixConnection connection = statement.getConnection();
-        ColumnResolver resolver = FromCompiler.getResolver(create, connection);
+        ColumnResolver resolver = FromCompiler.getResolverForCreation(create, connection);
         PTableType type = create.getTableType();
         PhoenixConnection connectionToBe = connection;
         PTable parentToBe = null;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index a5f43f8..ae61375 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -180,7 +180,7 @@ public class DeleteCompiler {
         final PhoenixConnection connection = statement.getConnection();
         final boolean isAutoCommit = connection.getAutoCommit();
         final ConnectionQueryServices services = connection.getQueryServices();
-        final ColumnResolver resolver = FromCompiler.getResolver(delete, connection);
+        final ColumnResolver resolver = FromCompiler.getResolverForMutation(delete, connection);
         final TableRef tableRef = resolver.getTables().get(0);
         final PTable table = tableRef.getTable();
         if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index f745419..cfac375 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -35,7 +35,7 @@ import org.apache.phoenix.parse.DerivedTableNode;
 import org.apache.phoenix.parse.JoinTableNode;
 import org.apache.phoenix.parse.NamedTableNode;
 import org.apache.phoenix.parse.SelectStatement;
-import org.apache.phoenix.parse.SingleTableSQLStatement;
+import org.apache.phoenix.parse.SingleTableStatement;
 import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.parse.TableNode;
 import org.apache.phoenix.parse.TableNodeVisitor;
@@ -97,7 +97,7 @@ public class FromCompiler {
         }
     };
 
-    public static ColumnResolver getResolver(final CreateTableStatement statement, final PhoenixConnection connection)
+    public static ColumnResolver getResolverForCreation(final CreateTableStatement statement, final PhoenixConnection connection)
             throws SQLException {
         TableName baseTable = statement.getBaseTableName();
         if (baseTable == null) {
@@ -106,7 +106,7 @@ public class FromCompiler {
         NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.<ColumnDef>emptyList());
         // Always use non-tenant-specific connection here
         try {
-            SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, false);
+            SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true);
             return visitor;
         } catch (TableNotFoundException e) {
             // Used for mapped VIEW, since we won't be able to resolve that.
@@ -143,11 +143,11 @@ public class FromCompiler {
      * @throws TableNotFoundException
      *             if table name not found in schema
      */
-    public static ColumnResolver getResolver(SelectStatement statement, PhoenixConnection connection)
+    public static ColumnResolver getResolverForQuery(SelectStatement statement, PhoenixConnection connection)
     		throws SQLException {
     	List<TableNode> fromNodes = statement.getFrom();
         if (fromNodes.size() == 1)
-            return new SingleTableColumnResolver(connection, (NamedTableNode)fromNodes.get(0), false);
+            return new SingleTableColumnResolver(connection, (NamedTableNode)fromNodes.get(0), true);
 
         MultiTableColumnResolver visitor = new MultiTableColumnResolver(connection);
         for (TableNode node : fromNodes) {
@@ -157,20 +157,16 @@ public class FromCompiler {
     }
 
     public static ColumnResolver getResolver(NamedTableNode tableNode, PhoenixConnection connection) throws SQLException {
-        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, false);
+        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true);
         return visitor;
     }
     
-    public static ColumnResolver getResolver(SingleTableSQLStatement statement, PhoenixConnection connection,
-            List<ColumnDef> dyn_columns) throws SQLException {
-        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, statement.getTable(), true);
-        return visitor;
-    }
-
-    public static ColumnResolver getResolver(SingleTableSQLStatement statement, PhoenixConnection connection)
+    public static ColumnResolver getResolverForMutation(SingleTableStatement statement, PhoenixConnection connection)
             throws SQLException {
-        return getResolver(statement, connection, Collections.<ColumnDef>emptyList());
+        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, statement.getTable(), false);
+        return visitor;
     }
+    
 
     private static class SingleTableColumnResolver extends BaseColumnResolver {
         	private final List<TableRef> tableRefs;
@@ -191,58 +187,10 @@ public class FromCompiler {
            tableRefs = ImmutableList.of(new TableRef(alias, theTable, timeStamp, !table.getDynamicColumns().isEmpty()));
        }
        
-        public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode table, boolean updateCacheOnlyIfAutoCommit) throws SQLException {
+        public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, boolean updateCacheImmediately) throws SQLException {
             super(connection);
-            alias = table.getAlias();
-            TableName tableNameNode = table.getName();
-            String schemaName = tableNameNode.getSchemaName();
-            String tableName = tableNameNode.getTableName();
-            SQLException sqlE = null;
-            long timeStamp = QueryConstants.UNSET_TIMESTAMP;
-            TableRef tableRef = null;
-            boolean retry = true;
-            boolean didRetry = false;
-            MetaDataMutationResult result = null;
-            String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
-            PName tenantId = connection.getTenantId();
-            while (true) {
-                try {
-                    PTable theTable = null;
-                    if (!updateCacheOnlyIfAutoCommit || connection.getAutoCommit()) {
-                        retry = false; // No reason to retry after this
-                        result = client.updateCache(schemaName, tableName);
-                        timeStamp = result.getMutationTime();
-                        theTable = result.getTable();
-                    } 
-                    if (theTable == null) {
-                        theTable = connection.getPMetaData().getTable(new PTableKey(tenantId, fullTableName));
-                    }
-                    // If dynamic columns have been specified add them to the table declaration
-                    if (!table.getDynamicColumns().isEmpty()) {
-                        theTable = this.addDynamicColumns(table.getDynamicColumns(), theTable);
-                    }
-                    tableRef = new TableRef(alias, theTable, timeStamp, !table.getDynamicColumns().isEmpty());
-                    if (didRetry && logger.isDebugEnabled()) {
-                        logger.debug("Re-resolved stale table " + fullTableName + " with seqNum " + tableRef.getTable().getSequenceNumber() + " at timestamp " + tableRef.getTable().getTimeStamp() + " with " + tableRef.getTable().getColumns().size() + " columns: " + tableRef.getTable().getColumns());
-                    }
-                    break;
-                } catch (TableNotFoundException e) {
-                    if (tenantId != null) { // Check with null tenantId next
-                        tenantId = null;
-                        continue;
-                    }
-                    sqlE = new TableNotFoundException(e,timeStamp);
-                }
-                // If we haven't already tried, update our cache and retry
-                // Only loop back if the cache was updated
-                if (retry && (result = client.updateCache(schemaName, tableName)).wasUpdated()) {
-                    timeStamp = result.getMutationTime();
-                    retry = false;
-                    didRetry = true;
-                    continue;
-                }
-                throw sqlE;
-            }
+            alias = tableNode.getAlias();
+            TableRef tableRef = createTableRef(tableNode, updateCacheImmediately);
             tableRefs = ImmutableList.of(tableRef);
         }
 
@@ -313,6 +261,61 @@ public class FromCompiler {
             this.client = new MetaDataClient(connection);
         }
 
+        protected TableRef createTableRef(NamedTableNode tableNode, boolean updateCacheImmediately) throws SQLException {
+            String alias = tableNode.getAlias();
+            String tableName = tableNode.getName().getTableName();
+            String schemaName = tableNode.getName().getSchemaName();
+            List<ColumnDef> dynamicColumns = tableNode.getDynamicColumns();
+            SQLException sqlE = null;
+            long timeStamp = QueryConstants.UNSET_TIMESTAMP;
+            TableRef tableRef = null;
+            boolean retry = true;
+            boolean didRetry = false;
+            MetaDataMutationResult result = null;
+            String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+            PName tenantId = connection.getTenantId();
+            while (true) {
+                try {
+                    PTable theTable = null;
+                    if (updateCacheImmediately || connection.getAutoCommit()) {
+                        retry = false; // No reason to retry after this
+                        result = client.updateCache(schemaName, tableName);
+                        timeStamp = result.getMutationTime();
+                        theTable = result.getTable();
+                    } 
+                    if (theTable == null) {
+                        theTable = connection.getMetaDataCache().getTable(new PTableKey(tenantId, fullTableName));
+                    }
+                    // If dynamic columns have been specified add them to the table declaration
+                    if (!dynamicColumns.isEmpty()) {
+                        theTable = this.addDynamicColumns(dynamicColumns, theTable);
+                    }
+                    tableRef = new TableRef(alias, theTable, timeStamp, !dynamicColumns.isEmpty());
+                    if (didRetry && logger.isDebugEnabled()) {
+                        logger.debug("Re-resolved stale table " + fullTableName + " with seqNum " + tableRef.getTable().getSequenceNumber() + " at timestamp " + tableRef.getTable().getTimeStamp() + " with " + tableRef.getTable().getColumns().size() + " columns: " + tableRef.getTable().getColumns());
+                    }
+                    break;
+                } catch (TableNotFoundException e) {
+                    if (tenantId != null) { // Check with null tenantId next
+                        tenantId = null;
+                        continue;
+                    }
+                    sqlE = new TableNotFoundException(e,timeStamp);
+                }
+                // If we haven't already tried, update our cache and retry.
+                // We always attempt to update the cache in the event of a retry (i.e. TableNotFoundException)
+                // Only loop back if the cache was updated
+                if (retry && (result = client.updateCache(schemaName, tableName)).wasUpdated()) {
+                    timeStamp = result.getMutationTime();
+                    retry = false;
+                    didRetry = true;
+                    continue;
+                }
+                throw sqlE;
+            }
+            return tableRef;
+        }
+        
         protected PTable addDynamicColumns(List<ColumnDef> dynColumns, PTable theTable)
                 throws SQLException {
             if (!dynColumns.isEmpty()) {
@@ -364,31 +367,10 @@ public class FromCompiler {
             joinNode.getTable().accept(this);
         }
 
-        private TableRef createTableRef(String alias, String schemaName, String tableName,
-                List<ColumnDef> dynamicColumnDefs) throws SQLException {
-            MetaDataMutationResult result = client.updateCache(schemaName, tableName);
-            long timeStamp = result.getMutationTime();
-            String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
-            PTable theTable = connection.getPMetaData().getTable(new PTableKey(connection.getTenantId(), fullTableName));
-
-            // If dynamic columns have been specified add them to the table declaration
-            if (!dynamicColumnDefs.isEmpty()) {
-                theTable = this.addDynamicColumns(dynamicColumnDefs, theTable);
-            }
-            TableRef tableRef = new TableRef(alias, theTable, timeStamp, !dynamicColumnDefs.isEmpty());
-            return tableRef;
-        }
-
-
         @Override
-        public void visit(NamedTableNode namedTableNode) throws SQLException {
-            String tableName = namedTableNode.getName().getTableName();
-            String schemaName = namedTableNode.getName().getSchemaName();
-
-            String alias = namedTableNode.getAlias();
-            List<ColumnDef> dynamicColumnDefs = namedTableNode.getDynamicColumns();
-
-            TableRef tableRef = createTableRef(alias, schemaName, tableName, dynamicColumnDefs);
+        public void visit(NamedTableNode tableNode) throws SQLException {
+            String alias = tableNode.getAlias();
+            TableRef tableRef = createTableRef(tableNode, true);
             PTable theTable = tableRef.getTable();
 
             if (alias != null) {

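The net effect of the FromCompiler changes above: the overloaded getResolver() entry points are split by intent (getResolverForQuery, getResolverForMutation, getResolverForCreation), and the resolution-plus-retry loop is consolidated into createTableRef(). Queries resolve with updateCacheImmediately=true; mutations pass false, deferring the server round trip unless auto-commit is on. A simplified sketch of that resolve-then-retry pattern follows; lookupInClientCache() is a hypothetical stand-in for the PMetaData lookup, and the tenant-id fallback in the real code is omitted.

    PTable resolveTable(String schemaName, String tableName, boolean updateCacheImmediately)
            throws SQLException {
        boolean triedUpdate = false;
        while (true) {
            try {
                if (updateCacheImmediately || connection.getAutoCommit()) {
                    client.updateCache(schemaName, tableName); // fetch latest metadata now
                    triedUpdate = true;                        // nothing left to retry with
                }
                return lookupInClientCache(schemaName, tableName); // may throw TableNotFoundException
            } catch (TableNotFoundException e) {
                // On a miss, refresh the cache once and loop back; re-throw only
                // if the server had nothing newer than what we already had.
                if (!triedUpdate && client.updateCache(schemaName, tableName).wasUpdated()) {
                    triedUpdate = true;
                    continue;
                }
                throw e;
            }
        }
    }
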
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
index 07d3ecf..d3367cc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
@@ -146,7 +146,7 @@ public class ProjectionCompiler {
         PTable index = tableRef.getTable();
         PhoenixConnection conn = context.getConnection();
         String tableName = index.getParentName().getString();
-        PTable table = conn.getPMetaData().getTable(new PTableKey(conn.getTenantId(), tableName));
+        PTable table = conn.getMetaDataCache().getTable(new PTableKey(conn.getTenantId(), tableName));
         int tableOffset = table.getBucketNum() == null ? 0 : 1;
         int indexOffset = index.getBucketNum() == null ? 0 : 1;
         if (index.getColumns().size()-indexOffset != table.getColumns().size()-tableOffset) {
@@ -195,7 +195,7 @@ public class ProjectionCompiler {
         PTable index = tableRef.getTable();
         PhoenixConnection conn = context.getConnection();
         String tableName = index.getParentName().getString();
-        PTable table = conn.getPMetaData().getTable(new PTableKey(conn.getTenantId(), tableName));
+        PTable table = conn.getMetaDataCache().getTable(new PTableKey(conn.getTenantId(), tableName));
         PColumnFamily pfamily = table.getColumnFamily(cfName);
         for (PColumn column : pfamily.getColumns()) {
             PColumn indexColumn = index.getColumn(IndexUtil.getIndexColumnName(column));

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
index 0506ba7..111d532 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -119,7 +119,7 @@ public class QueryCompiler {
         if (select.getFrom().size() > 1) {
             select = JoinCompiler.optimize(context, select, statement);
             if (this.select != select) {
-                ColumnResolver resolver = FromCompiler.getResolver(select, statement.getConnection());
+                ColumnResolver resolver = FromCompiler.getResolverForQuery(select, statement.getConnection());
                 context = new StatementContext(statement, resolver, scan);
             }
             JoinSpec join = JoinCompiler.getJoinSpec(context, select);
@@ -257,7 +257,7 @@ public class QueryCompiler {
         Expression having = HavingCompiler.compile(context, select, groupBy);
         // Don't pass groupBy when building where clause expression, because we do not want to wrap these
         // expressions as group by key expressions since they're pre, not post filtered.
-        context.setResolver(FromCompiler.getResolver(select, connection));
+        context.setResolver(FromCompiler.getResolverForQuery(select, connection));
         WhereCompiler.compile(context, select);
         context.setResolver(resolver); // recover resolver
         OrderBy orderBy = OrderByCompiler.compile(context, select, groupBy, limit); 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index a2c6496..a45455e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -209,7 +209,7 @@ public class UpsertCompiler {
         final PhoenixConnection connection = statement.getConnection();
         ConnectionQueryServices services = connection.getQueryServices();
         final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
-        final ColumnResolver resolver = FromCompiler.getResolver(upsert, connection);
+        final ColumnResolver resolver = FromCompiler.getResolverForMutation(upsert, connection);
         final TableRef tableRef = resolver.getTables().get(0);
         final PTable table = tableRef.getTable();
         if (table.getType() == PTableType.VIEW) {
@@ -344,7 +344,7 @@ public class UpsertCompiler {
         if (valueNodes == null) {
             SelectStatement select = upsert.getSelect();
             assert(select != null);
-            ColumnResolver selectResolver = FromCompiler.getResolver(select, connection);
+            ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection);
             select = StatementNormalizer.normalize(select, selectResolver);
             select = addTenantAndViewConstants(table, select, tenantId, addViewColumnsToBe);
             sameTable = select.getFrom().size() == 1

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
index 0910d92..cec60e1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
@@ -160,7 +160,7 @@ public class AggregatePlan extends BasicQueryPlan {
                 resultScanner = new LimitingResultIterator(aggResultIterator, limit);
             }
         } else {
-            int thresholdBytes = getConnectionQueryServices(context.getConnection().getQueryServices()).getProps().getInt(
+            int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt(
                     QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
             resultScanner = new OrderedAggregatingResultIterator(aggResultIterator, orderBy.getOrderByExpressions(), thresholdBytes, limit);
         }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/execute/BasicQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BasicQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BasicQueryPlan.java
index 7108498..cc1193c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BasicQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BasicQueryPlan.java
@@ -23,7 +23,6 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.compile.ExplainPlan;
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
 import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
@@ -36,7 +35,6 @@ import org.apache.phoenix.iterate.ParallelIterators.ParallelIteratorFactory;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.parse.FilterableStatement;
-import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
@@ -113,14 +111,6 @@ public abstract class BasicQueryPlan implements QueryPlan {
         return projection;
     }
 
-    protected ConnectionQueryServices getConnectionQueryServices(ConnectionQueryServices services) {
-        // Get child services associated with tenantId of query.
-        ConnectionQueryServices childServices = context.getConnection().getTenantId() == null ? 
-                services : 
-                services.getChildQueryServices(new ImmutableBytesWritable(context.getConnection().getTenantId().getBytes()));
-        return childServices;
-    }
-
 //    /**
 //     * Sets up an id used to do round robin queue processing on the server
 //     * @param scan

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 9ea5993..8002a04 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -293,7 +293,7 @@ public class MutationState implements SQLCloseable {
                                 }
                             }
                         }
-                        table = connection.getPMetaData().getTable(new PTableKey(tenantId, table.getName().getString()));
+                        table = connection.getMetaDataCache().getTable(new PTableKey(tenantId, table.getName().getString()));
                         for (PColumn column : columns) {
                             if (column != null) {
                                 table.getColumnFamily(column.getFamilyName().getString()).getColumn(column.getName().getString());

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
index 987d705..8080abc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
@@ -94,7 +94,7 @@ public class ScanPlan extends BasicQueryPlan {
             scanner = new MergeSortTopNResultIterator(iterators, limit, orderBy.getOrderByExpressions());
         } else {
             if (isSalted && 
-                    (getConnectionQueryServices(context.getConnection().getQueryServices()).getProps().getBoolean(
+                    (context.getConnection().getQueryServices().getProps().getBoolean(
                             QueryServices.ROW_KEY_ORDER_SALTED_TABLE_ATTRIB, 
                             QueryServicesOptions.DEFAULT_ROW_KEY_ORDER_SALTED_TABLE) ||
                      orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY ||

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 1a7e3e1..82e5643 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -50,6 +50,7 @@ import java.util.concurrent.Executor;
 
 import javax.annotation.Nullable;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.client.KeyValueBuilder;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -66,9 +67,10 @@ import org.apache.phoenix.schema.PArrayDataType;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PMetaData;
-import org.apache.phoenix.schema.PMetaDataImpl;
+import org.apache.phoenix.schema.PMetaData.Pruner;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.JDBCUtil;
 import org.apache.phoenix.util.NumberUtil;
@@ -77,6 +79,7 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 
+import com.google.common.base.Objects;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -118,7 +121,7 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     }
     
     public PhoenixConnection(PhoenixConnection connection) throws SQLException {
-        this(connection.getQueryServices(), connection.getURL(), connection.getClientInfo(), connection.getPMetaData());
+        this(connection.getQueryServices(), connection.getURL(), connection.getClientInfo(), connection.getMetaDataCache());
         this.isAutoCommit = connection.isAutoCommit;
     }
     
@@ -127,7 +130,7 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     }
     
     public PhoenixConnection(ConnectionQueryServices services, PhoenixConnection connection, long scn) throws SQLException {
-        this(services, connection.getURL(), newPropsWithSCN(scn,connection.getClientInfo()), PMetaDataImpl.pruneNewerTables(scn, connection.getPMetaData()));
+        this(services, connection.getURL(), newPropsWithSCN(scn,connection.getClientInfo()), connection.getMetaDataCache());
         this.isAutoCommit = connection.isAutoCommit;
     }
     
@@ -136,9 +139,17 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         this.url = url;
         // Copy so client cannot change
         this.info = info == null ? new Properties() : new Properties(info);
-        if (this.info.isEmpty()) {
+        final PName tenantId = JDBCUtil.getTenantId(url, info);
+        if (this.info.isEmpty() && tenantId == null) {
             this.services = services;
         } else {
+            // Create child services keyed by tenantId to track resource usage for
+            // a tenantId for all connections on this JVM.
+            if (tenantId != null) {
+                services = services.getChildQueryServices(tenantId.getBytesPtr());
+            }
+            // TODO: we could avoid creating another wrapper if the only property
+            // specified was for the tenant ID
             Map<String, String> existingProps = services.getProps().asMap();
             Map<String, String> tmpAugmentedProps = Maps.newHashMapWithExpectedSize(existingProps.size() + info.size());
             tmpAugmentedProps.putAll(existingProps);
@@ -153,18 +164,30 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
             };
         }
         this.scn = JDBCUtil.getCurrentSCN(url, this.info);
-        this.tenantId = JDBCUtil.getTenantId(url, this.info);
-        this.mutateBatchSize = JDBCUtil.getMutateBatchSize(url, this.info, services.getProps());
-        datePattern = services.getProps().get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
-        String numberPattern = services.getProps().get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT);
-        int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
+        this.tenantId = tenantId;
+        this.mutateBatchSize = JDBCUtil.getMutateBatchSize(url, this.info, this.services.getProps());
+        datePattern = this.services.getProps().get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
+        String numberPattern = this.services.getProps().get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT);
+        int maxSize = this.services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
         Format dateTimeFormat = DateUtil.getDateFormatter(datePattern);
         formatters[PDataType.DATE.ordinal()] = dateTimeFormat;
         formatters[PDataType.TIME.ordinal()] = dateTimeFormat;
         formatters[PDataType.DECIMAL.ordinal()] = FunctionArgumentType.NUMERIC.getFormatter(numberPattern);
-        this.metaData = PMetaDataImpl.pruneMultiTenant(metaData);
+        // We do not limit the metaData on a connection less than the global one,
+        // as there's not much that will be cached here.
+        this.metaData = metaData.pruneTables(new Pruner() {
+
+            @Override
+            public boolean prune(PTable table) {
+                long maxTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
+                return (table.getType() != PTableType.SYSTEM && 
+                        (  table.getTimeStamp() >= maxTimestamp || 
+                         ! Objects.equal(tenantId, table.getTenantId())) );
+            }
+            
+        });
         this.mutationState = new MutationState(maxSize, this);
-        services.addConnection(this);
+        this.services.addConnection(this);
     }
 
     public int executeStatements(Reader reader, List<Object> binds, PrintStream out) throws IOException, SQLException {
@@ -256,7 +279,7 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         return mutateBatchSize;
     }
     
-    public PMetaData getPMetaData() {
+    public PMetaData getMetaDataCache() {
         return metaData;
     }
 

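One behavioral consequence of the PhoenixConnection constructor change above: a connection opened with a tenant ID is now backed by a per-tenant child ConnectionQueryServices, letting Phoenix track resource usage per tenant across the whole JVM. A usage sketch — the TenantId connection property is standard Phoenix, while the URL and tenant name are illustrative:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    Properties props = new Properties();
    props.setProperty("TenantId", "acme");  // marks this connection as tenant-specific
    Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
    // Per the constructor diff above, the connection is then served by:
    //   services = services.getChildQueryServices(tenantId.getBytesPtr());
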
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index 8b75979..b952ab5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -54,6 +54,7 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
            throw new IllegalStateException("Unable to register " + PhoenixDriver.class.getName() + ": "+ e.getMessage());
         }
     }
+    // One entry per cluster here
     private final ConcurrentMap<ConnectionInfo,ConnectionQueryServices> connectionQueryServicesMap = new ConcurrentHashMap<ConnectionInfo,ConnectionQueryServices>(3);
 
     public PhoenixDriver() { // for Squirrel

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 1323328..e0d23e7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -259,7 +259,7 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
         @SuppressWarnings("unchecked")
         @Override
         public QueryPlan compilePlan(PhoenixStatement stmt) throws SQLException {
-            ColumnResolver resolver = FromCompiler.getResolver(this, stmt.getConnection());
+            ColumnResolver resolver = FromCompiler.getResolverForQuery(this, stmt.getConnection());
             SelectStatement select = StatementNormalizer.normalize(this, resolver);
             return new QueryCompiler(stmt, select, resolver).compile();
         }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index 0d72c87..139a1e0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -67,7 +67,7 @@ public class QueryOptimizer {
     }
 
     public QueryPlan optimize(PhoenixStatement statement, SelectStatement select) throws SQLException {
-        return optimize(statement, select, FromCompiler.getResolver(select, statement.getConnection()), Collections.<PColumn>emptyList(), null);
+        return optimize(statement, select, FromCompiler.getResolverForQuery(select, statement.getConnection()), Collections.<PColumn>emptyList(), null);
     }
 
     public QueryPlan optimize(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory) throws SQLException {
@@ -190,7 +190,7 @@ public class QueryOptimizer {
         List<? extends TableNode> tables = Collections.singletonList(FACTORY.namedTable(alias, FACTORY.table(schemaName, tableName)));
         try {
             SelectStatement indexSelect = FACTORY.select(select, tables);
-            ColumnResolver resolver = FromCompiler.getResolver(indexSelect, statement.getConnection());
+            ColumnResolver resolver = FromCompiler.getResolverForQuery(indexSelect, statement.getConnection());
             // Check index state of now potentially updated index table to make sure it's active
             if (PIndexState.ACTIVE.equals(resolver.getTables().get(0).getTable().getIndexState())) {
                 QueryCompiler compiler = new QueryCompiler(statement, indexSelect, resolver, targetColumns, parallelIteratorFactory);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterIndexStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterIndexStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterIndexStatement.java
index 5aebb1a..fcf817a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterIndexStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterIndexStatement.java
@@ -19,7 +19,7 @@ package org.apache.phoenix.parse;
 
 import org.apache.phoenix.schema.PIndexState;
 
-public class AlterIndexStatement extends SingleTableSQLStatement {
+public class AlterIndexStatement extends SingleTableStatement {
     private final String dataTableName;
     private final boolean ifExists;
     private final PIndexState indexState;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterTableStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterTableStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterTableStatement.java
index 21c3e6f..a334011 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterTableStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterTableStatement.java
@@ -19,7 +19,7 @@ package org.apache.phoenix.parse;
 
 import org.apache.phoenix.schema.PTableType;
 
-public abstract class AlterTableStatement extends SingleTableSQLStatement {
+public abstract class AlterTableStatement extends SingleTableStatement {
     private final PTableType tableType;
 
     AlterTableStatement(NamedTableNode table, PTableType tableType) {

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java
index 1511d12..cc2d971 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import com.google.common.collect.ListMultimap;
 
 
-public class CreateIndexStatement extends SingleTableSQLStatement {
+public class CreateIndexStatement extends SingleTableStatement {
     private final TableName indexTableName;
     private final PrimaryKeyConstraint indexConstraint;
     private final List<ColumnName> includeColumns;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/parse/DeleteStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/DeleteStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/DeleteStatement.java
index b60180c..077671e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/DeleteStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/DeleteStatement.java
@@ -23,7 +23,7 @@ import java.util.List;
 import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
 import org.apache.phoenix.schema.Sequence.Action;
 
-public class DeleteStatement extends SingleTableSQLStatement implements FilterableStatement {
+public class DeleteStatement extends SingleTableStatement implements FilterableStatement {
     private final ParseNode whereNode;
     private final List<OrderByNode> orderBy;
     private final LimitNode limit;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/parse/SingleTableSQLStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/SingleTableSQLStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/SingleTableSQLStatement.java
deleted file mode 100644
index d5f2018..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SingleTableSQLStatement.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.parse;
-
-public abstract class SingleTableSQLStatement extends MutableStatement {
-    private final NamedTableNode table;
-    private final int bindCount;
-
-    public SingleTableSQLStatement(NamedTableNode table, int bindCount) {
-        this.table = table;
-        this.bindCount = bindCount;
-    }
-    
-    public NamedTableNode getTable() {
-        return table;
-    }
-
-    @Override
-    public int getBindCount() {
-        return bindCount;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/parse/SingleTableStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/SingleTableStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/SingleTableStatement.java
new file mode 100644
index 0000000..603519e
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SingleTableStatement.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+public abstract class SingleTableStatement extends MutableStatement {
+    private final NamedTableNode table;
+    private final int bindCount;
+
+    public SingleTableStatement(NamedTableNode table, int bindCount) {
+        this.table = table;
+        this.bindCount = bindCount;
+    }
+    
+    public NamedTableNode getTable() {
+        return table;
+    }
+
+    @Override
+    public int getBindCount() {
+        return bindCount;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/parse/UpsertStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/UpsertStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/UpsertStatement.java
index 1142f9b..bb23421 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/UpsertStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/UpsertStatement.java
@@ -20,7 +20,7 @@ package org.apache.phoenix.parse;
 import java.util.Collections;
 import java.util.List;
 
-public class UpsertStatement extends SingleTableSQLStatement { 
+public class UpsertStatement extends SingleTableStatement { 
     private final List<ColumnName> columns;
     private final List<ParseNode> values;
     private final SelectStatement select;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index fc4ca87..4d05008 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -42,6 +42,8 @@ import org.apache.phoenix.schema.SequenceKey;
 
 
 public interface ConnectionQueryServices extends QueryServices, MetaDataMutated {
+    public static final int INITIAL_META_DATA_TABLE_CAPACITY = 100;
+
     /**
      * Get (and create if necessary) a child QueryService for a given tenantId.
      * The QueryService will be cached for the lifetime of the parent QueryService

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index caa91d4..6fc4661 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -96,7 +96,6 @@ import org.apache.phoenix.schema.SequenceKey;
 import org.apache.phoenix.schema.TableAlreadyExistsException;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.JDBCUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -114,6 +113,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     private static final Logger logger = LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
     private static final int INITIAL_CHILD_SERVICES_CAPACITY = 100;
     private static final int DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS = 1000;
+    
     protected final Configuration config;
     // Copy of config.getProps(), but read-only to prevent synchronization that we
     // don't need.
@@ -122,7 +122,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     private final StatsManager statsManager;
     private final ConcurrentHashMap<ImmutableBytesWritable,ConnectionQueryServices> childServices;
     // Cache the latest meta data here for future connections
-    private volatile PMetaData latestMetaData = PMetaDataImpl.EMPTY_META_DATA;
+    private volatile PMetaData latestMetaData;
     private final Object latestMetaDataLock = new Object();
     // Lowest HBase version on the cluster.
     private int lowestClusterHBaseVersion = Integer.MAX_VALUE;
@@ -132,6 +132,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     private ConcurrentMap<SequenceKey,Sequence> sequenceMap = Maps.newConcurrentMap();
     private KeyValueBuilder kvBuilder;
 
+    private PMetaData newEmptyMetaData() {
+        long maxSizeBytes = props.getLong(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB,
+                QueryServicesOptions.DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE);
+        return new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, maxSizeBytes);
+    }
     /**
      * Construct a ConnectionQueryServicesImpl that represents a connection to an HBase
      * cluster.
@@ -161,6 +166,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         if (this.connection.isClosed()) { // TODO: why the heck doesn't this throw above?
             throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION).build().buildException();
         }
+        this.latestMetaData = newEmptyMetaData();
         // TODO: should we track connection wide memory usage or just org-wide usage?
         // If connection-wide, create a MemoryManager here, otherwise just use the one from the delegate
         this.childServices = new ConcurrentHashMap<ImmutableBytesWritable,ConnectionQueryServices>(INITIAL_CHILD_SERVICES_CAPACITY);
@@ -182,12 +188,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     public HTableInterface getTable(byte[] tableName) throws SQLException {
         try {
             return HBaseFactoryProvider.getHTableFactory().getTable(tableName, connection, getExecutor());
+        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
+            byte[][] schemaAndTableName = new byte[2][];
+            SchemaUtil.getVarChars(tableName, schemaAndTableName);
+            throw new TableNotFoundException(Bytes.toString(schemaAndTableName[0]), Bytes.toString(schemaAndTableName[1]));
         } catch (IOException e) {
-        	if(e instanceof org.apache.hadoop.hbase.TableNotFoundException || e.getCause() instanceof org.apache.hadoop.hbase.TableNotFoundException) {
-        		byte[][] schemaAndTableName = new byte[2][];
-        		SchemaUtil.getVarChars(tableName, schemaAndTableName);
-        		throw new TableNotFoundException(Bytes.toString(schemaAndTableName[0]), Bytes.toString(schemaAndTableName[1]));
-        	} 
         	throw new SQLException(e);
         } 
     }
@@ -413,9 +418,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 
     @Override
     public PhoenixConnection connect(String url, Properties info) throws SQLException {
-        Long scn = JDBCUtil.getCurrentSCN(url, info);
-        PMetaData metaData = scn == null ? latestMetaData : PMetaDataImpl.pruneNewerTables(scn, latestMetaData);
-        return new PhoenixConnection(this, url, info, metaData);
+        return new PhoenixConnection(this, url, info, latestMetaData);
     }
 
 
@@ -1139,7 +1142,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         Properties props = new Properties(oldMetaConnection.getClientInfo());
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp));
         // Cannot go through DriverManager or you end up in an infinite loop because it'll call init again
-        PhoenixConnection metaConnection = new PhoenixConnection(this, oldMetaConnection.getURL(), props, oldMetaConnection.getPMetaData());
+        PhoenixConnection metaConnection = new PhoenixConnection(this, oldMetaConnection.getURL(), props, oldMetaConnection.getMetaDataCache());
         SQLException sqlE = null;
         try {
             metaConnection.createStatement().executeUpdate("ALTER TABLE " + PhoenixDatabaseMetaData.TYPE_SCHEMA_AND_TABLE + " ADD IF NOT EXISTS " + columns );
@@ -1166,7 +1169,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     public void init(String url, Properties props) throws SQLException {
         props = new Properties(props);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
-        PhoenixConnection metaConnection = new PhoenixConnection(this, url, props, PMetaDataImpl.EMPTY_META_DATA);
+        PhoenixConnection metaConnection = new PhoenixConnection(this, url, props, newEmptyMetaData());
         SQLException sqlE = null;
         try {
             try {

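The cap consumed by newEmptyMetaData() above comes from the new phoenix.client.maxMetaDataCacheSize property, falling back to DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE (10 MB, defined in QueryServicesOptions below). A minimal sketch of overriding it from application code, assuming the property is honored when passed through the connection Properties; the JDBC URL is a placeholder:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class ClientMetaDataCacheSizeExample {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Cap the client-side metadata LRU cache at 5 MB (default is 10 MB).
            props.setProperty("phoenix.client.maxMetaDataCacheSize",
                    Long.toString(5L * 1024L * 1024L));
            // Placeholder quorum; substitute your own.
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
            try {
                // Table metadata resolved through this connection now competes
                // for the 5 MB budget and is evicted LRU-first once exceeded.
            } finally {
                conn.close();
            }
        }
    }
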
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 99e6302..ea53149 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -86,12 +86,18 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     
     public ConnectionlessQueryServicesImpl(QueryServices queryServices) {
         super(queryServices);
-        metaData = PMetaDataImpl.EMPTY_META_DATA;
+        metaData = newEmptyMetaData();
         // find the HBase version and use that to determine the KeyValueBuilder that should be used
         String hbaseVersion = VersionInfo.getVersion();
         this.kvBuilder = KeyValueBuilder.get(hbaseVersion);
     }
 
+    private PMetaData newEmptyMetaData() {
+        long maxSizeBytes = getProps().getLong(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB,
+                QueryServicesOptions.DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE);
+        return new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, maxSizeBytes);
+    }
+
     @Override
     public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable childId) {
         return this; // Just reuse the same query services
@@ -194,7 +200,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     public void init(String url, Properties props) throws SQLException {
         props = new Properties(props);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
-        PhoenixConnection metaConnection = new PhoenixConnection(this, url, props, PMetaDataImpl.EMPTY_META_DATA);
+        PhoenixConnection metaConnection = new PhoenixConnection(this, url, props, newEmptyMetaData());
         SQLException sqlE = null;
         try {
             try {

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 4933791..0e50ce1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -116,8 +116,9 @@ public interface QueryConstants {
     public final static PName AGG_COLUMN_NAME = SINGLE_COLUMN_NAME;
     public final static PName AGG_COLUMN_FAMILY_NAME = SINGLE_COLUMN_FAMILY_NAME;
     
-    public static final byte[] ARRAY_VALUE_COLUMN_FAMILY = Bytes.toBytes("_arr_v");
-    public static final byte[] ARRAY_VALUE_COLUMN_QUALIFIER = Bytes.toBytes("_arr_v");
+    public static final byte[] ARRAY_VALUE_COLUMN_FAMILY = Bytes.toBytes("_a");
+    // TODO: use empty byte array so as not to accidentally conflict with any other columns
+    public static final byte[] ARRAY_VALUE_COLUMN_QUALIFIER = ARRAY_VALUE_COLUMN_FAMILY;
 
     public static final byte[] TRUE = new byte[] {1};
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 0904be7..14901a7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -93,8 +93,9 @@ public interface QueryServices extends SQLCloseable {
     public static final String DISTINCT_VALUE_COMPRESS_THRESHOLD_ATTRIB = "phoenix.distinct.value.compress.threshold";
     public static final String SEQUENCE_CACHE_SIZE_ATTRIB = "phoenix.sequence.cacheSize";
     public static final String INDEX_MAX_FILESIZE_PERC_ATTRIB = "phoenix.index.maxDataFileSizePerc";
-    public static final String MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB = "phoenix.coprocessor.maxMetaDataCacheSize";
     public static final String MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB = "phoenix.coprocessor.maxMetaDataCacheTimeToLiveMs";
+    public static final String MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB = "phoenix.coprocessor.maxMetaDataCacheSize";
+    public static final String MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB = "phoenix.client.maxMetaDataCacheSize";
 
     
     /**

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index d52cd5c..4af0871 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -28,6 +28,7 @@ import static org.apache.phoenix.query.QueryServices.IMMUTABLE_ROWS_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.KEEP_ALIVE_MS_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MASTER_INFO_PORT_ATTRIB;
+import static org.apache.phoenix.query.QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_INTRA_REGION_PARALLELIZATION_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_MEMORY_PERC_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_MEMORY_WAIT_MS_ATTRIB;
@@ -35,6 +36,7 @@ import static org.apache.phoenix.query.QueryServices.MAX_MUTATION_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_QUERY_CONCURRENCY_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB;
+import static org.apache.phoenix.query.QueryServices.MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_SPOOL_TO_DISK_BYTES_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_TENANT_MEMORY_PERC_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB;
@@ -111,8 +113,9 @@ public class QueryServicesOptions {
     
     public static final int DEFAULT_SEQUENCE_CACHE_SIZE = 100;  // reserve 100 sequences at a time
     public static final int DEFAULT_INDEX_MAX_FILESIZE_PERC = 50; // % of data table max file size for index table
-    public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE =  1024L*1024L*20L; // 20 Mb
     public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS =  60000 * 30; // 30 mins   
+    public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE =  1024L*1024L*20L; // 20 Mb
+    public static final long DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE =  1024L*1024L*10L; // 10 Mb
     
     private final Configuration config;
     
@@ -241,6 +244,14 @@ public class QueryServicesOptions {
         return set(MAX_SERVER_CACHE_SIZE_ATTRIB, maxServerCacheSize);
     }
 
+    public QueryServicesOptions setMaxServerMetaDataCacheSize(long maxMetaDataCacheSize) {
+        return set(MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB, maxMetaDataCacheSize);
+    }
+
+    public QueryServicesOptions setMaxClientMetaDataCacheSize(long maxMetaDataCacheSize) {
+        return set(MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, maxMetaDataCacheSize);
+    }
+
     public QueryServicesOptions setScanFetchSize(int scanFetchSize) {
         return set(SCAN_CACHE_SIZE_ATTRIB, scanFetchSize);
     }
@@ -285,15 +296,15 @@ public class QueryServicesOptions {
         return set(DROP_METADATA_ATTRIB, dropMetadata);
     }
     
-    public QueryServicesOptions setSPGBYEnabled(boolean enabled) {
+    public QueryServicesOptions setGroupBySpill(boolean enabled) {
         return set(GROUPBY_SPILLABLE_ATTRIB, enabled);
     }
 
-    public QueryServicesOptions setSPGBYMaxCacheSize(long size) {
+    public QueryServicesOptions setGroupBySpillMaxCacheSize(long size) {
         return set(GROUPBY_MAX_CACHE_SIZE_ATTRIB, size);
     }
     
-    public QueryServicesOptions setSPGBYNumSpillFiles(long num) {
+    public QueryServicesOptions setGroupBySpillNumSpillFiles(long num) {
         return set(GROUPBY_SPILL_FILES_ATTRIB, num);
     }
 

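For code that builds options programmatically (test setups, embedded use), the new and renamed setters chain like the existing ones. A sketch, assuming QueryServicesOptions.withDefaults() as the entry point; the values shown are simply the shipped defaults:

    QueryServicesOptions options = QueryServicesOptions.withDefaults()
            .setMaxServerMetaDataCacheSize(20L * 1024L * 1024L) // coprocessor-side cap
            .setMaxClientMetaDataCacheSize(10L * 1024L * 1024L) // client-side cap
            .setGroupBySpill(true);                             // was setSPGBYEnabled
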
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 822be36..ea2847a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -269,7 +269,7 @@ public class MetaDataClient {
         String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
         long tableTimestamp = HConstants.LATEST_TIMESTAMP;
         try {
-            table = connection.getPMetaData().getTable(new PTableKey(tenantId, fullTableName));
+            table = connection.getMetaDataCache().getTable(new PTableKey(tenantId, fullTableName));
             tableTimestamp = table.getTimeStamp();
         } catch (TableNotFoundException e) {
             // TODO: Try again on services cache, as we may be looking for
@@ -512,7 +512,7 @@ public class MetaDataClient {
         boolean allocateViewIndexId = false;
         while (true) {
             try {
-                ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
+                ColumnResolver resolver = FromCompiler.getResolverForMutation(statement, connection);
                 tableRef = resolver.getTables().get(0);
                 PTable dataTable = tableRef.getTable();
                 boolean isTenantConnection = connection.getTenantId() != null;
@@ -1410,7 +1410,7 @@ public class MetaDataClient {
             boolean retried = false;
             while (true) {
                 List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
-                ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
+                ColumnResolver resolver = FromCompiler.getResolverForMutation(statement, connection);
                 PTable table = resolver.getTables().get(0).getTable();
                 if (logger.isDebugEnabled()) {
                     logger.debug("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns());
@@ -1709,7 +1709,7 @@ public class MetaDataClient {
             String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
             boolean retried = false;
             while (true) {
-                final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
+                final ColumnResolver resolver = FromCompiler.getResolverForMutation(statement, connection);
                 PTable table = resolver.getTables().get(0).getTable();
                 List<ColumnName> columnRefs = statement.getColumnRefs();
                 if(columnRefs == null) {
@@ -1861,7 +1861,7 @@ public class MetaDataClient {
                     if (retried) {
                         throw e;
                     }
-                    table = connection.getPMetaData().getTable(new PTableKey(tenantId, fullTableName));
+                    table = connection.getMetaDataCache().getTable(new PTableKey(tenantId, fullTableName));
                     retried = true;
                 }
             }
@@ -1883,7 +1883,7 @@ public class MetaDataClient {
             }
             connection.setAutoCommit(false);
             // Confirm index table is valid and up-to-date
-            TableRef indexRef = FromCompiler.getResolver(statement, connection).getTables().get(0);
+            TableRef indexRef = FromCompiler.getResolverForMutation(statement, connection).getTables().get(0);
             PreparedStatement tableUpsert = null;
             try {
                 tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
index 6c74acb..5ddd5bb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
@@ -17,12 +17,21 @@
  */
 package org.apache.phoenix.schema;
 
-import java.util.Map;
-
 import org.apache.phoenix.query.MetaDataMutated;
 
 
 public interface PMetaData extends MetaDataMutated {
+    public static interface Cache extends Iterable<PTable> {
+        public Cache clone();
+        public PTable get(PTableKey key);
+        public PTable put(PTableKey key, PTable value);
+        public PTable remove(PTableKey key);
+        public int size();
+    }
+    public static interface Pruner {
+        public boolean prune(PTable table);
+    }
+    public Cache getTables();
     public PTable getTable(PTableKey key) throws TableNotFoundException;
-    public Map<PTableKey, PTable> getTables();
+    public PMetaData pruneTables(Pruner pruner);
 }

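The new Pruner callback generalizes the special-purpose pruneNewerTables/pruneMultiTenant statics removed from PMetaDataImpl below. As an illustration (the SCN value is hypothetical), the old SCN-based pruning can be expressed against the new interface using only methods visible in this diff:

    // Drop every non-SYSTEM table whose timestamp is at or beyond the SCN,
    // mirroring the removed pruneNewerTables(scn, metaData) helper.
    final long scn = 1393704000000L; // hypothetical example SCN
    PMetaData pruned = metaData.pruneTables(new PMetaData.Pruner() {
        @Override
        public boolean prune(PTable table) {
            return table.getTimeStamp() >= scn && table.getType() != PTableType.SYSTEM;
        }
    });
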
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
index 5e76ddd..ef48bbd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
@@ -18,21 +18,108 @@
 package org.apache.phoenix.schema;
 
 import java.sql.SQLException;
-import java.util.Collections;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 
-import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
+/**
+ * 
+ * Client-side cache of MetaData. Not thread-safe, but meant to be used
+ * in a copy-on-write fashion. Internally uses a LinkedHashMap in access
+ * order that evicts the least recently used entries when the estimated
+ * size grows beyond the maxSize specified at create time.
+ *
+ */
 public class PMetaDataImpl implements PMetaData {
-    public static final PMetaData EMPTY_META_DATA = new PMetaDataImpl(Collections.<PTableKey, PTable>emptyMap());
-    private final Map<PTableKey,PTable> metaData;
+    private final Cache metaData;
     
-    public PMetaDataImpl(Map<PTableKey, PTable> tables) {
-        this.metaData = ImmutableMap.copyOf(tables);
+    public PMetaDataImpl(int initialCapacity, long maxByteSize) {
+        this.metaData = new CacheImpl(initialCapacity, maxByteSize);
+    }
+
+    public PMetaDataImpl(Cache tables) {
+        this.metaData = tables.clone();
+    }
+    
+    private static class CacheImpl implements Cache, Cloneable {
+        private final long maxByteSize;
+        private long currentSize;
+        private final LinkedHashMap<PTableKey,PTable> tables;
+        
+        private CacheImpl(long maxByteSize, long currentSize, LinkedHashMap<PTableKey,PTable> tables) {
+            this.maxByteSize = maxByteSize;
+            this.currentSize = currentSize;
+            this.tables = tables;
+        }
+        
+        public CacheImpl(int initialCapacity, long maxByteSize) {
+            this.maxByteSize = maxByteSize;
+            this.currentSize = 0;
+            this.tables = newLRUMap(initialCapacity);
+        }
+        
+        @SuppressWarnings("unchecked")
+        @Override
+        public Cache clone() {
+            return new CacheImpl(this.maxByteSize, this.currentSize, (LinkedHashMap<PTableKey, PTable>)this.tables.clone());
+        }
+        
+        @Override
+        public PTable get(PTableKey key) {
+            return tables.get(key);
+        }
+        
+        private void pruneIfNecessary() {
+            if (currentSize > maxByteSize && size() > 1) {
+                Iterator<Map.Entry<PTableKey, PTable>> entries = this.tables.entrySet().iterator();
+                do {
+                    PTable table = entries.next().getValue();
+                    if (table.getType() != PTableType.SYSTEM) {
+                        currentSize -= table.getEstimatedSize();
+                        entries.remove();
+                    }
+                } while (currentSize > maxByteSize && size() > 1 && entries.hasNext());
+            }
+        }
+        
+        @Override
+        public PTable put(PTableKey key, PTable value) {
+            currentSize += value.getEstimatedSize();
+            PTable oldTable = tables.put(key, value);
+            if (oldTable != null) {
+                currentSize -= oldTable.getEstimatedSize();
+            }
+            pruneIfNecessary();
+            return oldTable;
+        }
+        
+        @Override
+        public PTable remove(PTableKey key) {
+            PTable value = tables.remove(key);
+            if (value != null) {
+                currentSize -= value.getEstimatedSize();
+            }
+            pruneIfNecessary();
+            return value;
+        }
+        
+        private LinkedHashMap<PTableKey,PTable> newLRUMap(int estimatedSize) {
+            return new LinkedHashMap<PTableKey,PTable>(estimatedSize, 0.75F, true);
+        }
+
+        @Override
+        public Iterator<PTable> iterator() {
+            return Iterators.unmodifiableIterator(tables.values().iterator());
+        }
+
+        @Override
+        public int size() {
+            return tables.size();
+        }
     }
     
     @Override
@@ -45,14 +132,14 @@ public class PMetaDataImpl implements PMetaData {
     }
 
     @Override
-    public Map<PTableKey, PTable> getTables() {
+    public Cache getTables() {
         return metaData;
     }
 
 
     @Override
     public PMetaData addTable(PTable table) throws SQLException {
-        Map<PTableKey,PTable> tables = Maps.newHashMap(metaData);
+        Cache tables = metaData.clone();
         PTable oldTable = tables.put(table.getKey(), table);
         if (table.getParentName() != null) { // Upsert new index table into parent data table list
             String parentName = table.getParentName().getString();
@@ -79,7 +166,7 @@ public class PMetaDataImpl implements PMetaData {
     @Override
     public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columnsToAdd, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows) throws SQLException {
         PTable table = getTable(new PTableKey(tenantId, tableName));
-        Map<PTableKey,PTable> tables = Maps.newHashMap(metaData);
+        Cache tables = metaData.clone();
         List<PColumn> oldColumns = PTableImpl.getColumnsToClone(table);
         List<PColumn> newColumns;
         if (columnsToAdd.isEmpty()) {
@@ -97,7 +184,7 @@ public class PMetaDataImpl implements PMetaData {
     @Override
     public PMetaData removeTable(PName tenantId, String tableName) throws SQLException {
         PTable table;
-        Map<PTableKey,PTable> tables = Maps.newHashMap(metaData);
+        Cache tables = metaData.clone();
         if ((table=tables.remove(new PTableKey(tenantId, tableName))) == null) {
             throw new TableNotFoundException(tableName);
         } else {
@@ -113,7 +200,7 @@ public class PMetaDataImpl implements PMetaData {
     @Override
     public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException {
         PTable table = getTable(new PTableKey(tenantId, tableName));
-        Map<PTableKey,PTable> tables = Maps.newHashMap(metaData);
+        Cache tables = metaData.clone();
         PColumn column;
         if (familyName == null) {
             column = table.getPKColumn(columnName);
@@ -142,58 +229,19 @@ public class PMetaDataImpl implements PMetaData {
         return new PMetaDataImpl(tables);
     }
 
-    public static PMetaData pruneNewerTables(long scn, PMetaData metaData) {
-        if (!hasNewerMetaData(scn, metaData)) {
-            return metaData;
-        }
-        Map<PTableKey,PTable> newTables = Maps.newHashMap(metaData.getTables());
-        Iterator<Map.Entry<PTableKey, PTable>> tableIterator = newTables.entrySet().iterator();
-        boolean wasModified = false;
-        while (tableIterator.hasNext()) {
-            PTable table = tableIterator.next().getValue();
-            if (table.getTimeStamp() >= scn && table.getType() != PTableType.SYSTEM) {
-                tableIterator.remove();
-                wasModified = true;
-            }
-        }
-    
-        if (wasModified) {
-            return new PMetaDataImpl(newTables);
-        }
-        return metaData;
-    }
-
-    private static boolean hasNewerMetaData(long scn, PMetaData metaData) {
-        for (PTable table : metaData.getTables().values()) {
-            if (table.getTimeStamp() >= scn) {
-                return true;
-            }
-        }
-        return false;
-    }
-    
-    private static boolean hasMultiTenantMetaData(PMetaData metaData) {
-        for (PTable table : metaData.getTables().values()) {
-            if (table.isMultiTenant()) {
-                return true;
-            }
-        }
-        return false;
-    }
-    
-    public static PMetaData pruneMultiTenant(PMetaData metaData) {
-        if (!hasMultiTenantMetaData(metaData)) {
-            return metaData;
-        }
-        Map<PTableKey,PTable> newTables = Maps.newHashMap(metaData.getTables());
-        Iterator<Map.Entry<PTableKey, PTable>> tableIterator = newTables.entrySet().iterator();
-        while (tableIterator.hasNext()) {
-            PTable table = tableIterator.next().getValue();
-            if (table.isMultiTenant()) {
-                tableIterator.remove();
+    @Override
+    public PMetaData pruneTables(Pruner pruner) {
+        for (PTable table : this.getTables()) {
+            if (pruner.prune(table)) {
+                Cache newCache = this.getTables().clone();
+                for (PTable value : this.getTables()) { // Iterate the old cache to avoid ConcurrentModificationException
+                    if (pruner.prune(value)) {
+                        newCache.remove(value.getKey());
+                    }
+                }
+                return new PMetaDataImpl(newCache);
             }
         }
-    
-        return new PMetaDataImpl(newTables);
+        return this;
     }
 }

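The heart of CacheImpl above is a LinkedHashMap constructed in access order, with a running byte-size estimate trimmed coldest-first after each mutation. A self-contained sketch of that technique follows; the names are illustrative, and unlike the real cache it takes sizes from a caller-supplied Sizer rather than PTable.getEstimatedSize() and omits the SYSTEM-table exemption seen in pruneIfNecessary():

    import java.util.Iterator;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class SizeBoundedLruCache<K, V> {
        /** Supplies the byte-size estimate tracked per value. */
        public interface Sizer<T> {
            long sizeOf(T value);
        }

        private final long maxBytes;
        private long currentBytes;
        private final LinkedHashMap<K, V> map;
        private final Sizer<V> sizer;

        public SizeBoundedLruCache(int initialCapacity, long maxBytes, Sizer<V> sizer) {
            this.maxBytes = maxBytes;
            this.sizer = sizer;
            // accessOrder=true means iteration visits least-recently-used
            // entries first, so eviction starts with the coldest entry.
            this.map = new LinkedHashMap<K, V>(initialCapacity, 0.75f, true);
        }

        public V get(K key) {
            return map.get(key); // a hit also refreshes the entry's recency
        }

        public V put(K key, V value) {
            currentBytes += sizer.sizeOf(value);
            V old = map.put(key, value);
            if (old != null) {
                currentBytes -= sizer.sizeOf(old);
            }
            evictIfNecessary();
            return old;
        }

        public V remove(K key) {
            V old = map.remove(key);
            if (old != null) {
                currentBytes -= sizer.sizeOf(old);
            }
            return old;
        }

        private void evictIfNecessary() {
            // Evict coldest-first until back under the cap; like CacheImpl,
            // always keep at least one entry so a single oversized value
            // can still be cached.
            Iterator<Map.Entry<K, V>> it = map.entrySet().iterator();
            while (currentBytes > maxBytes && map.size() > 1 && it.hasNext()) {
                currentBytes -= sizer.sizeOf(it.next().getValue());
                it.remove();
            }
        }
    }

Copy-on-write is layered on top of this: every mutating PMetaData operation clones the map first (see addTable and friends above), so concurrent readers never observe a cache mid-update.
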
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java
index ee533ae..e3519ae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java
@@ -38,6 +38,11 @@ public class PTableKey {
     }
     
     @Override
+    public String toString() {
+        return name + (tenantId == null ? "" : " for " + tenantId.getString());
+    }
+    
+    @Override
     public int hashCode() {
         final int prime = 31;
         int result = 1;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index a5e6e41..8108bdd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -319,7 +319,7 @@ public class PhoenixRuntime {
         PTable table = null;
         PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
         try {
-            table = pconn.getPMetaData().getTable(new PTableKey(pconn.getTenantId(), name));
+            table = pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), name));
         } catch (TableNotFoundException e) {
             String schemaName = SchemaUtil.getSchemaNameFromFullName(name);
             String tableName = SchemaUtil.getTableNameFromFullName(name);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index a14e36a..60ecfe4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -452,7 +452,7 @@ public class SchemaUtil {
     protected static PhoenixConnection addMetaDataColumn(PhoenixConnection conn, long scn, String columnDef) throws SQLException {
         String url = conn.getURL();
         Properties props = conn.getClientInfo();
-        PMetaData metaData = conn.getPMetaData();
+        PMetaData metaData = conn.getMetaDataCache();
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn));
         PhoenixConnection metaConnection = null;
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/main/java/org/apache/phoenix/util/SizedUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SizedUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SizedUtil.java
index 955bd8a..77ed8b3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SizedUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SizedUtil.java
@@ -61,8 +61,8 @@ public class SizedUtil {
     }
     
     public static int sizeOfMap(int nRows, int keySize, int valueSize) {
-        return SizedUtil.OBJECT_SIZE + nRows * (
-                SizedUtil.MAP_ENTRY_SIZE + // entry
+        return SizedUtil.OBJECT_SIZE * 4 + sizeOfArrayList(nRows) /* key set */ + nRows * (
+                SizedUtil.MAP_ENTRY_SIZE + /* entry set */
                 keySize + // key size
                 valueSize); // value size
     }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c00f6f25/phoenix-core/src/test/java/org/apache/phoenix/compile/JoinQueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/JoinQueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/JoinQueryCompilerTest.java
index 02c8037..e6d5f69 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/JoinQueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/JoinQueryCompilerTest.java
@@ -132,7 +132,7 @@ public class JoinQueryCompilerTest extends BaseConnectionlessQueryTest {
         Scan scan = new Scan();
         SQLParser parser = new SQLParser(query);
         SelectStatement select = parser.parseQuery();
-        ColumnResolver resolver = FromCompiler.getResolver(select, connection);
+        ColumnResolver resolver = FromCompiler.getResolverForQuery(select, connection);
         select = StatementNormalizer.normalize(select, resolver);
         StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver, scan);
         return JoinCompiler.getJoinSpec(context, select);        

