phoenix-commits mailing list archives

From: jamestay...@apache.org
Subject: [2/6] git commit: PHOENIX-91 Use LRU size-based cache on ConnectionQueryServicesImpl part 1 of 2 (JamesTaylor)
Date: Sat, 01 Mar 2014 22:19:03 GMT
PHOENIX-91 Use LRU size-based cache on ConnectionQueryServicesImpl part 1 of 2 (JamesTaylor)


Project: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/commit/8381af1d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/tree/8381af1d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/diff/8381af1d

Branch: refs/heads/master
Commit: 8381af1d683ec023758ee427f7e112004dadfb59
Parents: c3fa6bf
Author: James Taylor <jamestaylor@apache.org>
Authored: Fri Feb 28 17:52:44 2014 -0800
Committer: James Taylor <jamestaylor@apache.org>
Committed: Fri Feb 28 17:52:44 2014 -0800
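
This commit is part 1 of 2 for PHOENIX-91: it threads the tenant through the client-side
metadata cache by replacing plain String table-name keys with the new PTableKey (tenant id
plus full table name), the prerequisite for putting an LRU size-based cache behind
ConnectionQueryServicesImpl in part 2. The cache itself is not in this diff; purely as an
illustration of the idea, an access-ordered, size-bounded map in Java can be sketched as
below (class name and entry-count bound are assumptions, not Phoenix code).

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustrative only: an access-ordered LinkedHashMap that evicts its least recently
    // used entry once a maximum entry count is exceeded. Phoenix's actual cache (part 2
    // of PHOENIX-91) may instead bound by estimated byte size of the cached PTables.
    class LruMetaDataCacheSketch<K, V> extends LinkedHashMap<K, V> {
        private final int maxEntries;

        LruMetaDataCacheSketch(int maxEntries) {
            super(16, 0.75f, true); // true = access order, giving LRU iteration order
            this.maxEntries = maxEntries;
        }

        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            return size() > maxEntries; // evict the least recently used entry
        }
    }

Keyed by a (tenant id, table name) pair such as the PTableKey introduced below, a cache
like this can hold and evict per-tenant table metadata independently.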

----------------------------------------------------------------------
 .../phoenix/client/GenericKeyValueBuilder.java  |  5 +-
 .../apache/phoenix/client/KeyValueBuilder.java  |  2 -
 .../apache/phoenix/compile/FromCompiler.java    | 15 +++-
 .../phoenix/compile/ProjectionCompiler.java     | 62 ++++++++++++++---
 .../apache/phoenix/execute/MutationState.java   | 17 +++--
 .../apache/phoenix/jdbc/PhoenixConnection.java  | 20 +++---
 .../phoenix/query/ConnectionQueryServices.java  |  3 +-
 .../query/ConnectionQueryServicesImpl.java      | 44 ++++++------
 .../query/ConnectionlessQueryServicesImpl.java  | 28 +++++---
 .../query/DelegateConnectionQueryServices.java  | 19 +++---
 .../apache/phoenix/query/MetaDataMutated.java   |  7 +-
 .../apache/phoenix/query/QueryConstants.java    |  6 +-
 .../apache/phoenix/schema/MetaDataClient.java   | 38 +++++------
 .../org/apache/phoenix/schema/PMetaData.java    |  4 +-
 .../apache/phoenix/schema/PMetaDataImpl.java    | 57 ++++++++--------
 .../org/apache/phoenix/schema/PNameFactory.java |  5 ++
 .../org/apache/phoenix/schema/PNameImpl.java    |  7 +-
 .../apache/phoenix/schema/PNormalizedName.java  | 51 --------------
 .../java/org/apache/phoenix/schema/PTable.java  |  1 +
 .../org/apache/phoenix/schema/PTableImpl.java   | 10 ++-
 .../org/apache/phoenix/schema/PTableKey.java    | 62 +++++++++++++++++
 .../org/apache/phoenix/util/PhoenixRuntime.java |  3 +-
 .../phoenix/compile/QueryCompilerTest.java      | 11 ++-
 .../apache/phoenix/end2end/AlterTableTest.java  | 33 +++++----
 .../phoenix/end2end/ArithmeticQueryTest.java    | 72 ++++++++++----------
 ...aultParallelIteratorsRegionSplitterTest.java | 13 ++--
 ...RangeParallelIteratorRegionSplitterTest.java | 26 ++++---
 .../end2end/index/ImmutableIndexTest.java       | 21 +++---
 .../end2end/index/IndexMetadataTest.java        | 10 +--
 .../phoenix/end2end/index/SaltedIndexTest.java  |  4 +-
 .../phoenix/index/IndexMaintainerTest.java      | 15 ++--
 .../query/BaseConnectionlessQueryTest.java      |  3 +-
 .../apache/phoenix/schema/RowKeySchemaTest.java | 11 +--
 .../phoenix/schema/RowKeyValueAccessorTest.java |  8 +--
 34 files changed, 404 insertions(+), 289 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/client/GenericKeyValueBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/client/GenericKeyValueBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/client/GenericKeyValueBuilder.java
index 38e65f3..b374e1d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/client/GenericKeyValueBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/client/GenericKeyValueBuilder.java
@@ -17,15 +17,14 @@
  */
 package org.apache.phoenix.client;
 
+import static org.apache.phoenix.hbase.index.util.ImmutableBytesPtr.copyBytesIfNecessary;
+
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
-
-import static org.apache.phoenix.hbase.index.util.ImmutableBytesPtr.copyBytesIfNecessary;
 
 /**
  * {@link KeyValueBuilder} that does simple byte[] copies to build the underlying key-value. This is

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/client/KeyValueBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/client/KeyValueBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/client/KeyValueBuilder.java
index 9980aa5..1a76dfd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/client/KeyValueBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/client/KeyValueBuilder.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.util.MetaDataUtil;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 6f63ad4..f745419 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -55,6 +55,7 @@ import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.TableRef;
@@ -203,14 +204,19 @@ public class FromCompiler {
             boolean didRetry = false;
             MetaDataMutationResult result = null;
             String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+            PName tenantId = connection.getTenantId();
             while (true) {
                 try {
+                    PTable theTable = null;
                     if (!updateCacheOnlyIfAutoCommit || connection.getAutoCommit()) {
                         retry = false; // No reason to retry after this
                         result = client.updateCache(schemaName, tableName);
                         timeStamp = result.getMutationTime();
+                        theTable = result.getTable();
+                    } 
+                    if (theTable == null) {
+                        theTable = connection.getPMetaData().getTable(new PTableKey(tenantId, fullTableName));
                     }
-                    PTable theTable = connection.getPMetaData().getTable(fullTableName);
                     // If dynamic columns have been specified add them to the table declaration
                     if (!table.getDynamicColumns().isEmpty()) {
                         theTable = this.addDynamicColumns(table.getDynamicColumns(), theTable);
@@ -221,6 +227,10 @@ public class FromCompiler {
                     }
                     break;
                 } catch (TableNotFoundException e) {
+                    if (tenantId != null) { // Check with null tenantId next
+                        tenantId = null;
+                        continue;
+                    }
                     sqlE = new TableNotFoundException(e,timeStamp);
                 }
                 // If we haven't already tried, update our cache and retry
@@ -358,7 +368,8 @@ public class FromCompiler {
                 List<ColumnDef> dynamicColumnDefs) throws SQLException {
             MetaDataMutationResult result = client.updateCache(schemaName, tableName);
             long timeStamp = result.getMutationTime();
-            PTable theTable = connection.getPMetaData().getTable(SchemaUtil.getTableName(schemaName, tableName));
+            String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+            PTable theTable = connection.getPMetaData().getTable(new PTableKey(connection.getTenantId(), fullTableName));
 
             // If dynamic columns have been specified add them to the table declaration
             if (!dynamicColumnDefs.isEmpty()) {
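
The FromCompiler hunk above makes table resolution tenant-aware: the table is looked up
under the connection's tenant id first, and a TableNotFoundException triggers one more pass
with a null tenant id so that globally defined (non-tenant-specific) tables are still found.
A minimal sketch of that fallback, using only calls that appear in this diff
(PhoenixConnection.getTenantId(), PMetaData.getTable(PTableKey)); the helper method itself
is hypothetical:

    // Hypothetical helper showing the tenant-then-global lookup order used in FromCompiler;
    // in this patch a null tenantId denotes the global scope.
    private static PTable resolveTable(PhoenixConnection connection, String fullTableName)
            throws SQLException {
        PName tenantId = connection.getTenantId();
        try {
            return connection.getPMetaData().getTable(new PTableKey(tenantId, fullTableName));
        } catch (TableNotFoundException e) {
            if (tenantId == null) {
                throw e; // the global scope was already checked
            }
            // Retry once against the global (null tenant) entry, as the compiler's loop does.
            return connection.getPMetaData().getTable(new PTableKey(null, fullTableName));
        }
    }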

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
index cb1edab..07d3ecf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
@@ -17,9 +17,17 @@
  */
 package org.apache.phoenix.compile;
 
-import java.io.*;
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
 import java.sql.SQLException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
 
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -29,22 +37,54 @@ import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
 import org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
-import org.apache.phoenix.expression.*;
+import org.apache.phoenix.expression.BaseTerminalExpression;
+import org.apache.phoenix.expression.CoerceExpression;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.KeyValueColumnExpression;
 import org.apache.phoenix.expression.aggregator.ClientAggregators;
 import org.apache.phoenix.expression.aggregator.ServerAggregators;
 import org.apache.phoenix.expression.function.ArrayIndexFunction;
 import org.apache.phoenix.expression.function.SingleAggregateFunction;
 import org.apache.phoenix.expression.visitor.KeyValueExpressionVisitor;
 import org.apache.phoenix.expression.visitor.SingleAggregateFunctionVisitor;
-import org.apache.phoenix.parse.*;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.parse.AliasedNode;
+import org.apache.phoenix.parse.BindParseNode;
+import org.apache.phoenix.parse.ColumnParseNode;
+import org.apache.phoenix.parse.FamilyWildcardParseNode;
+import org.apache.phoenix.parse.FunctionParseNode;
+import org.apache.phoenix.parse.ParseNode;
+import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.parse.SequenceValueParseNode;
+import org.apache.phoenix.parse.TableName;
+import org.apache.phoenix.parse.TableWildcardParseNode;
+import org.apache.phoenix.parse.WildcardParseNode;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.*;
+import org.apache.phoenix.schema.ArgumentTypeMismatchException;
+import org.apache.phoenix.schema.ColumnNotFoundException;
+import org.apache.phoenix.schema.ColumnRef;
+import org.apache.phoenix.schema.KeyValueSchema;
 import org.apache.phoenix.schema.KeyValueSchema.KeyValueSchemaBuilder;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PColumnFamily;
+import org.apache.phoenix.schema.PDataType;
+import org.apache.phoenix.schema.PDatum;
+import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.ViewType;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.RowKeySchema;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.Tuple;
-import org.apache.phoenix.util.*;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.SizedUtil;
 
-import com.google.common.collect.*;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
 
 /**
@@ -104,7 +144,9 @@ public class ProjectionCompiler {
     private static void projectAllIndexColumns(StatementContext context, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
         ColumnResolver resolver = context.getResolver();
         PTable index = tableRef.getTable();
-        PTable table = context.getConnection().getPMetaData().getTable(index.getParentName().getString());
+        PhoenixConnection conn = context.getConnection();
+        String tableName = index.getParentName().getString();
+        PTable table = conn.getPMetaData().getTable(new PTableKey(conn.getTenantId(), tableName));
         int tableOffset = table.getBucketNum() == null ? 0 : 1;
         int indexOffset = index.getBucketNum() == null ? 0 : 1;
         if (index.getColumns().size()-indexOffset != table.getColumns().size()-tableOffset) {
@@ -151,7 +193,9 @@ public class ProjectionCompiler {
 
     private static void projectIndexColumnFamily(StatementContext context, String cfName, TableRef tableRef, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
         PTable index = tableRef.getTable();
-        PTable table = context.getConnection().getPMetaData().getTable(index.getParentName().getString());
+        PhoenixConnection conn = context.getConnection();
+        String tableName = index.getParentName().getString();
+        PTable table = conn.getPMetaData().getTable(new PTableKey(conn.getTenantId(), tableName));
         PColumnFamily pfamily = table.getColumnFamily(cfName);
         for (PColumn column : pfamily.getColumns()) {
             PColumn indexColumn = index.getColumn(IndexUtil.getIndexColumnName(column));

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 70363ba..9ea5993 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -30,12 +30,6 @@ import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import org.apache.phoenix.cache.ServerCacheClient;
 import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
@@ -49,14 +43,22 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.IllegalDataException;
 import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PRow;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.ServerUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 
 /**
  * 
@@ -268,6 +270,7 @@ public class MutationState implements SQLCloseable {
     private long[] validate() throws SQLException {
         int i = 0;
         Long scn = connection.getSCN();
+        PName tenantId = connection.getTenantId();
         MetaDataClient client = new MetaDataClient(connection);
         long[] timeStamps = new long[this.mutations.size()];
         for (Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry : mutations.entrySet()) {
@@ -290,7 +293,7 @@ public class MutationState implements SQLCloseable {
                                 }
                             }
                         }
-                        table = connection.getPMetaData().getTable(tableRef.getTable().getName().getString());
+                        table = connection.getPMetaData().getTable(new PTableKey(tenantId, table.getName().getString()));
                         for (PColumn column : columns) {
                             if (column != null) {
                                 table.getColumnFamily(column.getFamilyName().getString()).getColumn(column.getName().getString());

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 9f94623..1a7e3e1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -639,28 +639,28 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     }
 
     @Override
-    public PMetaData addColumn(String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows)
+    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows)
             throws SQLException {
-        metaData = metaData.addColumn(tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows);
+        metaData = metaData.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows);
         //Cascade through to connectionQueryServices too
-        getQueryServices().addColumn(tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows);
+        getQueryServices().addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows);
         return metaData;
     }
 
     @Override
-    public PMetaData removeTable(String tableName) throws SQLException {
-        metaData = metaData.removeTable(tableName);
+    public PMetaData removeTable(PName tenantId, String tableName) throws SQLException {
+        metaData = metaData.removeTable(tenantId, tableName);
         //Cascade through to connectionQueryServices too
-        getQueryServices().removeTable(tableName);
+        getQueryServices().removeTable(tenantId, tableName);
         return metaData;
     }
 
     @Override
-    public PMetaData removeColumn(String tableName, String familyName, String columnName, long tableTimeStamp,
-            long tableSeqNum) throws SQLException {
-        metaData = metaData.removeColumn(tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+    public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName,
+            long tableTimeStamp, long tableSeqNum) throws SQLException {
+        metaData = metaData.removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
         //Cascade through to connectionQueryServices too
-        getQueryServices().removeColumn(tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+        getQueryServices().removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
         return metaData;
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index cb10f88..fc4ca87 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -34,6 +34,7 @@ import org.apache.phoenix.compile.MutationPlan;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.Sequence;
@@ -66,7 +67,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
 
     public PhoenixConnection connect(String url, Properties info) throws SQLException;
 
-    public MetaDataMutationResult getTable(byte[] tenantId, byte[] schemaName, byte[] tableName, long tableTimestamp, long clientTimetamp) throws SQLException;
+    public MetaDataMutationResult getTable(PName tenantId, byte[] schemaName, byte[] tableName, long tableTimestamp, long clientTimetamp) throws SQLException;
     public MetaDataMutationResult createTable(List<Mutation> tableMetaData, byte[] tableName, PTableType tableType, Map<String,Object> tableProps, List<Pair<byte[],Map<String,Object>>> families, byte[][] splits) throws SQLException;
     public MetaDataMutationResult dropTable(List<Mutation> tableMetadata, PTableType tableType) throws SQLException;
     public MetaDataMutationResult addColumn(List<Mutation> tableMetaData, List<Pair<byte[],Map<String,Object>>> families, PTable table) throws SQLException;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 33e97e5..caa91d4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.Append;
@@ -72,7 +71,6 @@ import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.hbase.index.Indexer;
 import org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.index.PhoenixIndexBuilder;
 import org.apache.phoenix.index.PhoenixIndexCodec;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -86,7 +84,10 @@ import org.apache.phoenix.schema.PColumnFamily;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PMetaData;
 import org.apache.phoenix.schema.PMetaDataImpl;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.ReadOnlyTableException;
 import org.apache.phoenix.schema.SaltingUtil;
@@ -305,7 +306,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             // If existing table isn't older than new table, don't replace
             // If a client opens a connection at an earlier timestamp, this can happen
-            PTable existingTable = latestMetaData.getTable(table.getName().getString());
+            PTable existingTable = latestMetaData.getTable(new PTableKey(table.getTenantId(), table.getName().getString()));
             if (existingTable.getTimeStamp() >= table.getTimeStamp()) {
                 return latestMetaData;
             }
@@ -324,8 +325,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 
     /**
      * Ensures that metaData mutations are handled in the correct order
+     * @param tenantId TODO
      */
-    private PMetaData metaDataMutated(String tableName, long tableSeqNum, Mutator mutator) throws SQLException {
+    private PMetaData metaDataMutated(PName tenantId, String tableName, long tableSeqNum, Mutator mutator) throws SQLException {
         synchronized(latestMetaDataLock) {
             PMetaData metaData = latestMetaData;
             PTable table;
@@ -333,7 +335,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             while (true) {
                 try {
                     try {
-                        table = metaData.getTable(tableName);
+                        table = metaData.getTable(new PTableKey(tenantId, tableName));
                         /* If the table is at the prior sequence number, then we're good to go.
                          * We know if we've got this far, that the server validated the mutations,
                          * so we'd just need to wait until the other connection that mutated the same
@@ -354,7 +356,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     // and the next time it's used it'll be pulled over from the server.
                     if (waitTime <= 0) {
                         logger.warn("Unable to update meta data repo within " + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) + " seconds for " + tableName);
-                        metaData = metaData.removeTable(tableName);
+                        metaData = metaData.removeTable(tenantId, tableName);
                         break;
                     }
                     latestMetaDataLock.wait(waitTime);
@@ -370,12 +372,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
      }
 
     @Override
-    public PMetaData addColumn(final String tableName, final List<PColumn> columns, final long tableTimeStamp, final long tableSeqNum, final boolean isImmutableRows) throws SQLException {
-        return metaDataMutated(tableName, tableSeqNum, new Mutator() {
+    public PMetaData addColumn(final PName tenantId, final String tableName, final List<PColumn> columns, final long tableTimeStamp, final long tableSeqNum, final boolean isImmutableRows) throws SQLException {
+        return metaDataMutated(tenantId, tableName, tableSeqNum, new Mutator() {
             @Override
             public PMetaData mutate(PMetaData metaData) throws SQLException {
                 try {
-                    return metaData.addColumn(tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows);
+                    return metaData.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows);
                 } catch (TableNotFoundException e) {
                     // The DROP TABLE may have been processed first, so just ignore.
                     return metaData;
@@ -385,21 +387,21 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
      }
 
     @Override
-    public PMetaData removeTable(final String tableName) throws SQLException {
+    public PMetaData removeTable(PName tenantId, final String tableName) throws SQLException {
         synchronized(latestMetaDataLock) {
-            latestMetaData = latestMetaData.removeTable(tableName);
+            latestMetaData = latestMetaData.removeTable(tenantId, tableName);
             latestMetaDataLock.notifyAll();
             return latestMetaData;
         }
     }
 
     @Override
-    public PMetaData removeColumn(final String tableName, final String familyName, final String columnName, final long tableTimeStamp, final long tableSeqNum) throws SQLException {
-        return metaDataMutated(tableName, tableSeqNum, new Mutator() {
+    public PMetaData removeColumn(final PName tenantId, final String tableName, final String familyName, final String columnName, final long tableTimeStamp, final long tableSeqNum) throws SQLException {
+        return metaDataMutated(tenantId, tableName, tableSeqNum, new Mutator() {
             @Override
             public PMetaData mutate(PMetaData metaData) throws SQLException {
                 try {
-                    return metaData.removeColumn(tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+                    return metaData.removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
                 } catch (TableNotFoundException e) {
                     // The DROP TABLE may have been processed first, so just ignore.
                     return metaData;
@@ -888,7 +890,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             // Physical index table created up front for multi tenant
             // TODO: if viewIndexId is Short.MIN_VALUE, then we don't need to attempt to create it
             if (!MetaDataUtil.isMultiTenant(m, kvBuilder, ptr)) {
-                ensureViewIndexTableCreated(physicalTableName, MetaDataUtil.getClientTimeStamp(m));
+                ensureViewIndexTableCreated(tenantIdBytes.length == 0 ? null : PNameFactory.newName(tenantIdBytes), physicalTableName, MetaDataUtil.getClientTimeStamp(m));
             }
         } else if (tableType == PTableType.TABLE && MetaDataUtil.isMultiTenant(m, kvBuilder, ptr)) { // Create view index table up front for multi tenant tables
             ensureViewIndexTableCreated(tableName, tableProps, families, MetaDataUtil.isSalted(m, kvBuilder, ptr) ? splits : null, MetaDataUtil.getClientTimeStamp(m));
@@ -906,15 +908,15 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     @Override
-    public MetaDataMutationResult getTable(byte[] tenantId, final byte[] schemaBytes, final byte[] tableBytes,
+    public MetaDataMutationResult getTable(final PName tenantId, final byte[] schemaBytes, final byte[] tableBytes,
             final long tableTimestamp, final long clientTimestamp) throws SQLException {
-        final byte[] nonNullTenantId = tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId;
-        byte[] tableKey = SchemaUtil.getTableKey(nonNullTenantId, schemaBytes, tableBytes);
+        final byte[] tenantIdBytes = tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes();
+        byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
         return metaDataCoprocessorExec(tableKey,
                 new Batch.Call<MetaDataProtocol, MetaDataMutationResult>() {
                     @Override
                     public MetaDataMutationResult call(MetaDataProtocol instance) throws IOException {
-                      return instance.getTable(nonNullTenantId, schemaBytes, tableBytes, tableTimestamp, clientTimestamp);
+                      return instance.getTable(tenantIdBytes, schemaBytes, tableBytes, tableTimestamp, clientTimestamp);
                     }
                 });
     }
@@ -1000,14 +1002,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         return props;
     }
     
-    private void ensureViewIndexTableCreated(byte[] physicalIndexTableName, long timestamp) throws SQLException {
+    private void ensureViewIndexTableCreated(PName tenantId, byte[] physicalIndexTableName, long timestamp) throws SQLException {
         PTable table;
         String name = Bytes.toString(
                 physicalIndexTableName, 
                 MetaDataUtil.VIEW_INDEX_TABLE_PREFIX_BYTES.length,
                 physicalIndexTableName.length-MetaDataUtil.VIEW_INDEX_TABLE_PREFIX_BYTES.length);
         try {
-            table = latestMetaData.getTable(name);
+            table = latestMetaData.getTable(new PTableKey(tenantId, name));
             if (table.getTimeStamp() >= timestamp) { // Table in cache is newer than client timestamp which shouldn't be the case
                 throw new TableNotFoundException(table.getSchemaName().getString(), table.getTableName().getString());
             }
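
Throughout ConnectionQueryServicesImpl the patch follows one convention for the two
representations of a tenant: a null PName at the client-facing API corresponds to an empty
byte[] in the metadata row keys sent to the server. Both directions appear in the hunks
above; pulled out as a pair of hypothetical helpers for clarity:

    // Conversions between the two tenant-id representations used in this patch.
    // A null PName and a zero-length byte[] both mean the global (non-tenant) scope.
    static byte[] tenantIdBytes(PName tenantId) {
        return tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes();
    }

    static PName tenantIdName(byte[] tenantIdBytes) {
        return tenantIdBytes == null || tenantIdBytes.length == 0
                ? null
                : PNameFactory.newName(tenantIdBytes);
    }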

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 3341263..99e6302 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -52,8 +52,11 @@ import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PMetaData;
 import org.apache.phoenix.schema.PMetaDataImpl;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.Sequence;
 import org.apache.phoenix.schema.SequenceAlreadyExistsException;
@@ -130,21 +133,21 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData addColumn(String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum,
-            boolean isImmutableRows) throws SQLException {
-        return metaData = metaData.addColumn(tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows);
+    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp,
+            long tableSeqNum, boolean isImmutableRows) throws SQLException {
+        return metaData = metaData.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows);
     }
 
     @Override
-    public PMetaData removeTable(String tableName)
+    public PMetaData removeTable(PName tenantId, String tableName)
             throws SQLException {
-        return metaData = metaData.removeTable(tableName);
+        return metaData = metaData.removeTable(tenantId, tableName);
     }
 
     @Override
-    public PMetaData removeColumn(String tableName, String familyName, String columnName, long tableTimeStamp,
-            long tableSeqNum) throws SQLException {
-        return metaData = metaData.removeColumn(tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+    public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName,
+            long tableTimeStamp, long tableSeqNum) throws SQLException {
+        return metaData = metaData.removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
     }
 
     
@@ -154,11 +157,12 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     @Override
-    public MetaDataMutationResult getTable(byte[] tenantId, byte[] schemaBytes, byte[] tableBytes, long tableTimestamp, long clientTimestamp) throws SQLException {
+    public MetaDataMutationResult getTable(PName tenantId, byte[] schemaBytes, byte[] tableBytes, long tableTimestamp, long clientTimestamp) throws SQLException {
         // Return result that will cause client to use it's own metadata instead of needing
         // to get anything from the server (since we don't have a connection)
         try {
-            PTable table = metaData.getTable(SchemaUtil.getTableName(schemaBytes, tableBytes));
+            String fullTableName = SchemaUtil.getTableName(schemaBytes, tableBytes);
+            PTable table = metaData.getTable(new PTableKey(tenantId, fullTableName));
             return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, table);
         } catch (TableNotFoundException e) {
             return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null);
@@ -244,10 +248,12 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
         SchemaUtil.getVarChars(tableMetadata.get(0).getRow(), rowKeyMetadata);
         KeyValue newKV = tableMetadata.get(0).getFamilyMap().get(TABLE_FAMILY_BYTES).get(0);
         PIndexState newState =  PIndexState.fromSerializedValue(newKV.getBuffer()[newKV.getValueOffset()]);
+        byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
         String schemaName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]);
         String indexName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
         String indexTableName = SchemaUtil.getTableName(schemaName, indexName);
-        PTable index = metaData.getTable(indexTableName);
+        PName tenantId = tenantIdBytes.length == 0 ? null : PNameFactory.newName(tenantIdBytes);
+        PTable index = metaData.getTable(new PTableKey(tenantId, indexTableName));
         index = PTableImpl.makePTable(index,newState == PIndexState.USABLE ? PIndexState.ACTIVE : newState == PIndexState.UNUSABLE ? PIndexState.INACTIVE : newState);
         return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, index);
     }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index 9d1fa18..4a943c8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -36,6 +36,7 @@ import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PMetaData;
+import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.Sequence;
@@ -79,21 +80,21 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData addColumn(String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum,
-            boolean isImmutableRows) throws SQLException {
-        return getDelegate().addColumn(tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows);
+    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp,
+            long tableSeqNum, boolean isImmutableRows) throws SQLException {
+        return getDelegate().addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows);
     }
 
     @Override
-    public PMetaData removeTable(String tableName)
+    public PMetaData removeTable(PName tenantId, String tableName)
             throws SQLException {
-        return getDelegate().removeTable(tableName);
+        return getDelegate().removeTable(tenantId, tableName);
     }
 
     @Override
-    public PMetaData removeColumn(String tableName, String familyName, String columnName, long tableTimeStamp,
-            long tableSeqNum) throws SQLException {
-        return getDelegate().removeColumn(tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+    public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName,
+            long tableTimeStamp, long tableSeqNum) throws SQLException {
+        return getDelegate().removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
     }
 
     @Override
@@ -102,7 +103,7 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public MetaDataMutationResult getTable(byte[] tenantId, byte[] schemaBytes, byte[] tableBytes, long tableTimestamp, long clientTimestamp) throws SQLException {
+    public MetaDataMutationResult getTable(PName tenantId, byte[] schemaBytes, byte[] tableBytes, long tableTimestamp, long clientTimestamp) throws SQLException {
         return getDelegate().getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, clientTimestamp);
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java b/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
index ead16f5..2e6da04 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
@@ -22,6 +22,7 @@ import java.util.List;
 
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PMetaData;
+import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 
 
@@ -34,7 +35,7 @@ import org.apache.phoenix.schema.PTable;
  */
 public interface MetaDataMutated {
     PMetaData addTable(PTable table) throws SQLException;
-    PMetaData removeTable(String tableName) throws SQLException;
-    PMetaData addColumn(String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows) throws SQLException;
-    PMetaData removeColumn(String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException;
+    PMetaData removeTable(PName tenantId, String tableName) throws SQLException;
+    PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows) throws SQLException;
+    PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException;
 }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 4987158..4933791 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -81,7 +81,7 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.schema.MetaDataSplitPolicy;
 import org.apache.phoenix.schema.PName;
-import org.apache.phoenix.schema.PNormalizedName;
+import org.apache.phoenix.schema.PNameFactory;
 
 
 /**
@@ -103,8 +103,8 @@ public interface QueryConstants {
     public final static String PHOENIX_SCHEMA = "system";
     public final static String PHOENIX_METADATA = "table";
 
-    public final static PName SINGLE_COLUMN_NAME = new PNormalizedName("s");
-    public final static PName SINGLE_COLUMN_FAMILY_NAME = new PNormalizedName("s");
+    public final static PName SINGLE_COLUMN_NAME = PNameFactory.newNormalizedName("s");
+    public final static PName SINGLE_COLUMN_FAMILY_NAME = PNameFactory.newNormalizedName("s");
     public final static byte[] SINGLE_COLUMN = SINGLE_COLUMN_NAME.getBytes();
     public final static byte[] SINGLE_COLUMN_FAMILY = SINGLE_COLUMN_FAMILY_NAME.getBytes();
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 4750a0f..822be36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -260,6 +260,7 @@ public class MetaDataClient {
     
     private MetaDataMutationResult updateCache(String schemaName, String tableName, boolean alwaysHitServer) throws SQLException { // TODO: pass byte[] here
         Long scn = connection.getSCN();
+        PName tenantId = connection.getTenantId();
         long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
         if (TYPE_SCHEMA.equals(schemaName) && !alwaysHitServer) {
             return SYSTEM_TABLE_RESULT;
@@ -267,9 +268,8 @@ public class MetaDataClient {
         PTable table = null;
         String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
         long tableTimestamp = HConstants.LATEST_TIMESTAMP;
-        PName tenantIdName = connection.getTenantId();
         try {
-            table = connection.getPMetaData().getTable(fullTableName);
+            table = connection.getPMetaData().getTable(new PTableKey(tenantId, fullTableName));
             tableTimestamp = table.getTimeStamp();
         } catch (TableNotFoundException e) {
             // TODO: Try again on services cache, as we may be looking for
@@ -280,12 +280,7 @@ public class MetaDataClient {
             return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS,QueryConstants.UNSET_TIMESTAMP,table);
         }
         
-        byte[] tenantId = null;
-        int maxTryCount = 1;
-        if (tenantIdName != null) {
-            tenantId = tenantIdName.getBytes();
-            maxTryCount = 2;
-        }
+        int maxTryCount = tenantId == null ? 1 : 2;
         int tryCount = 0;
         MetaDataMutationResult result;
         
@@ -323,11 +318,11 @@ public class MetaDataClient {
                         return result;
                     }
                     if (code == MutationCode.TABLE_NOT_FOUND && tryCount + 1 == maxTryCount) {
-                        connection.removeTable(fullTableName);
+                        connection.removeTable(tenantId, fullTableName);
                     }
                 }
             }
-            tenantId = null;
+            tenantId = null; // Try again with global tenantId
         } while (++tryCount < maxTryCount);
         
         return result;
@@ -1228,8 +1223,9 @@ public class MetaDataClient {
         connection.rollback();
         boolean wasAutoCommit = connection.getAutoCommit();
         try {
-            String tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getString();
-            byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
+            PName tenantId = connection.getTenantId();
+            String tenantIdStr = tenantId == null ? null : tenantId.getString();
+            byte[] key = SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName);
             Long scn = connection.getSCN();
             long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
             List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
@@ -1237,7 +1233,7 @@ public class MetaDataClient {
             Delete tableDelete = new Delete(key, clientTimeStamp, null);
             tableMetaData.add(tableDelete);
             if (parentTableName != null) {
-                byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantId, schemaName, parentTableName, tableName);
+                byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName);
                 @SuppressWarnings("deprecation") // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
                 Delete linkDelete = new Delete(linkKey, clientTimeStamp, null);
                 tableMetaData.add(linkDelete);
@@ -1259,7 +1255,7 @@ public class MetaDataClient {
                 default:
                     try {
                         // TODO: should we update the parent table by removing the index?
-                        connection.removeTable(tableName);
+                        connection.removeTable(tenantId, tableName);
                     } catch (TableNotFoundException ignore) { } // Ignore - just means wasn't cached
                     
                     // TODO: we need to drop the index data when a view is dropped
@@ -1271,7 +1267,8 @@ public class MetaDataClient {
                         // Create empty table and schema - they're only used to get the name from
                         // PName name, PTableType type, long timeStamp, long sequenceNumber, List<PColumn> columns
                         List<TableRef> tableRefs = Lists.newArrayListWithExpectedSize(2 + table.getIndexes().size());
-                        if (tableType == PTableType.TABLE && MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName())) {
+                        // All multi-tenant tables have a view index table, so no need to check in that case
+                        if (tableType == PTableType.TABLE && (table.isMultiTenant() || MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName()))) {
                             MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName());
                             // TODO: consider removing this, as the DROP INDEX done for each DROP VIEW command
                             // would have deleted all the rows already
@@ -1303,9 +1300,10 @@ public class MetaDataClient {
 
     private MutationCode processMutationResult(String schemaName, String tableName, MetaDataMutationResult result) throws SQLException {
         final MutationCode mutationCode = result.getMutationCode();
+        PName tenantId = connection.getTenantId();
         switch (mutationCode) {
         case TABLE_NOT_FOUND:
-            connection.removeTable(tableName);
+            connection.removeTable(tenantId, tableName);
             throw new TableNotFoundException(schemaName, tableName);
         case UNALLOWED_TABLE_MUTATION:
             String columnName = null;
@@ -1400,6 +1398,7 @@ public class MetaDataClient {
         boolean wasAutoCommit = connection.getAutoCommit();
         try {
             connection.setAutoCommit(false);
+            PName tenantId = connection.getTenantId();
             TableName tableNameNode = statement.getTable().getName();
             String schemaName = tableNameNode.getSchemaName();
             String tableName = tableNameNode.getTableName();
@@ -1581,7 +1580,7 @@ public class MetaDataClient {
                     // Only update client side cache if we aren't adding a PK column to a table with indexes.
                     // We could update the cache manually then too, it'd just be a pain.
                     if (!isAddingPKColumn || table.getIndexes().isEmpty()) {
-                        connection.addColumn(SchemaUtil.getTableName(schemaName, tableName), columns, result.getMutationTime(), seqNum, isImmutableRows == null ? table.isImmutableRows() : isImmutableRows);
+                        connection.addColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName), columns, result.getMutationTime(), seqNum, isImmutableRows == null ? table.isImmutableRows() : isImmutableRows);
                     }
                     // Delete rows in view index if we haven't dropped it already
                     // We only need to do this if the multiTenant transitioned to false
@@ -1703,6 +1702,7 @@ public class MetaDataClient {
         boolean wasAutoCommit = connection.getAutoCommit();
         try {
             connection.setAutoCommit(false);
+            PName tenantId = connection.getTenantId();
             TableName tableNameNode = statement.getTable().getName();
             String schemaName = tableNameNode.getSchemaName();
             String tableName = tableNameNode.getTableName();
@@ -1818,7 +1818,7 @@ public class MetaDataClient {
                     // the server when needed.
                     if (columnsToDrop.size() > 0 && indexesToDrop.isEmpty()) {
                         for(PColumn columnToDrop : tableColumnsToDrop) {
-                            connection.removeColumn(SchemaUtil.getTableName(schemaName, tableName), columnToDrop.getFamilyName().getString() , columnToDrop.getName().getString(), result.getMutationTime(), seqNum);
+                            connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName) , columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString(), result.getMutationTime(), seqNum);
                         }
                     }
                     // If we have a VIEW, then only delete the metadata, and leave the table data alone
@@ -1861,7 +1861,7 @@ public class MetaDataClient {
                     if (retried) {
                         throw e;
                     }
-                    table = connection.getPMetaData().getTable(fullTableName);
+                    table = connection.getPMetaData().getTable(new PTableKey(tenantId, fullTableName));
                     retried = true;
                 }
             }
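
MetaDataClient.updateCache() now carries the same tenant awareness: a tenant-specific
connection asks the server for the table at most twice, first under its own tenant id and
then under the global scope, while a global connection gets a single attempt
(maxTryCount = tenantId == null ? 1 : 2). Stripped down to that control flow, with
schemaBytes/tableBytes/tableTimestamp/clientTimeStamp standing in for the method's locals
and all timestamp and cache handling omitted, the loop is roughly:

    // Sketch of the tenant-then-global retry in updateCache(); getTable() is the
    // ConnectionQueryServices method whose signature now takes a PName tenant id.
    PName tenantId = connection.getTenantId();
    int maxTryCount = tenantId == null ? 1 : 2;
    int tryCount = 0;
    MetaDataMutationResult result;
    do {
        result = connection.getQueryServices().getTable(
                tenantId, schemaBytes, tableBytes, tableTimestamp, clientTimeStamp);
        if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) {
            break; // found in this scope; no need to fall back to global
        }
        tenantId = null; // a second pass, if any, checks the global scope
    } while (++tryCount < maxTryCount);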

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
index 1f9f7cb..6c74acb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
@@ -23,6 +23,6 @@ import org.apache.phoenix.query.MetaDataMutated;
 
 
 public interface PMetaData extends MetaDataMutated {
-    public PTable getTable(String name) throws TableNotFoundException;
-    public Map<String, PTable> getTables();
+    public PTable getTable(PTableKey key) throws TableNotFoundException;
+    public Map<PTableKey, PTable> getTables();
 }
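
PMetaData is now keyed by PTableKey rather than by the table name String. PTableKey.java is
added by this commit (62 lines per the diffstat) but is not shown in this portion of the
diff; judging only from how it is constructed (new PTableKey(tenantId, fullTableName)) and
used (key.getName() in PMetaDataImpl below), a plausible shape is the following sketch,
which is a guess rather than the actual file:

    // Hypothetical reconstruction of the composite cache key; the real PTableKey.java in
    // this commit may differ (null handling, toString, ordering, etc.).
    public class PTableKeySketch {
        private final PName tenantId; // null means the global scope
        private final String name;    // full table name, e.g. "SCHEMA.TABLE"

        public PTableKeySketch(PName tenantId, String name) {
            this.tenantId = tenantId;
            this.name = name;
        }

        public String getName() {
            return name;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (!(o instanceof PTableKeySketch)) return false;
            PTableKeySketch other = (PTableKeySketch) o;
            boolean sameTenant = tenantId == null ? other.tenantId == null : tenantId.equals(other.tenantId);
            return sameTenant && name.equals(other.name);
        }

        @Override
        public int hashCode() {
            return 31 * (tenantId == null ? 0 : tenantId.hashCode()) + name.hashCode();
        }
    }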

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
index 3423873..5e76ddd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
@@ -28,35 +28,35 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
 public class PMetaDataImpl implements PMetaData {
-    public static final PMetaData EMPTY_META_DATA = new PMetaDataImpl(Collections.<String,PTable>emptyMap());
-    private final Map<String,PTable> metaData;
+    public static final PMetaData EMPTY_META_DATA = new PMetaDataImpl(Collections.<PTableKey, PTable>emptyMap());
+    private final Map<PTableKey,PTable> metaData;
     
-    public PMetaDataImpl(Map<String,PTable> tables) {
+    public PMetaDataImpl(Map<PTableKey, PTable> tables) {
         this.metaData = ImmutableMap.copyOf(tables);
     }
     
     @Override
-    public PTable getTable(String name) throws TableNotFoundException {
-        PTable table = metaData.get(name);
+    public PTable getTable(PTableKey key) throws TableNotFoundException {
+        PTable table = metaData.get(key);
         if (table == null) {
-            throw new TableNotFoundException(name);
+            throw new TableNotFoundException(key.getName());
         }
         return table;
     }
 
     @Override
-    public Map<String,PTable> getTables() {
+    public Map<PTableKey, PTable> getTables() {
         return metaData;
     }
 
 
     @Override
     public PMetaData addTable(PTable table) throws SQLException {
-        Map<String,PTable> tables = Maps.newHashMap(metaData);
-        PTable oldTable = tables.put(table.getName().getString(), table);
+        Map<PTableKey,PTable> tables = Maps.newHashMap(metaData);
+        PTable oldTable = tables.put(table.getKey(), table);
         if (table.getParentName() != null) { // Upsert new index table into parent data table list
             String parentName = table.getParentName().getString();
-            PTable parentTable = tables.get(parentName);
+            PTable parentTable = tables.get(new PTableKey(table.getTenantId(), parentName));
             // If parentTable isn't cached, that's ok we can skip this
             if (parentTable != null) {
                 List<PTable> oldIndexes = parentTable.getIndexes();
@@ -66,19 +66,20 @@ public class PMetaDataImpl implements PMetaData {
                     newIndexes.remove(oldTable);
                 }
                 newIndexes.add(table);
-                tables.put(parentName, PTableImpl.makePTable(parentTable, table.getTimeStamp(), newIndexes));
+                parentTable = PTableImpl.makePTable(parentTable, table.getTimeStamp(), newIndexes);
+                tables.put(parentTable.getKey(), parentTable);
             }
         }
         for (PTable index : table.getIndexes()) {
-            tables.put(index.getName().getString(), index);
+            tables.put(index.getKey(), index);
         }
         return new PMetaDataImpl(tables);
     }
 
     @Override
-    public PMetaData addColumn(String tableName, List<PColumn> columnsToAdd, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows) throws SQLException {
-        PTable table = getTable(tableName);
-        Map<String,PTable> tables = Maps.newHashMap(metaData);
+    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columnsToAdd, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows) throws SQLException {
+        PTable table = getTable(new PTableKey(tenantId, tableName));
+        Map<PTableKey,PTable> tables = Maps.newHashMap(metaData);
         List<PColumn> oldColumns = PTableImpl.getColumnsToClone(table);
         List<PColumn> newColumns;
         if (columnsToAdd.isEmpty()) {
@@ -89,19 +90,19 @@ public class PMetaDataImpl implements PMetaData {
             newColumns.addAll(columnsToAdd);
         }
         PTable newTable = PTableImpl.makePTable(table, tableTimeStamp, tableSeqNum, newColumns, isImmutableRows);
-        tables.put(tableName, newTable);
+        tables.put(newTable.getKey(), newTable);
         return new PMetaDataImpl(tables);
     }
 
     @Override
-    public PMetaData removeTable(String tableName) throws SQLException {
+    public PMetaData removeTable(PName tenantId, String tableName) throws SQLException {
         PTable table;
-        Map<String,PTable> tables = Maps.newHashMap(metaData);
-        if ((table=tables.remove(tableName)) == null) {
+        Map<PTableKey,PTable> tables = Maps.newHashMap(metaData);
+        if ((table=tables.remove(new PTableKey(tenantId, tableName))) == null) {
             throw new TableNotFoundException(tableName);
         } else {
             for (PTable index : table.getIndexes()) {
-                if (tables.remove(index.getName().getString()) == null) {
+                if (tables.remove(index.getKey()) == null) {
                     throw new TableNotFoundException(index.getName().getString());
                 }
             }
@@ -110,9 +111,9 @@ public class PMetaDataImpl implements PMetaData {
     }
     
     @Override
-    public PMetaData removeColumn(String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException {
-        PTable table = getTable(tableName);
-        Map<String,PTable> tables = Maps.newHashMap(metaData);
+    public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException {
+        PTable table = getTable(new PTableKey(tenantId, tableName));
+        Map<PTableKey,PTable> tables = Maps.newHashMap(metaData);
         PColumn column;
         if (familyName == null) {
             column = table.getPKColumn(columnName);
@@ -137,7 +138,7 @@ public class PMetaDataImpl implements PMetaData {
         }
         
         PTable newTable = PTableImpl.makePTable(table, tableTimeStamp, tableSeqNum, columns);
-        tables.put(tableName, newTable);
+        tables.put(newTable.getKey(), newTable);
         return new PMetaDataImpl(tables);
     }
 
@@ -145,8 +146,8 @@ public class PMetaDataImpl implements PMetaData {
         if (!hasNewerMetaData(scn, metaData)) {
             return metaData;
         }
-        Map<String,PTable> newTables = Maps.newHashMap(metaData.getTables());
-        Iterator<Map.Entry<String, PTable>> tableIterator = newTables.entrySet().iterator();
+        Map<PTableKey,PTable> newTables = Maps.newHashMap(metaData.getTables());
+        Iterator<Map.Entry<PTableKey, PTable>> tableIterator = newTables.entrySet().iterator();
         boolean wasModified = false;
         while (tableIterator.hasNext()) {
             PTable table = tableIterator.next().getValue();
@@ -184,8 +185,8 @@ public class PMetaDataImpl implements PMetaData {
         if (!hasMultiTenantMetaData(metaData)) {
             return metaData;
         }
-        Map<String,PTable> newTables = Maps.newHashMap(metaData.getTables());
-        Iterator<Map.Entry<String, PTable>> tableIterator = newTables.entrySet().iterator();
+        Map<PTableKey,PTable> newTables = Maps.newHashMap(metaData.getTables());
+        Iterator<Map.Entry<PTableKey, PTable>> tableIterator = newTables.entrySet().iterator();
         while (tableIterator.hasNext()) {
             PTable table = tableIterator.next().getValue();
             if (table.isMultiTenant()) {

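PMetaDataImpl keeps its copy-on-write behaviour after the re-keying: each mutator copies the backing map, updates the affected entries under their PTableKey, and returns a new snapshot. A sketch of that contract, assuming an already-resolved PTable named dataTable (hypothetical):

    // Classes from org.apache.phoenix.schema; addTable/getTable declare SQLException/TableNotFoundException.
    PMetaData before = PMetaDataImpl.EMPTY_META_DATA;
    PMetaData after = before.addTable(dataTable);        // returns a new snapshot
    assert before.getTables().isEmpty();                 // the original snapshot is untouched
    PTable cached = after.getTable(dataTable.getKey());  // entries are now keyed by PTableKey, not by name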
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameFactory.java
index c4941ab..d591972 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameFactory.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.schema;
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.util.SchemaUtil;
 
 public class PNameFactory {
     public static int getEstimatedSize(PName name) {
@@ -29,6 +30,10 @@ public class PNameFactory {
     private PNameFactory() {
     }
 
+    public static PName newNormalizedName(String name) {
+        return newName(SchemaUtil.normalizeIdentifier(name));
+    }
+    
     public static PName newName(String name) {
         return name == null || name.isEmpty() ? PName.EMPTY_NAME : 
             name.equals(QueryConstants.EMPTY_COLUMN_NAME ) ?  PName.EMPTY_COLUMN_NAME : 

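The new factory method folds identifier normalization into name construction, which is what allows the PNormalizedName subclass below to be deleted. A sketch, assuming SchemaUtil.normalizeIdentifier upper-cases unquoted identifiers and preserves the case of double-quoted ones:

    // Classes from org.apache.phoenix.schema; normalization rules are SchemaUtil's, assumed here.
    PName normalized = PNameFactory.newNormalizedName("my_table");      // -> "MY_TABLE" (assumed)
    PName quoted     = PNameFactory.newNormalizedName("\"my_table\"");  // case preserved (assumed)
    PName raw        = PNameFactory.newName("my_table");                // no normalization applied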
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
index e350909..3f19a6e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
@@ -93,11 +93,12 @@ public class PNameImpl implements PName {
     public boolean equals(Object obj) {
         if (this == obj) return true;
         if (obj == null) return false;
-        if (getClass() != obj.getClass()) return false;
-        PNameImpl other = (PNameImpl) obj;
+        if (! (obj instanceof PName) ) return false;
+        PName other = (PName)obj;
+        if (hashCode() != other.hashCode()) return false;
         // Compare normalized stringName for equality, since bytesName
         // may differ since it remains case sensitive.
-        if (!data.stringName.equals(other.data.stringName)) return false;
+        if (!getString().equals(other.getString())) return false;
         return true;
     }
 

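With this change, PName equality is defined by the normalized string rather than the concrete class, and the hashCode comparison is only a cheap pre-check before the string comparison. A sketch of the intended behaviour (both names come from the factory above):

    // Classes from org.apache.phoenix.schema.
    PName a = PNameFactory.newName("FOO");
    PName b = PNameFactory.newNormalizedName("foo");  // normalizes to "FOO" for unquoted input (assumed)
    assert a.equals(b) && b.equals(a);                // equal by string, even without identical classes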
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/schema/PNormalizedName.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNormalizedName.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PNormalizedName.java
deleted file mode 100644
index 0ebe777..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNormalizedName.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.schema;
-
-import org.apache.http.annotation.Immutable;
-
-import org.apache.phoenix.util.SchemaUtil;
-
-
-@Immutable
-public class PNormalizedName extends PNameImpl {
-    
-    public PNormalizedName(String nonNormalizedName) {
-        super(SchemaUtil.normalizeIdentifier(nonNormalizedName));
-    }
-
-    @Override
-    public int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result = prime * result + getString().hashCode();
-        return result;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj) return true;
-        if (obj == null) return false;
-        if (getClass() != obj.getClass()) return false;
-        PNormalizedName other = (PNormalizedName)obj;
-        // Compare normalized stringName for equality, since bytesName
-        // may differ since it remains case sensitive.
-        if (!getString().equals(other.getString())) return false;
-        return true;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index 1369f0c..dae93a2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -277,6 +277,7 @@ public interface PTable extends Writable {
     ViewType getViewType();
     String getViewStatement();
     Short getViewIndexId();
+    PTableKey getKey();
     
     int getEstimatedSize();
 }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index fac6892..a67eec4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -76,6 +76,7 @@ import com.google.common.collect.Maps;
 public class PTableImpl implements PTable {
     private static final Integer NO_SALTING = -1;
     
+    private PTableKey key;
     private PName name;
     private PName schemaName;
     private PName tableName;
@@ -119,6 +120,7 @@ public class PTableImpl implements PTable {
     public PTableImpl(PName tenantId, String schemaName, String tableName, long timestamp, List<PColumnFamily> families) { // For base table of mapped VIEW
         this.tenantId = tenantId;
         this.name = PNameFactory.newName(SchemaUtil.getTableName(schemaName, tableName));
+        this.key = new PTableKey(tenantId, name.getString());
         this.schemaName = PNameFactory.newName(schemaName);
         this.tableName = PNameFactory.newName(tableName);
         this.type = PTableType.VIEW;
@@ -236,7 +238,7 @@ public class PTableImpl implements PTable {
         if (schemaName == null) {
             throw new NullPointerException();
         }
-        int estimatedSize = SizedUtil.OBJECT_SIZE + 26 * SizedUtil.POINTER_SIZE + 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE +
+        int estimatedSize = SizedUtil.OBJECT_SIZE * 2 + 23 * SizedUtil.POINTER_SIZE + 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE +
               PNameFactory.getEstimatedSize(tenantId) + 
               PNameFactory.getEstimatedSize(schemaName) + 
               PNameFactory.getEstimatedSize(tableName) + 
@@ -247,6 +249,7 @@ public class PTableImpl implements PTable {
         this.schemaName = schemaName;
         this.tableName = tableName;
         this.name = PNameFactory.newName(SchemaUtil.getTableName(schemaName.getString(), tableName.getString()));
+        this.key = new PTableKey(tenantId, name.getString());
         this.type = type;
         this.state = state;
         this.timeStamp = timeStamp;
@@ -924,4 +927,9 @@ public class PTableImpl implements PTable {
     public PName getTenantId() {
         return tenantId;
     }
+
+    @Override
+    public PTableKey getKey() {
+        return key;
+    }
 }
\ No newline at end of file

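PTableImpl now builds its PTableKey once in the constructors, from the tenant id and the schema-qualified name, so getKey() is just a field read for cache lookups. Given any resolved table (the variable table is hypothetical):

    // Sketch: the cached key mirrors (tenantId, full table name).
    PTableKey key = table.getKey();
    assert key.equals(new PTableKey(table.getTenantId(), table.getName().getString()));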
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java
new file mode 100644
index 0000000..ee533ae
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import com.google.common.base.Preconditions;
+
+public class PTableKey {
+    private final PName tenantId;
+    private final String name;
+    
+    public PTableKey(PName tenantId, String name) {
+        Preconditions.checkNotNull(name);
+        this.tenantId = tenantId;
+        this.name = name;
+    }
+
+    public PName getTenantId() {
+        return tenantId;
+    }
+
+    public String getName() {
+        return name;
+    }
+    
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((tenantId == null) ? 0 : tenantId.hashCode());
+        result = prime * result + name.hashCode();
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        PTableKey other = (PTableKey)obj;
+        if (!name.equals(other.name)) return false;
+        if (tenantId == null) {
+            if (other.tenantId != null) return false;
+        } else if (!tenantId.equals(other.tenantId)) return false;
+        return true;
+    }
+
+}

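PTableKey is the new composite cache key: a possibly-null tenant id (null for global, non-tenant-specific connections) plus the full table name, with equals and hashCode defined over both so it can serve directly as a HashMap key. A small sketch:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.phoenix.schema.PTableKey;

    // Same tenant (null = global) and same name collapse to a single cache entry.
    Map<PTableKey, String> cache = new HashMap<PTableKey, String>();
    cache.put(new PTableKey(null, "MY_SCHEMA.MY_TABLE"), "v1");
    cache.put(new PTableKey(null, "MY_SCHEMA.MY_TABLE"), "v2");  // replaces the entry above
    assert cache.size() == 1;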
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index e35d8e3..a5e6e41 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -43,6 +43,7 @@ import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.TableNotFoundException;
 
@@ -318,7 +319,7 @@ public class PhoenixRuntime {
         PTable table = null;
         PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
         try {
-            table = pconn.getPMetaData().getTable(name);
+            table = pconn.getPMetaData().getTable(new PTableKey(pconn.getTenantId(), name));
         } catch (TableNotFoundException e) {
             String schemaName = SchemaUtil.getSchemaNameFromFullName(name);
             String tableName = SchemaUtil.getTableNameFromFullName(name);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index 6d6091c..cd7ec43 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -52,6 +52,7 @@ import org.apache.phoenix.schema.AmbiguousColumnException;
 import org.apache.phoenix.schema.ColumnAlreadyExistsException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SchemaUtil;
@@ -145,7 +146,8 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
         try {
             String query = "CREATE TABLE t1 (k integer not null primary key, a.k decimal, b.k decimal)";
             conn.createStatement().execute(query);
-            PColumn c = conn.unwrap(PhoenixConnection.class).getPMetaData().getTable("T1").getColumn("K");
+            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+            PColumn c = pconn.getPMetaData().getTable(new PTableKey(pconn.getTenantId(), "T1")).getColumn("K");
             assertTrue(SchemaUtil.isPKColumn(c));
         } finally {
             conn.close();
@@ -1104,6 +1106,11 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
         }
     }
     
+    private void assertImmutableRows(Connection conn, String fullTableName, boolean expectedValue) throws SQLException {
+        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+        assertEquals(expectedValue, pconn.getPMetaData().getTable(new PTableKey(pconn.getTenantId(), fullTableName)).isImmutableRows());
+    }
+    
     @Test
     public void testDeleteFromImmutableWithKV() throws Exception {
         String ddl = "CREATE TABLE t (k1 VARCHAR, v1 VARCHAR, v2 VARCHAR CONSTRAINT pk PRIMARY KEY(k1)) immutable_rows=true";
@@ -1111,7 +1118,9 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
         Connection conn = DriverManager.getConnection(getUrl());
         try {
             conn.createStatement().execute(ddl);
+            assertImmutableRows(conn, "T", true);
             conn.createStatement().execute(indexDDL);
+            assertImmutableRows(conn, "T", true);
             conn.createStatement().execute("DELETE FROM t");
             fail();
         } catch (SQLException e) {

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8381af1d/phoenix-core/src/test/java/org/apache/phoenix/end2end/AlterTableTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/end2end/AlterTableTest.java b/phoenix-core/src/test/java/org/apache/phoenix/end2end/AlterTableTest.java
index 768d9fc..ebc86e8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/end2end/AlterTableTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/end2end/AlterTableTest.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.Test;
@@ -601,9 +602,15 @@ public class AlterTableTest extends BaseHBaseManagedTimeTest {
         }
     }
 
+    private void asssertIsWALDisabled(Connection conn, String fullTableName, boolean expectedValue) throws SQLException {
+        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+        assertEquals(expectedValue, pconn.getPMetaData().getTable(new PTableKey(pconn.getTenantId(), fullTableName)).isWALDisabled());
+    }
+    
     @Test
     public void testDisableWAL() throws Exception {
-
+        String fullTableName = "TEST_TABLE";
+        String fullIndexName = "I";
         Properties props = new Properties(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
 
@@ -618,18 +625,18 @@ public class AlterTableTest extends BaseHBaseManagedTimeTest {
             String query = "SELECT * FROM test_table";
             ResultSet rs = conn2.createStatement().executeQuery(query);
             assertFalse(rs.next());
-            assertTrue(conn2.unwrap(PhoenixConnection.class).getPMetaData().getTable("TEST_TABLE").isWALDisabled());
+            asssertIsWALDisabled(conn2,fullTableName, true);
             conn2.close();
-            assertTrue(conn.unwrap(PhoenixConnection.class).getPMetaData().getTable("TEST_TABLE").isWALDisabled());
+            asssertIsWALDisabled(conn,fullTableName, true);
 
             conn.createStatement().execute("CREATE INDEX i ON test_table (col1) include (cf1.col2) SALT_BUCKETS=4");
             conn2 = DriverManager.getConnection(getUrl(), props);
             query = "SELECT * FROM i";
             rs = conn2.createStatement().executeQuery(query);
-            assertTrue(conn2.unwrap(PhoenixConnection.class).getPMetaData().getTable("I").isWALDisabled());
+            asssertIsWALDisabled(conn2,fullIndexName, true);
             assertFalse(rs.next());
             conn2.close();
-            assertTrue(conn.unwrap(PhoenixConnection.class).getPMetaData().getTable("I").isWALDisabled());
+            asssertIsWALDisabled(conn,fullIndexName, true);
             
             conn.createStatement().execute("DROP TABLE test_table");
         } finally {
@@ -648,18 +655,18 @@ public class AlterTableTest extends BaseHBaseManagedTimeTest {
             String query = "SELECT * FROM test_table";
             ResultSet rs = conn2.createStatement().executeQuery(query);
             assertFalse(rs.next());
-            assertFalse(conn2.unwrap(PhoenixConnection.class).getPMetaData().getTable("TEST_TABLE").isWALDisabled());
+            asssertIsWALDisabled(conn,fullTableName, false);
             conn2.close();
-            assertFalse(conn.unwrap(PhoenixConnection.class).getPMetaData().getTable("TEST_TABLE").isWALDisabled());
+            asssertIsWALDisabled(conn,fullTableName, false);
 
             conn.createStatement().execute("CREATE INDEX i ON test_table (col1) include (cf1.col2) SALT_BUCKETS=4");
             conn2 = DriverManager.getConnection(getUrl(), props);
             query = "SELECT * FROM i";
             rs = conn2.createStatement().executeQuery(query);
-            assertTrue(conn2.unwrap(PhoenixConnection.class).getPMetaData().getTable("I").isWALDisabled());
+            asssertIsWALDisabled(conn2,fullIndexName, true);
             assertFalse(rs.next());
             conn2.close();
-            assertTrue(conn.unwrap(PhoenixConnection.class).getPMetaData().getTable("I").isWALDisabled());
+            asssertIsWALDisabled(conn,fullIndexName, true);
             conn.createStatement().execute("DROP TABLE test_table");
         } finally {
             conn.close();
@@ -677,18 +684,18 @@ public class AlterTableTest extends BaseHBaseManagedTimeTest {
             String query = "SELECT * FROM test_table";
             ResultSet rs = conn2.createStatement().executeQuery(query);
             assertFalse(rs.next());
-            assertFalse(conn2.unwrap(PhoenixConnection.class).getPMetaData().getTable("TEST_TABLE").isWALDisabled());
+            asssertIsWALDisabled(conn2,fullTableName, false);
             conn2.close();
-            assertFalse(conn.unwrap(PhoenixConnection.class).getPMetaData().getTable("TEST_TABLE").isWALDisabled());
+            asssertIsWALDisabled(conn,fullTableName, false);
 
             conn.createStatement().execute("CREATE INDEX i ON test_table (col1) include (cf1.col2) SALT_BUCKETS=4");
             conn2 = DriverManager.getConnection(getUrl(), props);
             query = "SELECT * FROM i";
             rs = conn2.createStatement().executeQuery(query);
-            assertFalse(conn2.unwrap(PhoenixConnection.class).getPMetaData().getTable("I").isWALDisabled());
+            asssertIsWALDisabled(conn2,fullIndexName, false);
             assertFalse(rs.next());
             conn2.close();
-            assertFalse(conn.unwrap(PhoenixConnection.class).getPMetaData().getTable("I").isWALDisabled());
+            asssertIsWALDisabled(conn,fullIndexName, false);
             
         } finally {
             conn.close();

