phoenix-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From an...@apache.org
Subject [2/8] phoenix git commit: PHOENIX-1311 HBase namespaces surfaced in phoenix
Date Thu, 14 Apr 2016 10:43:32 GMT
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 76b2f51..ad161fa 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -47,6 +47,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_STATE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_TYPE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_ARRAY;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_CONSTANT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_ROW_TIMESTAMP;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_VIEW_REFERENCED;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.JAR_PATH;
@@ -157,17 +158,20 @@ import org.apache.phoenix.parse.ColumnDefInPkConstraint;
 import org.apache.phoenix.parse.ColumnName;
 import org.apache.phoenix.parse.CreateFunctionStatement;
 import org.apache.phoenix.parse.CreateIndexStatement;
+import org.apache.phoenix.parse.CreateSchemaStatement;
 import org.apache.phoenix.parse.CreateSequenceStatement;
 import org.apache.phoenix.parse.CreateTableStatement;
 import org.apache.phoenix.parse.DropColumnStatement;
 import org.apache.phoenix.parse.DropFunctionStatement;
 import org.apache.phoenix.parse.DropIndexStatement;
+import org.apache.phoenix.parse.DropSchemaStatement;
 import org.apache.phoenix.parse.DropSequenceStatement;
 import org.apache.phoenix.parse.DropTableStatement;
 import org.apache.phoenix.parse.IndexKeyConstraint;
 import org.apache.phoenix.parse.NamedTableNode;
 import org.apache.phoenix.parse.PFunction;
 import org.apache.phoenix.parse.PFunction.FunctionArgument;
+import org.apache.phoenix.parse.PSchema;
 import org.apache.phoenix.parse.ParseNode;
 import org.apache.phoenix.parse.ParseNodeFactory;
 import org.apache.phoenix.parse.PrimaryKeyConstraint;
@@ -175,6 +179,7 @@ import org.apache.phoenix.parse.SQLParser;
 import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.parse.UpdateStatisticsStatement;
+import org.apache.phoenix.parse.UseSchemaStatement;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.ConnectionQueryServices.Feature;
 import org.apache.phoenix.query.QueryConstants;
@@ -206,8 +211,6 @@ import org.apache.phoenix.util.UpgradeUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import co.cask.tephra.TxConstants;
-
 import com.google.common.base.Objects;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.ListMultimap;
@@ -216,6 +219,8 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.primitives.Ints;
 
+import co.cask.tephra.TxConstants;
+
 public class MetaDataClient {
     private static final Logger logger = LoggerFactory.getLogger(MetaDataClient.class);
 
@@ -250,8 +255,13 @@ public class MetaDataClient {
             STORE_NULLS + "," +
             BASE_COLUMN_COUNT + "," +
             TRANSACTIONAL + "," +
-            UPDATE_CACHE_FREQUENCY +
-            ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
+            UPDATE_CACHE_FREQUENCY + "," +
+            IS_NAMESPACE_MAPPED +
+            ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?)";
+
+    private static final String CREATE_SCHEMA = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE
+            + "\"( " + TABLE_SCHEM + "," + TABLE_NAME + ") VALUES (?,?)";
+
     private static final String CREATE_LINK =
             "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
             TENANT_ID + "," +
@@ -377,6 +387,8 @@ public class MetaDataClient {
             MAX_VALUE +
             ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)";
 
+    public static final String EMPTY_TABLE = " ";
+
 
     private final PhoenixConnection connection;
 
@@ -546,7 +558,28 @@ public class MetaDataClient {
 
         return result;
     }
-    
+
+    public MetaDataMutationResult updateCache(String schemaName) throws SQLException {
+        return updateCache(schemaName, false);
+    }
+
+    public MetaDataMutationResult updateCache(String schemaName, boolean alwaysHitServer) throws SQLException {
+        long clientTimeStamp = getClientTimeStamp();
+        PSchema schema = null;
+        try {
+            schema = connection.getMetaDataCache().getSchema(new PTableKey(null, schemaName));
+            if (schema != null
+                    && !alwaysHitServer) { return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, schema,
+                            QueryConstants.UNSET_TIMESTAMP); }
+        } catch (SchemaNotFoundException e) {
+
+        }
+        MetaDataMutationResult result;
+
+        result = connection.getQueryServices().getSchema(schemaName, clientTimeStamp);
+        return result;
+    }
+
     private MetaDataMutationResult updateCache(PName tenantId, List<String> functionNames,
             boolean alwaysHitServer) throws SQLException { // TODO: pass byte[] herez
         long clientTimeStamp = getClientTimeStamp();
@@ -870,22 +903,26 @@ public class MetaDataClient {
             // then analyze all of those indexes too.
             if (table.getType() != PTableType.VIEW) {
                 List<PName> names = Lists.newArrayListWithExpectedSize(2);
-                if (table.isMultiTenant() || MetaDataUtil.hasViewIndexTable(connection, table.getName())) {
+                final List<PName> physicalNames = Lists.newArrayListWithExpectedSize(2);
+                if (table.isMultiTenant() || MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName())) {
                     names.add(PNameFactory.newName(SchemaUtil.getTableName(
                             MetaDataUtil.getViewIndexSchemaName(table.getSchemaName().getString()),
                             MetaDataUtil.getViewIndexTableName(table.getTableName().getString()))));
+                    physicalNames.add(PNameFactory.newName(MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes())));
                 }
-                if (MetaDataUtil.hasLocalIndexTable(connection, table.getName())) {
+                if (MetaDataUtil.hasLocalIndexTable(connection, table.getPhysicalName())) {
                     names.add(PNameFactory.newName(SchemaUtil.getTableName(
                             MetaDataUtil.getLocalIndexSchemaName(table.getSchemaName().getString()),
                             MetaDataUtil.getLocalIndexTableName(table.getTableName().getString()))));
+                    physicalNames.add(PNameFactory.newName(MetaDataUtil.getLocalIndexPhysicalName(table.getPhysicalName().getBytes())));
                 }
-
+                int i = 0;
                 for (final PName name : names) {
+                    final int index = i++;
                     PTable indexLogicalTable = new DelegateTable(table) {
                         @Override
                         public PName getPhysicalName() {
-                            return name;
+                            return physicalNames.get(index);
                         }
                         @Override
                         public PTableStats getTableStats() {
@@ -1511,6 +1548,7 @@ public class MetaDataClient {
 
             TableName tableNameNode = statement.getTableName();
             String schemaName = tableNameNode.getSchemaName();
+            schemaName = connection.getSchema() != null && schemaName == null ? connection.getSchema() : schemaName;
             String tableName = tableNameNode.getTableName();
             String parentTableName = null;
             PName tenantId = connection.getTenantId();
@@ -1527,6 +1565,9 @@ public class MetaDataClient {
             boolean addSaltColumn = false;
             boolean rowKeyOrderOptimizable = true;
             Long timestamp = null;
+            boolean isNamespaceMapped = parent == null
+                    ? SchemaUtil.isNamespaceMappingEnabled(tableType, connection.getQueryServices().getProps())
+                    : parent.isNamespaceMapped();
             if (parent != null && tableType == PTableType.INDEX) {
                 timestamp = TransactionUtil.getTableTimestamp(connection, transactional);
                 storeNulls = parent.getStoreNulls();
@@ -1537,6 +1578,7 @@ public class MetaDataClient {
                 // from the table to the index, though.
                 if (indexType == IndexType.LOCAL || (parent.getType() == PTableType.VIEW && parent.getViewType() != ViewType.MAPPED)) {
                     PName physicalName = parent.getPhysicalName();
+
                     saltBucketNum = parent.getBucketNum();
                     addSaltColumn = (saltBucketNum != null && indexType != IndexType.LOCAL);
                     defaultFamilyName = parent.getDefaultFamilyName() == null ? null : parent.getDefaultFamilyName().getString();
@@ -1822,7 +1864,8 @@ public class MetaDataClient {
                         linkStatement.setString(4, physicalName.getString());
                         linkStatement.setByte(5, LinkType.PHYSICAL_TABLE.getSerializedValue());
                         if (tableType == PTableType.VIEW) {
-                            PTable physicalTable = connection.getTable(new PTableKey(null, physicalName.getString()));
+                            PTable physicalTable = connection.getTable(new PTableKey(null, physicalName.getString()
+                                    .replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR)));
                             linkStatement.setLong(6, physicalTable.getSequenceNumber());
                         } else {
                             linkStatement.setLong(6, parent.getSequenceNumber());
@@ -1980,7 +2023,7 @@ public class MetaDataClient {
                         Collections.<PTable>emptyList(), isImmutableRows,
                         Collections.<PName>emptyList(), defaultFamilyName == null ? null :
                                 PNameFactory.newName(defaultFamilyName), null,
-                        Boolean.TRUE.equals(disableWAL), false, false, null, indexId, indexType, true, false, 0, 0L);
+                        Boolean.TRUE.equals(disableWAL), false, false, null, indexId, indexType, true, false, 0, 0L, isNamespaceMapped);
                 connection.addTable(table, MetaDataProtocol.MIN_TABLE_TIMESTAMP);
             } else if (tableType == PTableType.INDEX && indexId == null) {
                 if (tableProps.get(HTableDescriptor.MAX_FILESIZE) == null) {
@@ -2086,6 +2129,7 @@ public class MetaDataClient {
             }
             tableUpsert.setBoolean(21, transactional);
             tableUpsert.setLong(22, updateCacheFrequency);
+            tableUpsert.setBoolean(23, isNamespaceMapped);
             tableUpsert.execute();
 
             if (asyncCreatedDate != null) {
@@ -2118,7 +2162,7 @@ public class MetaDataClient {
             MetaDataMutationResult result = connection.getQueryServices().createTable(
                     tableMetaData,
                     viewType == ViewType.MAPPED || indexId != null ? physicalNames.get(0).getBytes() : null,
-                    tableType, tableProps, familyPropList, splits);
+                    tableType, tableProps, familyPropList, splits, isNamespaceMapped);
             MutationCode code = result.getMutationCode();
             switch(code) {
             case TABLE_ALREADY_EXISTS:
@@ -2150,7 +2194,7 @@ public class MetaDataClient {
                         PTable.INITIAL_SEQ_NUM, pkName == null ? null : PNameFactory.newName(pkName), saltBucketNum, columns,
                         dataTableName == null ? null : newSchemaName, dataTableName == null ? null : PNameFactory.newName(dataTableName), Collections.<PTable>emptyList(), isImmutableRows,
                         physicalNames, defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), viewStatement, Boolean.TRUE.equals(disableWAL), multiTenant, storeNulls, viewType,
-                        indexId, indexType, rowKeyOrderOptimizable, transactional, updateCacheFrequency, 0L);
+                        indexId, indexType, rowKeyOrderOptimizable, transactional, updateCacheFrequency, 0L, isNamespaceMapped);
                 result = new MetaDataMutationResult(code, result.getMutationTime(), table, true);
                 addTableToCache(result);
                 return table;
@@ -2298,9 +2342,6 @@ public class MetaDataClient {
                 byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName);
                 Delete linkDelete = new Delete(linkKey, clientTimeStamp);
                 tableMetaData.add(linkDelete);
-            } else {
-                hasViewIndexTable = MetaDataUtil.hasViewIndexTable(connection, schemaName, tableName);
-                hasLocalIndexTable = MetaDataUtil.hasLocalIndexTable(connection, schemaName, tableName);
             }
 
             MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, tableType, cascade);
@@ -2337,6 +2378,15 @@ public class MetaDataClient {
                         // Create empty table and schema - they're only used to get the name from
                         // PName name, PTableType type, long timeStamp, long sequenceNumber, List<PColumn> columns
                         // All multi-tenant tables have a view index table, so no need to check in that case
+                        if (parentTableName == null) {
+                            for (PTable index : table.getIndexes()) {
+                                if (MetaDataUtil.isLocalIndex(index.getPhysicalName().getString())) {
+                                    hasLocalIndexTable = true;
+                                } 
+                            }
+                            hasViewIndexTable = true; // As there is no way to know whether table has views or not so
+                                                      // ensuring we delete sequence and cached object during drop
+                        }
                         if (tableType == PTableType.TABLE
                                 && (table.isMultiTenant() || hasViewIndexTable || hasLocalIndexTable)) {
     
@@ -2400,7 +2450,7 @@ public class MetaDataClient {
         try {
             StringBuilder buf = new StringBuilder("DELETE FROM SYSTEM.STATS WHERE PHYSICAL_NAME IN (");
             for (TableRef ref : tableRefs) {
-                buf.append("'" + ref.getTable().getName().getString() + "',");
+                buf.append("'" + ref.getTable().getPhysicalName().getString() + "',");
             }
             buf.setCharAt(buf.length() - 1, ')');
             conn.createStatement().execute(buf.toString());
@@ -2856,6 +2906,7 @@ public class MetaDataClient {
                                 storeNulls == null ? table.getStoreNulls() : storeNulls, 
                                 isTransactional == null ? table.isTransactional() : isTransactional,
                                 updateCacheFrequency == null ? table.getUpdateCacheFrequency() : updateCacheFrequency,
+                                table.isNamespaceMapped(),
                                 resolvedTimeStamp);
                     } else if (updateCacheFrequency != null) {
                         // Force removal from cache as the update cache frequency has changed
@@ -3124,11 +3175,12 @@ public class MetaDataClient {
                         final List<TableRef> tableRefsToDrop = Lists.newArrayList();
                         Map<String, List<TableRef>> tenantIdTableRefMap = Maps.newHashMap();
                         if (result.getSharedTablesToDelete()!=null) {
-                            for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) { 
-                                PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(), 
-                                    sharedTableState.getSchemaName(), sharedTableState.getTableName(), 
-                                    ts, table.getColumnFamilies(), sharedTableState.getColumns(),
-                                    sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(), table.isMultiTenant());
+                            for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) {
+                                PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(),
+                                        sharedTableState.getSchemaName(), sharedTableState.getTableName(), ts,
+                                        table.getColumnFamilies(), sharedTableState.getColumns(),
+                                        sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(),
+                                        table.isMultiTenant(), table.isNamespaceMapped());
                                 TableRef indexTableRef = new TableRef(viewIndexTable);
                                 PName indexTableTenantId = sharedTableState.getTenantId();
                                 if (indexTableTenantId==null) {
@@ -3296,6 +3348,10 @@ public class MetaDataClient {
         return result.getFunctions();
     }
 
+    private void addSchemaToCache(MetaDataMutationResult result) throws SQLException {
+        connection.addSchema(result.getSchema());
+    }
+
     public PTableStats getTableStats(PTable table) throws SQLException {
         /*
          *  The shared view index case is tricky, because we don't have
@@ -3306,10 +3362,12 @@ public class MetaDataClient {
         boolean isSharedIndex = table.getViewIndexId() != null;
         if (isSharedIndex) {
             // we are assuming the stats table is not transactional
-            return connection.getQueryServices().getTableStats(table.getPhysicalName().getBytes(), getCurrentScn());
+            return connection.getQueryServices().getTableStats(table.getPhysicalName().getBytes(),
+                    getCurrentScn());
         }
         boolean isView = table.getType() == PTableType.VIEW;
-        String physicalName = table.getPhysicalName().getString();
+        String physicalName = table.getPhysicalName().toString().replace(QueryConstants.NAMESPACE_SEPARATOR,
+                QueryConstants.NAME_SEPARATOR);
         if (isView && table.getViewType() != ViewType.MAPPED) {
             try {
                 return connection.getTable(new PTableKey(null, physicalName)).getTableStats();
@@ -3351,4 +3409,102 @@ public class MetaDataClient {
         String parentName = SchemaUtil.normalizeFullTableName(select.getFrom().toString().trim());
         return connection.getTable(new PTableKey(view.getTenantId(), parentName));
     }
+
+    public MutationState createSchema(CreateSchemaStatement create) throws SQLException {
+        boolean wasAutoCommit = connection.getAutoCommit();
+        connection.rollback();
+        try {
+            if (!SchemaUtil.isNamespaceMappingEnabled(null,
+                    connection.getQueryServices()
+                            .getProps())) { throw new SQLExceptionInfo.Builder(
+                                    SQLExceptionCode.CREATE_SCHEMA_NOT_ALLOWED).setSchemaName(create.getSchemaName())
+                                            .build().buildException(); }
+            boolean isIfNotExists = create.isIfNotExists();
+            validateSchema(create.getSchemaName());
+            PSchema schema = new PSchema(create.getSchemaName());
+            connection.setAutoCommit(false);
+            List<Mutation> schemaMutations;
+
+            try (PreparedStatement schemaUpsert = connection.prepareStatement(CREATE_SCHEMA)) {
+                schemaUpsert.setString(1, schema.getSchemaName());
+                schemaUpsert.setString(2, MetaDataClient.EMPTY_TABLE);
+                schemaUpsert.execute();
+                schemaMutations = connection.getMutationState().toMutations(null).next().getSecond();
+                connection.rollback();
+            }
+            MetaDataMutationResult result = connection.getQueryServices().createSchema(schemaMutations,
+                    schema.getSchemaName());
+            MutationCode code = result.getMutationCode();
+            switch (code) {
+            case SCHEMA_ALREADY_EXISTS:
+                if (result.getSchema() != null) {
+                    addSchemaToCache(result);
+                }
+                if (!isIfNotExists) { throw new SchemaAlreadyExistsException(schema.getSchemaName()); }
+                break;
+            case NEWER_SCHEMA_FOUND:
+                throw new NewerSchemaAlreadyExistsException(schema.getSchemaName());
+            default:
+                result = new MetaDataMutationResult(code, schema, result.getMutationTime());
+                addSchemaToCache(result);
+            }
+        } finally {
+            connection.setAutoCommit(wasAutoCommit);
+        }
+        return new MutationState(0, connection);
+    }
+
+    private void validateSchema(String schemaName) throws SQLException {
+        if (SchemaUtil.NOT_ALLOWED_SCHEMA_LIST.contains(
+                schemaName.toUpperCase())) { throw new SQLExceptionInfo.Builder(SQLExceptionCode.SCHEMA_NOT_ALLOWED)
+                        .setSchemaName(schemaName).build().buildException(); }
+    }
+
+    public MutationState dropSchema(DropSchemaStatement executableDropSchemaStatement) throws SQLException {
+        connection.rollback();
+        boolean wasAutoCommit = connection.getAutoCommit();
+        try {
+            String schemaName = executableDropSchemaStatement.getSchemaName();
+            boolean ifExists = executableDropSchemaStatement.ifExists();
+            byte[] key = SchemaUtil.getSchemaKey(schemaName);
+
+            Long scn = connection.getSCN();
+            long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
+            List<Mutation> schemaMetaData = Lists.newArrayListWithExpectedSize(2);
+            Delete schemaDelete = new Delete(key, clientTimeStamp);
+            schemaMetaData.add(schemaDelete);
+            MetaDataMutationResult result = connection.getQueryServices().dropSchema(schemaMetaData, schemaName);
+            MutationCode code = result.getMutationCode();
+            PSchema schema = result.getSchema();
+            switch (code) {
+            case SCHEMA_NOT_FOUND:
+                if (!ifExists) { throw new SchemaNotFoundException(schemaName); }
+                break;
+            case NEWER_SCHEMA_FOUND:
+                throw new NewerSchemaAlreadyExistsException(schemaName);
+            case TABLES_EXIST_ON_SCHEMA:
+                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_SCHEMA).setSchemaName(schemaName)
+                        .build().buildException();
+            default:
+                connection.removeSchema(schema, result.getMutationTime());
+                break;
+            }
+            return new MutationState(0, connection);
+        } finally {
+            connection.setAutoCommit(wasAutoCommit);
+        }
+    }
+
+    public MutationState useSchema(UseSchemaStatement useSchemaStatement) throws SQLException {
+        // As we allow default namespace mapped to empty schema, so this is to reset schema in connection
+        if (useSchemaStatement.getSchemaName().equals(StringUtil.EMPTY_STRING)
+                || useSchemaStatement.getSchemaName().toUpperCase().equals(SchemaUtil.SCHEMA_FOR_DEFAULT_NAMESPACE)) {
+            connection.setSchema(null);
+        } else {
+            PSchema schema = FromCompiler.getResolverForSchema(useSchemaStatement, connection)
+                    .resolveSchema(useSchemaStatement.getSchemaName());
+            connection.setSchema(schema.getSchemaName());
+        }
+        return new MutationState(0, connection);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/schema/NewerSchemaAlreadyExistsException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/NewerSchemaAlreadyExistsException.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/NewerSchemaAlreadyExistsException.java
new file mode 100644
index 0000000..b90845c
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/NewerSchemaAlreadyExistsException.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+public class NewerSchemaAlreadyExistsException extends SchemaAlreadyExistsException {
+    private static final long serialVersionUID = 1L;
+
+    public NewerSchemaAlreadyExistsException(String schemaName) {
+        super(schemaName);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
index 3adcb7e..6a710eb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.schema;
 
 import org.apache.phoenix.parse.PFunction;
+import org.apache.phoenix.parse.PSchema;
 import org.apache.phoenix.query.MetaDataMutated;
 
 public interface PMetaData extends MetaDataMutated, Iterable<PTable>, Cloneable {
@@ -33,4 +34,5 @@ public interface PMetaData extends MetaDataMutated, Iterable<PTable>, Cloneable
     public PFunction getFunction(PTableKey key) throws FunctionNotFoundException;
     public PMetaData pruneFunctions(Pruner pruner);
     public long getAge(PTableRef ref);
+    public PSchema getSchema(PTableKey key) throws SchemaNotFoundException;
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
index 413d116..67a2714 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
@@ -25,6 +25,8 @@ import java.util.Map;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.parse.PFunction;
+import org.apache.phoenix.parse.PSchema;
+import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TimeKeeper;
 
 import com.google.common.collect.Lists;
@@ -58,6 +60,7 @@ public class PMetaDataImpl implements PMetaData {
 
             private final Map<PTableKey,PTableRef> tables;
             private final Map<PTableKey,PFunction> functions;
+            private final Map<PTableKey,PSchema> schemas;
             
             private static Map<PTableKey,PTableRef> newMap(int expectedCapacity) {
                 // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
@@ -73,6 +76,13 @@ public class PMetaDataImpl implements PMetaData {
                 return Maps.newHashMapWithExpectedSize(expectedCapacity);
             }
 
+            private static Map<PTableKey,PSchema> newSchemaMap(int expectedCapacity) {
+                // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
+                // safely across multiple threads (as the underlying collection is not thread safe).
+                // Instead, we track access time and prune it based on the copy we've made.
+                return Maps.newHashMapWithExpectedSize(expectedCapacity);
+            }
+
             private static Map<PTableKey,PTableRef> cloneMap(Map<PTableKey,PTableRef> tables, int expectedCapacity) {
                 Map<PTableKey,PTableRef> newTables = newMap(Math.max(tables.size(),expectedCapacity));
                 // Copy value so that access time isn't changing anymore
@@ -82,6 +92,15 @@ public class PMetaDataImpl implements PMetaData {
                 return newTables;
             }
 
+            private static Map<PTableKey, PSchema> cloneSchemaMap(Map<PTableKey, PSchema> schemas, int expectedCapacity) {
+                Map<PTableKey, PSchema> newSchemas = newSchemaMap(Math.max(schemas.size(), expectedCapacity));
+                // Copy value so that access time isn't changing anymore
+                for (PSchema schema : schemas.values()) {
+                    newSchemas.put(schema.getSchemaKey(), new PSchema(schema));
+                }
+                return newSchemas;
+            }
+
             private static Map<PTableKey,PFunction> cloneFunctionsMap(Map<PTableKey,PFunction> functions, int expectedCapacity) {
                 Map<PTableKey,PFunction> newFunctions = newFunctionMap(Math.max(functions.size(),expectedCapacity));
                 for (PFunction functionAccess : functions.values()) {
@@ -97,6 +116,7 @@ public class PMetaDataImpl implements PMetaData {
                 this.expectedCapacity = toClone.expectedCapacity;
                 this.tables = cloneMap(toClone.tables, expectedCapacity);
                 this.functions = cloneFunctionsMap(toClone.functions, expectedCapacity);
+                this.schemas = cloneSchemaMap(toClone.schemas, expectedCapacity);
             }
             
             public PMetaDataCache(int initialCapacity, long maxByteSize, TimeKeeper timeKeeper) {
@@ -106,6 +126,7 @@ public class PMetaDataImpl implements PMetaData {
                 this.tables = newMap(this.expectedCapacity);
                 this.functions = newFunctionMap(this.expectedCapacity);
                 this.timeKeeper = timeKeeper;
+                this.schemas = newSchemaMap(this.expectedCapacity);
             }
             
             public PTableRef get(PTableKey key) {
@@ -317,7 +338,10 @@ public class PMetaDataImpl implements PMetaData {
     }
 
     @Override
-    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columnsToAdd, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows, boolean isWalDisabled, boolean isMultitenant, boolean storeNulls, boolean isTransactional, long updateCacheFrequency, long resolvedTime) throws SQLException {
+    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columnsToAdd, long tableTimeStamp,
+            long tableSeqNum, boolean isImmutableRows, boolean isWalDisabled, boolean isMultitenant, boolean storeNulls,
+            boolean isTransactional, long updateCacheFrequency, boolean isNamespaceMapped, long resolvedTime)
+                    throws SQLException {
         PTableRef oldTableRef = metaData.get(new PTableKey(tenantId, tableName));
         if (oldTableRef == null) {
             return this;
@@ -331,9 +355,9 @@ public class PMetaDataImpl implements PMetaData {
             newColumns.addAll(oldColumns);
             newColumns.addAll(columnsToAdd);
         }
-        PTable newTable = PTableImpl.makePTable(oldTableRef.getTable(),
-                tableTimeStamp, tableSeqNum, newColumns, isImmutableRows,
-                isWalDisabled, isMultitenant, storeNulls, isTransactional, updateCacheFrequency);
+        PTable newTable = PTableImpl.makePTable(oldTableRef.getTable(), tableTimeStamp, tableSeqNum, newColumns,
+                isImmutableRows, isWalDisabled, isMultitenant, storeNulls, isTransactional, updateCacheFrequency,
+                isNamespaceMapped);
         return addTable(newTable, resolvedTime);
     }
 
@@ -483,4 +507,23 @@ public class PMetaDataImpl implements PMetaData {
     public long getAge(PTableRef ref) {
         return this.metaData.getAge(ref);
     }
+
+    @Override
+    public PMetaData addSchema(PSchema schema) throws SQLException {
+        this.metaData.schemas.put(schema.getSchemaKey(), schema);
+        return this;
+    }
+
+    @Override
+    public PSchema getSchema(PTableKey key) throws SchemaNotFoundException {
+        PSchema schema = metaData.schemas.get(key);
+        if (schema == null) { throw new SchemaNotFoundException(key.getName()); }
+        return schema;
+    }
+
+    @Override
+    public PMetaData removeSchema(PSchema schema, long schemaTimeStamp) {
+        this.metaData.schemas.remove(SchemaUtil.getSchemaKey(schema.getSchemaName()));
+        return this;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index b2a1d58..84db752 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -305,6 +305,10 @@ public interface PTable extends PMetaDataEntity {
      */
     public List<PName> getPhysicalNames();
 
+    /**
+     * For a view, return the name of the table in HBase that physically stores its data.
+     * @return the name of the physical HBase table storing the data.
+     */
     PName getPhysicalName();
     boolean isImmutableRows();
 
@@ -341,4 +345,6 @@ public interface PTable extends PMetaDataEntity {
      */
     int getRowTimestampColPos();
     long getUpdateCacheFrequency();
+
+    boolean isNamespaceMapped();
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 5789263..665e77f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -66,8 +66,6 @@ import org.apache.phoenix.util.SizedUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TrustedByteArrayOutputStream;
 
-import co.cask.tephra.TxConstants;
-
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ArrayListMultimap;
@@ -80,6 +78,8 @@ import com.google.common.collect.Maps;
 import com.google.protobuf.HBaseZeroCopyByteString;
 import com.sun.istack.NotNull;
 
+import co.cask.tephra.TxConstants;
+
 /**
  *
  * Base class for PTable implementors.  Provides abstraction for
@@ -138,6 +138,7 @@ public class PTableImpl implements PTable {
     private boolean hasColumnsRequiringUpgrade; // TODO: remove when required that tables have been upgrade for PHOENIX-2067
     private int rowTimestampColPos;
     private long updateCacheFrequency;
+    private boolean isNamespaceMapped;
 
     public PTableImpl() {
         this.indexes = Collections.emptyList();
@@ -169,7 +170,7 @@ public class PTableImpl implements PTable {
     }
     
     public PTableImpl(PName tenantId, PName schemaName, PName tableName, long timestamp, List<PColumnFamily> families, 
-            List<PColumn> columns, List<PName> physicalNames, Short viewIndexId, boolean multiTenant) throws SQLException { // For indexes stored in shared physical tables
     }
 
     public PTableImpl(long timeStamp) { // For delete marker
@@ -226,7 +227,7 @@ public class PTableImpl implements PTable {
                 table.getSequenceNumber(), table.getPKName(), table.getBucketNum(), getColumnsToClone(table), parentSchemaName, table.getParentTableName(),
                 indexes, table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), viewStatement,
                 table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(),
-                table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
+                table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped());
     }
 
     public static PTableImpl makePTable(PTable table, List<PColumn> columns) throws SQLException {
@@ -235,7 +236,7 @@ public class PTableImpl implements PTable {
                 table.getSequenceNumber(), table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(),
                 table.getIndexes(), table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
                 table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(),
-                table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
+                table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped());
     }
 
     public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, List<PColumn> columns) throws SQLException {
@@ -244,7 +245,7 @@ public class PTableImpl implements PTable {
                 sequenceNumber, table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
                 table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(),
                 table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(),
-                table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
+                table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped());
     }
 
     public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, List<PColumn> columns, boolean isImmutableRows) throws SQLException {
@@ -253,17 +254,17 @@ public class PTableImpl implements PTable {
                 sequenceNumber, table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(),
                 table.getIndexes(), isImmutableRows, table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
                 table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(),
-                table.getIndexType(), table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
+                table.getIndexType(), table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped());
     }
     
     public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, List<PColumn> columns, boolean isImmutableRows, boolean isWalDisabled,
-            boolean isMultitenant, boolean storeNulls, boolean isTransactional, long updateCacheFrequency) throws SQLException {
+            boolean isMultitenant, boolean storeNulls, boolean isTransactional, long updateCacheFrequency, boolean isNamespaceMapped) throws SQLException {
         return new PTableImpl(
                 table.getTenantId(), table.getSchemaName(), table.getTableName(), table.getType(), table.getIndexState(), timeStamp,
                 sequenceNumber, table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(),
                 table.getIndexes(), isImmutableRows, table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
                 isWalDisabled, isMultitenant, storeNulls, table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(),
-                table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), isTransactional, updateCacheFrequency, table.getIndexDisableTimestamp());
+                table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), isTransactional, updateCacheFrequency, table.getIndexDisableTimestamp(), isNamespaceMapped);
     }
     
     public static PTableImpl makePTable(PTable table, PIndexState state) throws SQLException {
@@ -273,7 +274,7 @@ public class PTableImpl implements PTable {
                 table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
                 table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
                 table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(),
-                table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
+                table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped());
     }
 
     public static PTableImpl makePTable(PTable table, boolean rowKeyOrderOptimizable) throws SQLException {
@@ -283,7 +284,7 @@ public class PTableImpl implements PTable {
                 table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
                 table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
                 table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(),
-                table.getBaseColumnCount(), rowKeyOrderOptimizable, table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
+                table.getBaseColumnCount(), rowKeyOrderOptimizable, table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped());
     }
 
     public static PTableImpl makePTable(PTable table, PTableStats stats) throws SQLException {
@@ -293,7 +294,7 @@ public class PTableImpl implements PTable {
                 table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
                 table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
                 table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), stats,
-                table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
+                table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped());
     }
 
     public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tableName, PTableType type,
@@ -302,12 +303,12 @@ public class PTableImpl implements PTable {
             boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression,
             boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
             IndexType indexType, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency,
-            long indexDisableTimestamp) throws SQLException {
+            long indexDisableTimestamp, boolean isNamespaceMapped) throws SQLException {
         return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, dataSchemaName,
                 dataTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName,
                 viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId,
                 indexType, PTableStats.EMPTY_STATS, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT, rowKeyOrderOptimizable, isTransactional,
-                updateCacheFrequency,indexDisableTimestamp);
+                updateCacheFrequency,indexDisableTimestamp, isNamespaceMapped);
     }
 
     public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tableName, PTableType type,
@@ -316,12 +317,12 @@ public class PTableImpl implements PTable {
             boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression,
             boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
             IndexType indexType, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency,
-            @NotNull PTableStats stats, int baseColumnCount, long indexDisableTimestamp)
+            @NotNull PTableStats stats, int baseColumnCount, long indexDisableTimestamp, boolean isNamespaceMapped)
             throws SQLException {
         return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName,
                 bucketNum, columns, dataSchemaName, dataTableName, indexes, isImmutableRows, physicalNames,
                 defaultFamilyName, viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId,
-                indexType, stats, baseColumnCount, rowKeyOrderOptimizable, isTransactional, updateCacheFrequency, indexDisableTimestamp);
+                indexType, stats, baseColumnCount, rowKeyOrderOptimizable, isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped);
     }
 
     private PTableImpl(PName tenantId, PName schemaName, PName tableName, PTableType type, PIndexState state,
@@ -329,11 +330,11 @@ public class PTableImpl implements PTable {
             PName parentSchemaName, PName parentTableName, List<PTable> indexes, boolean isImmutableRows,
             List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL, boolean multiTenant,
             boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType,
-            PTableStats stats, int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, long indexDisableTimestamp) throws SQLException {
+            PTableStats stats, int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, long indexDisableTimestamp, boolean isNamespaceMapped) throws SQLException {
         init(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns,
                 stats, schemaName, parentTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName,
                 viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable,
-                isTransactional, updateCacheFrequency, indexDisableTimestamp);
+                isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped);
     }
     
     @Override
@@ -366,7 +367,7 @@ public class PTableImpl implements PTable {
             PName pkName, Integer bucketNum, List<PColumn> columns, PTableStats stats, PName parentSchemaName, PName parentTableName,
             List<PTable> indexes, boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL,
             boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
-            IndexType indexType , int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, long indexDisableTimestamp) throws SQLException {
+            IndexType indexType , int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, long indexDisableTimestamp, boolean isNamespaceMapped) throws SQLException {
         Preconditions.checkNotNull(schemaName);
         Preconditions.checkArgument(tenantId==null || tenantId.getBytes().length > 0); // tenantId should be null or not empty
         int estimatedSize = SizedUtil.OBJECT_SIZE * 2 + 23 * SizedUtil.POINTER_SIZE + 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE +
@@ -400,6 +401,7 @@ public class PTableImpl implements PTable {
         this.tableStats = stats;
         this.rowKeyOrderOptimizable = rowKeyOrderOptimizable;
         this.updateCacheFrequency = updateCacheFrequency;
+        this.isNamespaceMapped = isNamespaceMapped;
         List<PColumn> pkColumns;
         PColumn[] allColumns;
         
@@ -964,7 +966,8 @@ public class PTableImpl implements PTable {
 
     @Override
     public PName getPhysicalName() {
-        return physicalNames.isEmpty() ? getName() : physicalNames.get(0);
+        return SchemaUtil.getPhysicalHBaseTableName(physicalNames.isEmpty() ? getName() : physicalNames.get(0),
+                isNamespaceMapped, type);
     }
 
     @Override
@@ -1097,6 +1100,10 @@ public class PTableImpl implements PTable {
       if (table.hasUpdateCacheFrequency()) {
           updateCacheFrequency = table.getUpdateCacheFrequency();
       }
+      boolean isNamespaceMapped=false;
+      if (table.hasIsNamespaceMapped()) {
+          isNamespaceMapped = table.getIsNamespaceMapped();
+      }
       
       try {
         PTableImpl result = new PTableImpl();
@@ -1104,7 +1111,7 @@ public class PTableImpl implements PTable {
             (bucketNum == NO_SALTING) ? null : bucketNum, columns, stats, schemaName,dataTableName, indexes,
             isImmutableRows, physicalNames, defaultFamilyName, viewStatement, disableWAL,
             multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable,
-            isTransactional, updateCacheFrequency, indexDisableTimestamp);
+            isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped);
         return result;
       } catch (SQLException e) {
         throw new RuntimeException(e); // Impossible
@@ -1195,7 +1202,7 @@ public class PTableImpl implements PTable {
       builder.setRowKeyOrderOptimizable(table.rowKeyOrderOptimizable());
       builder.setUpdateCacheFrequency(table.getUpdateCacheFrequency());
       builder.setIndexDisableTimestamp(table.getIndexDisableTimestamp());
-      
+      builder.setIsNamespaceMapped(table.isNamespaceMapped());
       return builder.build();
     }
 
@@ -1232,4 +1239,9 @@ public class PTableImpl implements PTable {
     public int getRowTimestampColPos() {
         return rowTimestampColPos;
     }
+
+    @Override
+    public boolean isNamespaceMapped() {
+        return isNamespaceMapped;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java
index e3519ae..42699d9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java
@@ -17,6 +17,8 @@
  */
 package org.apache.phoenix.schema;
 
+import org.apache.phoenix.query.QueryConstants;
+
 import com.google.common.base.Preconditions;
 
 public class PTableKey {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/schema/SchemaAlreadyExistsException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/SchemaAlreadyExistsException.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/SchemaAlreadyExistsException.java
new file mode 100644
index 0000000..2fc5f78
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/SchemaAlreadyExistsException.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import java.sql.SQLException;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+
+
+/**
+ * 
+ * Exception thrown when a schema name already exists
+ *
+ * 
+ * @since 0.1
+ */
+public class SchemaAlreadyExistsException extends SQLException {
+    private static final long serialVersionUID = 1L;
+    private static SQLExceptionCode code = SQLExceptionCode.SCHEMA_ALREADY_EXISTS;
+    private final String schemaName;
+
+    public SchemaAlreadyExistsException(String schemaName) {
+        this(schemaName, null);
+    }
+
+    public SchemaAlreadyExistsException(String schemaName, String msg) {
+        super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setMessage(msg).build().toString(),
+                code.getSQLState(), code.getErrorCode());
+        this.schemaName = schemaName;
+
+    }
+
+    public String getSchemaName() {
+        return schemaName;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/schema/SchemaNotFoundException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/SchemaNotFoundException.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/SchemaNotFoundException.java
new file mode 100644
index 0000000..b7313d8
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/SchemaNotFoundException.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+
+public class SchemaNotFoundException extends MetaDataEntityNotFoundException {
+    private static final long serialVersionUID = 1L;
+    private static SQLExceptionCode code = SQLExceptionCode.SCHEMA_NOT_FOUND;
+    private final String schemaName;
+    private final long timestamp;
+
+    public SchemaNotFoundException(SchemaNotFoundException e, long timestamp) {
+        this(e.schemaName, timestamp);
+    }
+
+    public SchemaNotFoundException(String schemaName) {
+        this(schemaName, HConstants.LATEST_TIMESTAMP);
+    }
+
+    public SchemaNotFoundException(String schemaName, long timestamp) {
+        super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).build().toString(), code.getSQLState(),
+                code.getErrorCode(), null);
+        this.schemaName = schemaName;
+        this.timestamp = timestamp;
+    }
+
+    public String getSchemaName() {
+        return schemaName;
+    }
+
+    public long getTimeStamp() {
+        return timestamp;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java
index 3a0b9ad..30c560a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.SchemaUtil;
 
 import com.google.common.collect.Sets;
 
@@ -63,6 +64,10 @@ public class StatisticsCollectorFactory {
         DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME));
         DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME));
         DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME));
+        DISABLE_STATS.add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true));
+        DISABLE_STATS.add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES, true));
+        DISABLE_STATS.add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, true));
+        DISABLE_STATS.add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, true));
     }
     
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index 0dbbf5d..3bd3cef 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -28,7 +28,6 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -50,6 +49,7 @@ import org.apache.phoenix.schema.types.PDate;
 import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.PrefixByteDecoder;
+import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.TimeKeeper;
 
@@ -73,8 +73,8 @@ public class StatisticsWriter implements Closeable {
         if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
             clientTimeStamp = TimeKeeper.SYSTEM.getCurrentTime();
         }
-        HTableInterface statsWriterTable = env
-                .getTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES));
+        HTableInterface statsWriterTable = env.getTable(
+                SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, env.getConfiguration()));
         HTableInterface statsReaderTable = ServerUtil.getHTableForCoprocessorScan(env, statsWriterTable);
         StatisticsWriter statsTable = new StatisticsWriter(statsReaderTable, statsWriterTable, tableName,
                 clientTimeStamp);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 45d284d..2f56a93 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -285,7 +285,7 @@ public class IndexUtil {
                     byte[] regionStartKey = null;
                     byte[] regionEndkey = null;
                     if(maintainer.isLocalIndex()) {
-                        HRegionLocation tableRegionLocation = connection.getQueryServices().getTableRegionLocation(table.getName().getBytes(), dataMutation.getRow());
+                        HRegionLocation tableRegionLocation = connection.getQueryServices().getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow());
                         regionStartKey = tableRegionLocation.getRegionInfo().getStartKey();
                         regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
                     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
index cbe5a1a..a0672d4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
@@ -159,4 +159,9 @@ public class JDBCUtil {
         String batchSizeStr = findProperty(url, overrideProps, PhoenixRuntime.REQUEST_METRIC_ATTRIB);
         return (batchSizeStr == null ? queryServicesProps.getBoolean(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, QueryServicesOptions.DEFAULT_REQUEST_LEVEL_METRICS_ENABLED) : Boolean.parseBoolean(batchSizeStr));
     }
+
+    public static String getSchema(String url, Properties info, String defaultValue) {
+        String schema = findProperty(url, info, PhoenixRuntime.SCHEMA_ATTRIB);
+        return (schema == null || schema.equals("")) ? defaultValue : schema;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 5127f5b..df37ee0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -17,12 +17,22 @@
  */
 package org.apache.phoenix.util;
 
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
 import static org.apache.phoenix.util.SchemaUtil.getVarChars;
 
 import java.io.IOException;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
@@ -71,9 +81,7 @@ public class MetaDataUtil {
     private static final Logger logger = LoggerFactory.getLogger(MetaDataUtil.class);
   
     public static final String VIEW_INDEX_TABLE_PREFIX = "_IDX_";
-    public static final byte[] VIEW_INDEX_TABLE_PREFIX_BYTES = Bytes.toBytes(VIEW_INDEX_TABLE_PREFIX);
     public static final String LOCAL_INDEX_TABLE_PREFIX = "_LOCAL_IDX_";
-    public static final byte[] LOCAL_INDEX_TABLE_PREFIX_BYTES = Bytes.toBytes(LOCAL_INDEX_TABLE_PREFIX);
     public static final String VIEW_INDEX_SEQUENCE_PREFIX = "_SEQ_";
     public static final String VIEW_INDEX_SEQUENCE_NAME_PREFIX = "_ID_";
     public static final byte[] VIEW_INDEX_SEQUENCE_PREFIX_BYTES = Bytes.toBytes(VIEW_INDEX_SEQUENCE_PREFIX);
@@ -284,7 +292,7 @@ public class MetaDataUtil {
     }
     
     public static byte[] getViewIndexPhysicalName(byte[] physicalTableName) {
-        return ByteUtil.concat(VIEW_INDEX_TABLE_PREFIX_BYTES, physicalTableName);
+        return getIndexPhysicalName(physicalTableName, VIEW_INDEX_TABLE_PREFIX);
     }
 
     public static String getViewIndexTableName(String tableName) {
@@ -295,30 +303,55 @@ public class MetaDataUtil {
         return schemaName;
     }
 
+    public static byte[] getIndexPhysicalName(byte[] physicalTableName, String indexPrefix) {
+        return getIndexPhysicalName(Bytes.toString(physicalTableName), indexPrefix).getBytes();
+    }
+
+    public static String getIndexPhysicalName(String physicalTableName, String indexPrefix) {
+        if (physicalTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) {
+            String schemaName = SchemaUtil.getSchemaNameFromFullName(physicalTableName,
+                    QueryConstants.NAMESPACE_SEPARATOR);
+            String tableName = SchemaUtil.getTableNameFromFullName(physicalTableName,
+                    QueryConstants.NAMESPACE_SEPARATOR);
+            return (schemaName + QueryConstants.NAMESPACE_SEPARATOR + indexPrefix + tableName);
+        }
+        return indexPrefix + physicalTableName;
+    }
+
     public static byte[] getLocalIndexPhysicalName(byte[] physicalTableName) {
-        return ByteUtil.concat(LOCAL_INDEX_TABLE_PREFIX_BYTES, physicalTableName);
+        return getIndexPhysicalName(physicalTableName, LOCAL_INDEX_TABLE_PREFIX);
     }
-    
+
     public static String getLocalIndexTableName(String tableName) {
         return LOCAL_INDEX_TABLE_PREFIX + tableName;
     }
-    
+
     public static String getLocalIndexSchemaName(String schemaName) {
         return schemaName;
     }  
 
     public static String getUserTableName(String localIndexTableName) {
-        String schemaName = SchemaUtil.getSchemaNameFromFullName(localIndexTableName);
-        if(!schemaName.isEmpty()) schemaName = schemaName.substring(LOCAL_INDEX_TABLE_PREFIX.length());
-        String tableName = localIndexTableName.substring((schemaName.isEmpty() ? 0 : (schemaName.length() + QueryConstants.NAME_SEPARATOR.length()))
-            + LOCAL_INDEX_TABLE_PREFIX.length());
-        return SchemaUtil.getTableName(schemaName, tableName);
+        if (localIndexTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) {
+            String schemaName = SchemaUtil.getSchemaNameFromFullName(localIndexTableName,
+                    QueryConstants.NAMESPACE_SEPARATOR);
+            String tableName = SchemaUtil.getTableNameFromFullName(localIndexTableName,
+                    QueryConstants.NAMESPACE_SEPARATOR);
+            String userTableName = tableName.substring(LOCAL_INDEX_TABLE_PREFIX.length());
+            return (schemaName + QueryConstants.NAMESPACE_SEPARATOR + userTableName);
+        } else {
+            String schemaName = SchemaUtil.getSchemaNameFromFullName(localIndexTableName);
+            if (!schemaName.isEmpty()) schemaName = schemaName.substring(LOCAL_INDEX_TABLE_PREFIX.length());
+            String tableName = localIndexTableName.substring(
+                    (schemaName.isEmpty() ? 0 : (schemaName.length() + QueryConstants.NAME_SEPARATOR.length()))
+                            + LOCAL_INDEX_TABLE_PREFIX.length());
+            return SchemaUtil.getTableName(schemaName, tableName);
+        }
     }
 
     public static String getViewIndexSchemaName(PName physicalName) {
         return VIEW_INDEX_SEQUENCE_PREFIX + physicalName.getString();
     }
-    
+ 
     public static SequenceKey getViewIndexSequenceKey(String tenantId, PName physicalName, int nSaltBuckets) {
         // Create global sequence of the form: <prefixed base table name><tenant id>
         // rather than tenant-specific sequence, as it makes it much easier
@@ -337,15 +370,12 @@ public class MetaDataUtil {
         return VIEW_INDEX_ID_COLUMN_NAME;
     }
 
-    public static boolean hasViewIndexTable(PhoenixConnection connection, PName name) throws SQLException {
-        return hasViewIndexTable(connection, name.getBytes());
+    public static boolean hasViewIndexTable(PhoenixConnection connection, PName physicalName) throws SQLException {
+        return hasViewIndexTable(connection, physicalName.getBytes());
     }
-    
-    public static boolean hasViewIndexTable(PhoenixConnection connection, String schemaName, String tableName) throws SQLException {
-        return hasViewIndexTable(connection, SchemaUtil.getTableNameAsBytes(schemaName, tableName));
-    }
-    
-    public static boolean hasViewIndexTable(PhoenixConnection connection, byte[] physicalTableName) throws SQLException {
+
+    public static boolean hasViewIndexTable(PhoenixConnection connection, byte[] physicalTableName)
+            throws SQLException {
         byte[] physicalIndexName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName);
         try {
             HTableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalIndexName);
@@ -355,12 +385,8 @@ public class MetaDataUtil {
         }
     }
 
-    public static boolean hasLocalIndexTable(PhoenixConnection connection, PName name) throws SQLException {
-        return hasLocalIndexTable(connection, name.getBytes());
-    }
-
-    public static boolean hasLocalIndexTable(PhoenixConnection connection, String schemaName, String tableName) throws SQLException {
-        return hasLocalIndexTable(connection, SchemaUtil.getTableNameAsBytes(schemaName, tableName));
+    public static boolean hasLocalIndexTable(PhoenixConnection connection, PName physicalName) throws SQLException {
+        return hasLocalIndexTable(connection, physicalName.getBytes());
     }
 
     public static boolean hasLocalIndexTable(PhoenixConnection connection, byte[] physicalTableName) throws SQLException {
@@ -436,6 +462,10 @@ public class MetaDataUtil {
     public static final String IS_LOCAL_INDEX_TABLE_PROP_NAME = "IS_LOCAL_INDEX_TABLE";
     public static final byte[] IS_LOCAL_INDEX_TABLE_PROP_BYTES = Bytes.toBytes(IS_LOCAL_INDEX_TABLE_PROP_NAME);
 
+    private static final String GET_VIEWS_QUERY = "SELECT " + TABLE_SCHEM + "," + TABLE_NAME + " FROM "
+            + SYSTEM_CATALOG_SCHEMA + "." + SYSTEM_CATALOG_TABLE + " WHERE " + COLUMN_FAMILY + " = ? AND " + LINK_TYPE
+            + " = " + LinkType.PHYSICAL_TABLE.getSerializedValue();
+
     public static Scan newTableRowsScan(byte[] key, long startTimeStamp, long stopTimeStamp){
         return newTableRowsScan(key, null, startTimeStamp, stopTimeStamp);
     }
@@ -465,4 +495,28 @@ public class MetaDataUtil {
         }
         return null;
     }
+
+    public static boolean isLocalIndex(String physicalName) {
+        if (physicalName.contains(LOCAL_INDEX_TABLE_PREFIX)) { return true; }
+        return false;
+    }
+
+    public static boolean isViewIndex(String physicalName) {
+        if (physicalName.contains(QueryConstants.NAMESPACE_SEPARATOR)) {
+            return SchemaUtil.getTableNameFromFullName(physicalName).startsWith(VIEW_INDEX_TABLE_PREFIX);
+        } else {
+            return physicalName.startsWith(VIEW_INDEX_TABLE_PREFIX);
+        }
+    }
+
+    public static Set<String> getViewNames(PhoenixConnection conn, String table) throws SQLException {
+        Set<String> viewNames = new HashSet<String>();
+        PreparedStatement preparedStatment = conn.prepareStatement(GET_VIEWS_QUERY);
+        preparedStatment.setString(1, SchemaUtil.normalizeIdentifier(table));
+        ResultSet rs = preparedStatment.executeQuery();
+        while (rs.next()) {
+            viewNames.add(SchemaUtil.getTableName(rs.getString(1), rs.getString(2)));
+        }
+        return viewNames;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e432be7/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index 3117fa9..f83317e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -51,6 +51,7 @@ import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
 import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -214,28 +215,34 @@ public class PhoenixRuntime {
         PhoenixConnection conn = null;
         try {
             Properties props = new Properties();
-            conn = DriverManager.getConnection(jdbcUrl, props)
-                    .unwrap(PhoenixConnection.class);
-
-            if (execCmd.isUpgrade()) {
-                if (conn.getClientInfo(PhoenixRuntime.CURRENT_SCN_ATTRIB) != null) {
-                    throw new SQLException("May not specify the CURRENT_SCN property when upgrading");
-                }
-                if (conn.getClientInfo(PhoenixRuntime.TENANT_ID_ATTRIB) != null) {
-                    throw new SQLException("May not specify the TENANT_ID_ATTRIB property when upgrading");
+            conn = DriverManager.getConnection(jdbcUrl, props).unwrap(PhoenixConnection.class);
+            if (execCmd.isMapNamespace()) {
+                String srcTable = execCmd.getSrcTable();
+                UpgradeUtil.upgradeTable(conn, srcTable);
+                Set<String> viewNames = MetaDataUtil.getViewNames(conn, srcTable);
+                System.out.println("Views found:"+viewNames);
+                for (String viewName : viewNames) {
+                    UpgradeUtil.upgradeTable(conn, viewName);
                 }
+            } else if (execCmd.isUpgrade()) {
+                if (conn.getClientInfo(PhoenixRuntime.CURRENT_SCN_ATTRIB) != null) { throw new SQLException(
+                        "May not specify the CURRENT_SCN property when upgrading"); }
+                if (conn.getClientInfo(PhoenixRuntime.TENANT_ID_ATTRIB) != null) { throw new SQLException(
+                        "May not specify the TENANT_ID_ATTRIB property when upgrading"); }
                 if (execCmd.getInputFiles().isEmpty()) {
                     List<String> tablesNeedingUpgrade = UpgradeUtil.getPhysicalTablesWithDescRowKey(conn);
                     if (tablesNeedingUpgrade.isEmpty()) {
                         String msg = "No tables are required to be upgraded due to incorrect row key order (PHOENIX-2067 and PHOENIX-2120)";
                         System.out.println(msg);
                     } else {
-                        String msg = "The following tables require upgrade due to a bug causing the row key to be incorrectly ordered (PHOENIX-2067 and PHOENIX-2120):\n" + Joiner.on(' ').join(tablesNeedingUpgrade);
+                        String msg = "The following tables require upgrade due to a bug causing the row key to be incorrectly ordered (PHOENIX-2067 and PHOENIX-2120):\n"
+                                + Joiner.on(' ').join(tablesNeedingUpgrade);
                         System.out.println("WARNING: " + msg);
                     }
                     List<String> unsupportedTables = UpgradeUtil.getPhysicalTablesWithDescVarbinaryRowKey(conn);
                     if (!unsupportedTables.isEmpty()) {
-                        String msg = "The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n" + Joiner.on(' ').join(unsupportedTables);
+                        String msg = "The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n"
+                                + Joiner.on(' ').join(unsupportedTables);
                         System.out.println("WARNING: " + msg);
                     }
                 } else {
@@ -244,21 +251,18 @@ public class PhoenixRuntime {
             } else {
                 for (String inputFile : execCmd.getInputFiles()) {
                     if (inputFile.endsWith(SQL_FILE_EXT)) {
-                        PhoenixRuntime.executeStatements(conn,
-                                new FileReader(inputFile), Collections.emptyList());
+                        PhoenixRuntime.executeStatements(conn, new FileReader(inputFile), Collections.emptyList());
                     } else if (inputFile.endsWith(CSV_FILE_EXT)) {
-    
+
                         String tableName = execCmd.getTableName();
                         if (tableName == null) {
                             tableName = SchemaUtil.normalizeIdentifier(
                                     inputFile.substring(inputFile.lastIndexOf(File.separatorChar) + 1,
                                             inputFile.length() - CSV_FILE_EXT.length()));
                         }
-                        CSVCommonsLoader csvLoader =
-                                new CSVCommonsLoader(conn, tableName, execCmd.getColumns(),
-                                        execCmd.isStrict(), execCmd.getFieldDelimiter(),
-                                        execCmd.getQuoteCharacter(), execCmd.getEscapeCharacter(),
-                                        execCmd.getArrayElementSeparator());
+                        CSVCommonsLoader csvLoader = new CSVCommonsLoader(conn, tableName, execCmd.getColumns(),
+                                execCmd.isStrict(), execCmd.getFieldDelimiter(), execCmd.getQuoteCharacter(),
+                                execCmd.getEscapeCharacter(), execCmd.getArrayElementSeparator());
                         csvLoader.upsert(inputFile);
                     }
                 }
@@ -279,6 +283,7 @@ public class PhoenixRuntime {
     }
 
     public static final String PHOENIX_TEST_DRIVER_URL_PARAM = "test=true";
+    public static final String SCHEMA_ATTRIB = "schema";
 
     private PhoenixRuntime() {
     }
@@ -513,6 +518,8 @@ public class PhoenixRuntime {
         private List<String> inputFiles;
         private boolean isUpgrade;
         private boolean isBypassUpgrade;
+        private boolean mapNamespace;
+        private String srcTable;
 
         /**
          * Factory method to build up an {@code ExecutionCommand} based on supplied parameters.
@@ -551,6 +558,9 @@ public class PhoenixRuntime {
                     "This would only be the case if you have not relied on auto padding for BINARY and CHAR data, " +
                     "but instead have always provided data up to the full max length of the column. See PHOENIX-2067 " +
                     "and PHOENIX-2120 for more information. ");
+            Option mapNamespaceOption = new Option("m", "map-namespace", true,
+                    "Used to map table to a namespace matching with schema, require "+ QueryServices.IS_NAMESPACE_MAPPING_ENABLED +
+                    " to be enabled");
             Options options = new Options();
             options.addOption(tableOption);
             options.addOption(headerOption);
@@ -561,6 +571,7 @@ public class PhoenixRuntime {
             options.addOption(arrayValueSeparatorOption);
             options.addOption(upgradeOption);
             options.addOption(bypassUpgradeOption);
+            options.addOption(mapNamespaceOption);
 
             CommandLineParser parser = new PosixParser();
             CommandLine cmdLine = null;
@@ -571,7 +582,10 @@ public class PhoenixRuntime {
             }
 
             ExecutionCommand execCmd = new ExecutionCommand();
-
+            if(cmdLine.hasOption(mapNamespaceOption.getOpt())){
+                execCmd.mapNamespace = true;
+                execCmd.srcTable = validateTableName(cmdLine.getOptionValue(mapNamespaceOption.getOpt()));
+            }
             if (cmdLine.hasOption(tableOption.getOpt())) {
                 execCmd.tableName = cmdLine.getOptionValue(tableOption.getOpt());
             }
@@ -627,7 +641,7 @@ public class PhoenixRuntime {
                 }
             }
 
-            if (inputFiles.isEmpty() && !execCmd.isUpgrade) {
+            if (inputFiles.isEmpty() && !execCmd.isUpgrade && !execCmd.isMapNamespace()) {
                 usageError("At least one input file must be supplied", options);
             }
 
@@ -636,6 +650,16 @@ public class PhoenixRuntime {
             return execCmd;
         }
 
+        private static String validateTableName(String tableName) {
+            if (tableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) {
+                throw new IllegalArgumentException(
+                        "tablename:" + tableName + " cannot have '" + QueryConstants.NAMESPACE_SEPARATOR + "' ");
+            } else {
+                return tableName;
+            }
+
+        }
+
         private static char getCharacter(String s) {
             String unescaped = StringEscapeUtils.unescapeJava(s);
             if (unescaped.length() > 1) {
@@ -708,6 +732,14 @@ public class PhoenixRuntime {
         public boolean isBypassUpgrade() {
             return isBypassUpgrade;
         }
+
+        public boolean isMapNamespace() {
+            return mapNamespace;
+        }
+
+        public String getSrcTable() {
+            return srcTable;
+        }
     }
     
     /**
@@ -1192,4 +1224,9 @@ public class PhoenixRuntime {
     public static long getWallClockTimeFromCellTimeStamp(long tsOfCell) {
         return TxUtils.isPreExistingVersion(tsOfCell) ? tsOfCell : TransactionUtil.convertToMilliseconds(tsOfCell);
     }
+
+    public static long getCurrentScn(ReadOnlyProps props) {
+        String scn = props.get(CURRENT_SCN_ATTRIB);
+        return scn != null ? Long.parseLong(scn) : HConstants.LATEST_TIMESTAMP;
+    }
  }


Mime
View raw message