phoenix-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From tdsi...@apache.org
Subject [2/2] phoenix git commit: PHOENIX-2795 Support auto partition for views
Date Fri, 29 Apr 2016 20:23:49 GMT
PHOENIX-2795 Support auto partition for views


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/13f38ca9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/13f38ca9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/13f38ca9

Branch: refs/heads/master
Commit: 13f38ca9c1170289fcbcf0a7d8caeeaf5fdfe873
Parents: 7b7f3f6
Author: Thomas D'Silva <tdsilva@salesforce.com>
Authored: Fri Apr 22 13:41:41 2016 -0700
Committer: Thomas D'Silva <tdsilva@salesforce.com>
Committed: Fri Apr 29 13:09:18 2016 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/AutoPartitionViewsIT.java   | 371 +++++++++++++++++++
 .../apache/phoenix/compile/FromCompiler.java    |   2 +-
 .../apache/phoenix/compile/JoinCompiler.java    |   2 +-
 .../compile/TupleProjectionCompiler.java        |   4 +-
 .../apache/phoenix/compile/UnionCompiler.java   |   2 +-
 .../coprocessor/MetaDataEndpointImpl.java       |  92 ++++-
 .../phoenix/coprocessor/MetaDataProtocol.java   |  11 +
 .../coprocessor/generated/MetaDataProtos.java   | 268 ++++++++++----
 .../coprocessor/generated/PTableProtos.java     | 178 ++++++++-
 .../phoenix/exception/SQLExceptionCode.java     |   9 +
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   3 +
 .../apache/phoenix/query/QueryConstants.java    |   2 +
 .../apache/phoenix/schema/DelegateTable.java    |   4 +
 .../apache/phoenix/schema/MetaDataClient.java   | 110 +++++-
 .../java/org/apache/phoenix/schema/PTable.java  |   7 +
 .../org/apache/phoenix/schema/PTableImpl.java   |  51 ++-
 .../apache/phoenix/schema/TableProperty.java    |   2 +
 .../org/apache/phoenix/util/MetaDataUtil.java   |  42 ++-
 .../java/org/apache/phoenix/util/QueryUtil.java |   7 +-
 .../phoenix/execute/CorrelatePlanTest.java      |   2 +-
 .../execute/LiteralResultIteratorPlanTest.java  |   2 +-
 phoenix-protocol/src/main/MetaDataService.proto |   3 +
 phoenix-protocol/src/main/PTable.proto          |   1 +
 23 files changed, 1041 insertions(+), 134 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/it/java/org/apache/phoenix/end2end/AutoPartitionViewsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AutoPartitionViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AutoPartitionViewsIT.java
new file mode 100644
index 0000000..2b3f932
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AutoPartitionViewsIT.java
@@ -0,0 +1,371 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.schema.ColumnNotFoundException;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.SequenceNotFoundException;
+import org.apache.phoenix.schema.TableAlreadyExistsException;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+@RunWith(Parameterized.class)
+public class AutoPartitionViewsIT extends BaseHBaseManagedTimeIT {
+
+    private String tableDDLOptions;
+    private boolean isMultiTenant;
+    private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + PhoenixRuntime.TENANT_ID_ATTRIB
+            + "=tenant1";
+    private final String TENANT_SPECIFIC_URL2 = getUrl() + ';' + PhoenixRuntime.TENANT_ID_ATTRIB
+            + "=tenant2";
+
+    @Parameters(name = "salted = {0}, multi-tenant = {1}")
+    public static Collection<Boolean[]> data() {
+        return Arrays.asList(new Boolean[][] { { false, false }, { false, true }, { true, false },
+                { true, true } });
+    }
+
+    public AutoPartitionViewsIT(boolean salted, boolean isMultiTenant) {
+        this.isMultiTenant = isMultiTenant;
+        StringBuilder optionBuilder = new StringBuilder(" AUTO_PARTITION_SEQ=metric_id_seq");
+        if (salted) optionBuilder.append(", SALTED=4 ");
+        if (isMultiTenant) optionBuilder.append(", MULTI_TENANT=true ");
+        this.tableDDLOptions = optionBuilder.toString();
+    }
+
+    @Test
+    public void testValidateAttributes() throws SQLException {
+        try (Connection conn = DriverManager.getConnection(getUrl());
+                Connection viewConn1 =
+                        isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1)
+                                : DriverManager.getConnection(getUrl());
+                Connection viewConn2 =
+                        isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1)
+                                : DriverManager.getConnection(getUrl())) {
+            try {
+                String ddl =
+                        String.format(
+                            "CREATE TABLE metric_table (%s metricId VARCHAR, val1 DOUBLE, val2 DOUBLE CONSTRAINT PK PRIMARY KEY( %s metricId)) %s",
+                                isMultiTenant ? "tenantId VARCHAR, " : "", 
+                                isMultiTenant ? "tenantId, ": "", 
+                                tableDDLOptions);
+                conn.createStatement().execute(ddl);
+                fail("Sequence value must be castable to the auto partition id column data type");
+            } catch (SQLException e) {
+                assertEquals(
+                    SQLExceptionCode.SEQUENCE_NOT_CASTABLE_TO_AUTO_PARTITION_ID_COLUMN
+                            .getErrorCode(),
+                    e.getErrorCode());
+            }
+
+            String ddl =
+                    String.format(
+                        "CREATE TABLE metric_table (%s metricId INTEGER NOT NULL, val1 DOUBLE, val2 DOUBLE CONSTRAINT PK PRIMARY KEY( %s metricId)) %s",
+                            isMultiTenant ? "tenantId VARCHAR NOT NULL, " : "", 
+                            isMultiTenant ? "tenantId, ": "", 
+                            tableDDLOptions);
+            conn.createStatement().execute(ddl);
+            
+            try {
+                viewConn1.createStatement().execute(
+                    "CREATE VIEW metric1 AS SELECT * FROM metric_table");
+                fail("Auto-partition sequence must be created before view is created");
+            } catch (SequenceNotFoundException e) {
+            }
+
+            conn.createStatement().execute(
+                "CREATE SEQUENCE metric_id_seq start with " + (Integer.MAX_VALUE-2) + " cache 1");
+            viewConn1.createStatement().execute(
+                "CREATE VIEW metric1 AS SELECT * FROM metric_table WHERE val2=1.2");
+            // create a view without a where clause
+            viewConn1.createStatement().execute(
+                    "CREATE VIEW metric2 AS SELECT * FROM metric_table");
+            // create a view with a complex where clause
+            viewConn1.createStatement().execute(
+                "CREATE VIEW metric3 AS SELECT * FROM metric_table WHERE val1=1.0 OR val2=2.0");
+
+            try {
+                viewConn1.createStatement().execute(
+                    "CREATE VIEW metric4 AS SELECT * FROM metric_table");
+                fail("Creating a view with a partition id that is too large should fail");
+            } catch (SQLException e) {
+                assertEquals(SQLExceptionCode.CANNOT_COERCE_AUTO_PARTITION_ID.getErrorCode(),
+                    e.getErrorCode());
+            }
+
+            if (isMultiTenant) {
+                // load tables into cache
+                viewConn1.createStatement().execute("SELECT * FROM METRIC1");
+                viewConn1.createStatement().execute("SELECT * FROM METRIC2");
+                viewConn1.createStatement().execute("SELECT * FROM METRIC3");
+            }
+            PhoenixConnection pconn = viewConn1.unwrap(PhoenixConnection.class);
+            PTable view1 = pconn.getTable(new PTableKey(pconn.getTenantId(), "METRIC1"));
+            PTable view2 = pconn.getTable(new PTableKey(pconn.getTenantId(), "METRIC2"));
+            PTable view3 = pconn.getTable(new PTableKey(pconn.getTenantId(), "METRIC3"));
+            
+            // verify the view statement was set correctly 
+            String expectedViewStatement1 =
+                    "SELECT * FROM \"METRIC_TABLE\" WHERE VAL2 = 1.2 AND METRICID = "
+                            + (Integer.MAX_VALUE - 2);
+            String expectedViewStatement2 =
+                    "SELECT * FROM \"METRIC_TABLE\" WHERE METRICID = " + (Integer.MAX_VALUE - 1);
+            String expectedViewStatement3 =
+                    "SELECT * FROM \"METRIC_TABLE\" WHERE (VAL1 = 1.0 OR VAL2 = 2.0) AND METRICID = " + Integer.MAX_VALUE;
+            assertEquals("Unexpected view statement", expectedViewStatement1,
+                view1.getViewStatement());
+            assertEquals("Unexpected view statement", expectedViewStatement2,
+                view2.getViewStatement());
+            assertEquals("Unexpected view statement", expectedViewStatement3,
+                view3.getViewStatement());
+            // verify isViewReferenced was set correctly
+            int expectedParitionColIndex = isMultiTenant ? 1 : 0;
+            PColumn partitionCol1 = view1.getColumns().get(expectedParitionColIndex);
+            PColumn partitionCol2 = view2.getColumns().get(expectedParitionColIndex);
+            PColumn partitionCol3 = view3.getColumns().get(expectedParitionColIndex);
+            assertTrue("Partition column view referenced attribute should be true ",
+                partitionCol1.isViewReferenced());
+            assertTrue("Partition column view referenced attribute should be true ",
+                partitionCol2.isViewReferenced());
+            assertTrue("Partition column view referenced attribute should be true ",
+                partitionCol3.isViewReferenced());
+            // verify viewConstant was set correctly
+            byte[] expectedPartition1 = new byte[Bytes.SIZEOF_LONG + 1];
+            PInteger.INSTANCE.toBytes(Integer.MAX_VALUE - 2, expectedPartition1, 0);
+            byte[] expectedPartition2 = new byte[Bytes.SIZEOF_LONG + 1];
+            PInteger.INSTANCE.toBytes(Integer.MAX_VALUE - 1, expectedPartition2, 0);
+            byte[] expectedPartition3 = new byte[Bytes.SIZEOF_LONG + 1];
+            PInteger.INSTANCE.toBytes(Integer.MAX_VALUE, expectedPartition3, 0);
+            assertArrayEquals("Unexpected Partition column view constant attribute",
+                expectedPartition1, partitionCol1.getViewConstant());
+            assertArrayEquals("Unexpected Partition column view constant attribute",
+                expectedPartition2, partitionCol2.getViewConstant());
+            assertArrayEquals("Unexpected Partition column view constant attribute",
+                expectedPartition3, partitionCol3.getViewConstant());
+
+            // verify that the view metadata was persisted correctly on the server
+            viewConn2.createStatement().execute("SELECT * FROM METRIC1");
+            viewConn2.createStatement().execute("SELECT * FROM METRIC2");
+            viewConn2.createStatement().execute("SELECT * FROM METRIC3");
+            pconn = viewConn2.unwrap(PhoenixConnection.class);
+            view1 = pconn.getTable(new PTableKey(pconn.getTenantId(), "METRIC1"));
+            view2 = pconn.getTable(new PTableKey(pconn.getTenantId(), "METRIC2"));
+            view3 = pconn.getTable(new PTableKey(pconn.getTenantId(), "METRIC3"));
+            
+            // verify the view statement was set correctly 
+            assertEquals("Unexpected view statement", expectedViewStatement1,
+                view1.getViewStatement());
+            assertEquals("Unexpected view statement", expectedViewStatement2,
+                view2.getViewStatement());
+            assertEquals("Unexpected view statement", expectedViewStatement3,
+                view3.getViewStatement());
+            // verify isViewReferenced was set correctly
+            partitionCol1 = view1.getColumns().get(expectedParitionColIndex);
+            partitionCol2 = view2.getColumns().get(expectedParitionColIndex);
+            partitionCol3 = view3.getColumns().get(expectedParitionColIndex);
+            assertTrue("Partition column view referenced attribute should be true ",
+                partitionCol1.isViewReferenced());
+            assertTrue("Partition column view referenced attribute should be true ",
+                partitionCol2.isViewReferenced());
+            assertTrue("Partition column view referenced attribute should be true ",
+                partitionCol3.isViewReferenced());
+            // verify viewConstant was set correctly
+            assertArrayEquals("Unexpected Partition column view constant attribute",
+                expectedPartition1, partitionCol1.getViewConstant());
+            assertArrayEquals("Unexpected Partition column view constant attribute",
+                expectedPartition2, partitionCol2.getViewConstant());
+            assertArrayEquals("Unexpected Partition column view constant attribute",
+                expectedPartition3, partitionCol3.getViewConstant());
+        }
+    }
+
+    @Test
+    public void testViewCreationFailure() throws SQLException {
+        try (Connection conn = DriverManager.getConnection(getUrl());
+                Connection viewConn1 =
+                        isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1)
+                                : DriverManager.getConnection(getUrl());
+                Connection viewConn2 =
+                        isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL2)
+                                : DriverManager.getConnection(getUrl())) {
+            String ddl =
+                    String.format(
+                        "CREATE TABLE hbase.metric_table (%s metricId INTEGER NOT NULL, val1 DOUBLE, val2 DOUBLE CONSTRAINT PK PRIMARY KEY( %s metricId)) %s",
+                            isMultiTenant ? "tenantId VARCHAR NOT NULL, " : "", 
+                            isMultiTenant ? "tenantId, ": "", 
+                            tableDDLOptions);
+            conn.createStatement().execute(ddl);
+            conn.createStatement().execute("CREATE SEQUENCE hbase.metric_id_seq CACHE 1");
+            // create a view
+            viewConn1.createStatement().execute(
+                "CREATE VIEW metric1 AS SELECT * FROM hbase.metric_table WHERE val2=1.2");
+            try {
+                // create the same view which should fail
+                viewConn1.createStatement()
+                        .execute("CREATE VIEW metric1 AS SELECT * FROM hbase.metric_table");
+                fail("view should already exist");
+            } catch (TableAlreadyExistsException e) {
+            }
+
+            // create a second view (without a where clause)
+            viewConn2.createStatement().execute(
+                "CREATE VIEW metric2 AS SELECT * FROM hbase.metric_table");
+
+            // upsert a row into each view
+            viewConn1.createStatement().execute("UPSERT INTO metric1(val1) VALUES(1.1)");
+            viewConn1.commit();
+            viewConn2.createStatement().execute("UPSERT INTO metric2(val1,val2) VALUES(2.1,2.2)");
+            viewConn2.commit();
+
+            // query the base table
+            ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM hbase.metric_table");
+            assertTrue(rs.next());
+            int offset = 0;
+            if (isMultiTenant) {
+                assertEquals("tenant1", rs.getString(1));
+                offset = 1;
+            }
+            assertEquals(1, rs.getInt(1+offset));
+            assertEquals(1.1, rs.getDouble(2+offset), 1e-6);
+            assertEquals(1.2, rs.getDouble(3+offset), 1e-6);
+            assertTrue(rs.next());
+            // validate that the auto partition sequence was not incremented by the earlier failed (duplicate) view creation
+            if (isMultiTenant) {
+                assertEquals("tenant2", rs.getString(1));
+            }
+            assertEquals(2, rs.getInt(1+offset));
+            assertEquals(2.1, rs.getDouble(2+offset), 1e-6);
+            assertEquals(2.2, rs.getDouble(3+offset), 1e-6);
+            assertFalse(rs.next());
+
+            // query the first view
+            rs = viewConn1.createStatement().executeQuery("SELECT * FROM metric1");
+            assertTrue(rs.next());
+            assertEquals(1, rs.getInt(1));
+            assertEquals(1.1, rs.getDouble(2), 1e-6);
+            assertEquals(1.2, rs.getDouble(3), 1e-6);
+            assertFalse(rs.next());
+
+            // query the second view
+            rs = viewConn2.createStatement().executeQuery("SELECT * FROM metric2");
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+            assertEquals(2.1, rs.getDouble(2), 1e-6);
+            assertEquals(2.2, rs.getDouble(3), 1e-6);
+            assertFalse(rs.next());
+        }
+    }
+    
+    @Test
+    public void testAddDropColumns() throws SQLException {
+        try (Connection conn = DriverManager.getConnection(getUrl());
+                Connection viewConn1 =
+                        isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1)
+                                : DriverManager.getConnection(getUrl())) {
+            String ddl =
+                    String.format(
+                        "CREATE TABLE hbase.metric_table (%s metricId INTEGER NOT NULL, val1 DOUBLE, CONSTRAINT PK PRIMARY KEY( %s metricId)) %s",
+                            isMultiTenant ? "tenantId VARCHAR NOT NULL, " : "", 
+                            isMultiTenant ? "tenantId, ": "", 
+                            tableDDLOptions);
+            conn.createStatement().execute(ddl);
+            conn.createStatement().execute("CREATE SEQUENCE hbase.metric_id_seq CACHE 1");
+            // create a view
+            viewConn1.createStatement().execute(
+                "CREATE VIEW metric1 AS SELECT * FROM hbase.metric_table");
+            
+            // add a column to the base table
+            conn.createStatement().execute(
+                    "ALTER TABLE hbase.metric_table add val2 DOUBLE");
+            
+            // add a column to the view
+            viewConn1.createStatement().execute(
+                    "ALTER VIEW metric1 add val3 DOUBLE");
+
+            // upsert a row into the view
+            viewConn1.createStatement().execute("UPSERT INTO metric1(val1,val2,val3) VALUES(1.1,1.2,1.3)");
+            viewConn1.commit();
+
+            // query the base table
+            ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM hbase.metric_table");
+            assertTrue(rs.next());
+            int offset = 0;
+            if (isMultiTenant) {
+                assertEquals("tenant1", rs.getString(1));
+                offset = 1;
+            }
+            assertEquals(1, rs.getInt(1+offset));
+            assertEquals(1.1, rs.getDouble(2+offset), 1e-6);
+            assertEquals(1.2, rs.getDouble(3+offset), 1e-6);
+            assertFalse(rs.next());
+            
+            // query the view
+            rs = viewConn1.createStatement().executeQuery("SELECT * FROM metric1");
+            assertTrue(rs.next());
+            assertEquals(1, rs.getInt(1));
+            assertEquals(1.1, rs.getDouble(2), 1e-6);
+            assertEquals(1.2, rs.getDouble(3), 1e-6);
+            assertEquals(1.3, rs.getDouble(4), 1e-6);
+            assertFalse(rs.next());
+
+            // drop a column from the base table
+            conn.createStatement().execute(
+                    "ALTER TABLE hbase.metric_table DROP COLUMN val2");
+            
+            // drop a column from the view
+            viewConn1.createStatement().execute(
+                    "ALTER VIEW metric1 DROP COLUMN val3");
+            
+            // verify columns don't exist
+            try {
+                viewConn1.createStatement().executeQuery("SELECT val2 FROM metric1");
+                fail("column should have been dropped");
+            }
+            catch (ColumnNotFoundException e) {
+            }
+            try {
+                viewConn1.createStatement().executeQuery("SELECT val3 FROM metric1");
+                fail("column should have been dropped");
+            }
+            catch (ColumnNotFoundException e) {
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index a2fc371..612adae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -776,7 +776,7 @@ public class FromCompiler {
                     MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM, null, null, columns, null, null,
                     Collections.<PTable> emptyList(), false, Collections.<PName> emptyList(), null, null, false, false,
                     false, null, null, null, false, false, 0, 0L, SchemaUtil
-                            .isNamespaceMappingEnabled(PTableType.SUBQUERY, connection.getQueryServices().getProps()));
+                            .isNamespaceMappingEnabled(PTableType.SUBQUERY, connection.getQueryServices().getProps()), null);
 
             String alias = subselectNode.getAlias();
             TableRef tableRef = new TableRef(alias, t, MetaDataProtocol.MIN_TABLE_TIMESTAMP, false);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 93b32de..5317b49 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -1307,7 +1307,7 @@ public class JoinCompiler {
                 left.isImmutableRows(), Collections.<PName> emptyList(), null, null, PTable.DEFAULT_DISABLE_WAL,
                 left.isMultiTenant(), left.getStoreNulls(), left.getViewType(), left.getViewIndexId(),
                 left.getIndexType(), left.rowKeyOrderOptimizable(), left.isTransactional(),
-                left.getUpdateCacheFrequency(), left.getIndexDisableTimestamp(), left.isNamespaceMapped());
+                left.getUpdateCacheFrequency(), left.getIndexDisableTimestamp(), left.isNamespaceMapped(), left.getAutoPartitionSeqName());
     }
 
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
index a9199c6..8c3d399 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
@@ -153,7 +153,7 @@ public class TupleProjectionCompiler {
                 table.getParentName(), table.getIndexes(), table.isImmutableRows(), Collections.<PName> emptyList(),
                 null, null, table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(),
                 table.getViewIndexId(),
-                table.getIndexType(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped());
+                table.getIndexType(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName());
     }
 
     public static PTable createProjectedTable(TableRef tableRef, List<ColumnRef> sourceColumnRefs, boolean retainPKColumns) throws SQLException {
@@ -181,7 +181,7 @@ public class TupleProjectionCompiler {
                 Collections.<PTable> emptyList(), table.isImmutableRows(), Collections.<PName> emptyList(), null, null,
                 table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(),
                 table.getViewIndexId(), null, table.rowKeyOrderOptimizable(), table.isTransactional(),
-                table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped());
+                table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName());
     }
 
     // For extracting column references from single select statement

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
index 32fa8fe..71cd7fc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
@@ -85,7 +85,7 @@ public class UnionCompiler {
                 scn == null ? HConstants.LATEST_TIMESTAMP : scn, null, null, projectedColumns, null, null, null, true,
                 null, null, null, true, true, true, null, null, null, false, false, 0, 0L,
                 SchemaUtil.isNamespaceMappingEnabled(PTableType.SUBQUERY,
-                        statement.getConnection().getQueryServices().getProps()));
+                        statement.getConnection().getQueryServices().getProps()), null);
         TableRef tableRef = new TableRef(null, tempTable, 0, false);
         return tableRef;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index c8382ec..25483a3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -21,6 +21,7 @@ import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkState;
 import static org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLASS_NAME_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF_BYTES;
@@ -76,14 +77,19 @@ import static org.apache.phoenix.util.SchemaUtil.getVarCharLength;
 import static org.apache.phoenix.util.SchemaUtil.getVarChars;
 
 import java.io.IOException;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.Iterator;
 import java.util.List;
+import java.util.NavigableMap;
+import java.util.Properties;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -114,6 +120,7 @@ import org.apache.hadoop.hbase.regionserver.Region.RowLock;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.cache.GlobalCache.FunctionBytesPtr;
 import org.apache.phoenix.compile.ColumnResolver;
@@ -152,6 +159,7 @@ import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
+import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -184,6 +192,7 @@ import org.apache.phoenix.schema.PTable.LinkType;
 import org.apache.phoenix.schema.PTable.ViewType;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.SequenceNotFoundException;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.TableRef;
@@ -204,6 +213,7 @@ import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
@@ -266,6 +276,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     private static final KeyValue UPDATE_CACHE_FREQUENCY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, UPDATE_CACHE_FREQUENCY_BYTES);
     private static final KeyValue IS_NAMESPACE_MAPPED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY,
             TABLE_FAMILY_BYTES, IS_NAMESPACE_MAPPED_BYTES);
+    private static final KeyValue AUTO_PARTITION_SEQ_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, AUTO_PARTITION_SEQ_BYTES);
     
     private static final List<KeyValue> TABLE_KV_COLUMNS = Arrays.<KeyValue>asList(
             EMPTY_KEYVALUE_KV,
@@ -290,7 +301,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             ROW_KEY_ORDER_OPTIMIZABLE_KV,
             TRANSACTIONAL_KV,
             UPDATE_CACHE_FREQUENCY_KV,
-            IS_NAMESPACE_MAPPED_KV
+            IS_NAMESPACE_MAPPED_KV,
+            AUTO_PARTITION_SEQ_KV
             );
     static {
         Collections.sort(TABLE_KV_COLUMNS, KeyValue.COMPARATOR);
@@ -318,6 +330,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     private static final int UPDATE_CACHE_FREQUENCY_INDEX = TABLE_KV_COLUMNS.indexOf(UPDATE_CACHE_FREQUENCY_KV);
     private static final int INDEX_DISABLE_TIMESTAMP = TABLE_KV_COLUMNS.indexOf(INDEX_DISABLE_TIMESTAMP_KV);
     private static final int IS_NAMESPACE_MAPPED_INDEX = TABLE_KV_COLUMNS.indexOf(IS_NAMESPACE_MAPPED_KV);
+    private static final int AUTO_PARTITION_SEQ_INDEX = TABLE_KV_COLUMNS.indexOf(AUTO_PARTITION_SEQ_KV);
 
     // KeyValues for Column
     private static final KeyValue DECIMAL_DIGITS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DECIMAL_DIGITS_BYTES);
@@ -898,7 +911,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         boolean isNamespaceMapped = isNamespaceMappedKv == null ? false
                 : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isNamespaceMappedKv.getValueArray(),
                         isNamespaceMappedKv.getValueOffset(), isNamespaceMappedKv.getValueLength()));
-
+        Cell autoPartitionSeqKv = tableKeyValues[AUTO_PARTITION_SEQ_INDEX];
+        String autoPartitionSeq = autoPartitionSeqKv != null ? (String) PVarchar.INSTANCE.toObject(autoPartitionSeqKv.getValueArray(), autoPartitionSeqKv.getValueOffset(),
+            autoPartitionSeqKv.getValueLength()) : null;
+        
         List<PColumn> columns = Lists.newArrayListWithExpectedSize(columnCount);
         List<PTable> indexes = new ArrayList<PTable>();
         List<PName> physicalTables = new ArrayList<PName>();
@@ -948,7 +964,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 tableType == INDEX ? dataTableName : null, indexes, isImmutableRows, physicalTables, defaultFamilyName,
                 viewStatement, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType,
                 rowKeyOrderOptimizable, transactional, updateCacheFrequency, stats, baseColumnCount,
-                indexDisableTimestamp, isNamespaceMapped);
+                indexDisableTimestamp, isNamespaceMapped, autoPartitionSeq);
     }
 
     private PSchema getSchema(RegionScanner scanner, long clientTimeStamp) throws IOException, SQLException {
@@ -1347,6 +1363,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 
                 long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
                 ImmutableBytesPtr parentCacheKey = null;
+                PTable parentTable = null;
                 if (parentTableName != null) {
                     // Check if the parent table resides in the same region. If not, don't worry about locking the parent table row
                     // or loading the parent table. For a view, the parent table that needs to be locked is the base physical table.
@@ -1355,7 +1372,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     if (result == null) {
                         acquireLock(region, parentTableKey, locks);
                         parentCacheKey = new ImmutableBytesPtr(parentTableKey);
-                        PTable parentTable = loadTable(env, parentTableKey, parentCacheKey, clientTimeStamp,
+                        parentTable = loadTable(env, parentTableKey, parentCacheKey, clientTimeStamp,
                                 clientTimeStamp);
                         if (parentTable == null || isTableDeleted(parentTable)) {
                             builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
@@ -1409,12 +1426,79 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         return;
                     }
                 }
+                
                 // Add cell for ROW_KEY_ORDER_OPTIMIZABLE = true, as we know that new tables
                 // conform the correct row key. The exception is for a VIEW, which the client
                 // sends over depending on its base physical table.
                 if (tableType != PTableType.VIEW) {
                     UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, tableKey, clientTimeStamp);
                 }
+                // If the parent table of the view has the auto partition sequence name attribute, modify the 
+                // tableMetadata and set the view statement and partition column correctly
+                if (parentTable!=null && parentTable.getAutoPartitionSeqName()!=null) {
+                    long autoPartitionNum = 1;
+                    final Properties props = new Properties();
+                    props.setProperty(PhoenixRuntime.NO_UPGRADE_ATTRIB, Boolean.TRUE.toString());
+                    try (PhoenixConnection connection = DriverManager.getConnection(MetaDataUtil.getJdbcUrl(env), props).unwrap(PhoenixConnection.class);
+                            Statement stmt = connection.createStatement()) {
+                        String seqNextValueSql = String.format("SELECT NEXT VALUE FOR %s FROM %s LIMIT 1",
+                            SchemaUtil.getTableName(parentTable.getSchemaName().getString(), parentTable.getAutoPartitionSeqName()), PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
+                        ResultSet rs = stmt.executeQuery(seqNextValueSql);
+                        rs.next();
+                        autoPartitionNum = rs.getLong(1);
+                    }
+                    catch (SequenceNotFoundException e) {
+                        builder.setReturnCode(MetaDataProtos.MutationCode.AUTO_PARTITION_SEQUENCE_NOT_FOUND);
+                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+                        done.run(builder.build());
+                        return;
+                    }
+                    PColumn autoPartitionCol = parentTable.getColumns().get(MetaDataUtil.getAutoPartitionColIndex(parentTable));
+                    if (!PLong.INSTANCE.isCoercibleTo(autoPartitionCol.getDataType(), autoPartitionNum)) {
+                        builder.setReturnCode(MetaDataProtos.MutationCode.CANNOT_COERCE_AUTO_PARTITION_ID);
+                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+                        done.run(builder.build());
+                        return;
+                    }
+                    builder.setAutoPartitionNum(autoPartitionNum);
+                    
+                    // set the VIEW STATEMENT column of the header row
+                    Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata);
+                    NavigableMap<byte[], List<Cell>> familyCellMap = tableHeaderPut.getFamilyCellMap();
+                    List<Cell> cells = familyCellMap.get(TABLE_FAMILY_BYTES);
+                    Cell cell = cells.get(0);
+                    String autoPartitionWhere = QueryUtil.getViewPartitionClause(MetaDataUtil.getAutoPartitionColumnName(parentTable), autoPartitionNum);
+                    String hbaseVersion = VersionInfo.getVersion();
+                    ImmutableBytesPtr ptr = new ImmutableBytesPtr();
+                    KeyValueBuilder kvBuilder = KeyValueBuilder.get(hbaseVersion);
+                    MetaDataUtil.getMutationValue(tableHeaderPut, VIEW_STATEMENT_BYTES, kvBuilder, ptr);
+                    byte[] value = ptr.copyBytesIfNecessary();
+                    byte[] viewStatement = null;
+                    // if we have an existing where clause add the auto partition where clause to it
+                    if (!Bytes.equals(value, QueryConstants.EMPTY_COLUMN_VALUE_BYTES)) {
+                        viewStatement = Bytes.add(value, Bytes.toBytes(" AND "), Bytes.toBytes(autoPartitionWhere));
+                    }
+                    else { 
+                        viewStatement = Bytes.toBytes(QueryUtil.getViewStatement(parentTable.getSchemaName().getString(), parentTable.getTableName().getString(), autoPartitionWhere));
+                    }
+                    Cell viewStatementCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_STATEMENT_BYTES,
+                        cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), viewStatement);
+                    cells.add(viewStatementCell);
+                    
+                    // set the IS_VIEW_REFERENCED column of the auto partition column row
+                    Put autoPartitionPut = MetaDataUtil.getPutOnlyAutoPartitionColumn(parentTable, tableMetadata);
+                    familyCellMap = autoPartitionPut.getFamilyCellMap();
+                    cells = familyCellMap.get(TABLE_FAMILY_BYTES);
+                    cell = cells.get(0);
+                    byte[] bytes = new byte [Bytes.SIZEOF_LONG + 1];
+                    PDataType dataType = autoPartitionCol.getDataType();
+                    Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE);
+                    dataType.toBytes(val, bytes, 0);
+                    Cell viewConstantCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_CONSTANT_BYTES,
+                        cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
+                    cells.add(viewConstantCell);
+                }
+                
                 // TODO: Switch this to HRegion#batchMutate when we want to support indexes on the
                 // system table. Basically, we get all the locks that we don't already hold for all the
                 // tableMetadata rows. This ensures we don't have deadlock situations (ensuring

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 0ebcd64..8c86b56 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -107,6 +107,8 @@ public abstract class MetaDataProtocol extends MetaDataService {
         SCHEMA_NOT_IN_REGION,
         TABLES_EXIST_ON_SCHEMA,
         UNALLOWED_SCHEMA_MUTATION,
+        AUTO_PARTITION_SEQUENCE_NOT_FOUND,
+        CANNOT_COERCE_AUTO_PARTITION_ID,
         NO_OP
     };
 
@@ -187,6 +189,7 @@ public abstract class MetaDataProtocol extends MetaDataService {
         private PSchema schema;
 
         private List<PFunction> functions = new ArrayList<PFunction>(1);
+        private long autoPartitionNum;
 
         public MetaDataMutationResult() {
         }
@@ -278,6 +281,10 @@ public abstract class MetaDataProtocol extends MetaDataService {
             return sharedTablesToDelete;
         }
 
+        public long getAutoPartitionNum() {
+            return autoPartitionNum;
+        }
+
         public static MetaDataMutationResult constructFromProto(MetaDataResponse proto) {
           MetaDataMutationResult result = new MetaDataMutationResult();
           result.returnCode = MutationCode.values()[proto.getReturnCode().ordinal()];
@@ -316,6 +323,9 @@ public abstract class MetaDataProtocol extends MetaDataService {
           if (proto.hasSchema()) {
             result.schema = PSchema.createFromProto(proto.getSchema());
           }
+          if (proto.hasAutoPartitionNum()) {
+              result.autoPartitionNum = proto.getAutoPartitionNum();
+          }
           return result;
         }
 
@@ -362,6 +372,7 @@ public abstract class MetaDataProtocol extends MetaDataService {
             if (result.getSchema() != null) {
               builder.setSchema(PSchema.toProto(result.schema));
             }
+            builder.setAutoPartitionNum(result.getAutoPartitionNum());
           }
           return builder.build();
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
index 62e300a..44dd9e0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
@@ -93,6 +93,14 @@ public final class MetaDataProtos {
      * <code>UNALLOWED_SCHEMA_MUTATION = 19;</code>
      */
     UNALLOWED_SCHEMA_MUTATION(19, 19),
+    /**
+     * <code>AUTO_PARTITION_SEQUENCE_NOT_FOUND = 20;</code>
+     */
+    AUTO_PARTITION_SEQUENCE_NOT_FOUND(20, 20),
+    /**
+     * <code>CANNOT_COERCE_AUTO_PARTITION_ID = 21;</code>
+     */
+    CANNOT_COERCE_AUTO_PARTITION_ID(21, 21),
     ;
 
     /**
@@ -175,6 +183,14 @@ public final class MetaDataProtos {
      * <code>UNALLOWED_SCHEMA_MUTATION = 19;</code>
      */
     public static final int UNALLOWED_SCHEMA_MUTATION_VALUE = 19;
+    /**
+     * <code>AUTO_PARTITION_SEQUENCE_NOT_FOUND = 20;</code>
+     */
+    public static final int AUTO_PARTITION_SEQUENCE_NOT_FOUND_VALUE = 20;
+    /**
+     * <code>CANNOT_COERCE_AUTO_PARTITION_ID = 21;</code>
+     */
+    public static final int CANNOT_COERCE_AUTO_PARTITION_ID_VALUE = 21;
 
 
     public final int getNumber() { return value; }
@@ -201,6 +217,8 @@ public final class MetaDataProtos {
         case 17: return SCHEMA_NOT_IN_REGION;
         case 18: return TABLES_EXIST_ON_SCHEMA;
         case 19: return UNALLOWED_SCHEMA_MUTATION;
+        case 20: return AUTO_PARTITION_SEQUENCE_NOT_FOUND;
+        case 21: return CANNOT_COERCE_AUTO_PARTITION_ID;
         default: return null;
       }
     }
@@ -1669,6 +1687,16 @@ public final class MetaDataProtos {
      * <code>optional .PSchema schema = 10;</code>
      */
     org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchemaOrBuilder getSchemaOrBuilder();
+
+    // optional int64 autoPartitionNum = 11;
+    /**
+     * <code>optional int64 autoPartitionNum = 11;</code>
+     */
+    boolean hasAutoPartitionNum();
+    /**
+     * <code>optional int64 autoPartitionNum = 11;</code>
+     */
+    long getAutoPartitionNum();
   }
   /**
    * Protobuf type {@code MetaDataResponse}
@@ -1802,6 +1830,11 @@ public final class MetaDataProtos {
               bitField0_ |= 0x00000040;
               break;
             }
+            case 88: {
+              bitField0_ |= 0x00000080;
+              autoPartitionNum_ = input.readInt64();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -2070,6 +2103,22 @@ public final class MetaDataProtos {
       return schema_;
     }
 
+    // optional int64 autoPartitionNum = 11;
+    public static final int AUTOPARTITIONNUM_FIELD_NUMBER = 11;
+    private long autoPartitionNum_;
+    /**
+     * <code>optional int64 autoPartitionNum = 11;</code>
+     */
+    public boolean hasAutoPartitionNum() {
+      return ((bitField0_ & 0x00000080) == 0x00000080);
+    }
+    /**
+     * <code>optional int64 autoPartitionNum = 11;</code>
+     */
+    public long getAutoPartitionNum() {
+      return autoPartitionNum_;
+    }
+
     private void initFields() {
       returnCode_ = org.apache.phoenix.coprocessor.generated.MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS;
       mutationTime_ = 0L;
@@ -2081,6 +2130,7 @@ public final class MetaDataProtos {
       function_ = java.util.Collections.emptyList();
       sharedTablesToDelete_ = java.util.Collections.emptyList();
       schema_ = org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema.getDefaultInstance();
+      autoPartitionNum_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -2148,6 +2198,9 @@ public final class MetaDataProtos {
       if (((bitField0_ & 0x00000040) == 0x00000040)) {
         output.writeMessage(10, schema_);
       }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        output.writeInt64(11, autoPartitionNum_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -2202,6 +2255,10 @@ public final class MetaDataProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(10, schema_);
       }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt64Size(11, autoPartitionNum_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -2266,6 +2323,11 @@ public final class MetaDataProtos {
         result = result && getSchema()
             .equals(other.getSchema());
       }
+      result = result && (hasAutoPartitionNum() == other.hasAutoPartitionNum());
+      if (hasAutoPartitionNum()) {
+        result = result && (getAutoPartitionNum()
+            == other.getAutoPartitionNum());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -2319,6 +2381,10 @@ public final class MetaDataProtos {
         hash = (37 * hash) + SCHEMA_FIELD_NUMBER;
         hash = (53 * hash) + getSchema().hashCode();
       }
+      if (hasAutoPartitionNum()) {
+        hash = (37 * hash) + AUTOPARTITIONNUM_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getAutoPartitionNum());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -2468,6 +2534,8 @@ public final class MetaDataProtos {
           schemaBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000200);
+        autoPartitionNum_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000400);
         return this;
       }
 
@@ -2555,6 +2623,10 @@ public final class MetaDataProtos {
         } else {
           result.schema_ = schemaBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+          to_bitField0_ |= 0x00000080;
+        }
+        result.autoPartitionNum_ = autoPartitionNum_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -2654,6 +2726,9 @@ public final class MetaDataProtos {
         if (other.hasSchema()) {
           mergeSchema(other.getSchema());
         }
+        if (other.hasAutoPartitionNum()) {
+          setAutoPartitionNum(other.getAutoPartitionNum());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -3668,6 +3743,39 @@ public final class MetaDataProtos {
         return schemaBuilder_;
       }
 
+      // optional int64 autoPartitionNum = 11;
+      private long autoPartitionNum_ ;
+      /**
+       * <code>optional int64 autoPartitionNum = 11;</code>
+       */
+      public boolean hasAutoPartitionNum() {
+        return ((bitField0_ & 0x00000400) == 0x00000400);
+      }
+      /**
+       * <code>optional int64 autoPartitionNum = 11;</code>
+       */
+      public long getAutoPartitionNum() {
+        return autoPartitionNum_;
+      }
+      /**
+       * <code>optional int64 autoPartitionNum = 11;</code>
+       */
+      public Builder setAutoPartitionNum(long value) {
+        bitField0_ |= 0x00000400;
+        autoPartitionNum_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional int64 autoPartitionNum = 11;</code>
+       */
+      public Builder clearAutoPartitionNum() {
+        bitField0_ = (bitField0_ & ~0x00000400);
+        autoPartitionNum_ = 0L;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:MetaDataResponse)
     }
 
@@ -16763,90 +16871,92 @@ public final class MetaDataProtos {
       "TableState\022\020\n\010tenantId\030\001 \001(\014\022\022\n\nschemaNa" +
       "me\030\002 \002(\014\022\021\n\ttableName\030\003 \002(\014\022\031\n\007columns\030\004" +
       " \003(\0132\010.PColumn\022\025\n\rphysicalNames\030\005 \003(\014\022\023\n" +
-      "\013viewIndexId\030\006 \002(\005\"\242\002\n\020MetaDataResponse\022" +
+      "\013viewIndexId\030\006 \002(\005\"\274\002\n\020MetaDataResponse\022" +
       "!\n\nreturnCode\030\001 \001(\0162\r.MutationCode\022\024\n\014mu" +
       "tationTime\030\002 \001(\003\022\026\n\005table\030\003 \001(\0132\007.PTable" +
       "\022\026\n\016tablesToDelete\030\004 \003(\014\022\022\n\ncolumnName\030\005" +
       " \001(\014\022\022\n\nfamilyName\030\006 \001(\014\022\024\n\014functionName",
       "\030\007 \001(\014\022\034\n\010function\030\010 \003(\0132\n.PFunction\022/\n\024" +
       "sharedTablesToDelete\030\t \003(\0132\021.SharedTable" +
-      "State\022\030\n\006schema\030\n \001(\0132\010.PSchema\"\222\001\n\017GetT" +
-      "ableRequest\022\020\n\010tenantId\030\001 \002(\014\022\022\n\nschemaN" +
-      "ame\030\002 \002(\014\022\021\n\ttableName\030\003 \002(\014\022\026\n\016tableTim" +
-      "estamp\030\004 \002(\003\022\027\n\017clientTimestamp\030\005 \002(\003\022\025\n" +
-      "\rclientVersion\030\006 \001(\005\"\212\001\n\023GetFunctionsReq" +
-      "uest\022\020\n\010tenantId\030\001 \002(\014\022\025\n\rfunctionNames\030" +
-      "\002 \003(\014\022\032\n\022functionTimestamps\030\003 \003(\003\022\027\n\017cli" +
-      "entTimestamp\030\004 \002(\003\022\025\n\rclientVersion\030\005 \001(",
-      "\005\"V\n\020GetSchemaRequest\022\022\n\nschemaName\030\001 \002(" +
-      "\t\022\027\n\017clientTimestamp\030\002 \002(\003\022\025\n\rclientVers" +
-      "ion\030\003 \002(\005\"K\n\022CreateTableRequest\022\036\n\026table" +
-      "MetadataMutations\030\001 \003(\014\022\025\n\rclientVersion" +
-      "\030\002 \001(\005\"r\n\025CreateFunctionRequest\022\036\n\026table" +
-      "MetadataMutations\030\001 \003(\014\022\021\n\ttemporary\030\002 \002" +
-      "(\010\022\017\n\007replace\030\003 \001(\010\022\025\n\rclientVersion\030\004 \001" +
-      "(\005\"`\n\023CreateSchemaRequest\022\036\n\026tableMetada" +
-      "taMutations\030\001 \003(\014\022\022\n\nschemaName\030\002 \002(\t\022\025\n" +
-      "\rclientVersion\030\003 \002(\005\"m\n\020DropTableRequest",
-      "\022\036\n\026tableMetadataMutations\030\001 \003(\014\022\021\n\ttabl" +
-      "eType\030\002 \002(\t\022\017\n\007cascade\030\003 \001(\010\022\025\n\rclientVe" +
-      "rsion\030\004 \001(\005\"_\n\021DropSchemaRequest\022\037\n\027sche" +
-      "maMetadataMutations\030\001 \003(\014\022\022\n\nschemaName\030" +
-      "\002 \002(\t\022\025\n\rclientVersion\030\003 \002(\005\"I\n\020AddColum" +
-      "nRequest\022\036\n\026tableMetadataMutations\030\001 \003(\014" +
-      "\022\025\n\rclientVersion\030\002 \001(\005\"J\n\021DropColumnReq" +
-      "uest\022\036\n\026tableMetadataMutations\030\001 \003(\014\022\025\n\r" +
-      "clientVersion\030\002 \001(\005\"^\n\023DropFunctionReque" +
-      "st\022\036\n\026tableMetadataMutations\030\001 \003(\014\022\020\n\010if",
-      "Exists\030\002 \001(\010\022\025\n\rclientVersion\030\003 \001(\005\"P\n\027U" +
-      "pdateIndexStateRequest\022\036\n\026tableMetadataM" +
-      "utations\030\001 \003(\014\022\025\n\rclientVersion\030\002 \001(\005\"*\n" +
-      "\021ClearCacheRequest\022\025\n\rclientVersion\030\001 \001(" +
-      "\005\"*\n\022ClearCacheResponse\022\024\n\014unfreedBytes\030" +
-      "\001 \001(\003\"*\n\021GetVersionRequest\022\025\n\rclientVers" +
-      "ion\030\001 \001(\005\"%\n\022GetVersionResponse\022\017\n\007versi" +
-      "on\030\001 \002(\003\"\205\001\n\032ClearTableFromCacheRequest\022" +
-      "\020\n\010tenantId\030\001 \002(\014\022\022\n\nschemaName\030\002 \002(\014\022\021\n" +
-      "\ttableName\030\003 \002(\014\022\027\n\017clientTimestamp\030\004 \002(",
-      "\003\022\025\n\rclientVersion\030\005 \001(\005\"\035\n\033ClearTableFr" +
-      "omCacheResponse*\223\004\n\014MutationCode\022\030\n\024TABL" +
-      "E_ALREADY_EXISTS\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022" +
-      "\024\n\020COLUMN_NOT_FOUND\020\002\022\031\n\025COLUMN_ALREADY_" +
-      "EXISTS\020\003\022\035\n\031CONCURRENT_TABLE_MUTATION\020\004\022" +
-      "\027\n\023TABLE_NOT_IN_REGION\020\005\022\025\n\021NEWER_TABLE_" +
-      "FOUND\020\006\022\034\n\030UNALLOWED_TABLE_MUTATION\020\007\022\021\n" +
-      "\rNO_PK_COLUMNS\020\010\022\032\n\026PARENT_TABLE_NOT_FOU" +
-      "ND\020\t\022\033\n\027FUNCTION_ALREADY_EXISTS\020\n\022\026\n\022FUN" +
-      "CTION_NOT_FOUND\020\013\022\030\n\024NEWER_FUNCTION_FOUN",
-      "D\020\014\022\032\n\026FUNCTION_NOT_IN_REGION\020\r\022\031\n\025SCHEM" +
-      "A_ALREADY_EXISTS\020\016\022\026\n\022NEWER_SCHEMA_FOUND" +
-      "\020\017\022\024\n\020SCHEMA_NOT_FOUND\020\020\022\030\n\024SCHEMA_NOT_I" +
-      "N_REGION\020\021\022\032\n\026TABLES_EXIST_ON_SCHEMA\020\022\022\035" +
-      "\n\031UNALLOWED_SCHEMA_MUTATION\020\0232\345\006\n\017MetaDa" +
-      "taService\022/\n\010getTable\022\020.GetTableRequest\032" +
-      "\021.MetaDataResponse\0227\n\014getFunctions\022\024.Get" +
-      "FunctionsRequest\032\021.MetaDataResponse\0221\n\tg" +
-      "etSchema\022\021.GetSchemaRequest\032\021.MetaDataRe" +
-      "sponse\0225\n\013createTable\022\023.CreateTableReque",
-      "st\032\021.MetaDataResponse\022;\n\016createFunction\022" +
-      "\026.CreateFunctionRequest\032\021.MetaDataRespon" +
-      "se\0227\n\014createSchema\022\024.CreateSchemaRequest" +
-      "\032\021.MetaDataResponse\0221\n\tdropTable\022\021.DropT" +
-      "ableRequest\032\021.MetaDataResponse\0223\n\ndropSc" +
-      "hema\022\022.DropSchemaRequest\032\021.MetaDataRespo" +
-      "nse\0227\n\014dropFunction\022\024.DropFunctionReques" +
-      "t\032\021.MetaDataResponse\0221\n\taddColumn\022\021.AddC" +
-      "olumnRequest\032\021.MetaDataResponse\0223\n\ndropC" +
-      "olumn\022\022.DropColumnRequest\032\021.MetaDataResp",
-      "onse\022?\n\020updateIndexState\022\030.UpdateIndexSt" +
-      "ateRequest\032\021.MetaDataResponse\0225\n\nclearCa" +
-      "che\022\022.ClearCacheRequest\032\023.ClearCacheResp" +
-      "onse\0225\n\ngetVersion\022\022.GetVersionRequest\032\023" +
-      ".GetVersionResponse\022P\n\023clearTableFromCac" +
-      "he\022\033.ClearTableFromCacheRequest\032\034.ClearT" +
-      "ableFromCacheResponseBB\n(org.apache.phoe" +
-      "nix.coprocessor.generatedB\016MetaDataProto" +
-      "sH\001\210\001\001\240\001\001"
+      "State\022\030\n\006schema\030\n \001(\0132\010.PSchema\022\030\n\020autoP" +
+      "artitionNum\030\013 \001(\003\"\222\001\n\017GetTableRequest\022\020\n" +
+      "\010tenantId\030\001 \002(\014\022\022\n\nschemaName\030\002 \002(\014\022\021\n\tt" +
+      "ableName\030\003 \002(\014\022\026\n\016tableTimestamp\030\004 \002(\003\022\027" +
+      "\n\017clientTimestamp\030\005 \002(\003\022\025\n\rclientVersion" +
+      "\030\006 \001(\005\"\212\001\n\023GetFunctionsRequest\022\020\n\010tenant" +
+      "Id\030\001 \002(\014\022\025\n\rfunctionNames\030\002 \003(\014\022\032\n\022funct" +
+      "ionTimestamps\030\003 \003(\003\022\027\n\017clientTimestamp\030\004",
+      " \002(\003\022\025\n\rclientVersion\030\005 \001(\005\"V\n\020GetSchema" +
+      "Request\022\022\n\nschemaName\030\001 \002(\t\022\027\n\017clientTim" +
+      "estamp\030\002 \002(\003\022\025\n\rclientVersion\030\003 \002(\005\"K\n\022C" +
+      "reateTableRequest\022\036\n\026tableMetadataMutati" +
+      "ons\030\001 \003(\014\022\025\n\rclientVersion\030\002 \001(\005\"r\n\025Crea" +
+      "teFunctionRequest\022\036\n\026tableMetadataMutati" +
+      "ons\030\001 \003(\014\022\021\n\ttemporary\030\002 \002(\010\022\017\n\007replace\030" +
+      "\003 \001(\010\022\025\n\rclientVersion\030\004 \001(\005\"`\n\023CreateSc" +
+      "hemaRequest\022\036\n\026tableMetadataMutations\030\001 " +
+      "\003(\014\022\022\n\nschemaName\030\002 \002(\t\022\025\n\rclientVersion",
+      "\030\003 \002(\005\"m\n\020DropTableRequest\022\036\n\026tableMetad" +
+      "ataMutations\030\001 \003(\014\022\021\n\ttableType\030\002 \002(\t\022\017\n" +
+      "\007cascade\030\003 \001(\010\022\025\n\rclientVersion\030\004 \001(\005\"_\n" +
+      "\021DropSchemaRequest\022\037\n\027schemaMetadataMuta" +
+      "tions\030\001 \003(\014\022\022\n\nschemaName\030\002 \002(\t\022\025\n\rclien" +
+      "tVersion\030\003 \002(\005\"I\n\020AddColumnRequest\022\036\n\026ta" +
+      "bleMetadataMutations\030\001 \003(\014\022\025\n\rclientVers" +
+      "ion\030\002 \001(\005\"J\n\021DropColumnRequest\022\036\n\026tableM" +
+      "etadataMutations\030\001 \003(\014\022\025\n\rclientVersion\030" +
+      "\002 \001(\005\"^\n\023DropFunctionRequest\022\036\n\026tableMet",
+      "adataMutations\030\001 \003(\014\022\020\n\010ifExists\030\002 \001(\010\022\025" +
+      "\n\rclientVersion\030\003 \001(\005\"P\n\027UpdateIndexStat" +
+      "eRequest\022\036\n\026tableMetadataMutations\030\001 \003(\014" +
+      "\022\025\n\rclientVersion\030\002 \001(\005\"*\n\021ClearCacheReq" +
+      "uest\022\025\n\rclientVersion\030\001 \001(\005\"*\n\022ClearCach" +
+      "eResponse\022\024\n\014unfreedBytes\030\001 \001(\003\"*\n\021GetVe" +
+      "rsionRequest\022\025\n\rclientVersion\030\001 \001(\005\"%\n\022G" +
+      "etVersionResponse\022\017\n\007version\030\001 \002(\003\"\205\001\n\032C" +
+      "learTableFromCacheRequest\022\020\n\010tenantId\030\001 " +
+      "\002(\014\022\022\n\nschemaName\030\002 \002(\014\022\021\n\ttableName\030\003 \002",
+      "(\014\022\027\n\017clientTimestamp\030\004 \002(\003\022\025\n\rclientVer" +
+      "sion\030\005 \001(\005\"\035\n\033ClearTableFromCacheRespons" +
+      "e*\337\004\n\014MutationCode\022\030\n\024TABLE_ALREADY_EXIS" +
+      "TS\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\024\n\020COLUMN_NOT_" +
+      "FOUND\020\002\022\031\n\025COLUMN_ALREADY_EXISTS\020\003\022\035\n\031CO" +
+      "NCURRENT_TABLE_MUTATION\020\004\022\027\n\023TABLE_NOT_I" +
+      "N_REGION\020\005\022\025\n\021NEWER_TABLE_FOUND\020\006\022\034\n\030UNA" +
+      "LLOWED_TABLE_MUTATION\020\007\022\021\n\rNO_PK_COLUMNS" +
+      "\020\010\022\032\n\026PARENT_TABLE_NOT_FOUND\020\t\022\033\n\027FUNCTI" +
+      "ON_ALREADY_EXISTS\020\n\022\026\n\022FUNCTION_NOT_FOUN",
+      "D\020\013\022\030\n\024NEWER_FUNCTION_FOUND\020\014\022\032\n\026FUNCTIO" +
+      "N_NOT_IN_REGION\020\r\022\031\n\025SCHEMA_ALREADY_EXIS" +
+      "TS\020\016\022\026\n\022NEWER_SCHEMA_FOUND\020\017\022\024\n\020SCHEMA_N" +
+      "OT_FOUND\020\020\022\030\n\024SCHEMA_NOT_IN_REGION\020\021\022\032\n\026" +
+      "TABLES_EXIST_ON_SCHEMA\020\022\022\035\n\031UNALLOWED_SC" +
+      "HEMA_MUTATION\020\023\022%\n!AUTO_PARTITION_SEQUEN" +
+      "CE_NOT_FOUND\020\024\022#\n\037CANNOT_COERCE_AUTO_PAR" +
+      "TITION_ID\020\0252\345\006\n\017MetaDataService\022/\n\010getTa" +
+      "ble\022\020.GetTableRequest\032\021.MetaDataResponse" +
+      "\0227\n\014getFunctions\022\024.GetFunctionsRequest\032\021",
+      ".MetaDataResponse\0221\n\tgetSchema\022\021.GetSche" +
+      "maRequest\032\021.MetaDataResponse\0225\n\013createTa" +
+      "ble\022\023.CreateTableRequest\032\021.MetaDataRespo" +
+      "nse\022;\n\016createFunction\022\026.CreateFunctionRe" +
+      "quest\032\021.MetaDataResponse\0227\n\014createSchema" +
+      "\022\024.CreateSchemaRequest\032\021.MetaDataRespons" +
+      "e\0221\n\tdropTable\022\021.DropTableRequest\032\021.Meta" +
+      "DataResponse\0223\n\ndropSchema\022\022.DropSchemaR" +
+      "equest\032\021.MetaDataResponse\0227\n\014dropFunctio" +
+      "n\022\024.DropFunctionRequest\032\021.MetaDataRespon",
+      "se\0221\n\taddColumn\022\021.AddColumnRequest\032\021.Met" +
+      "aDataResponse\0223\n\ndropColumn\022\022.DropColumn" +
+      "Request\032\021.MetaDataResponse\022?\n\020updateInde" +
+      "xState\022\030.UpdateIndexStateRequest\032\021.MetaD" +
+      "ataResponse\0225\n\nclearCache\022\022.ClearCacheRe" +
+      "quest\032\023.ClearCacheResponse\0225\n\ngetVersion" +
+      "\022\022.GetVersionRequest\032\023.GetVersionRespons" +
+      "e\022P\n\023clearTableFromCache\022\033.ClearTableFro" +
+      "mCacheRequest\032\034.ClearTableFromCacheRespo" +
+      "nseBB\n(org.apache.phoenix.coprocessor.ge",
+      "neratedB\016MetaDataProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -16864,7 +16974,7 @@ public final class MetaDataProtos {
           internal_static_MetaDataResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_MetaDataResponse_descriptor,
-              new java.lang.String[] { "ReturnCode", "MutationTime", "Table", "TablesToDelete", "ColumnName", "FamilyName", "FunctionName", "Function", "SharedTablesToDelete", "Schema", });
+              new java.lang.String[] { "ReturnCode", "MutationTime", "Table", "TablesToDelete", "ColumnName", "FamilyName", "FunctionName", "Function", "SharedTablesToDelete", "Schema", "AutoPartitionNum", });
           internal_static_GetTableRequest_descriptor =
             getDescriptor().getMessageTypes().get(2);
           internal_static_GetTableRequest_fieldAccessorTable = new

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
index 126c0dd..fca181d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
@@ -3348,6 +3348,21 @@ public final class PTableProtos {
      * <code>optional bool isNamespaceMapped = 30;</code>
      */
     boolean getIsNamespaceMapped();
+
+    // optional string autoParititonSeqName = 31;
+    /**
+     * <code>optional string autoParititonSeqName = 31;</code>
+     */
+    boolean hasAutoParititonSeqName();
+    /**
+     * <code>optional string autoParititonSeqName = 31;</code>
+     */
+    java.lang.String getAutoParititonSeqName();
+    /**
+     * <code>optional string autoParititonSeqName = 31;</code>
+     */
+    com.google.protobuf.ByteString
+        getAutoParititonSeqNameBytes();
   }
   /**
    * Protobuf type {@code PTable}
@@ -3568,6 +3583,11 @@ public final class PTableProtos {
               isNamespaceMapped_ = input.readBool();
               break;
             }
+            case 250: {
+              bitField0_ |= 0x04000000;
+              autoParititonSeqName_ = input.readBytes();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -4194,6 +4214,49 @@ public final class PTableProtos {
       return isNamespaceMapped_;
     }
 
+    // optional string autoParititonSeqName = 31;
+    public static final int AUTOPARITITONSEQNAME_FIELD_NUMBER = 31;
+    private java.lang.Object autoParititonSeqName_;
+    /**
+     * <code>optional string autoParititonSeqName = 31;</code>
+     */
+    public boolean hasAutoParititonSeqName() {
+      return ((bitField0_ & 0x04000000) == 0x04000000);
+    }
+    /**
+     * <code>optional string autoParititonSeqName = 31;</code>
+     */
+    public java.lang.String getAutoParititonSeqName() {
+      java.lang.Object ref = autoParititonSeqName_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          autoParititonSeqName_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string autoParititonSeqName = 31;</code>
+     */
+    public com.google.protobuf.ByteString
+        getAutoParititonSeqNameBytes() {
+      java.lang.Object ref = autoParititonSeqName_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        autoParititonSeqName_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
     private void initFields() {
       schemaNameBytes_ = com.google.protobuf.ByteString.EMPTY;
       tableNameBytes_ = com.google.protobuf.ByteString.EMPTY;
@@ -4225,6 +4288,7 @@ public final class PTableProtos {
       updateCacheFrequency_ = 0L;
       indexDisableTimestamp_ = 0L;
       isNamespaceMapped_ = false;
+      autoParititonSeqName_ = "";
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -4382,6 +4446,9 @@ public final class PTableProtos {
       if (((bitField0_ & 0x02000000) == 0x02000000)) {
         output.writeBool(30, isNamespaceMapped_);
       }
+      if (((bitField0_ & 0x04000000) == 0x04000000)) {
+        output.writeBytes(31, getAutoParititonSeqNameBytes());
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -4516,6 +4583,10 @@ public final class PTableProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeBoolSize(30, isNamespaceMapped_);
       }
+      if (((bitField0_ & 0x04000000) == 0x04000000)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(31, getAutoParititonSeqNameBytes());
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -4677,6 +4748,11 @@ public final class PTableProtos {
         result = result && (getIsNamespaceMapped()
             == other.getIsNamespaceMapped());
       }
+      result = result && (hasAutoParititonSeqName() == other.hasAutoParititonSeqName());
+      if (hasAutoParititonSeqName()) {
+        result = result && getAutoParititonSeqName()
+            .equals(other.getAutoParititonSeqName());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -4810,6 +4886,10 @@ public final class PTableProtos {
         hash = (37 * hash) + ISNAMESPACEMAPPED_FIELD_NUMBER;
         hash = (53 * hash) + hashBoolean(getIsNamespaceMapped());
       }
+      if (hasAutoParititonSeqName()) {
+        hash = (37 * hash) + AUTOPARITITONSEQNAME_FIELD_NUMBER;
+        hash = (53 * hash) + getAutoParititonSeqName().hashCode();
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -4994,6 +5074,8 @@ public final class PTableProtos {
         bitField0_ = (bitField0_ & ~0x10000000);
         isNamespaceMapped_ = false;
         bitField0_ = (bitField0_ & ~0x20000000);
+        autoParititonSeqName_ = "";
+        bitField0_ = (bitField0_ & ~0x40000000);
         return this;
       }
 
@@ -5158,6 +5240,10 @@ public final class PTableProtos {
           to_bitField0_ |= 0x02000000;
         }
         result.isNamespaceMapped_ = isNamespaceMapped_;
+        if (((from_bitField0_ & 0x40000000) == 0x40000000)) {
+          to_bitField0_ |= 0x04000000;
+        }
+        result.autoParititonSeqName_ = autoParititonSeqName_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -5342,6 +5428,11 @@ public final class PTableProtos {
         if (other.hasIsNamespaceMapped()) {
           setIsNamespaceMapped(other.getIsNamespaceMapped());
         }
+        if (other.hasAutoParititonSeqName()) {
+          bitField0_ |= 0x40000000;
+          autoParititonSeqName_ = other.autoParititonSeqName_;
+          onChanged();
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -7144,6 +7235,80 @@ public final class PTableProtos {
         return this;
       }
 
+      // optional string autoParititonSeqName = 31;
+      private java.lang.Object autoParititonSeqName_ = "";
+      /**
+       * <code>optional string autoParititonSeqName = 31;</code>
+       */
+      public boolean hasAutoParititonSeqName() {
+        return ((bitField0_ & 0x40000000) == 0x40000000);
+      }
+      /**
+       * <code>optional string autoParititonSeqName = 31;</code>
+       */
+      public java.lang.String getAutoParititonSeqName() {
+        java.lang.Object ref = autoParititonSeqName_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          autoParititonSeqName_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string autoParititonSeqName = 31;</code>
+       */
+      public com.google.protobuf.ByteString
+          getAutoParititonSeqNameBytes() {
+        java.lang.Object ref = autoParititonSeqName_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          autoParititonSeqName_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string autoParititonSeqName = 31;</code>
+       */
+      public Builder setAutoParititonSeqName(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x40000000;
+        autoParititonSeqName_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string autoParititonSeqName = 31;</code>
+       */
+      public Builder clearAutoParititonSeqName() {
+        bitField0_ = (bitField0_ & ~0x40000000);
+        autoParititonSeqName_ = getDefaultInstance().getAutoParititonSeqName();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string autoParititonSeqName = 31;</code>
+       */
+      public Builder setAutoParititonSeqNameBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x40000000;
+        autoParititonSeqName_ = value;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:PTable)
     }
 
@@ -7191,7 +7356,7 @@ public final class PTableProtos {
       "\016\n\006values\030\002 \003(\014\022\033\n\023guidePostsByteCount\030\003",
       " \001(\003\022\025\n\rkeyBytesCount\030\004 \001(\003\022\027\n\017guidePost" +
       "sCount\030\005 \001(\005\022!\n\013pGuidePosts\030\006 \001(\0132\014.PGui" +
-      "dePosts\"\336\005\n\006PTable\022\027\n\017schemaNameBytes\030\001 " +
+      "dePosts\"\374\005\n\006PTable\022\027\n\017schemaNameBytes\030\001 " +
       "\002(\014\022\026\n\016tableNameBytes\030\002 \002(\014\022\036\n\ttableType" +
       "\030\003 \002(\0162\013.PTableType\022\022\n\nindexState\030\004 \001(\t\022" +
       "\026\n\016sequenceNumber\030\005 \002(\003\022\021\n\ttimeStamp\030\006 \002" +
@@ -7209,10 +7374,11 @@ public final class PTableProtos {
       "\005\022\036\n\026rowKeyOrderOptimizable\030\032 \001(\010\022\025\n\rtra" +
       "nsactional\030\033 \001(\010\022\034\n\024updateCacheFrequency" +
       "\030\034 \001(\003\022\035\n\025indexDisableTimestamp\030\035 \001(\003\022\031\n",
-      "\021isNamespaceMapped\030\036 \001(\010*A\n\nPTableType\022\n" +
-      "\n\006SYSTEM\020\000\022\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022\t\n\005INDEX\020" +
-      "\003\022\010\n\004JOIN\020\004B@\n(org.apache.phoenix.coproc" +
-      "essor.generatedB\014PTableProtosH\001\210\001\001\240\001\001"
+      "\021isNamespaceMapped\030\036 \001(\010\022\034\n\024autoParitito" +
+      "nSeqName\030\037 \001(\t*A\n\nPTableType\022\n\n\006SYSTEM\020\000" +
+      "\022\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022\t\n\005INDEX\020\003\022\010\n\004JOIN\020" +
+      "\004B@\n(org.apache.phoenix.coprocessor.gene" +
+      "ratedB\014PTableProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -7236,7 +7402,7 @@ public final class PTableProtos {
           internal_static_PTable_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_PTable_descriptor,
-              new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "GuidePosts", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", "BaseColumnCount", "RowKeyOrderOptimizable", "Transactional", "UpdateCacheFrequency", "IndexDisableTimestamp", "IsNamespaceMapped", });
+              new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "GuidePosts", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", "BaseColumnCount", "RowKeyOrderOptimizable", "Transactional", "UpdateCacheFrequency", "IndexDisableTimestamp", "IsNamespaceMapped", "AutoParititonSeqName", });
           return null;
         }
       };

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 9ac05a4..4d18cbb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -284,6 +284,9 @@ public enum SQLExceptionCode {
     TX_MUST_BE_ENABLED_TO_SET_AUTO_FLUSH(1083, "44A14", "Cannot set auto flush if transactions are disabled."),
     TX_MUST_BE_ENABLED_TO_SET_ISOLATION_LEVEL(1084, "44A15", "Cannot set isolation level to TRANSACTION_READ_COMMITTED or TRANSACTION_SERIALIZABLE if transactions are disabled."),
     TX_UNABLE_TO_GET_WRITE_FENCE(1085, "44A16", "Unable to obtain write fence for DDL operation."),
+    
+    SEQUENCE_NOT_CASTABLE_TO_AUTO_PARTITION_ID_COLUMN(1086, "44A17", "Sequence Value not castable to auto-partition id column"),
+    CANNOT_COERCE_AUTO_PARTITION_ID(1087, "44A18", "Auto-partition id cannot be coerced"),
 
     /** Sequence related */
     SEQUENCE_ALREADY_EXIST(1200, "42Z00", "Sequence already exists.", new Factory() {
@@ -314,6 +317,12 @@ public enum SQLExceptionCode {
     INCREMENT_BY_MUST_NOT_BE_ZERO(1214, "42Z14", "Sequence INCREMENT BY value cannot be zero."),
     NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT(1215, "42Z15", "Sequence NEXT n VALUES FOR must be a positive integer or constant." ),
     NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED(1216, "42Z16", "Sequence NEXT n VALUES FOR is not supported for Sequences with the CYCLE flag." ),
+    AUTO_PARTITION_SEQUENCE_UNDEFINED(1217, "42Z17", "Auto Partition Sequence undefined", new Factory() {
+        @Override
+        public SQLException newException(SQLExceptionInfo info) {
+            return new SequenceNotFoundException(info.getSchemaName(), info.getTableName());
+        }
+    }),
 
     /** Parser error. (errorcode 06, sqlState 42P) */
     PARSER_ERROR(601, "42P00", "Syntax error.", Factory.SYNTAX_ERROR),

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 8aae013..c571625 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -292,6 +292,9 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
     public static final String UPDATE_CACHE_FREQUENCY = "UPDATE_CACHE_FREQUENCY";
     public static final byte[] UPDATE_CACHE_FREQUENCY_BYTES = Bytes.toBytes(UPDATE_CACHE_FREQUENCY);
 
+    public static final String AUTO_PARTITION_SEQ = "AUTO_PARTITION_SEQ";
+    public static final byte[] AUTO_PARTITION_SEQ_BYTES = Bytes.toBytes(AUTO_PARTITION_SEQ);
+    
     public static final String ASYNC_CREATED_DATE = "ASYNC_CREATED_DATE";
 
     private final PhoenixConnection connection;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 36aa8cd..5c7ac1a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.query;
 
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARG_POSITION;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BASE_COLUMN_COUNT;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BUFFER_LENGTH;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CACHE_SIZE;
@@ -267,6 +268,7 @@ public interface QueryConstants {
             TRANSACTIONAL + " BOOLEAN," +
             UPDATE_CACHE_FREQUENCY + " BIGINT," +
             IS_NAMESPACE_MAPPED + " BOOLEAN," +
+            AUTO_PARTITION_SEQ + " VARCHAR," +
             "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + ","
             + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" +
             HConstants.VERSIONS + "=" + MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS + ",\n" +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13f38ca9/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
index 7b9af43..f1a7548 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
@@ -276,4 +276,8 @@ public class DelegateTable implements PTable {
     public boolean isNamespaceMapped() {
         return delegate.isNamespaceMapped();
     }
+
+    public String getAutoPartitionSeqName() {
+        return delegate.getAutoPartitionSeqName();
+    }
 }


Mime
View raw message