From: twdsilva
To: dev@phoenix.apache.org
Subject: [GitHub] phoenix pull request #303: PHOENIX-3534 Support multi region SYSTEM.CATALOG ...
Date: Fri, 1 Jun 2018 06:35:25 +0000 (UTC)

Github user twdsilva commented on a diff in the pull request:

    https://github.com/apache/phoenix/pull/303#discussion_r192308276

--- Diff: phoenix-core/src/test/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java ---
@@ -0,0 +1,299 @@
+package org.apache.phoenix.coprocessor;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.junit.Test;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class MetaDataEndpointImplTest extends ParallelStatsDisabledIT {
--- End diff --

I removed the OrphanCleaner code that was there previously and created PHOENIX-3534 to handle cleanup of child view metadata whose parent was dropped during compaction. Instead of having additional cleanup code, I think we should just detect that we are trying to re-create a table that was previously dropped but whose child view metadata was not cleaned up, and then throw an exception.

---
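To make the detect-and-fail idea in the comment above concrete, here is a minimal sketch. It is not the actual MetaDataEndpointImpl code; the ChildViewLink and ChildViewLinkScanner types and the checkNoStaleChildViews method are illustrative assumptions about how such a pre-create check could be expressed.

// Hypothetical sketch (not MetaDataEndpointImpl): before servicing a CREATE TABLE
// request, look for leftover child view link rows belonging to a previously dropped
// table with the same key and fail fast instead of re-creating the table on top of
// stale metadata.
import java.sql.SQLException;
import java.util.List;

public class StaleChildViewCheckSketch {

    /** A leftover parent-to-child link row (illustrative stand-in). */
    public static class ChildViewLink {
        final String childTenantId;
        final String childSchemaName;
        final String childViewName;

        ChildViewLink(String childTenantId, String childSchemaName, String childViewName) {
            this.childTenantId = childTenantId;
            this.childSchemaName = childSchemaName;
            this.childViewName = childViewName;
        }
    }

    /** Abstraction over the scan that would look up child view links (illustrative). */
    public interface ChildViewLinkScanner {
        List<ChildViewLink> findChildViewLinks(String tenantId, String schemaName, String tableName);
    }

    /**
     * Called before creating a table: if any child view links are still present for
     * this table key, the previous drop did not clean them up, so refuse the create
     * rather than attempting to repair the metadata here.
     */
    public static void checkNoStaleChildViews(ChildViewLinkScanner scanner,
            String tenantId, String schemaName, String tableName) throws SQLException {
        List<ChildViewLink> staleLinks = scanner.findChildViewLinks(tenantId, schemaName, tableName);
        if (!staleLinks.isEmpty()) {
            throw new SQLException("Cannot create table " + schemaName + "." + tableName
                    + ": " + staleLinks.size() + " child view link(s) from a previously"
                    + " dropped table still exist and have not been cleaned up");
        }
    }
}

In the real coprocessor the lookup would presumably scan the parent-to-child link rows maintained by Phoenix, and the failure would surface as a specific SQLExceptionCode rather than a plain SQLException; the sketch only shows the shape of the check.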