Return-Path: X-Original-To: apmail-falcon-commits-archive@minotaur.apache.org Delivered-To: apmail-falcon-commits-archive@minotaur.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 2BD971943B for ; Tue, 1 Mar 2016 07:24:19 +0000 (UTC) Received: (qmail 22791 invoked by uid 500); 1 Mar 2016 07:24:16 -0000 Delivered-To: apmail-falcon-commits-archive@falcon.apache.org Received: (qmail 22689 invoked by uid 500); 1 Mar 2016 07:24:16 -0000 Mailing-List: contact commits-help@falcon.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: dev@falcon.apache.org Delivered-To: mailing list commits@falcon.apache.org Received: (qmail 22518 invoked by uid 99); 1 Mar 2016 07:24:15 -0000 Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Tue, 01 Mar 2016 07:24:15 +0000 Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id 33B32E00B4; Tue, 1 Mar 2016 07:24:15 +0000 (UTC) Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: pallavi@apache.org To: commits@falcon.apache.org Date: Tue, 01 Mar 2016 07:24:16 -0000 Message-Id: In-Reply-To: References: X-Mailer: ASF-Git Admin Mailer Subject: [2/7] falcon git commit: Removing addons/ non-docs directory from asf-site branch http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/test/java/org/apache/falcon/hive/DBReplicationStatusTest.java ---------------------------------------------------------------------- diff --git a/addons/hivedr/src/test/java/org/apache/falcon/hive/DBReplicationStatusTest.java b/addons/hivedr/src/test/java/org/apache/falcon/hive/DBReplicationStatusTest.java deleted file mode 100644 index bfeca8d..0000000 --- a/addons/hivedr/src/test/java/org/apache/falcon/hive/DBReplicationStatusTest.java +++ /dev/null @@ -1,230 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.falcon.hive; - -import org.apache.falcon.hive.exception.HiveReplicationException; -import org.apache.falcon.hive.util.DBReplicationStatus; -import org.apache.falcon.hive.util.ReplicationStatus; -import org.testng.Assert; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.Test; - -import java.util.HashMap; -import java.util.Map; - -/** - * Unit tests for DBReplicationStatus. 
- */ -@Test -public class DBReplicationStatusTest { - - private Map tableStatuses = new HashMap(); - private ReplicationStatus dbReplicationStatus; - private ReplicationStatus tableStatus1; - - public DBReplicationStatusTest() { - } - - @BeforeClass - public void prepare() throws Exception { - dbReplicationStatus = new ReplicationStatus("source", "target", "jobname", - "Default1", null, ReplicationStatus.Status.FAILURE, 20L); - tableStatus1 = new ReplicationStatus("source", "target", "jobname", - "default1", "Table1", ReplicationStatus.Status.SUCCESS, 20L); - tableStatuses.put("Table1", tableStatus1); - - } - - public void dBReplicationStatusSerializeTest() throws Exception { - DBReplicationStatus replicationStatus = new DBReplicationStatus(dbReplicationStatus, tableStatuses); - - String expected = "{\n" + " \"db_status\": {\n" - + " \"sourceUri\": \"source\",\n" + " \"targetUri\": \"target\",\n" - + " \"jobName\": \"jobname\",\n" + " \"database\": \"default1\",\n" - + " \"status\": \"FAILURE\",\n" + " \"eventId\": 20\n" + " },\n" - + " \"table_status\": {\"table1\": {\n" + " \"sourceUri\": \"source\",\n" - + " \"targetUri\": \"target\",\n" + " \"jobName\": \"jobname\",\n" - + " \"database\": \"default1\",\n" + " \"table\": \"table1\",\n" - + " \"status\": \"SUCCESS\",\n" + " \"eventId\": 20\n" + " }}\n" + "}"; - String actual = replicationStatus.toJsonString(); - Assert.assertEquals(actual, expected); - } - - public void dBReplicationStatusDeserializeTest() throws Exception { - - String jsonString = "{\"db_status\":{\"sourceUri\":\"source\"," - + "\"targetUri\":\"target\",\"jobName\":\"jobname\",\"database\":\"default1\",\"status\":\"SUCCESS\"," - + "\"eventId\":20},\"table_status\":{\"Table1\":{\"sourceUri\":\"source\",\"targetUri\":\"target\"," - + "\"jobName\":\"jobname\",\"database\":\"default1\",\"table\":\"Table1\",\"status\":\"SUCCESS\"," - + "\"eventId\":20},\"table3\":{\"sourceUri\":\"source\",\"targetUri\":\"target\"," - + "\"jobName\":\"jobname\", \"database\":\"Default1\",\"table\":\"table3\",\"status\":\"FAILURE\"," - + "\"eventId\":10}, \"table2\":{\"sourceUri\":\"source\",\"targetUri\":\"target\"," - + "\"jobName\":\"jobname\", \"database\":\"default1\",\"table\":\"table2\",\"status\":\"INIT\"}}}"; - - DBReplicationStatus dbStatus = new DBReplicationStatus(jsonString); - Assert.assertEquals(dbStatus.getDatabaseStatus().getDatabase(), "default1"); - Assert.assertEquals(dbStatus.getDatabaseStatus().getJobName(), "jobname"); - Assert.assertEquals(dbStatus.getDatabaseStatus().getEventId(), 20); - - Assert.assertEquals(dbStatus.getTableStatuses().get("table1").getEventId(), 20); - Assert.assertEquals(dbStatus.getTableStatuses().get("table1").getStatus(), ReplicationStatus.Status.SUCCESS); - Assert.assertEquals(dbStatus.getTableStatuses().get("table2").getEventId(), -1); - Assert.assertEquals(dbStatus.getTableStatuses().get("table2").getStatus(), ReplicationStatus.Status.INIT); - Assert.assertEquals(dbStatus.getTableStatuses().get("table3").getEventId(), 10); - Assert.assertEquals(dbStatus.getTableStatuses().get("table3").getStatus(), ReplicationStatus.Status.FAILURE); - - - } - - public void wrongDBForTableTest() throws Exception { - - ReplicationStatus newDbStatus = new ReplicationStatus("source", "target", "jobname", - "wrongDb", null, ReplicationStatus.Status.FAILURE, 20L); - new DBReplicationStatus(newDbStatus); - - try { - new DBReplicationStatus(newDbStatus, tableStatuses); - Assert.fail(); - } catch (HiveReplicationException e) { - Assert.assertEquals(e.getMessage(), - 
"Cannot set status for table default1.table1, It does not belong to DB wrongdb"); - } - - String jsonString = "{\n" + " \"db_status\": {\n" - + " \"sourceUri\": \"source\",\n" + " \"targetUri\": \"target\",\n" - + " \"jobName\": \"jobname\",\n" + " \"database\": \"wrongdb\",\n" - + " \"status\": \"FAILURE\",\n" + " \"eventId\": 20\n" + " },\n" - + " \"table_status\": {\"table1\": {\n" + " \"sourceUri\": \"source\",\n" - + " \"targetUri\": \"target\",\n" + " \"jobName\": \"jobname\",\n" - + " \"database\": \"default1\",\n" + " \"table\": \"table1\",\n" - + " \"status\": \"SUCCESS\",\n" + " \"eventId\": 20\n" + " }}\n" + "}"; - - try { - new DBReplicationStatus(jsonString); - Assert.fail(); - } catch (HiveReplicationException e) { - Assert.assertEquals(e.getMessage(), - "Unable to create DBReplicationStatus from JsonString. Cannot set status for " - + "table default1.table1, It does not belong to DB wrongdb"); - } - } - - public void updateTableStatusTest() throws Exception { - DBReplicationStatus replicationStatus = new DBReplicationStatus(dbReplicationStatus, tableStatuses); - replicationStatus.updateTableStatus(tableStatus1); - - // wrong DB test - try { - replicationStatus.updateTableStatus(new ReplicationStatus("source", "target", "jobname", - "wrongDB", "table2", ReplicationStatus.Status.INIT, -1L)); - Assert.fail(); - } catch (HiveReplicationException e) { - Assert.assertEquals(e.getMessage(), - "Cannot update Table Status. TableDB wrongdb does not match current DB default1"); - } - - // wrong status test - try { - replicationStatus.updateTableStatus(dbReplicationStatus); - Assert.fail(); - } catch (HiveReplicationException e) { - Assert.assertEquals(e.getMessage(), - "Cannot update Table Status. Table name is empty."); - } - - } - - public void updateDBStatusTest() throws Exception { - DBReplicationStatus replicationStatus = new DBReplicationStatus(dbReplicationStatus, tableStatuses); - replicationStatus.updateDbStatus(dbReplicationStatus); - - // wrong DB test - try { - replicationStatus.updateDbStatus(new ReplicationStatus("source", "target", "jobname", - "wrongDB", null, ReplicationStatus.Status.INIT, -1L)); - Assert.fail(); - } catch (HiveReplicationException e) { - Assert.assertEquals(e.getMessage(), - "Cannot update Database Status. StatusDB wrongdb does not match current DB default1"); - } - - // wrong status test - try { - replicationStatus.updateDbStatus(tableStatus1); - Assert.fail(); - } catch (HiveReplicationException e) { - Assert.assertEquals(e.getMessage(), - "Cannot update DB Status. 
This is table level status."); - } - } - - public void updateDbStatusFromTableStatusesTest() throws Exception { - - ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "jobname", - "default1", null, ReplicationStatus.Status.SUCCESS, 20L); - ReplicationStatus table1 = new ReplicationStatus("source", "target", "jobname", - "default1", "table1", ReplicationStatus.Status.SUCCESS, 20L); - ReplicationStatus table2 = new ReplicationStatus("source", "target", "jobname", - "Default1", "table2", ReplicationStatus.Status.INIT, -1L); - ReplicationStatus table3 = new ReplicationStatus("source", "target", "jobname", - "default1", "Table3", ReplicationStatus.Status.FAILURE, 15L); - ReplicationStatus table4 = new ReplicationStatus("source", "target", "jobname", - "Default1", "Table4", ReplicationStatus.Status.FAILURE, 18L); - Map tables = new HashMap(); - - tables.put("table1", table1); - tables.put("table2", table2); - tables.put("table3", table3); - tables.put("table4", table4); - - // If there is a failue, last eventId should be lowest eventId of failed tables - DBReplicationStatus status = new DBReplicationStatus(dbStatus, tables); - Assert.assertEquals(status.getDatabaseStatus().getEventId(), 20); - Assert.assertEquals(status.getDatabaseStatus().getStatus(), ReplicationStatus.Status.SUCCESS); - status.updateDbStatusFromTableStatuses(); - Assert.assertEquals(status.getDatabaseStatus().getEventId(), 15); - Assert.assertEquals(status.getDatabaseStatus().getStatus(), ReplicationStatus.Status.FAILURE); - - // If all tables succeed, last eventId should be highest eventId of success tables - table3 = new ReplicationStatus("source", "target", "jobname", - "default1", "table3", ReplicationStatus.Status.SUCCESS, 25L); - table4 = new ReplicationStatus("source", "target", "jobname", - "default1", "table4", ReplicationStatus.Status.SUCCESS, 22L); - tables.put("Table3", table3); - tables.put("Table4", table4); - status = new DBReplicationStatus(dbStatus, tables); - status.updateDbStatusFromTableStatuses(); - Assert.assertEquals(status.getDatabaseStatus().getEventId(), 25); - Assert.assertEquals(status.getDatabaseStatus().getStatus(), ReplicationStatus.Status.SUCCESS); - - // Init tables should not change DB status. - Map initOnlyTables = new HashMap(); - initOnlyTables.put("table2", table2); - dbStatus = new ReplicationStatus("source", "target", "jobname", - "default1", null, ReplicationStatus.Status.SUCCESS, 20L); - status = new DBReplicationStatus(dbStatus, initOnlyTables); - Assert.assertEquals(status.getDatabaseStatus().getEventId(), 20); - Assert.assertEquals(status.getDatabaseStatus().getStatus(), ReplicationStatus.Status.SUCCESS); - status.updateDbStatusFromTableStatuses(); - Assert.assertEquals(status.getDatabaseStatus().getEventId(), 20); - Assert.assertEquals(status.getDatabaseStatus().getStatus(), ReplicationStatus.Status.SUCCESS); - - - } - -} http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/test/java/org/apache/falcon/hive/DRTest.java ---------------------------------------------------------------------- diff --git a/addons/hivedr/src/test/java/org/apache/falcon/hive/DRTest.java b/addons/hivedr/src/test/java/org/apache/falcon/hive/DRTest.java deleted file mode 100644 index 1f44b62..0000000 --- a/addons/hivedr/src/test/java/org/apache/falcon/hive/DRTest.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.falcon.hive; - -/** - * Test class for DR. - */ -public class DRTest { - public void testHiveDr(String[] args) { - String[] testArgs = { - "-sourceMetastoreUri", "thrift://localhost:9083", - "-sourceDatabase", "default", - "-sourceTable", "test", - "-sourceStagingPath", "/apps/hive/tools/dr", - "-sourceNN", "hdfs://localhost:8020", - "-sourceRM", "local", - - "-targetMetastoreUri", "thrift://localhost:9083", - "-targetStagingPath", "/apps/hive/tools/dr", - "-targetNN", "hdfs://localhost:8020", - "-targetRM", "local", - - "-maxEvents", "5", - "-replicationMaxMaps", "1", - "-distcpMapBandwidth", "4", - }; - HiveDRTool.main(testArgs); - } -} http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRStatusStoreTest.java ---------------------------------------------------------------------- diff --git a/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRStatusStoreTest.java b/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRStatusStoreTest.java deleted file mode 100644 index 5bc39df..0000000 --- a/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRStatusStoreTest.java +++ /dev/null @@ -1,343 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.falcon.hive; - -import org.apache.falcon.cluster.util.EmbeddedCluster; -import org.apache.falcon.hadoop.JailedFileSystem; -import org.apache.falcon.hive.exception.HiveReplicationException; -import org.apache.falcon.hive.util.DRStatusStore; -import org.apache.falcon.hive.util.HiveDRStatusStore; -import org.apache.falcon.hive.util.ReplicationStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocalFileSystem; -import org.apache.hadoop.fs.LocatedFileStatus; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RemoteIterator; -import org.testng.Assert; -import org.testng.annotations.AfterClass; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; - -/** - * Unit tests for HiveDRStatusStore. - */ -@Test -public class HiveDRStatusStoreTest { - private HiveDRStatusStore drStatusStore; - private FileSystem fileSystem = new JailedFileSystem(); - - public HiveDRStatusStoreTest() throws Exception { - EmbeddedCluster cluster = EmbeddedCluster.newCluster("hiveReplTest"); - Path storePath = new Path(DRStatusStore.BASE_DEFAULT_STORE_PATH); - - fileSystem.initialize(LocalFileSystem.getDefaultUri(cluster.getConf()), cluster.getConf()); - if (fileSystem.exists(storePath)) { - fileSystem.delete(storePath, true); - } - FileSystem.mkdirs(fileSystem, storePath, DRStatusStore.DEFAULT_STORE_PERMISSION); - drStatusStore = new HiveDRStatusStore(fileSystem, fileSystem.getFileStatus(storePath).getGroup()); - } - - @BeforeClass - public void updateReplicationStatusTest() throws Exception { - ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "jobname", - "Default1", null, ReplicationStatus.Status.SUCCESS, 20L); - ReplicationStatus table1 = new ReplicationStatus("source", "target", "jobname", - "Default1", "table1", ReplicationStatus.Status.SUCCESS, 20L); - ReplicationStatus table2 = new ReplicationStatus("source", "target", "jobname", - "default1", "Table2", ReplicationStatus.Status.INIT, -1L); - ReplicationStatus table3 = new ReplicationStatus("source", "target", "jobname", - "Default1", "Table3", ReplicationStatus.Status.FAILURE, 15L); - ReplicationStatus table4 = new ReplicationStatus("source", "target", "jobname", - "default1", "table4", ReplicationStatus.Status.FAILURE, 18L); - ArrayList replicationStatusList = new ArrayList(); - replicationStatusList.add(table1); - replicationStatusList.add(table2); - replicationStatusList.add(table3); - replicationStatusList.add(table4); - replicationStatusList.add(dbStatus); - drStatusStore.updateReplicationStatus("jobname", replicationStatusList); - } - - @Test(expectedExceptions = IOException.class, - expectedExceptionsMessageRegExp = ".*does not have correct ownership/permissions.*") - public void testDrStatusStoreWithFakeUser() throws IOException { - new HiveDRStatusStore(fileSystem, "fakeGroup"); - } - - public void updateReplicationStatusNewTablesTest() throws Exception { - ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "jobname2", - "default2", null, ReplicationStatus.Status.SUCCESS, 20L); - ReplicationStatus table1 = new ReplicationStatus("source", "target", "jobname2", - "Default2", "table1", ReplicationStatus.Status.SUCCESS, 20L); - ReplicationStatus table2 = new ReplicationStatus("source", "target", "jobname2", - "default2", "Table2", ReplicationStatus.Status.INIT, -1L); - ReplicationStatus table3 = new ReplicationStatus("source", "target", 
"jobname2", - "default2", "table3", ReplicationStatus.Status.FAILURE, 15L); - ReplicationStatus table4 = new ReplicationStatus("source", "target", "jobname2", - "Default2", "Table4", ReplicationStatus.Status.FAILURE, 18L); - ArrayList replicationStatusList = new ArrayList(); - replicationStatusList.add(table1); - replicationStatusList.add(table2); - replicationStatusList.add(table3); - replicationStatusList.add(table4); - replicationStatusList.add(dbStatus); - - drStatusStore.updateReplicationStatus("jobname2", replicationStatusList); - ReplicationStatus status = drStatusStore.getReplicationStatus("source", "target", "jobname2", "default2"); - Assert.assertEquals(status.getEventId(), 15); - Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.FAILURE); - Assert.assertEquals(status.getJobName(), "jobname2"); - Assert.assertEquals(status.getTable(), null); - Assert.assertEquals(status.getSourceUri(), "source"); - - Iterator iter = drStatusStore.getTableReplicationStatusesInDb("source", "target", - "jobname2", "default2"); - int size = 0; - while(iter.hasNext()) { - iter.next(); - size++; - } - Assert.assertEquals(4, size); - - table3 = new ReplicationStatus("source", "target", "jobname2", - "default2", "table3", ReplicationStatus.Status.SUCCESS, 25L); - table4 = new ReplicationStatus("source", "target", "jobname2", - "Default2", "table4", ReplicationStatus.Status.SUCCESS, 22L); - ReplicationStatus table5 = new ReplicationStatus("source", "target", "jobname2", - "default2", "Table5", ReplicationStatus.Status.SUCCESS, 18L); - ReplicationStatus db1table1 = new ReplicationStatus("source", "target", "jobname2", - "Default1", "Table1", ReplicationStatus.Status.SUCCESS, 18L); - replicationStatusList = new ArrayList(); - replicationStatusList.add(table5); - replicationStatusList.add(table3); - replicationStatusList.add(table4); - replicationStatusList.add(db1table1); - - drStatusStore.updateReplicationStatus("jobname2", replicationStatusList); - status = drStatusStore.getReplicationStatus("source", "target", "jobname2", "default1"); - Assert.assertEquals(status.getEventId(), 18); - Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.SUCCESS); - - status = drStatusStore.getReplicationStatus("source", "target", "jobname2", "default2"); - Assert.assertEquals(status.getEventId(), 25); - Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.SUCCESS); - - iter = drStatusStore.getTableReplicationStatusesInDb("source", "target", - "jobname2", "default2"); - size = 0; - while(iter.hasNext()) { - iter.next(); - size++; - } - Assert.assertEquals(5, size); - } - - public void getReplicationStatusDBTest() throws HiveReplicationException { - ReplicationStatus status = drStatusStore.getReplicationStatus("source", "target", "jobname", "Default1"); - Assert.assertEquals(status.getEventId(), 15); - Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.FAILURE); - Assert.assertEquals(status.getJobName(), "jobname"); - Assert.assertEquals(status.getTable(), null); - Assert.assertEquals(status.getSourceUri(), "source"); - } - - public void checkReplicationConflictTest() throws HiveReplicationException { - - try { - //same source, same job, same DB, null table : pass - drStatusStore.checkForReplicationConflict("source", "jobname", "default1", null); - - //same source, same job, same DB, same table : pass - drStatusStore.checkForReplicationConflict("source", "jobname", "default1", "table1"); - - //same source, same job, different DB, null table : pass - 
drStatusStore.checkForReplicationConflict("source", "jobname", "diffDB", null); - - //same source, same job, different DB, different table : pass - drStatusStore.checkForReplicationConflict("source", "jobname", "diffDB", "diffTable"); - - // same source, different job, same DB, diff table : pass - drStatusStore.checkForReplicationConflict("source", "diffJob", "default1", "diffTable"); - } catch (Exception e) { - Assert.fail(e.getMessage()); - } - - try { - // different source, same job, same DB, null table : fail - drStatusStore.checkForReplicationConflict("diffSource", "jobname", "default1", null); - Assert.fail(); - } catch (HiveReplicationException e) { - Assert.assertEquals(e.getMessage(), - "Two different sources are attempting to replicate to same db default1." - + " New Source = diffSource, Existing Source = source"); - } - - try { - // same source, different job, same DB, null table : fail - drStatusStore.checkForReplicationConflict("source", "diffJob", "default1", null); - Assert.fail(); - } catch (HiveReplicationException e) { - Assert.assertEquals(e.getMessage(), - "Two different jobs are attempting to replicate to same db default1." - + " New Job = diffJob, Existing Job = jobname"); - } - - try { - // same source, different job, same DB, same table : fail - drStatusStore.checkForReplicationConflict("source", "diffJob", "default1", "table1"); - Assert.fail(); - } catch (HiveReplicationException e) { - Assert.assertEquals(e.getMessage(), - "Two different jobs are trying to replicate to same table table1." - + " New job = diffJob, Existing job = jobname"); - } - - - } - - public void deleteReplicationStatusTest() throws Exception { - ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "deleteJob", - "deleteDB", null, ReplicationStatus.Status.SUCCESS, 20L); - ReplicationStatus table1 = new ReplicationStatus("source", "target", "deleteJob", - "deleteDB", "Table1", ReplicationStatus.Status.SUCCESS, 20L); - ArrayList replicationStatusList = new ArrayList(); - replicationStatusList.add(table1); - replicationStatusList.add(dbStatus); - drStatusStore.updateReplicationStatus("deleteJob", replicationStatusList); - - ReplicationStatus status = drStatusStore.getReplicationStatus("source", "target", "deleteJob", "deleteDB"); - Path statusPath = drStatusStore.getStatusDirPath(status.getDatabase(), status.getJobName()); - Assert.assertEquals(fileSystem.exists(statusPath), true); - - drStatusStore.deleteReplicationStatus("deleteJob", "deleteDB"); - Assert.assertEquals(fileSystem.exists(statusPath), false); - } - - public void getReplicationStatusTableTest() throws HiveReplicationException { - ReplicationStatus status = drStatusStore.getReplicationStatus("source", "target", - "jobname", "default1", "table1"); - Assert.assertEquals(status.getEventId(), 20); - Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.SUCCESS); - Assert.assertEquals(status.getTable(), "table1"); - - status = drStatusStore.getReplicationStatus("source", "target", - "jobname", "Default1", "Table2"); - Assert.assertEquals(status.getEventId(), -1); - Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.INIT); - Assert.assertEquals(status.getTable(), "table2"); - - status = drStatusStore.getReplicationStatus("source", "target", - "jobname", "default1", "Table3"); - Assert.assertEquals(status.getEventId(), 15); - Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.FAILURE); - Assert.assertEquals(status.getTable(), "table3"); - - status = 
drStatusStore.getReplicationStatus("source", "target", - "jobname", "default1", "table4"); - Assert.assertEquals(status.getEventId(), 18); - Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.FAILURE); - Assert.assertEquals(status.getTable(), "table4"); - } - - public void getTableReplicationStatusesInDbTest() throws HiveReplicationException { - Iterator iter = drStatusStore.getTableReplicationStatusesInDb("source", "target", - "jobname", "Default1"); - int size = 0; - while(iter.hasNext()) { - size++; - ReplicationStatus status = iter.next(); - if (status.getTable().equals("table3")) { - Assert.assertEquals(status.getEventId(), 15); - Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.FAILURE); - Assert.assertEquals(status.getTable(), "table3"); - } - } - Assert.assertEquals(4, size); - } - - public void fileRotationTest() throws Exception { - // initialize replication status store for db default3. - // This should init with eventId = -1 and status = INIT - ReplicationStatus status = drStatusStore.getReplicationStatus("source", "target", - "jobname3", "default3"); - Assert.assertEquals(status.getEventId(), -1); - Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.INIT); - - // update status 5 times resulting in 6 files : latest.json + five rotated files - ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "jobname3", - "Default3", null, ReplicationStatus.Status.SUCCESS, 20L); - ReplicationStatus table1 = new ReplicationStatus("source", "target", "jobname3", - "default3", "Table1", ReplicationStatus.Status.SUCCESS, 20L); - ArrayList replicationStatusList = new ArrayList(); - replicationStatusList.add(table1); - replicationStatusList.add(dbStatus); - - for(int i=0; i<5; i++) { - Thread.sleep(2000); - drStatusStore.updateReplicationStatus("jobname3", replicationStatusList); - } - - status = drStatusStore.getReplicationStatus("source", "target", "jobname3", "default3"); - Path statusPath = drStatusStore.getStatusDirPath(status.getDatabase(), status.getJobName()); - RemoteIterator iter = fileSystem.listFiles(statusPath, false); - Assert.assertEquals(getRemoteIterSize(iter), 6); - - drStatusStore.rotateStatusFiles(statusPath, 3, 10000000); - iter = fileSystem.listFiles(statusPath, false); - Assert.assertEquals(getRemoteIterSize(iter), 6); - - drStatusStore.rotateStatusFiles(statusPath, 3, 6000); - iter = fileSystem.listFiles(statusPath, false); - Assert.assertEquals(getRemoteIterSize(iter), 3); - } - - public void wrongJobNameTest() throws Exception { - ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "jobname3", - "Default3", null, ReplicationStatus.Status.SUCCESS, 20L); - ArrayList replicationStatusList = new ArrayList(); - replicationStatusList.add(dbStatus); - - try { - drStatusStore.updateReplicationStatus("jobname2", replicationStatusList); - Assert.fail(); - } catch (HiveReplicationException e) { - // Expected exception due to jobname mismatch - } - } - - @AfterClass - public void cleanUp() throws IOException { - fileSystem.delete(new Path(DRStatusStore.BASE_DEFAULT_STORE_PATH), true); - } - - private int getRemoteIterSize(RemoteIterator iter) throws IOException { - int size = 0; - while(iter.hasNext()) { - iter.next(); - size++; - } - return size; - } - - -} http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRTest.java ---------------------------------------------------------------------- diff --git 
a/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRTest.java b/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRTest.java deleted file mode 100644 index cdeddaa..0000000 --- a/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRTest.java +++ /dev/null @@ -1,252 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.falcon.hive; - -import com.google.common.base.Function; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.io.IOUtils; -import org.apache.falcon.cluster.util.EmbeddedCluster; -import org.apache.falcon.hadoop.JailedFileSystem; -import org.apache.falcon.hive.util.DRStatusStore; -import org.apache.falcon.hive.util.DelimiterUtils; -import org.apache.falcon.hive.util.EventSourcerUtils; -import org.apache.falcon.hive.util.HiveDRStatusStore; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocalFileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hive.hcatalog.api.HCatClient; -import org.apache.hive.hcatalog.api.HCatNotificationEvent; -import org.apache.hive.hcatalog.api.repl.Command; -import org.apache.hive.hcatalog.api.repl.ReplicationTask; -import org.apache.hive.hcatalog.api.repl.ReplicationUtils; -import org.apache.hive.hcatalog.api.repl.StagingDirectoryProvider; -import org.apache.hive.hcatalog.common.HCatConstants; -import org.apache.hive.hcatalog.messaging.MessageFactory; -import org.testng.Assert; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - -import javax.annotation.Nullable; -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.ArrayList; -import java.util.List; - -/** - * Test for Hive DR export and import. 
- */ -public class HiveDRTest { - private FileSystem fileSystem; - private HCatClient client; - private MetaStoreEventSourcer sourcer; - private EmbeddedCluster cluster; - private String dbName = "testdb"; - private String tableName = "testtable"; - private StagingDirectoryProvider stagingDirectoryProvider; - private MessageFactory msgFactory = MessageFactory.getInstance(); - - @BeforeMethod - public void setup() throws Exception { - client = HCatClient.create(new HiveConf()); - initializeFileSystem(); - sourcer = new MetaStoreEventSourcer(client, null, new EventSourcerUtils(cluster.getConf(), - false, "hiveReplTest"), null); - stagingDirectoryProvider = new StagingDirectoryProvider.TrivialImpl("/tmp", "/"); - } - - private void initializeFileSystem() throws Exception { - cluster = EmbeddedCluster.newCluster("hivedr"); - fileSystem = new JailedFileSystem(); - Path storePath = new Path(DRStatusStore.BASE_DEFAULT_STORE_PATH); - fileSystem.initialize(LocalFileSystem.getDefaultUri(cluster.getConf()), cluster.getConf()); - if (fileSystem.exists(storePath)) { - fileSystem.delete(storePath, true); - } - FileSystem.mkdirs(fileSystem, storePath, DRStatusStore.DEFAULT_STORE_PERMISSION); - HiveDRStatusStore drStatusStore = new HiveDRStatusStore(fileSystem, - fileSystem.getFileStatus(storePath).getGroup()); - } - - // Dummy mapping used for all db and table name mappings - private Function debugMapping = new Function(){ - @Nullable - @Override - public String apply(@Nullable String s) { - if (s == null){ - return null; - } else { - StringBuilder sb = new StringBuilder(s); - return sb.toString() + sb.reverse().toString(); - } - } - }; - - @Test - public void testExportImportReplication() throws Exception { - Table t = new Table(); - t.setDbName(dbName); - t.setTableName(tableName); - NotificationEvent event = new NotificationEvent(getEventId(), getTime(), - HCatConstants.HCAT_CREATE_TABLE_EVENT, msgFactory.buildCreateTableMessage(t).toString()); - event.setDbName(t.getDbName()); - event.setTableName(t.getTableName()); - - HCatNotificationEvent hev = new HCatNotificationEvent(event); - ReplicationTask rtask = ReplicationTask.create(client, hev); - - Assert.assertEquals(hev.toString(), rtask.getEvent().toString()); - verifyExportImportReplicationTask(rtask); - } - - private void verifyExportImportReplicationTask(ReplicationTask rtask) throws Exception { - Assert.assertEquals(true, rtask.needsStagingDirs()); - Assert.assertEquals(false, rtask.isActionable()); - - rtask.withSrcStagingDirProvider(stagingDirectoryProvider) - .withDstStagingDirProvider(stagingDirectoryProvider) - .withDbNameMapping(debugMapping) - .withTableNameMapping(debugMapping); - - List taskAdd = new ArrayList(); - taskAdd.add(rtask); - sourcer.processTableReplicationEvents(taskAdd.iterator(), dbName, tableName, - stagingDirectoryProvider.toString(), stagingDirectoryProvider.toString()); - - String metaFileName = sourcer.persistToMetaFile("hiveReplTest"); - String event = readEventFile(new Path(metaFileName)); - Assert.assertEquals(event.split(DelimiterUtils.FIELD_DELIM).length, 4); - Assert.assertEquals(dbName, - new String(Base64.decodeBase64(event.split(DelimiterUtils.FIELD_DELIM)[0]), "UTF-8")); - Assert.assertEquals(tableName, - new String(Base64.decodeBase64(event.split(DelimiterUtils.FIELD_DELIM)[1]), "UTF-8")); - - String exportStr = readEventFile(new Path(event.split(DelimiterUtils.FIELD_DELIM)[2])); - String[] commandList = exportStr.split(DelimiterUtils.NEWLINE_DELIM); - for (String command : commandList) { - Command cmd = 
ReplicationUtils.deserializeCommand(command); - Assert.assertEquals(cmd.getEventId(), 42); - for(String stmt : cmd.get()) { - Assert.assertTrue(stmt.startsWith("EXPORT TABLE")); - } - } - - String importStr = readEventFile(new Path(event.split(DelimiterUtils.FIELD_DELIM)[3])); - commandList = importStr.split(DelimiterUtils.NEWLINE_DELIM); - for (String command : commandList) { - Command cmd = ReplicationUtils.deserializeCommand(command); - Assert.assertEquals(cmd.getEventId(), 42); - for (String stmt : cmd.get()) { - Assert.assertTrue(stmt.startsWith("IMPORT TABLE")); - } - } - } - - @Test - public void testImportReplication() throws Exception { - Table t = new Table(); - t.setDbName("testdb"); - t.setTableName("testtable"); - NotificationEvent event = new NotificationEvent(getEventId(), getTime(), - HCatConstants.HCAT_DROP_TABLE_EVENT, msgFactory.buildDropTableMessage(t).toString()); - event.setDbName(t.getDbName()); - event.setTableName(t.getTableName()); - - HCatNotificationEvent hev = new HCatNotificationEvent(event); - ReplicationTask rtask = ReplicationTask.create(client, hev); - - Assert.assertEquals(hev.toString(), rtask.getEvent().toString()); - verifyImportReplicationTask(rtask); - } - - private void verifyImportReplicationTask(ReplicationTask rtask) throws Exception { - Assert.assertEquals(false, rtask.needsStagingDirs()); - Assert.assertEquals(true, rtask.isActionable()); - rtask.withDbNameMapping(debugMapping) - .withTableNameMapping(debugMapping); - - List taskAdd = new ArrayList(); - taskAdd.add(rtask); - sourcer.processTableReplicationEvents(taskAdd.iterator(), dbName, tableName, - stagingDirectoryProvider.toString(), stagingDirectoryProvider.toString()); - String persistFileName = sourcer.persistToMetaFile("hiveReplTest"); - String event = readEventFile(new Path(persistFileName)); - - Assert.assertEquals(event.split(DelimiterUtils.FIELD_DELIM).length, 4); - Assert.assertEquals(dbName, - new String(Base64.decodeBase64(event.split(DelimiterUtils.FIELD_DELIM)[0]), "UTF-8")); - Assert.assertEquals(tableName, - new String(Base64.decodeBase64(event.split(DelimiterUtils.FIELD_DELIM)[1]), "UTF-8")); - - String exportStr = readEventFile(new Path(event.split(DelimiterUtils.FIELD_DELIM)[2])); - String[] commandList = exportStr.split(DelimiterUtils.NEWLINE_DELIM); - for (String command : commandList) { - Command cmd = ReplicationUtils.deserializeCommand(command); - Assert.assertEquals(cmd.getEventId(), 42); - Assert.assertEquals(cmd.get().size(), 0); //In case of drop size of export is 0. Metadata operation - } - - String importStr = readEventFile(new Path(event.split(DelimiterUtils.FIELD_DELIM)[3])); - commandList = importStr.split(DelimiterUtils.NEWLINE_DELIM); - for (String command : commandList) { - Command cmd = ReplicationUtils.deserializeCommand(command); - Assert.assertEquals(cmd.getEventId(), 42); - for (String stmt : cmd.get()) { - Assert.assertTrue(stmt.startsWith("DROP TABLE")); - } - } - } - - private long getEventId() { - // Does not need to be unique, just non-zero distinct value to test against. - return 42; - } - - private int getTime() { - // Does not need to be actual time, just non-zero distinct value to test against. 
- return 1729; - } - - private String readEventFile(Path eventFileName) throws IOException { - StringBuilder eventString = new StringBuilder(); - BufferedReader in = new BufferedReader(new InputStreamReader( - fileSystem.open(eventFileName))); - try { - String line; - while ((line=in.readLine())!=null) { - eventString.append(line); - } - } catch (Exception e) { - throw new IOException(e); - } finally { - IOUtils.closeQuietly(in); - } - return eventString.toString(); - } - - @AfterMethod - public void tearDown() throws Exception { - client.close(); - } - -} http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/test/java/org/apache/falcon/hive/ReplicationStatusTest.java ---------------------------------------------------------------------- diff --git a/addons/hivedr/src/test/java/org/apache/falcon/hive/ReplicationStatusTest.java b/addons/hivedr/src/test/java/org/apache/falcon/hive/ReplicationStatusTest.java deleted file mode 100644 index a02639c..0000000 --- a/addons/hivedr/src/test/java/org/apache/falcon/hive/ReplicationStatusTest.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.falcon.hive; - -import org.apache.falcon.hive.exception.HiveReplicationException; -import org.apache.falcon.hive.util.ReplicationStatus; -import org.testng.Assert; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.Test; - -/** - * Unit tests for ReplicationStatus. 
- */ -@Test -public class ReplicationStatusTest { - - private ReplicationStatus dbStatus, tableStatus; - - public ReplicationStatusTest() {} - - - @BeforeClass - public void prepare() throws Exception { - dbStatus = new ReplicationStatus("source", "target", "jobname", - "default1", null, ReplicationStatus.Status.INIT, 0L); - tableStatus = new ReplicationStatus("source", "target", "jobname", - "testDb", "Table1", ReplicationStatus.Status.SUCCESS, 0L); - } - - public void replicationStatusSerializeTest() throws Exception { - String expected = "{\n \"sourceUri\": \"source\",\n" - + " \"targetUri\": \"target\",\n \"jobName\": \"jobname\",\n" - + " \"database\": \"testdb\",\n \"table\": \"table1\",\n" - + " \"status\": \"SUCCESS\",\n \"eventId\": 0\n}"; - String actual = tableStatus.toJsonString(); - Assert.assertEquals(actual, expected); - - expected = "{\n \"sourceUri\": \"source\",\n \"targetUri\": \"target\",\n" - + " \"jobName\": \"jobname\",\n \"database\": \"default1\",\n" - + " \"status\": \"INIT\",\n \"eventId\": 0\n}"; - actual = dbStatus.toJsonString(); - Assert.assertEquals(actual, expected); - } - - public void replicationStatusDeserializeTest() throws Exception { - String tableInput = "{\n \"sourceUri\": \"source\",\n" - + " \"targetUri\": \"target\",\n \"jobName\": \"testJob\",\n" - + " \"database\": \"Test1\",\n \"table\": \"table1\",\n" - + " \"status\": \"SUCCESS\",\n \"eventId\": 0\n}"; - String dbInput = "{ \"sourceUri\": \"source\", \"targetUri\": \"target\",\"jobName\": \"jobname\",\n" - + " \"database\": \"default1\", \"status\": \"FAILURE\"," - + " \"eventId\": 27, \"statusLog\": \"testLog\"}"; - - ReplicationStatus newDbStatus = new ReplicationStatus(dbInput); - ReplicationStatus newTableStatus = new ReplicationStatus(tableInput); - - Assert.assertEquals(newDbStatus.getTable(), null); - Assert.assertEquals(newDbStatus.getEventId(), 27); - Assert.assertEquals(newDbStatus.getDatabase(), "default1"); - Assert.assertEquals(newDbStatus.getLog(), "testLog"); - Assert.assertEquals(newDbStatus.getStatus(), ReplicationStatus.Status.FAILURE); - - - Assert.assertEquals(newTableStatus.getTable(), "table1"); - Assert.assertEquals(newTableStatus.getEventId(), 0); - Assert.assertEquals(newTableStatus.getDatabase(), "test1"); - Assert.assertEquals(newTableStatus.getJobName(), "testJob"); - - // no table, no eventId, no log - dbInput = "{\n \"sourceUri\": \"source\",\n" - + " \"targetUri\": \"target\",\n \"jobName\": \"testJob\",\n" - + " \"database\": \"Test1\",\n" - + " \"status\": \"SUCCESS\"\n}"; - newDbStatus = new ReplicationStatus(dbInput); - - Assert.assertEquals(newDbStatus.getDatabase(), "test1"); - Assert.assertEquals(newDbStatus.getTable(), null); - Assert.assertEquals(newDbStatus.getEventId(), -1); - Assert.assertEquals(newDbStatus.getLog(), null); - - } - - public void invalidEventIdTest() throws Exception { - String tableInput = "{\n \"sourceUri\": \"source\",\n" - + " \"targetUri\": \"target\",\n \"jobName\": \"testJob\",\n" - + " \"database\": \"test1\",\n \"table\": \"table1\",\n" - + " \"status\": \"SUCCESS\",\n \"eventId\": -100\n}"; - - ReplicationStatus newTableStatus = new ReplicationStatus(tableInput); - Assert.assertEquals(newTableStatus.getEventId(), -1); - - newTableStatus.setEventId(-200); - Assert.assertEquals(newTableStatus.getEventId(), -1); - - String expected = "{\n \"sourceUri\": \"source\",\n" - + " \"targetUri\": \"target\",\n \"jobName\": \"testJob\",\n" - + " \"database\": \"test1\",\n \"table\": \"table1\",\n" - + " \"status\": \"SUCCESS\",\n 
\"eventId\": -1\n}"; - String actual = newTableStatus.toJsonString(); - Assert.assertEquals(actual, expected); - - newTableStatus.setEventId(50); - Assert.assertEquals(newTableStatus.getEventId(), 50); - } - - public void invalidStatusTest() throws Exception { - - String dbInput = "{ \"sourceUri\": \"source\", \"targetUri\": \"target\",\"jobName\": \"jobname\",\n" - + " \"database\": \"default1\", \"status\": \"BLAH\"," - + " \"eventId\": 27, \"statusLog\": \"testLog\"}"; - - try { - new ReplicationStatus(dbInput); - Assert.fail(); - } catch (HiveReplicationException e) { - Assert.assertEquals(e.getMessage(), - "Unable to deserialize jsonString to ReplicationStatus. Invalid status BLAH"); - } - } - - -} http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hdfs-replication/README.txt ---------------------------------------------------------------------- diff --git a/addons/recipes/hdfs-replication/README.txt b/addons/recipes/hdfs-replication/README.txt deleted file mode 100644 index 5742d43..0000000 --- a/addons/recipes/hdfs-replication/README.txt +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDFS Directory Replication Recipe - -Overview -This recipe implements replicating arbitrary directories on HDFS from one -Hadoop cluster to another Hadoop cluster. -This piggy backs on replication solution in Falcon which uses the DistCp tool. - -Use Case -* Copy directories between HDFS clusters with out dated partitions -* Archive directories from HDFS to Cloud. Ex: S3, Azure WASB - -Limitations -As the data volume and number of files grow, this can get inefficient. 
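
The README removed above says the recipe piggybacks on Falcon's replication path, which hands the actual copy to DistCp (the workflow deleted further down in this message invokes org.apache.falcon.replication.FeedReplicator with -maxMaps, -mapBandwidth, -sourcePaths and -targetPath). Purely as an illustration, and not part of this commit, the minimal Java sketch below drives Hadoop's DistCp directly with the same knobs. The class name is invented, the endpoints and limits copy the sample values from the hdfs-replication.properties file removed later in this diff, and the DistCpOptions constructor shown is the Hadoop 2.x form (Hadoop 3.x replaces it with DistCpOptions.Builder).

import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.tools.DistCpOptions;

/** Hypothetical stand-alone sketch of the directory copy the recipe schedules through Falcon/Oozie. */
public final class HdfsReplicationSketch {
    public static void main(String[] args) throws Exception {
        // Sample endpoints and paths taken from the removed hdfs-replication.properties.
        Path source = new Path("hdfs://240.0.0.10:8020/user/hrt_qa/dr/test/primaryCluster/input");
        Path target = new Path("hdfs://240.0.0.11:8020/user/hrt_qa/dr/test/backupCluster/input");

        // Hadoop 2.x API: maxMaps/mapBandwidth correspond to distcpMaxMaps/distcpMapBandwidth.
        DistCpOptions options = new DistCpOptions(Collections.singletonList(source), target);
        options.setMaxMaps(1);
        options.setMapBandwidth(100); // MB per map task

        // Submits the DistCp MapReduce job; with the default (blocking) options this waits for completion.
        new DistCp(new Configuration(), options).execute();
    }
}

In the recipe itself none of these values are hard-coded; they are template properties that Falcon substitutes into the workflow when the recipe is submitted.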
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hdfs-replication/pom.xml ---------------------------------------------------------------------- diff --git a/addons/recipes/hdfs-replication/pom.xml b/addons/recipes/hdfs-replication/pom.xml deleted file mode 100644 index 98d9795..0000000 --- a/addons/recipes/hdfs-replication/pom.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - 4.0.0 - org.apache.falcon.recipes - falcon-hdfs-replication-recipe - 0.10-SNAPSHOT - Apache Falcon Sample Hdfs Replicaiton Recipe - Apache Falcon Sample Hdfs Replication Recipe - jar - http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml ---------------------------------------------------------------------- diff --git a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml b/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml deleted file mode 100644 index 441a189..0000000 --- a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - _falcon_mirroring_type=HDFS - - 1 - - LAST_ONLY - ##falcon.recipe.frequency## - UTC - - - - - - - - - - \ No newline at end of file http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml ---------------------------------------------------------------------- diff --git a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml b/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml deleted file mode 100644 index c1966be..0000000 --- a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml +++ /dev/null @@ -1,82 +0,0 @@ - - - - - - - ${jobTracker} - ${nameNode} - - - oozie.launcher.mapreduce.job.user.classpath.first - true - - - mapred.job.queue.name - ${queueName} - - - oozie.launcher.mapred.job.priority - ${jobPriority} - - - oozie.use.system.libpath - true - - - oozie.action.sharelib.for.java - distcp - - - oozie.launcher.oozie.libpath - ${wf:conf("falcon.libpath")} - - - oozie.launcher.mapreduce.job.hdfs-servers - ${drSourceClusterFS},${drTargetClusterFS} - - - org.apache.falcon.replication.FeedReplicator - -Dmapred.job.queue.name=${queueName} - -Dmapred.job.priority=${jobPriority} - -maxMaps - ${distcpMaxMaps} - -mapBandwidth - ${distcpMapBandwidth} - -sourcePaths - ${drSourceDir} - -targetPath - ${drTargetClusterFS}${drTargetDir} - -falconFeedStorageType - FILESYSTEM - -availabilityFlag - ${availabilityFlag == 'NA' ? "NA" : availabilityFlag} - -counterLogDir - ${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? '' : srcClusterName} - - - - - - - Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}] - - - - http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties ---------------------------------------------------------------------- diff --git a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties b/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties deleted file mode 100644 index 4642835..0000000 --- a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties +++ /dev/null @@ -1,79 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -##### NOTE: This is a TEMPLATE file which can be copied and edited - -##### Recipe properties -##### Unique recipe job name -falcon.recipe.name=sales-monthly - -##### Workflow properties -falcon.recipe.workflow.name=hdfs-dr-workflow -# Provide Wf absolute path. This can be HDFS or local FS path. If WF is on local FS it will be copied to HDFS -falcon.recipe.workflow.path=/apps/data-mirroring/workflows/hdfs-replication-workflow.xml -# Provide Wf lib absolute path. This can be HDFS or local FS path. If libs are on local FS it will be copied to HDFS -#falcon.recipe.workflow.lib.path=/recipes/hdfs-replication/lib - -##### Cluster properties -# Cluster where job should run -falcon.recipe.cluster.name=primaryCluster -# Change the cluster hdfs write end point here. This is mandatory. -falcon.recipe.cluster.hdfs.writeEndPoint=hdfs://240.0.0.10:8020 -# Change the cluster validity start time here -falcon.recipe.cluster.validity.start=2015-03-13T00:00Z -# Change the cluster validity end time here -falcon.recipe.cluster.validity.end=2016-12-30T00:00Z - -##### Scheduling properties -# Change the recipe frequency here. 
Valid frequency type are minutes, hours, days, months -falcon.recipe.process.frequency=minutes(5) - -##### Tag properties - An optional list of comma separated tags, Key Value Pairs, separated by comma -##### Uncomment to add tags -#falcon.recipe.tags= - -##### Retry policy properties - -falcon.recipe.retry.policy=periodic -falcon.recipe.retry.delay=minutes(30) -falcon.recipe.retry.attempts=3 -falcon.recipe.retry.onTimeout=false - -##### ACL properties - Uncomment and change ACL if authorization is enabled - -falcon.recipe.acl.owner=ambari-qa -falcon.recipe.acl.group=users -falcon.recipe.acl.permission=0x755 -falcon.recipe.nn.principal=nn/_HOST@EXAMPLE.COM - -##### Custom Job properties - -# Specify multiple comma separated source directories -drSourceDir=/user/hrt_qa/dr/test/primaryCluster/input -drSourceClusterFS=hdfs://240.0.0.10:8020 -drTargetDir=/user/hrt_qa/dr/test/backupCluster/input -drTargetClusterFS=hdfs://240.0.0.11:8020 - -# Change it to specify the maximum number of mappers for DistCP -distcpMaxMaps=1 -# Change it to specify the bandwidth in MB for each mapper in DistCP -distcpMapBandwidth=100 - -##### Email Notification for Falcon instance completion -falcon.recipe.notification.type=email -falcon.recipe.notification.receivers=NA \ No newline at end of file http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/README.txt ---------------------------------------------------------------------- diff --git a/addons/recipes/hive-disaster-recovery/README.txt b/addons/recipes/hive-disaster-recovery/README.txt deleted file mode 100644 index ab393b1..0000000 --- a/addons/recipes/hive-disaster-recovery/README.txt +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -Hive Metastore Disaster Recovery Recipe - -Overview -This recipe implements replicating hive metadata and data from one -Hadoop cluster to another Hadoop cluster. -This piggy backs on replication solution in Falcon which uses the DistCp tool. - -Use Case -* -* - -Limitations -* -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -Hive Metastore Disaster Recovery Recipe - -Overview -This recipe implements replicating hive metadata and data from one -Hadoop cluster to another Hadoop cluster. -This piggy backs on replication solution in Falcon which uses the DistCp tool. - -Use Case -* -* - -Limitations -* http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/pom.xml ---------------------------------------------------------------------- diff --git a/addons/recipes/hive-disaster-recovery/pom.xml b/addons/recipes/hive-disaster-recovery/pom.xml deleted file mode 100644 index 0f782d2..0000000 --- a/addons/recipes/hive-disaster-recovery/pom.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - 4.0.0 - org.apache.falcon.recipes - falcon-hive-replication-recipe - 0.10-SNAPSHOT - Apache Falcon Hive Disaster Recovery Recipe - Apache Falcon Sample Hive Disaster Recovery Recipe - jar - http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml ---------------------------------------------------------------------- diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml deleted file mode 100644 index f0de091..0000000 --- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - - - - - - - _falcon_mirroring_type=HIVE - - 1 - - LAST_ONLY - ##process.frequency## - UTC - - - - - - - - - - http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml ---------------------------------------------------------------------- diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml deleted file mode 100644 index 0494cf6..0000000 --- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml +++ /dev/null @@ -1,357 +0,0 @@ - - - - - - hcat.metastore.uri - ${sourceMetastoreUri} - - - hcat.metastore.principal - ${sourceHiveMetastoreKerberosPrincipal} - - - - - hcat.metastore.uri - ${targetMetastoreUri} - - - hcat.metastore.principal - ${targetHiveMetastoreKerberosPrincipal} - - - - - hive2.server.principal - ${sourceHive2KerberosPrincipal} - - - hive2.jdbc.url - jdbc:${sourceHiveServer2Uri}/${sourceDatabase} - - - - - hive2.server.principal - ${targetHive2KerberosPrincipal} - - - hive2.jdbc.url - jdbc:${targetHiveServer2Uri}/${sourceDatabase} - - - - - - - ${jobTracker} - ${nameNode} - - - oozie.launcher.mapreduce.job.user.classpath.first - true - - - mapred.job.queue.name - ${queueName} - - - oozie.launcher.mapred.job.priority - ${jobPriority} - - - oozie.use.system.libpath - true - - - oozie.action.sharelib.for.java - distcp,hive,hive2,hcatalog - - - oozie.launcher.mapreduce.job.hdfs-servers - ${sourceNN},${targetNN} - - - mapreduce.job.hdfs-servers - ${sourceNN},${targetNN} - - - org.apache.falcon.hive.HiveDRTool - -Dmapred.job.queue.name=${queueName} - -Dmapred.job.priority=${jobPriority} - -falconLibPath - ${wf:conf("falcon.libpath")} - -sourceCluster - 
${sourceCluster} - -sourceMetastoreUri - ${sourceMetastoreUri} - -sourceHiveServer2Uri - ${sourceHiveServer2Uri} - -sourceDatabase - ${sourceDatabase} - -sourceTable - ${sourceTable} - -sourceStagingPath - ${sourceStagingPath} - -sourceNN - ${sourceNN} - -sourceNNKerberosPrincipal - ${sourceNNKerberosPrincipal} - -sourceHiveMetastoreKerberosPrincipal - ${sourceHiveMetastoreKerberosPrincipal} - -sourceHive2KerberosPrincipal - ${sourceHive2KerberosPrincipal} - -targetCluster - ${targetCluster} - -targetMetastoreUri - ${targetMetastoreUri} - -targetHiveServer2Uri - ${targetHiveServer2Uri} - -targetStagingPath - ${targetStagingPath} - -targetNN - ${targetNN} - -targetNNKerberosPrincipal - ${targetNNKerberosPrincipal} - -targetHiveMetastoreKerberosPrincipal - ${targetHiveMetastoreKerberosPrincipal} - -targetHive2KerberosPrincipal - ${targetHive2KerberosPrincipal} - -maxEvents - ${maxEvents} - -clusterForJobRun - ${clusterForJobRun} - -clusterForJobRunWriteEP - ${clusterForJobRunWriteEP} - -clusterForJobNNKerberosPrincipal - ${clusterForJobNNKerberosPrincipal} - -drJobName - ${drJobName}-${nominalTime} - -executionStage - lastevents - - - - - - - - ${jobTracker} - ${nameNode} - - - oozie.launcher.mapreduce.job.user.classpath.first - true - - - mapred.job.queue.name - ${queueName} - - - oozie.launcher.mapred.job.priority - ${jobPriority} - - - oozie.use.system.libpath - true - - - oozie.action.sharelib.for.java - distcp,hive,hive2,hcatalog - - - oozie.launcher.mapreduce.job.hdfs-servers - ${sourceNN},${targetNN} - - - mapreduce.job.hdfs-servers - ${sourceNN},${targetNN} - - - org.apache.falcon.hive.HiveDRTool - -Dmapred.job.queue.name=${queueName} - -Dmapred.job.priority=${jobPriority} - -falconLibPath - ${wf:conf("falcon.libpath")} - -replicationMaxMaps - ${replicationMaxMaps} - -distcpMaxMaps - ${distcpMaxMaps} - -sourceCluster - ${sourceCluster} - -sourceMetastoreUri - ${sourceMetastoreUri} - -sourceHiveServer2Uri - ${sourceHiveServer2Uri} - -sourceDatabase - ${sourceDatabase} - -sourceTable - ${sourceTable} - -sourceStagingPath - ${sourceStagingPath} - -sourceNN - ${sourceNN} - -sourceNNKerberosPrincipal - ${sourceNNKerberosPrincipal} - -sourceHiveMetastoreKerberosPrincipal - ${sourceHiveMetastoreKerberosPrincipal} - -sourceHive2KerberosPrincipal - ${sourceHive2KerberosPrincipal} - -targetCluster - ${targetCluster} - -targetMetastoreUri - ${targetMetastoreUri} - -targetHiveServer2Uri - ${targetHiveServer2Uri} - -targetStagingPath - ${targetStagingPath} - -targetNN - ${targetNN} - -targetNNKerberosPrincipal - ${targetNNKerberosPrincipal} - -targetHiveMetastoreKerberosPrincipal - ${targetHiveMetastoreKerberosPrincipal} - -targetHive2KerberosPrincipal - ${targetHive2KerberosPrincipal} - -maxEvents - ${maxEvents} - -distcpMapBandwidth - ${distcpMapBandwidth} - -clusterForJobRun - ${clusterForJobRun} - -clusterForJobRunWriteEP - ${clusterForJobRunWriteEP} - -clusterForJobNNKerberosPrincipal - ${clusterForJobNNKerberosPrincipal} - -drJobName - ${drJobName}-${nominalTime} - -executionStage - export - -counterLogDir - ${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? 
'' : srcClusterName}/ - - - - - - - - ${jobTracker} - ${nameNode} - - - oozie.launcher.mapreduce.job.user.classpath.first - true - - - mapred.job.queue.name - ${queueName} - - - oozie.launcher.mapred.job.priority - ${jobPriority} - - - oozie.use.system.libpath - true - - - oozie.action.sharelib.for.java - distcp,hive,hive2,hcatalog - - - oozie.launcher.mapreduce.job.hdfs-servers - ${sourceNN},${targetNN} - - - mapreduce.job.hdfs-servers - ${sourceNN},${targetNN} - - - org.apache.falcon.hive.HiveDRTool - -Dmapred.job.queue.name=${queueName} - -Dmapred.job.priority=${jobPriority} - -falconLibPath - ${wf:conf("falcon.libpath")} - -replicationMaxMaps - ${replicationMaxMaps} - -distcpMaxMaps - ${distcpMaxMaps} - -sourceCluster - ${sourceCluster} - -sourceMetastoreUri - ${sourceMetastoreUri} - -sourceHiveServer2Uri - ${sourceHiveServer2Uri} - -sourceDatabase - ${sourceDatabase} - -sourceTable - ${sourceTable} - -sourceStagingPath - ${sourceStagingPath} - -sourceNN - ${sourceNN} - -sourceNNKerberosPrincipal - ${sourceNNKerberosPrincipal} - -sourceHiveMetastoreKerberosPrincipal - ${sourceHiveMetastoreKerberosPrincipal} - -sourceHive2KerberosPrincipal - ${sourceHive2KerberosPrincipal} - -targetCluster - ${targetCluster} - -targetMetastoreUri - ${targetMetastoreUri} - -targetHiveServer2Uri - ${targetHiveServer2Uri} - -targetStagingPath - ${targetStagingPath} - -targetNN - ${targetNN} - -targetNNKerberosPrincipal - ${targetNNKerberosPrincipal} - -targetHiveMetastoreKerberosPrincipal - ${targetHiveMetastoreKerberosPrincipal} - -targetHive2KerberosPrincipal - ${targetHive2KerberosPrincipal} - -maxEvents - ${maxEvents} - -distcpMapBandwidth - ${distcpMapBandwidth} - -clusterForJobRun - ${clusterForJobRun} - -clusterForJobRunWriteEP - ${clusterForJobRunWriteEP} - -clusterForJobNNKerberosPrincipal - ${clusterForJobNNKerberosPrincipal} - -drJobName - ${drJobName}-${nominalTime} - -executionStage - import - - - - - - - Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}] - - - -
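For reference, the custom job properties in the removed recipe file (drSourceDir, drSourceClusterFS, drTargetDir, drTargetClusterFS, distcpMaxMaps, distcpMapBandwidth) are plain java.util.Properties entries. Below is a minimal, hypothetical sketch of a client-side check that those keys are set before the file is handed to the recipe tooling; it is not part of the Falcon codebase, and the default file name used here is an assumption.

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

/** Hypothetical pre-submission check for the DR recipe properties shown above. */
public final class RecipePropertiesCheck {

    // Key names taken from the removed recipe properties file.
    private static final List<String> REQUIRED_KEYS = Arrays.asList(
            "falcon.recipe.process.frequency",
            "drSourceDir", "drSourceClusterFS",
            "drTargetDir", "drTargetClusterFS",
            "distcpMaxMaps", "distcpMapBandwidth");

    public static void main(String[] args) throws IOException {
        // Default file name is an assumption; pass the real recipe file as the first argument.
        String file = args.length > 0 ? args[0] : "dr-recipe.properties";
        Properties props = new Properties();
        try (FileInputStream in = new FileInputStream(file)) {
            props.load(in);
        }
        for (String key : REQUIRED_KEYS) {
            String value = props.getProperty(key);
            if (value == null || value.trim().isEmpty()) {
                throw new IllegalArgumentException("Missing recipe property: " + key);
            }
        }
        System.out.println("All required recipe properties are present in " + file);
    }
}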
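The removed secure workflow invokes org.apache.falcon.hive.HiveDRTool in three consecutive Java actions that share almost the same argument list and differ mainly in the -executionStage value (lastevents, export, import), with the job name suffixed by the nominal time. The sketch below only illustrates how such a per-stage argument list could be assembled; the class and the sample values are hypothetical, not Falcon code.

import java.util.ArrayList;
import java.util.List;

/** Hypothetical illustration of the staged HiveDRTool invocation in the deleted workflow. */
public final class HiveDrStageArgs {

    /** Builds a subset of the arguments shown in the removed workflow for one stage. */
    static List<String> argsForStage(String stage, String sourceCluster, String targetCluster,
                                     String sourceMetastoreUri, String targetMetastoreUri,
                                     String drJobName, String nominalTime) {
        List<String> args = new ArrayList<>();
        args.add("-sourceCluster");      args.add(sourceCluster);
        args.add("-sourceMetastoreUri"); args.add(sourceMetastoreUri);
        args.add("-targetCluster");      args.add(targetCluster);
        args.add("-targetMetastoreUri"); args.add(targetMetastoreUri);
        // The workflow suffixes the job name with the nominal time of the instance.
        args.add("-drJobName");          args.add(drJobName + "-" + nominalTime);
        // lastevents, export or import, matching the three workflow actions.
        args.add("-executionStage");     args.add(stage);
        return args;
    }

    public static void main(String[] args) {
        // Sample cluster names and URIs are placeholders, not values from the diff.
        for (String stage : new String[]{"lastevents", "export", "import"}) {
            System.out.println(argsForStage(stage, "primaryCluster", "backupCluster",
                    "thrift://source-metastore:9083", "thrift://target-metastore:9083",
                    "hive-dr-job", "2016-03-01T07-00Z"));
        }
    }
}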