falcon-commits mailing list archives

From prag...@apache.org
Subject [1/2] falcon git commit: FALCON-2127 Falcon regression compilation issue and deleting deprecated recipe test cases
Date Fri, 26 Aug 2016 12:24:53 GMT
Repository: falcon
Updated Branches:
  refs/heads/master 35b10b438 -> c00975e41


http://git-wip-us.apache.org/repos/asf/falcon/blob/c00975e4/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/HiveDbDRTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/HiveDbDRTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/HiveDbDRTest.java
deleted file mode 100644
index e281bee..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/HiveDbDRTest.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.hive.dr;
-
-import org.apache.falcon.cli.FalconCLI;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.regression.Entities.ClusterMerlin;
-import org.apache.falcon.regression.Entities.RecipeMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.supportClasses.NotifyingAssert;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.Config;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.HiveAssert;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.MatrixUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.List;
-
-import static org.apache.falcon.regression.core.util.HiveUtil.runSql;
-import static org.apache.falcon.regression.hive.dr.HiveObjectCreator.bootstrapCopy;
-import static org.apache.falcon.regression.hive.dr.HiveObjectCreator.createVanillaTable;
-
-/**
- * Hive DR Testing for Hive database replication.
- */
-@Test(groups = {"embedded", "multiCluster"})
-public class HiveDbDRTest extends BaseTestClass {
-    private static final Logger LOGGER = Logger.getLogger(HiveDbDRTest.class);
-    private final ColoHelper cluster = servers.get(0);
-    private final ColoHelper cluster2 = servers.get(1);
-    private final FileSystem clusterFS = serverFS.get(0);
-    private final FileSystem clusterFS2 = serverFS.get(1);
-    private final OozieClient clusterOC = serverOC.get(0);
-    private final OozieClient clusterOC2 = serverOC.get(1);
-    private HCatClient clusterHC;
-    private HCatClient clusterHC2;
-    private RecipeMerlin recipeMerlin;
-    private Connection connection;
-    private Connection connection2;
-
-    @DataProvider
-    public Object[][] getRecipeLocation() {
-        return MatrixUtil.crossProduct(RecipeExecLocation.values());
-    }
-
-    private void setUp(RecipeExecLocation recipeExecLocation) throws Exception {
-        clusterHC = cluster.getClusterHelper().getHCatClient();
-        clusterHC2 = cluster2.getClusterHelper().getHCatClient();
-        bundles[0] = new Bundle(BundleUtil.readHCatBundle(), cluster);
-        bundles[1] = new Bundle(BundleUtil.readHCatBundle(), cluster2);
-        bundles[0].generateUniqueBundle(this);
-        bundles[1].generateUniqueBundle(this);
-        final ClusterMerlin srcCluster = bundles[0].getClusterElement();
-        final ClusterMerlin tgtCluster = bundles[1].getClusterElement();
-        Bundle.submitCluster(recipeExecLocation.getRecipeBundle(bundles[0], bundles[1]));
-
-        String recipeDir = "HiveDrRecipe";
-        if (MerlinConstants.IS_SECURE) {
-            recipeDir = "HiveDrSecureRecipe";
-        }
-        recipeMerlin = RecipeMerlin.readFromDir(recipeDir, FalconCLI.RecipeOperation.HIVE_DISASTER_RECOVERY)
-            .withRecipeCluster(recipeExecLocation.getRecipeCluster(srcCluster, tgtCluster));
-        recipeMerlin.withSourceCluster(srcCluster)
-            .withTargetCluster(tgtCluster)
-            .withFrequency(new Frequency("5", Frequency.TimeUnit.minutes))
-            .withValidity(TimeUtil.getTimeWrtSystemTime(-1), TimeUtil.getTimeWrtSystemTime(11));
-        recipeMerlin.setUniqueName(this.getClass().getSimpleName());
-
-        connection = cluster.getClusterHelper().getHiveJdbcConnection();
-
-        connection2 = cluster2.getClusterHelper().getHiveJdbcConnection();
-    }
-
-    private void setUpDb(String dbName, Connection conn) throws SQLException {
-        runSql(conn, "drop database if exists " + dbName + " cascade");
-        runSql(conn, "create database " + dbName);
-        runSql(conn, "use " + dbName);
-    }
-
-    @Test(dataProvider = "getRecipeLocation")
-    public void drDbDropDb(final RecipeExecLocation recipeExecLocation) throws Exception {
-        setUp(recipeExecLocation);
-        final String dbName = "drDbDropDb";
-        setUpDb(dbName, connection);
-        setUpDb(dbName, connection2);
-        recipeMerlin.withSourceDb(dbName).withSourceTable("*");
-        final List<String> command = recipeMerlin.getSubmissionCommand();
-
-        Assert.assertEquals(Bundle.runFalconCLI(command), 0, "Recipe submission failed.");
-
-        runSql(connection, "drop database " + dbName);
-
-        InstanceUtil.waitTillInstanceReachState(recipeExecLocation.getRecipeOC(clusterOC, clusterOC2),
-            recipeMerlin.getName(), 1, CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-
-        final List<String> dstDbs = runSql(connection2, "show databases");
-        Assert.assertFalse(dstDbs.contains(dbName), "dstDbs = " + dstDbs + " was not expected to "
-            + "contain " + dbName);
-    }
-
-
-    @Test(dataProvider = "isDBReplication")
-    public void drDbFailPass(Boolean isDBReplication) throws Exception {
-        final RecipeExecLocation recipeExecLocation = RecipeExecLocation.SourceCluster;
-        setUp(recipeExecLocation);
-        final String dbName = "drDbFailPass";
-        final String tblName = "vanillaTable";
-        final String hiveWarehouseLocation = Config.getProperty("hive.warehouse.location", "/apps/hive/warehouse/");
-        final String dbPath = HadoopUtil.joinPath(hiveWarehouseLocation, dbName.toLowerCase() + ".db");
-        setUpDb(dbName, connection);
-        runSql(connection, "create table " + tblName + "(data string)");
-        setUpDb(dbName, connection2);
-        bootstrapCopy(connection, clusterFS, tblName, connection2, clusterFS2, tblName);
-
-        recipeMerlin.withSourceDb(dbName).withSourceTable(isDBReplication ? "*" : tblName);
-
-        final List<String> command = recipeMerlin.getSubmissionCommand();
-        Assert.assertEquals(Bundle.runFalconCLI(command), 0, "Recipe submission failed.");
-
-        runSql(connection, "insert into table " + tblName + " values('cannot be replicated now')");
-        final String noReadWritePerm = "d---r-xr-x";
-        LOGGER.info("Setting " + clusterFS2.getUri() + dbPath + " to : " + noReadWritePerm);
-        clusterFS2.setPermission(new Path(dbPath), FsPermission.valueOf(noReadWritePerm));
-
-        InstanceUtil.waitTillInstanceReachState(recipeExecLocation.getRecipeOC(clusterOC, clusterOC2),
-            recipeMerlin.getName(), 1, CoordinatorAction.Status.KILLED, EntityType.PROCESS);
-
-        final String readWritePerm = "drwxr-xr-x";
-        LOGGER.info("Setting " + clusterFS2.getUri() + dbPath + " to : " + readWritePerm);
-        clusterFS2.setPermission(new Path(dbPath), FsPermission.valueOf(readWritePerm));
-
-        InstanceUtil.waitTillInstanceReachState(recipeExecLocation.getRecipeOC(clusterOC, clusterOC2),
-            recipeMerlin.getName(), 1, CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-
-        HiveAssert.assertTableEqual(cluster, clusterHC.getTable(dbName, tblName),
-            cluster2, clusterHC2.getTable(dbName, tblName), new NotifyingAssert(true)
-        ).assertAll();
-    }
-
-    @Test
-    public void drDbAddDropTable() throws Exception {
-        final RecipeExecLocation recipeExecLocation = RecipeExecLocation.SourceCluster;
-        setUp(recipeExecLocation);
-        final String dbName = "drDbAddDropTable";
-        final String tblToBeDropped = "table_to_be_dropped";
-        final String tblToBeDroppedAndAdded = "table_to_be_dropped_and_readded";
-        final String newTableToBeAdded = "new_table_to_be_added";
-
-        setUpDb(dbName, connection);
-        setUpDb(dbName, connection2);
-        recipeMerlin.withSourceDb(dbName).withSourceTable("*")
-            .withFrequency(new Frequency("2", Frequency.TimeUnit.minutes));
-        final List<String> command = recipeMerlin.getSubmissionCommand();
-
-        createVanillaTable(connection, tblToBeDropped);
-        createVanillaTable(connection, tblToBeDroppedAndAdded);
-        bootstrapCopy(connection, clusterFS, tblToBeDropped,
-            connection2, clusterFS2, tblToBeDropped);
-        bootstrapCopy(connection, clusterFS, tblToBeDroppedAndAdded,
-            connection2, clusterFS2, tblToBeDroppedAndAdded);
-
-        /* For first replication - two tables are dropped & one table is added */
-        runSql(connection, "drop table " + tblToBeDropped);
-        runSql(connection, "drop table " + tblToBeDroppedAndAdded);
-        createVanillaTable(connection, newTableToBeAdded);
-
-        Assert.assertEquals(Bundle.runFalconCLI(command), 0, "Recipe submission failed.");
-
-        InstanceUtil.waitTillInstanceReachState(recipeExecLocation.getRecipeOC(clusterOC, clusterOC2),
-            recipeMerlin.getName(), 1, CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-
-        final NotifyingAssert anAssert = new NotifyingAssert(true);
-        HiveAssert.assertDbEqual(cluster, clusterHC.getDatabase(dbName),
-            cluster2, clusterHC2.getDatabase(dbName), anAssert);
-
-        /* For second replication - a dropped table is added back */
-        createVanillaTable(connection, tblToBeDroppedAndAdded);
-
-        InstanceUtil.waitTillInstanceReachState(recipeExecLocation.getRecipeOC(clusterOC, clusterOC2),
-            recipeMerlin.getName(), 2, CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-
-        HiveAssert.assertDbEqual(cluster, clusterHC.getDatabase(dbName),
-            cluster2, clusterHC2.getDatabase(dbName), anAssert);
-        anAssert.assertAll();
-    }
-
-    @Test
-    public void drDbNonReplicatableTable() throws Exception {
-        final RecipeExecLocation recipeExecLocation = RecipeExecLocation.SourceCluster;
-        setUp(recipeExecLocation);
-        final String dbName = "drDbNonReplicatableTable";
-        final String tblName = "vanillaTable";
-        final String tblView = "vanillaTableView";
-        final String tblOffline = "offlineTable";
-
-        setUpDb(dbName, connection);
-        setUpDb(dbName, connection2);
-        recipeMerlin.withSourceDb(dbName).withSourceTable("*")
-            .withFrequency(new Frequency("2", Frequency.TimeUnit.minutes));
-        final List<String> command = recipeMerlin.getSubmissionCommand();
-
-        createVanillaTable(connection, tblName);
-        runSql(connection, "create view " + tblView + " as select * from " + tblName);
-        createVanillaTable(connection, tblOffline);
-        bootstrapCopy(connection, clusterFS, tblName, connection2, clusterFS2, tblName);
-        bootstrapCopy(connection, clusterFS, tblOffline, connection2, clusterFS2, tblOffline);
-        final String newComment = "'new comment for offline table should not reach destination'";
-        runSql(connection,
-            "alter table " + tblOffline + " set tblproperties ('comment' =" + newComment +")");
-        runSql(connection, "alter table " + tblOffline + " enable offline");
-        Assert.assertEquals(Bundle.runFalconCLI(command), 0, "Recipe submission failed.");
-
-        InstanceUtil.waitTillInstanceReachState(recipeExecLocation.getRecipeOC(clusterOC, clusterOC2),
-            recipeMerlin.getName(), 1, CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-
-        //vanilla table gets replicated, offline table & view are not replicated
-        HiveAssert.assertTableEqual(cluster, clusterHC.getTable(dbName, tblName),
-            cluster2, clusterHC2.getTable(dbName, tblName), new NotifyingAssert(true)).assertAll();
-        final List<String> dstTables = runSql(connection2, "show tables");
-        Assert.assertFalse(dstTables.contains(tblView),
-            "dstTables = " + dstTables + " was not expected to contain " + tblView);
-        final List<String> dstComment =
-            runSql(connection2, "show tblproperties " + tblOffline + "('comment')");
-        Assert.assertFalse(dstComment.contains(newComment),
-            tblOffline + " comment = " + dstComment + " was not expected to contain " + newComment);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() throws IOException {
-        try {
-            prism.getProcessHelper().deleteByName(recipeMerlin.getName(), null);
-        } catch (Exception e) {
-            LOGGER.info("Deletion of process: " + recipeMerlin.getName() + " failed with exception: " + e);
-        }
-        removeTestClassEntities();
-        cleanTestsDirs();
-    }
-
-    @DataProvider
-    public Object[][] isDBReplication() {
-        return new Object[][]{{Boolean.TRUE}, {Boolean.FALSE}};
-    }
-}

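The tests above lean on InstanceUtil.waitTillInstanceReachState to block until an Oozie coordinator action reaches an expected status; that helper lives in falcon-regression's util package and is not part of this diff. A minimal polling sketch of the idea (a hypothetical stand-in, not the actual InstanceUtil implementation, which also handles entity-name lookup and per-entity-type timeouts):

    import java.util.List;
    import org.apache.oozie.client.CoordinatorAction;
    import org.apache.oozie.client.CoordinatorJob;
    import org.apache.oozie.client.OozieClient;

    // Hypothetical helper: poll a coordinator job until its n-th action
    // reaches the expected status, or give up after ten minutes.
    static void waitForCoordActionStatus(OozieClient oc, String coordJobId,
            int actionNumber, CoordinatorAction.Status expected) throws Exception {
        final long deadline = System.currentTimeMillis() + 10 * 60 * 1000;
        while (System.currentTimeMillis() < deadline) {
            CoordinatorJob job = oc.getCoordJobInfo(coordJobId);
            List<CoordinatorAction> actions = job.getActions();
            if (actions.size() >= actionNumber
                    && actions.get(actionNumber - 1).getStatus() == expected) {
                return;
            }
            Thread.sleep(10 * 1000); // poll every 10 seconds
        }
        throw new AssertionError("Action " + actionNumber + " of " + coordJobId
            + " did not reach " + expected);
    }
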
http://git-wip-us.apache.org/repos/asf/falcon/blob/c00975e4/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/HiveObjectCreator.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/HiveObjectCreator.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/HiveObjectCreator.java
deleted file mode 100644
index 9eb389a..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/HiveObjectCreator.java
+++ /dev/null
@@ -1,208 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.hive.dr;
-
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.log4j.Logger;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.SQLException;
-
-import static org.apache.falcon.regression.core.util.HadoopUtil.writeDataForHive;
-import static org.apache.falcon.regression.core.util.HiveUtil.runSql;
-
-/**
- * Create Hive tables for testing Hive DR. Note that this is not expected to be used outside
- * of HiveDR tests.
- */
-final class HiveObjectCreator {
-    private static final Logger LOGGER = Logger.getLogger(HiveObjectCreator.class);
-    private static final String HDFS_TMP_DIR = "/tmp/hive_objects/";
-
-    private HiveObjectCreator() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    static void bootstrapCopy(Connection srcConnection, FileSystem srcFs, String srcTable,
-                              Connection dstConnection, FileSystem dstFs, String dstTable) throws Exception {
-        LOGGER.info("Starting bootstrap...");
-        final String dumpPath = HDFS_TMP_DIR + srcTable + "/";
-        HadoopUtil.recreateDir(srcFs, dumpPath);
-        runSqlQuietly(srcConnection, "dfs -chmod -R 777 " + dumpPath);
-        HadoopUtil.deleteDirIfExists(dumpPath, dstFs);
-        runSql(srcConnection, "export table " + srcTable + " to '" + dumpPath + "' FOR REPLICATION('ignore')");
-        FileUtil.copy(srcFs, new Path(dumpPath), dstFs, new Path(dumpPath), false, true, new Configuration());
-        runSqlQuietly(dstConnection, "dfs -chmod -R 777 " + dumpPath);
-        runSql(dstConnection, "import table " + dstTable + " from '" + dumpPath + "'");
-        HadoopUtil.deleteDirIfExists(dumpPath, srcFs);
-        HadoopUtil.deleteDirIfExists(dumpPath, dstFs);
-        LOGGER.info("Finished bootstrap");
-    }
-
-    /* We need to delete it using a hive query, as the created directory is owned by hive. */
-    private static void runSqlQuietly(Connection srcConnection, String sql) {
-        try {
-            runSql(srcConnection, sql);
-        } catch (SQLException ex) {
-            LOGGER.info("Exception while hive ql execution: " + ex.getMessage());
-        }
-    }
-
-    /**
-     * Create an external table.
-     * @param connection jdbc connection object to use for issuing queries to hive
-     * @param fs filesystem object to upload the data
-     * @param clickDataLocation location to upload the data to
-     * @throws IOException
-     * @throws SQLException
-     */
-    static void createExternalTable(Connection connection, FileSystem fs, String
-        clickDataLocation, String tableName) throws IOException, SQLException {
-        HadoopUtil.deleteDirIfExists(clickDataLocation, fs);
-        fs.mkdirs(new Path(clickDataLocation));
-        fs.setPermission(new Path(clickDataLocation), FsPermission.getDirDefault());
-        writeDataForHive(fs, clickDataLocation,
-            new StringBuffer("click1").append((char) 0x01).append("01:01:01").append("\n")
-                .append("click2").append((char) 0x01).append("02:02:02"), true);
-        //clusterFS.setPermission(new Path(clickDataPart2), FsPermission.getFileDefault());
-        runSql(connection, "create external table " + tableName
-            + " (data string, time string) "
-            + "location '" + clickDataLocation + "'");
-        runSql(connection, "select * from " + tableName);
-    }
-
-
-    /**
- * Create an external partitioned table.
-     * @param connection jdbc connection object to use for issuing queries to hive
-     * @param fs filesystem object to upload the data
-     * @param clickDataLocation location to upload the data to
-     * @throws IOException
-     * @throws SQLException
-     */
-    static void createExternalPartitionedTable(Connection connection, FileSystem fs, String
-        clickDataLocation, String tableName) throws IOException, SQLException {
-        final String clickDataPart1 = clickDataLocation + "2001-01-01/";
-        final String clickDataPart2 = clickDataLocation + "2001-01-02/";
-        fs.mkdirs(new Path(clickDataLocation));
-        fs.setPermission(new Path(clickDataLocation), FsPermission.getDirDefault());
-        writeDataForHive(fs, clickDataPart1,
-            new StringBuffer("click1").append((char) 0x01).append("01:01:01"), true);
-        writeDataForHive(fs, clickDataPart2,
-            new StringBuffer("click2").append((char) 0x01).append("02:02:02"), true);
-        //clusterFS.setPermission(new Path(clickDataPart2), FsPermission.getFileDefault());
-        runSql(connection, "create external table " + tableName
-            + " (data string, time string) partitioned by (date_ string) "
-            + "location '" + clickDataLocation + "'");
-        runSql(connection, "alter table " + tableName + " add partition "
-            + "(date_='2001-01-01') location '" + clickDataPart1 + "'");
-        runSql(connection, "alter table " + tableName + " add partition "
-            + "(date_='2001-01-02') location '" + clickDataPart2 + "'");
-        runSql(connection, "select * from " + tableName);
-    }
-
-    /**
- * Create a partitioned table.
-     * @param connection jdbc connection object to use for issuing queries to hive
-     * @throws SQLException
-     */
-    static void createPartitionedTable(Connection connection) throws SQLException {
-        runSql(connection, "create table global_store_sales "
-            + "(customer_id string, item_id string, quantity float, price float, time timestamp) "
-            + "partitioned by (country string)");
-        runSql(connection,
-            "insert into table global_store_sales partition (country = 'us') values"
-                + "('c1', 'i1', '1', '1', '2001-01-01 01:01:01')");
-        runSql(connection,
-            "insert into table global_store_sales partition (country = 'uk') values"
-                + "('c2', 'i2', '2', '2', '2001-01-01 01:01:02')");
-        runSql(connection, "select * from global_store_sales");
-    }
-
-    /**
- * Create a plain old table.
-     * @param connection jdbc connection object to use for issuing queries to hive
-     * @param tblName
-     * @throws SQLException
-     */
-    static void createVanillaTable(Connection connection, String tblName) throws SQLException {
-        //vanilla table
-        runSql(connection, "create table " + tblName
-            + "(customer_id string, item_id string, quantity float, price float, time timestamp)");
-        runSql(connection, "insert into table " + tblName + " values "
-            + "('c1', 'i1', '1', '1', '2001-01-01 01:01:01'), "
-            + "('c2', 'i2', '2', '2', '2001-01-01 01:01:02')");
-        runSql(connection, "select * from " + tblName);
-    }
-
-    /**
-     * Create a partitioned table with either dynamic or static partitions.
-     * @param connection jdbc connection object to use for issuing queries to hive
- * @param dynamic whether partitions should be added dynamically or statically
-     * @throws SQLException
-     */
-    static void createPartitionedTable(Connection connection,
-                                       boolean dynamic) throws SQLException {
-        String [][] partitions = {
-            {"us", "Kansas", },
-            {"us", "California", },
-            {"au", "Queensland", },
-            {"au", "Victoria", },
-        };
-        //create table
-        runSql(connection, "drop table global_store_sales");
-        runSql(connection, "create table global_store_sales(customer_id string,"
-            + " item_id string, quantity float, price float, time timestamp) "
-            + "partitioned by (country string, state string)");
-        //provide data
-        String query;
-        if (dynamic) {
-            //disable strict mode so that both partition columns can be used dynamically
-            runSql(connection, "set hive.exec.dynamic.partition.mode=nonstrict");
-            query = "insert into table global_store_sales partition"
-                + "(country, state) values('c%3$s', 'i%3$s', '%3$s', '%3$s', "
-                + "'2001-01-01 01:01:0%3$s', '%1$s', '%2$s')";
-        } else {
-            query = "insert into table global_store_sales partition"
-                + "(country = '%1$s', state = '%2$s') values('c%3$s', 'i%3$s', '%3$s', '%3$s', "
-                + "'2001-01-01 01:01:0%3$s')";
-        }
-        for (int i = 0; i < partitions.length; i++) {
-            runSql(connection, String.format(query, partitions[i][0], partitions[i][1], i + 1));
-        }
-        runSql(connection, "select * from global_store_sales");
-    }
-
-    static void createSerDeTable(Connection connection) throws SQLException {
-        runSql(connection, "create table store_json "
-            + "(customer_id string, item_id string, quantity float, price float, time timestamp) "
-            + "row format serde 'org.apache.hive.hcatalog.data.JsonSerDe' ");
-        runSql(connection, "insert into table store_json values "
-            + "('c1', 'i1', '1', '1', '2001-01-01 01:01:01'), "
-            + "('c2', 'i2', '2', '2', '2001-01-01 01:01:02')");
-        runSql(connection, "select * from store_json");
-    }
-
-}

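In createPartitionedTable(Connection, boolean) above, the two String.format templates are dense. Resolved for partitions[0] = {"us", "Kansas"} and i = 0, they expand to the following statements (same behavior, written out for clarity):

    // Dynamic partitioning: partition column values trail the row values;
    // hive.exec.dynamic.partition.mode=nonstrict must be set beforehand.
    runSql(connection, "insert into table global_store_sales partition(country, state)"
        + " values('c1', 'i1', '1', '1', '2001-01-01 01:01:01', 'us', 'Kansas')");
    // Static partitioning: partition values are fixed in the PARTITION clause.
    runSql(connection, "insert into table global_store_sales"
        + " partition(country = 'us', state = 'Kansas')"
        + " values('c1', 'i1', '1', '1', '2001-01-01 01:01:01')");
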
http://git-wip-us.apache.org/repos/asf/falcon/blob/c00975e4/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/RecipeExecLocation.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/RecipeExecLocation.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/RecipeExecLocation.java
deleted file mode 100644
index a124082..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/hive/dr/RecipeExecLocation.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.hive.dr;
-
-import org.apache.falcon.regression.Entities.ClusterMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.oozie.client.OozieClient;
-
-/**
- * Enum to represent location of recipe execution.
- */
-enum RecipeExecLocation {
-    SourceCluster {
-        protected OozieClient getRecipeOC(OozieClient srcOC, OozieClient tgtOC) {
-            return srcOC;
-        }
-        protected ClusterMerlin getRecipeCluster(ClusterMerlin srcCM, ClusterMerlin tgtCM) {
-            return srcCM;
-        }
-        protected Bundle getRecipeBundle(Bundle srcBundle, Bundle tgtBundle) {
-            return srcBundle;
-        }
-    },
-    TargetCluster {
-        protected OozieClient getRecipeOC(OozieClient srcOC, OozieClient tgtOC) {
-            return tgtOC;
-        }
-        protected ClusterMerlin getRecipeCluster(ClusterMerlin srcCM, ClusterMerlin tgtCM) {
-            return tgtCM;
-        }
-        protected Bundle getRecipeBundle(Bundle srcBundle, Bundle tgtBundle) {
-            return tgtBundle;
-        }
-    };
-
-    /** Get oozie client for the Oozie that is going to run the recipe.
-     * @param srcOC the oozie client for the source cluster
-     * @param tgtOC the oozie client for the target cluster
-     * @return oozie client for the Oozie that is going to run the recipe
-     */
-    abstract OozieClient getRecipeOC(OozieClient srcOC, OozieClient tgtOC);
-
-    abstract ClusterMerlin getRecipeCluster(ClusterMerlin srcCM, ClusterMerlin tgtCM);
-
-    abstract Bundle getRecipeBundle(Bundle srcBundle, Bundle tgtBundle);
-
-}

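RecipeExecLocation is an enum-as-strategy: each constant overrides the accessors, so test code stays symmetric about which cluster actually runs the recipe. A typical call site, excerpted from the deleted HiveDbDRTest:

    // Pick the Oozie client of whichever cluster executes the recipe,
    // then wait for the first instance of the replication process.
    OozieClient recipeOC = recipeExecLocation.getRecipeOC(clusterOC, clusterOC2);
    InstanceUtil.waitTillInstanceReachState(recipeOC, recipeMerlin.getName(), 1,
        CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
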
http://git-wip-us.apache.org/repos/asf/falcon/blob/c00975e4/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSourceTargetOptionsTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSourceTargetOptionsTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSourceTargetOptionsTest.java
deleted file mode 100644
index 8bec758..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSourceTargetOptionsTest.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.searchUI;
-
-import org.apache.falcon.cli.FalconCLI;
-import org.apache.falcon.regression.Entities.ClusterMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.testHelper.BaseUITestClass;
-import org.apache.falcon.regression.ui.search.LoginPage;
-import org.apache.falcon.regression.ui.search.MirrorWizardPage;
-import org.apache.falcon.regression.ui.search.MirrorWizardPage.Location;
-import org.apache.falcon.regression.ui.search.SearchPage;
-import org.apache.falcon.resource.EntityList;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.EnumSet;
-import java.util.Set;
-import java.util.TreeSet;
-
-/** UI tests for mirror creation. */
-@Test(groups = "search-ui")
-public class MirrorSourceTargetOptionsTest extends BaseUITestClass{
-    private final ColoHelper cluster = servers.get(0);
-    private SearchPage searchPage;
-    private MirrorWizardPage mirrorPage;
-    private MirrorWizardPage.ClusterBlock source;
-    private MirrorWizardPage.ClusterBlock target;
-
-    @BeforeClass(alwaysRun = true)
-    public void setup() throws Exception {
-        openBrowser();
-        searchPage = LoginPage.open(getDriver()).doDefaultLogin();
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].submitClusters(cluster);
-
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void refreshMirrorPage() throws Exception {
-        searchPage.refresh();
-        mirrorPage = searchPage.getPageHeader().doCreateMirror();
-        source = mirrorPage.getSourceBlock();
-        target = mirrorPage.getTargetBlock();
-    }
-
-
-    @Test
-    public void testExclusiveWhereToRunJob() {
-        source.selectRunHere();
-        target.selectRunHere();
-        Assert.assertFalse(source.isRunHereSelected(), "'Run job here' shouldn't be selected on Source");
-        Assert.assertTrue(target.isRunHereSelected(), "'Run job here' should be selected on Target");
-
-        source.selectRunHere();
-        Assert.assertTrue(source.isRunHereSelected(), "'Run job here' should be selected on Source");
-        Assert.assertFalse(target.isRunHereSelected(), "'Run job here' shouldn't be selected on Target");
-
-        mirrorPage.setMirrorType(FalconCLI.RecipeOperation.HIVE_DISASTER_RECOVERY);
-
-        target.selectRunHere();
-        Assert.assertFalse(source.isRunHereSelected(), "'Run job here' shouldn't be selected on Source");
-        Assert.assertTrue(target.isRunHereSelected(), "'Run job here' should be selected on Target");
-
-        source.selectRunHere();
-        Assert.assertTrue(source.isRunHereSelected(), "'Run job here' should be selected on Source");
-        Assert.assertFalse(target.isRunHereSelected(), "'Run job here' shouldn't be selected on Target");
-
-        mirrorPage.setMirrorType(FalconCLI.RecipeOperation.HDFS_REPLICATION);
-        source.setLocationType(Location.AZURE);
-        Assert.assertFalse(source.isRunHereAvailable(),
-                "'Run job here' shouldn't be available on source if Source=Azure");
-
-        source.setLocationType(Location.S3);
-        Assert.assertFalse(source.isRunHereAvailable(),
-                "'Run job here' shouldn't be available on source if Source=S3");
-
-        source.setLocationType(Location.HDFS);
-        target.setLocationType(Location.AZURE);
-        Assert.assertFalse(target.isRunHereAvailable(),
-                "'Run job here' shouldn't be available on target if Target=Azure");
-
-        target.setLocationType(Location.S3);
-        Assert.assertFalse(target.isRunHereAvailable(),
-                "'Run job here' shouldn't be available on target if Target=S3");
-
-    }
-
-    @Test
-    public void testExclusiveFSOptions() {
-        source.setLocationType(Location.HDFS);
-        Assert.assertEquals(target.getAvailableLocationTypes(),
-                EnumSet.allOf(Location.class), "All target types should be available if source=HDFS");
-
-
-        source.setLocationType(Location.AZURE);
-        Assert.assertEquals(target.getAvailableLocationTypes(),
-                EnumSet.of(Location.HDFS), "Only HDFS should be available as target if source=Azure");
-
-        source.setLocationType(Location.S3);
-        Assert.assertEquals(target.getAvailableLocationTypes(),
-                EnumSet.of(Location.HDFS), "Only HDFS should be available as target if source=S3");
-
-        source.setLocationType(Location.HDFS);
-        target.setLocationType(Location.HDFS);
-        Assert.assertEquals(source.getAvailableLocationTypes(),
-                EnumSet.allOf(Location.class), "All source types should be available if target=HDFS");
-
-
-        target.setLocationType(Location.AZURE);
-        Assert.assertEquals(source.getAvailableLocationTypes(),
-                EnumSet.of(Location.HDFS), "Only HDFS should be available as source if target=Azure");
-
-        target.setLocationType(Location.S3);
-        Assert.assertEquals(source.getAvailableLocationTypes(),
-                EnumSet.of(Location.HDFS), "Only HDFS should be available as source if target=S3");
-    }
-
-    @Test
-    public void testClustersDropDownList() throws Exception {
-        //add more clusters
-        ClusterMerlin clusterMerlin = bundles[0].getClusterElement();
-        String clusterName = clusterMerlin.getName() + '-';
-        for (int i = 0; i < 5; i++) {
-            clusterMerlin.setName(clusterName + i);
-            prism.getClusterHelper().submitEntity(clusterMerlin.toString());
-        }
-        EntityList result =
-            prism.getClusterHelper().listAllEntities().getEntityList();
-        Assert.assertNotNull(result.getElements(),
-            "There should be more than 5 clusters in result");
-        Set<String> apiClusterNames = new TreeSet<>();
-        for (EntityList.EntityElement element : result.getElements()) {
-            apiClusterNames.add(element.name);
-        }
-
-        //refresh page to get new clusters on UI
-        refreshMirrorPage();
-
-        mirrorPage.setMirrorType(FalconCLI.RecipeOperation.HDFS_REPLICATION);
-        source.setLocationType(Location.HDFS);
-        target.setLocationType(Location.HDFS);
-
-        Assert.assertEquals(source.getAvailableClusters(), apiClusterNames,
-            "Clusters available via API are not the same as on Source for HDFS replication");
-        Assert.assertEquals(target.getAvailableClusters(), apiClusterNames,
-            "Clusters available via API are not the same as on Target for HDFS replication");
-
-        mirrorPage.setMirrorType(FalconCLI.RecipeOperation.HIVE_DISASTER_RECOVERY);
-
-        Assert.assertEquals(source.getAvailableClusters(), apiClusterNames,
-            "Clusters available via API are not the same as on Source for HIVE replication");
-        Assert.assertEquals(target.getAvailableClusters(), apiClusterNames,
-            "Clusters available via API are not the same as on Target for HIVE replication");
-    }
-
-    @Test
-    public void testInvalidValidity() {
-        mirrorPage.setName(bundles[0].getProcessName());
-        mirrorPage.setMirrorType(FalconCLI.RecipeOperation.HDFS_REPLICATION);
-        String baseTestDir = cleanAndGetTestDir();
-        source.setPath(baseTestDir);
-        source.selectCluster(bundles[0].getClusterNames().get(0));
-        target.setPath(baseTestDir);
-        target.selectCluster(bundles[0].getClusterNames().get(0));
-
-        mirrorPage.setStartTime("2010-01-01T02:00Z");
-        mirrorPage.setEndTime("2010-01-01T01:00Z");
-        mirrorPage.next();
-        mirrorPage.save();
-        Assert.assertTrue(mirrorPage.getActiveAlertText().contains("should be before process end"),
-            "Warning about wrong Validity should be present");
-        //check the same through notification bar
-        mirrorPage.getPageHeader().validateNotificationCountAndCheckLast(1, "should be before process end");
-    }
-
-    @AfterClass(alwaysRun = true)
-    public void tearDownClass() {
-        removeTestClassEntities();
-        closeBrowser();
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/c00975e4/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSummaryTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSummaryTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSummaryTest.java
deleted file mode 100644
index ce014ef..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSummaryTest.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.searchUI;
-
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.process.PolicyType;
-import org.apache.falcon.entity.v0.process.Retry;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.testHelper.BaseUITestClass;
-import org.apache.falcon.regression.ui.search.LoginPage;
-import org.apache.falcon.regression.ui.search.MirrorWizardPage;
-import org.apache.falcon.regression.ui.search.MirrorWizardPage.Summary;
-import org.apache.falcon.regression.ui.search.SearchPage;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.EnumMap;
-import java.util.Map;
-
-
-/** UI tests for mirror creation. */
-@Test(groups = "search-ui")
-public class MirrorSummaryTest extends BaseUITestClass{
-    private static final Logger LOGGER = Logger.getLogger(MirrorSummaryTest.class);
-
-    private final ColoHelper cluster = servers.get(0);
-    private SearchPage searchPage;
-    private MirrorWizardPage mirrorPage;
-    private String baseTestDir = cleanAndGetTestDir();
-    private String start = "2010-01-01T01:00Z";
-    private String end = "2010-01-01T02:00Z";
-    private Map<Summary, String> baseMap;
-
-    @BeforeClass(alwaysRun = true)
-    public void setupClass() throws Exception {
-        baseMap = new EnumMap<>(Summary.class);
-        baseMap.put(Summary.MAX_MAPS, "5");
-        baseMap.put(Summary.MAX_BANDWIDTH, "100");
-        baseMap.put(Summary.ACL_OWNER, LoginPage.UI_DEFAULT_USER);
-        baseMap.put(Summary.ACL_GROUP, "users");
-        baseMap.put(Summary.ACL_PERMISSIONS, "0755");
-        baseMap.put(Summary.RETRY_POLICY, "periodic");
-        baseMap.put(Summary.RETRY_DELAY, "30 minutes");
-        baseMap.put(Summary.RETRY_ATTEMPTS, "3");
-        baseMap.put(Summary.FREQUENCY, "5 minutes");
-        baseMap.put(Summary.SOURCE_PATH, baseTestDir);
-        baseMap.put(Summary.TARGET_PATH, baseTestDir);
-        baseMap.put(Summary.START, start);
-        baseMap.put(Summary.END, end);
-
-        //HDFS is default mirror type
-        baseMap.put(Summary.TYPE, "HDFS");
-        baseMap.put(Summary.TAGS, "_falcon_mirroring_type - HDFS");
-        baseMap.put(Summary.SOURCE_LOCATION, "HDFS");
-        baseMap.put(Summary.TARGET_LOCATION, "HDFS");
-
-        openBrowser();
-        searchPage = LoginPage.open(getDriver()).doDefaultLogin();
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        removeTestClassEntities();
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].submitClusters(cluster);
-        searchPage.refresh();
-        mirrorPage = searchPage.getPageHeader().doCreateMirror();
-        MirrorWizardPage.ClusterBlock source = mirrorPage.getSourceBlock();
-        MirrorWizardPage.ClusterBlock target = mirrorPage.getTargetBlock();
-        String clusterName = bundles[0].getClusterNames().get(0);
-        String mirrorName = bundles[0].getProcessName();
-
-        baseMap.put(Summary.RUN_ON, clusterName);
-        baseMap.put(Summary.NAME, mirrorName);
-        baseMap.put(Summary.SOURCE_CLUSTER, clusterName);
-        baseMap.put(Summary.TARGET_CLUSTER, clusterName);
-
-        mirrorPage.setName(mirrorName);
-
-        source.setPath(baseTestDir);
-        source.selectCluster(clusterName);
-        target.setPath(baseTestDir);
-        target.selectCluster(clusterName);
-
-        mirrorPage.setStartTime(start);
-        mirrorPage.setEndTime(end);
-
-    }
-
-    @Test
-    public void testSummaryDefaultScenario() {
-        mirrorPage.next();
-
-        Map<Summary, String> actualParams = mirrorPage.getSummaryProperties();
-
-
-        LOGGER.info("Actual parameters: " + actualParams);
-        LOGGER.info("Expected parameters: " + baseMap);
-
-        Assert.assertEquals(actualParams, baseMap);
-
-        mirrorPage.save();
-        Assert.assertTrue(mirrorPage.getActiveAlertText().contains("Submit successful"),
-            "Submit should be successful");
-    }
-
-    @Test
-    public void testModificationOnPreviousStep() {
-        mirrorPage.next();
-
-        Map<Summary, String> actualParams = mirrorPage.getSummaryProperties();
-
-        LOGGER.info("Actual parameters: " + actualParams);
-        LOGGER.info("Expected parameters: " + baseMap);
-
-        Assert.assertEquals(actualParams, baseMap);
-
-        mirrorPage.previous();
-
-        String newPath = baseTestDir + "/new";
-        mirrorPage.getTargetBlock().setPath(newPath);
-
-        Map<Summary, String> expectedParams = new EnumMap<>(baseMap);
-        expectedParams.put(Summary.TARGET_PATH, newPath);
-
-        LOGGER.info("Target path set to " + newPath);
-
-        mirrorPage.next();
-
-        Assert.assertEquals(mirrorPage.getSummaryProperties(), expectedParams);
-
-
-    }
-
-
-    @Test
-    public void testAdvancedScenario() {
-
-        mirrorPage.toggleAdvancedOptions();
-        mirrorPage.setHdfsDistCpMaxMaps("9");
-        mirrorPage.setHdfsMaxBandwidth("50");
-        mirrorPage.setAclOwner("somebody");
-        mirrorPage.setAclGroup("somegroup");
-        mirrorPage.setAclPermission("0000");
-        mirrorPage.setFrequency(new Frequency("8", Frequency.TimeUnit.hours));
-        Retry retry = new Retry();
-        retry.setAttempts(8);
-        retry.setPolicy(PolicyType.FINAL);
-        retry.setDelay(new Frequency("13", Frequency.TimeUnit.days));
-        mirrorPage.setRetry(retry);
-
-
-        mirrorPage.next();
-
-        Map<Summary, String> actualParams = mirrorPage.getSummaryProperties();
-        Map<Summary, String> expectedParams = new EnumMap<>(baseMap);
-        expectedParams.put(Summary.ACL_OWNER, "somebody");
-        expectedParams.put(Summary.ACL_GROUP, "somegroup");
-        expectedParams.put(Summary.ACL_PERMISSIONS, "0000");
-        expectedParams.put(Summary.MAX_MAPS, "9");
-        expectedParams.put(Summary.MAX_BANDWIDTH, "50");
-        expectedParams.put(Summary.FREQUENCY, "8 hours");
-        expectedParams.put(Summary.RETRY_ATTEMPTS, "8");
-        expectedParams.put(Summary.RETRY_POLICY, "final");
-        expectedParams.put(Summary.RETRY_DELAY, "13 days");
-
-
-        LOGGER.info("Actual parameters: " + actualParams);
-        LOGGER.info("Expected parameters: " + expectedParams);
-
-        Assert.assertEquals(actualParams, expectedParams);
-
-
-    }
-
-
-    @AfterClass(alwaysRun = true)
-    public void tearDownClass() {
-        removeTestClassEntities();
-        closeBrowser();
-    }
-
-}
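MirrorSummaryTest keeps one EnumMap of default summary values (baseMap) and has each scenario clone it, overriding only the entries that scenario changes, so expected-vs-actual stays a single assertEquals. The idiom in isolation, excerpted from testModificationOnPreviousStep:

    // Clone the shared defaults, then override only the field this scenario touches.
    Map<Summary, String> expectedParams = new EnumMap<>(baseMap);
    expectedParams.put(Summary.TARGET_PATH, newPath);
    Assert.assertEquals(mirrorPage.getSummaryProperties(), expectedParams);
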

http://git-wip-us.apache.org/repos/asf/falcon/blob/c00975e4/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorTest.java
deleted file mode 100644
index a7887da..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorTest.java
+++ /dev/null
@@ -1,414 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.searchUI;
-
-import org.apache.falcon.cli.FalconCLI;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.cluster.ClusterLocationType;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.regression.Entities.ClusterMerlin;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.Entities.RecipeMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.supportClasses.NotifyingAssert;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.testHelper.BaseUITestClass;
-import org.apache.falcon.regression.ui.search.LoginPage;
-import org.apache.falcon.regression.ui.search.MirrorWizardPage;
-import org.apache.falcon.regression.ui.search.SearchPage;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.sql.Connection;
-import java.util.Arrays;
-
-/** UI tests for Mirror Setup Wizard. */
-@Test(groups = {"search-ui", "multiCluster"})
-public class MirrorTest extends BaseUITestClass {
-    private static final Logger LOGGER = Logger.getLogger(MirrorTest.class);
-    private final String baseTestDir = cleanAndGetTestDir();
-    private final String hdfsSrcDir = baseTestDir + "/hdfsSrcDir";
-    private final String hdfsTgtDir = baseTestDir + "/hdfsTgtDir";
-    private final String hdfsStrictDir = baseTestDir + "/strictDir";
-    private static final String DB_NAME = "MirrorTest";
-    private static final String DB2_NAME = "MirrorTest2";
-    private static final String TBL1_NAME = "MirrorTable1";
-    private static final String TBL2_NAME = "MirrorTable2";
-    private final ColoHelper cluster = servers.get(0);
-    private final ColoHelper cluster2 = servers.get(1);
-    private final FileSystem clusterFS = serverFS.get(0);
-    private final FileSystem clusterFS2 = serverFS.get(1);
-    private final OozieClient clusterOC = serverOC.get(0);
-    private final OozieClient clusterOC2 = serverOC.get(1);
-    private HCatClient clusterHC;
-    private HCatClient clusterHC2;
-    private RecipeMerlin recipeMerlin;
-    private Connection connection;
-    private Connection connection2;
-    private MirrorWizardPage mirrorPage;
-
-    /**
-     * Submit one cluster, 2 feeds and 10 processes with 1 to 10 tags (the 1st process has 1 tag,
-     * the 2nd has two tags, ..., the 10th has 10 tags).
-     * @throws URISyntaxException
-     * @throws IOException
-     * @throws AuthenticationException
-     * @throws InterruptedException
-     */
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = new Bundle(BundleUtil.readHCatBundle(), cluster);
-        bundles[1] = new Bundle(BundleUtil.readHCatBundle(), cluster2);
-        bundles[0].generateUniqueBundle(this);
-        bundles[1].generateUniqueBundle(this);
-        final ClusterMerlin srcCluster = bundles[0].getClusterElement();
-        final ClusterMerlin tgtCluster = bundles[1].getClusterElement();
-        Bundle.submitCluster(bundles[0], bundles[1]);
-
-        recipeMerlin = RecipeMerlin.readFromDir("HiveDrRecipe",
-            FalconCLI.RecipeOperation.HIVE_DISASTER_RECOVERY)
-            .withRecipeCluster(srcCluster);
-        recipeMerlin.withSourceCluster(srcCluster)
-            .withTargetCluster(tgtCluster)
-            .withFrequency(new Frequency("5", Frequency.TimeUnit.minutes))
-            .withValidity(TimeUtil.getTimeWrtSystemTime(-5), TimeUtil.getTimeWrtSystemTime(5));
-        recipeMerlin.setUniqueName(this.getClass().getSimpleName());
-        recipeMerlin.withSourceDb(DB_NAME);
-        HadoopUtil.recreateDir(clusterFS, hdfsStrictDir);
-        HadoopUtil.recreateDir(clusterFS2, hdfsStrictDir);
-        clusterFS.setPermission(new Path(hdfsStrictDir), FsPermission.valueOf("drwx------"));
-        clusterFS2.setPermission(new Path(hdfsStrictDir), FsPermission.valueOf("drwx------"));
-        openBrowser();
-        SearchPage searchPage = LoginPage.open(getDriver()).doDefaultLogin();
-        mirrorPage = searchPage.getPageHeader().doCreateMirror();
-        mirrorPage.checkPage();
-    }
-
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() throws IOException {
-        removeTestClassEntities();
-        closeBrowser();
-    }
-
-    @Test
-    public void testHeader() throws Exception {
-        mirrorPage.getPageHeader().checkHeader();
-    }
-
-    /**
-     * Create DB on source with 1 table.
-     * Select Dataset type as FileSystem. Select source and target as hdfs.
-     * Populate all fields (name, source, target, validity etc.) with correct and existing values.
-     * Click next. Create mirror.
-     * Using get entity definition API check that entity has been created.
-     * @throws Exception
-     */
-    @Test(enabled = false)
-    public void testHdfsDefaultScenario() throws Exception {
-        final ClusterMerlin srcCluster = bundles[0].getClusterElement();
-        final ClusterMerlin tgtCluster = bundles[1].getClusterElement();
-        RecipeMerlin hdfsRecipe = RecipeMerlin.readFromDir("HdfsRecipe",
-            FalconCLI.RecipeOperation.HDFS_REPLICATION)
-            .withRecipeCluster(srcCluster);
-        hdfsRecipe.withSourceCluster(srcCluster)
-            .withTargetCluster(tgtCluster)
-            .withFrequency(new Frequency("5", Frequency.TimeUnit.minutes))
-            .withValidity(TimeUtil.getTimeWrtSystemTime(-5), TimeUtil.getTimeWrtSystemTime(5));
-        hdfsRecipe.setUniqueName(this.getClass().getSimpleName());
-        hdfsRecipe.withSourceDir(hdfsSrcDir).withTargetDir(hdfsTgtDir);
-        hdfsRecipe.setTags(Arrays.asList("key1=val1", "key2=val2", "key3=val3"));
-
-        mirrorPage.applyRecipe(hdfsRecipe, true);
-        mirrorPage.next();
-        mirrorPage.save();
-
-        AssertUtil.assertSucceeded(prism.getProcessHelper().getStatus(
-            createFakeProcessForRecipe(bundles[0].getProcessObject(), recipeMerlin)));
-    }
-
-    /**
-     * Create DB on source with 1 table.
-     * Select Dataset type as Hive.
-     * Populate all fields (name, source, target, validity etc.) with correct and existing values.
-     * Click next. Create mirror.
-     * Using get entity definition API check that entity has been created.
-     * @throws Exception
-     */
-    @Test(dataProvider = "getDbsAndTbls")
-    public void testHiveDefaultScenario(String dbName, String tblName) throws Exception {
-        recipeMerlin.withSourceDb(dbName);
-        recipeMerlin.withSourceTable(tblName);
-        recipeMerlin.setTags(Arrays.asList("key1=val1", "key2=val2", "key3=val3"));
-        mirrorPage.applyRecipe(recipeMerlin, true);
-        mirrorPage.next();
-        mirrorPage.save();
-        AssertUtil.assertSucceeded(prism.getProcessHelper().getStatus(
-            createFakeProcessForRecipe(bundles[0].getProcessObject(), recipeMerlin)));
-    }
-
-    @DataProvider
-    public Object[][] getDbsAndTbls() {
-        return new String[][]{
-            {DB_NAME, ""},
-            {DB_NAME + ',' + DB2_NAME, ""},
-            {DB_NAME, TBL1_NAME + ',' + TBL2_NAME},
-        };
-    }
-
-    /**
-     *  If "send alerts to" is empty on HiveDR UI, default value for drNotificationReceivers property must be "NA".
-     */
-    @Test
-    public void testSendAlertsDefaultValue()
-        throws InterruptedException, IOException, URISyntaxException, AuthenticationException {
-        recipeMerlin.withSourceDb(DB_NAME);
-        recipeMerlin.withSourceTable(TBL1_NAME);
-        mirrorPage.applyRecipe(recipeMerlin, false);
-        mirrorPage.next();
-        mirrorPage.save();
-        ProcessMerlin process = bundles[0].getProcessObject();
-        process.setName(recipeMerlin.getName());
-        process = new ProcessMerlin(cluster.getProcessHelper().getEntityDefinition(process.toString()).getMessage());
-        String drNotificationReceivers = process.getProperty("drNotificationReceivers");
-        Assert.assertTrue(drNotificationReceivers != null && drNotificationReceivers.equals("NA"),
-            "Default value for drNotificationReceivers should be NA.");
-
-        /* In particular, check that in the table replication scenario the UI doesn't pick up the
-           thrift server endpoint in place of the HiveServer2 endpoint. */
-        String expectedUri = recipeMerlin.getTgtCluster().getInterfaceEndpoint(Interfacetype.REGISTRY)
-            .replace("thrift", "hive2").replace("9083", "10000");
-        Assert.assertEquals(process.getProperty("targetHiveServer2Uri"), expectedUri,
-            "Hive server2 end point should be picked by UI.");
-        expectedUri = recipeMerlin.getSrcCluster().getInterfaceEndpoint(Interfacetype.REGISTRY)
-            .replace("thrift", "hive2").replace("9083", "10000");
-        Assert.assertEquals(process.getProperty("sourceHiveServer2Uri"), expectedUri,
-            "Hive server2 end point should be picked by UI.");
-    }
-
-    /**
-     * Test that the Hive DR UI doesn't pick the thrift server endpoint in place of the HiveServer2 endpoint.
-     * Test that the HDFS target staging path specified on the Hive DR UI isn't getting assigned to "*".
-     */
-    @Test
-    public void testHDFSTargetStagingPath()
-        throws URISyntaxException, AuthenticationException, InterruptedException, IOException {
-        recipeMerlin.withSourceDb(DB_NAME);
-        mirrorPage.applyRecipe(recipeMerlin, false);
-        mirrorPage.next();
-        mirrorPage.save();
-        ProcessMerlin process = bundles[0].getProcessObject();
-        process.setName(recipeMerlin.getName());
-        process = new ProcessMerlin(cluster.getProcessHelper().getEntityDefinition(process.toString()).getMessage());
-
-        // check that the Hive DR UI doesn't pick the thrift server endpoint in place of the HiveServer2 endpoint
-        String expectedUri = recipeMerlin.getTgtCluster().getInterfaceEndpoint(Interfacetype.REGISTRY)
-            .replace("thrift", "hive2").replace("9083", "10000");
-        Assert.assertEquals(process.getProperty("targetHiveServer2Uri"), expectedUri,
-            "Hive server2 end point should be picked by UI.");
-        expectedUri = recipeMerlin.getSrcCluster().getInterfaceEndpoint(Interfacetype.REGISTRY)
-            .replace("thrift", "hive2").replace("9083", "10000");
-        Assert.assertEquals(process.getProperty("sourceHiveServer2Uri"), expectedUri,
-            "Hive server2 end point should be picked by UI.");
-
-        // check that the HDFS target staging path specified on the Hive DR UI isn't getting assigned to "*"
-        Assert.assertFalse(process.getProperty("targetStagingPath").equals("*"),
-            "HDFS target staging path shouldn't be assigned to '*'.");
-    }
-
-    /**
-     * Test recipe with bad acls.
-     * Set owner/group to an invalid string (utf-8, special chars, number).
-     * Check that the user is not allowed to go to the next step and has been notified with an alert.
-     * Set permissions to a 4-digit number, a negative number, a string, and 000. Check the same.
-     */
-    @Test(enabled = false)
-    public void testInvalidAcl() {
-        recipeMerlin.setTags(Arrays.asList("key1=val1", "key2=val2", "key3=val3"));
-        final String goodAclOwner = MerlinConstants.CURRENT_USER_NAME;
-        final String goodAclGroup = MerlinConstants.CURRENT_USER_GROUP;
-        final String goodAclPerms = "777";
-        mirrorPage.applyRecipe(recipeMerlin, true);
-        NotifyingAssert notifyingAssert = new NotifyingAssert(true);
-        for (String badAclOwner : new String[] {"utf8\u20ACchar", "speci@l", "123"}) {
-            mirrorPage.setAclOwner(badAclOwner);
-            notifyingAssert.assertTrue(mirrorPage.isAclOwnerWarningDisplayed(),
-                "Expecting invalid owner warning to be displayed for bad acl owner: " + badAclOwner);
-            mirrorPage.next(); //should not go through
-            if (mirrorPage.getStepNumber() == 2) {
-                mirrorPage.silentPrevious();
-                mirrorPage.toggleAdvancedOptions();
-            }
-            mirrorPage.setAclOwner(goodAclOwner);
-            notifyingAssert.assertFalse(mirrorPage.isAclOwnerWarningDisplayed(),
-                "Expecting invalid owner warning to not be displayed for good acl owner: " + goodAclOwner);
-        }
-
-        for (String badAclGroup : new String[] {"utf8\u20ACchar", "speci@l", "123"}) {
-            mirrorPage.setAclGroup(badAclGroup);
-            notifyingAssert.assertTrue(mirrorPage.isAclGroupWarningDisplayed(),
-                "Expecting invalid group warning to be displayed for bad acl group: " + badAclGroup);
-            mirrorPage.next(); //should not go through
-            if (mirrorPage.getStepNumber() == 2) {
-                mirrorPage.silentPrevious();
-                mirrorPage.toggleAdvancedOptions();
-            }
-            mirrorPage.setAclGroup(goodAclGroup);
-            notifyingAssert.assertFalse(mirrorPage.isAclGroupWarningDisplayed(),
-                "Expecting invalid group warning to not be displayed for good acl group: " + goodAclGroup);
-        }
-
-        for (String badAclPermission : new String[] {"1234", "-123", "str", "000", "1*", "*1"}) {
-            mirrorPage.setAclPermission(badAclPermission);
-            notifyingAssert.assertTrue(mirrorPage.isAclPermissionWarningDisplayed(),
-                "Expecting invalid permission warning to be displayed for bad acl permission: " + badAclPermission);
-            mirrorPage.next(); //should not go through
-            if (mirrorPage.getStepNumber() == 2) {
-                mirrorPage.silentPrevious();
-                mirrorPage.toggleAdvancedOptions();
-            }
-            mirrorPage.setAclPermission(goodAclPerms); //clear error
-            notifyingAssert.assertFalse(mirrorPage.isAclPermissionWarningDisplayed(),
-                "Expecting invalid permission warning to not be displayed for good acl permission: " + goodAclPerms);
-        }
-        notifyingAssert.assertAll();
-    }
-
-    /**
-     * Select Hive as dataset type.
-     * Set source/target staging paths to paths with an invalid pattern, a digit, an empty value, or special/utf-8
-     * symbols. Check that the user is not allowed to go to the next step and has been notified with an alert.
-     */
-    @Test(enabled = false)
-    public void testHiveAdvancedInvalidStaging() {
-        recipeMerlin.withSourceDb(DB_NAME);
-        recipeMerlin.setTags(Arrays.asList("key1=val1", "key2=val2", "key3=val3"));
-        mirrorPage.applyRecipe(recipeMerlin, true);
-        NotifyingAssert notifyingAssert = new NotifyingAssert(true);
-        final String goodSrcStaging = recipeMerlin.getSrcCluster().getLocation(ClusterLocationType.STAGING).getPath();
-        final String goodTgtStaging = recipeMerlin.getTgtCluster().getLocation(ClusterLocationType.STAGING).getPath();
-        final String[] badTestPaths = new String[] {"not_a_path", "", "not/allowed"};
-        for (String path : badTestPaths) {
-            mirrorPage.setSourceStaging(path);
-            //check error
-            mirrorPage.next();
-            if (mirrorPage.getStepNumber() == 2) {
-                notifyingAssert.fail(
-                    "Navigation to page 2 should not be allowed as source staging path is bad: " + path);
-                mirrorPage.silentPrevious();
-                mirrorPage.toggleAdvancedOptions();
-            }
-            mirrorPage.setSourceStaging(goodSrcStaging);
-            //check error disappeared
-        }
-        for (String path : badTestPaths) {
-            mirrorPage.setTargetStaging(path);
-            //check error
-            mirrorPage.next();
-            if (mirrorPage.getStepNumber() == 2) {
-                notifyingAssert.fail(
-                    "Navigation to page 2 should not be allowed as target staging path is bad: " + path);
-                mirrorPage.silentPrevious();
-                mirrorPage.toggleAdvancedOptions();
-            }
-            mirrorPage.setTargetStaging(goodTgtStaging);
-            //check error disappeared
-        }
-        notifyingAssert.assertAll();
-    }
-
-    /**
-     * Select Hive as dataset type.
-     * Set source/target staging paths to paths pointing to directories with strict permissions
-     * (another owner, 700 permissions).
-     * Check that the user is not allowed to go to the next step and has been notified with an alert.
-     */
-    @Test(enabled = false)
-    public void testHiveAdvancedStagingAcl() throws Exception {
-        recipeMerlin.withSourceDb(DB_NAME);
-        recipeMerlin.setTags(Arrays.asList("key1=val1", "key2=val2", "key3=val3"));
-        mirrorPage.applyRecipe(recipeMerlin, true);
-        NotifyingAssert notifyingAssert = new NotifyingAssert(true);
-        final String goodSrcStaging = recipeMerlin.getSrcCluster().getLocation(ClusterLocationType.STAGING).getPath();
-        final String goodTgtStaging = recipeMerlin.getTgtCluster().getLocation(ClusterLocationType.STAGING).getPath();
-        final String[] badTestPaths = new String[] {"/apps", hdfsStrictDir};
-        for (String path : badTestPaths) {
-            mirrorPage.setSourceStaging(path);
-            //check error
-            mirrorPage.next();
-            if (mirrorPage.getStepNumber() == 2) {
-                notifyingAssert.fail(
-                    "Navigation to page 2 should not be allowed as source staging path is bad: " + path
-                        + " (" + clusterFS.getFileStatus(new Path(path)) + ")");
-
-                mirrorPage.silentPrevious();
-                mirrorPage.toggleAdvancedOptions();
-            }
-            mirrorPage.setSourceStaging(goodSrcStaging);
-            //check error disappeared
-        }
-        for (String path : badTestPaths) {
-            mirrorPage.setTargetStaging(path);
-            //check error
-            mirrorPage.next();
-            if (mirrorPage.getStepNumber() == 2) {
-                notifyingAssert.fail(
-                    "Navigation to page 2 should not be allowed as target staging path is bad: " + path
-                        + " (" + clusterFS.getFileStatus(new Path(path)) + ")");
-                mirrorPage.silentPrevious();
-                mirrorPage.toggleAdvancedOptions();
-            }
-            mirrorPage.setTargetStaging(goodTgtStaging);
-            //check error disappeared
-        }
-        notifyingAssert.assertAll();
-    }
-
-    /**
-     * Hack to work with the process corresponding to a recipe.
-     * @param processMerlin process merlin to be modified
-     *                      (ideally we want to get rid of this and use the recipe to generate a fake process xml)
-     * @param recipe recipe object that needs to be faked
-     * @return string form of the process with its name set to the recipe's name
-     */
-    private String createFakeProcessForRecipe(ProcessMerlin processMerlin, RecipeMerlin recipe) {
-        processMerlin.setName(recipe.getName());
-        return processMerlin.toString();
-    }
-
-
-}
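
Side note on the HiveServer2 endpoint assertions in the deleted tests above: the expected JDBC endpoint is derived by rewriting the cluster's registry (metastore thrift) endpoint. A minimal, hypothetical Java sketch of that rewrite, assuming the stock Hive defaults of port 9083 (metastore) and 10000 (HiveServer2); the helper class and method names are illustrative, not Falcon API:

    // Hypothetical helper mirroring the expectation asserted in the tests:
    // derive a HiveServer2 endpoint from a Hive metastore thrift endpoint.
    public final class EndpointUtil {
        private EndpointUtil() {
        }

        // e.g. "thrift://host:9083" -> "hive2://host:10000"
        public static String toHiveServer2Uri(String metastoreUri) {
            return metastoreUri.replace("thrift", "hive2").replace("9083", "10000");
        }
    }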

http://git-wip-us.apache.org/repos/asf/falcon/blob/c00975e4/falcon-regression/pom.xml
----------------------------------------------------------------------
diff --git a/falcon-regression/pom.xml b/falcon-regression/pom.xml
index b11cd83..1692323 100644
--- a/falcon-regression/pom.xml
+++ b/falcon-regression/pom.xml
@@ -183,6 +183,12 @@
             </dependency>
 
             <dependency>
+                <groupId>org.apache.falcon</groupId>
+                <artifactId>falcon-cli</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+
+            <dependency>
                 <groupId>com.google.code.findbugs</groupId>
                 <artifactId>annotations</artifactId>
                 <version>2.0.1</version>

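Context for the pom.xml hunk above: assuming the new block sits inside the parent pom's <dependencyManagement> section (which the surrounding indentation suggests), child modules can then declare falcon-cli without repeating the version. A minimal sketch of such a child-module declaration; the placement is an assumption, not taken from this commit:

    <!-- hypothetical child-module pom.xml snippet -->
    <dependencies>
        <dependency>
            <groupId>org.apache.falcon</groupId>
            <artifactId>falcon-cli</artifactId>
            <!-- version inherited from the parent's dependencyManagement -->
        </dependency>
    </dependencies>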
