falcon-commits mailing list archives

From: srik...@apache.org
Subject: [3/8] FALCON-93 Replication to handle hive table replication. Contributed by Venkatesh Seetharam
Date: Thu, 17 Oct 2013 15:45:28 GMT
http://git-wip-us.apache.org/repos/asf/incubator-falcon/blob/353574c6/webapp/pom.xml
----------------------------------------------------------------------
diff --git a/webapp/pom.xml b/webapp/pom.xml
index 9cff2f2..21ab8ab 100644
--- a/webapp/pom.xml
+++ b/webapp/pom.xml
@@ -257,6 +257,16 @@
                                     <outputDirectory>${project.build.directory}/sharelib</outputDirectory>
                                     <destFileName>pig.jar</destFileName>
                                 </artifactItem>
+                                <!-- this is only used in integration-tests against external clusters -->
+                                <artifactItem>
+                                    <groupId>org.apache.activemq</groupId>
+                                    <artifactId>kahadb</artifactId>
+                                    <version>5.4.3</version>
+                                    <type>jar</type>
+                                    <overWrite>false</overWrite>
+                                    <outputDirectory>${project.build.directory}/libext</outputDirectory>
+                                    <destFileName>kahadb.jar</destFileName>
+                                </artifactItem>
                             </artifactItems>
                         </configuration>
                     </execution>
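
The kahadb jar staged into ${project.build.directory}/libext here is what the new integration
test ships to the external cluster at setup time. A minimal sketch of that flow, lifted from
TableStorageFeedReplicationIT.setUp() further down in this patch (paths and names are the
test's own):

    // Falcon is pointed at the locally staged extension jars...
    StartupProperties.get().setProperty("libext.paths", "./target/libext");
    // ...which the test then copies to the target cluster's working directory
    // on HDFS so they are, per the setUp() comment, "sent as part of workflows".
    String libext = ClusterHelper.getLocation(targetCluster, "working") + "/libext";
    TestContext.copyOozieShareLibsToHDFS("./target/libext",
            ClusterHelper.getStorageUrl(targetCluster) + libext);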

http://git-wip-us.apache.org/repos/asf/incubator-falcon/blob/353574c6/webapp/src/test/java/org/apache/falcon/catalog/TableStorageFeedReplicationIT.java
----------------------------------------------------------------------
diff --git a/webapp/src/test/java/org/apache/falcon/catalog/TableStorageFeedReplicationIT.java b/webapp/src/test/java/org/apache/falcon/catalog/TableStorageFeedReplicationIT.java
new file mode 100644
index 0000000..a736ea1
--- /dev/null
+++ b/webapp/src/test/java/org/apache/falcon/catalog/TableStorageFeedReplicationIT.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.catalog;
+
+import org.apache.falcon.entity.ClusterHelper;
+import org.apache.falcon.entity.v0.cluster.Cluster;
+import org.apache.falcon.entity.v0.cluster.Interfacetype;
+import org.apache.falcon.resource.APIResult;
+import org.apache.falcon.resource.InstancesResult;
+import org.apache.falcon.resource.TestContext;
+import org.apache.falcon.util.HiveTestUtils;
+import org.apache.falcon.util.OozieTestUtils;
+import org.apache.falcon.util.StartupProperties;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hcatalog.api.HCatPartition;
+import org.apache.oozie.client.OozieClient;
+import org.apache.oozie.client.WorkflowJob;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+
+import javax.ws.rs.core.MediaType;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Integration tests for Feed Replication with Table storage.
+ *
+ * This test is disabled as it heavily depends on the Oozie sharelib for
+ * HCatalog being made available on HDFS; this is tracked in FALCON-139.
+ */
+@Test (enabled = false)
+public class TableStorageFeedReplicationIT {
+
+    private static final String SOURCE_DATABASE_NAME = "src_demo_db";
+    private static final String SOURCE_TABLE_NAME = "customer_raw";
+
+    private static final String TARGET_DATABASE_NAME = "tgt_demo_db";
+    private static final String TARGET_TABLE_NAME = "customer_bcp";
+
+    private static final String PARTITION_VALUE = "2013-09-24-00"; // ${YEAR}-${MONTH}-${DAY}-${HOUR}
+
+    private final TestContext sourceContext = new TestContext();
+    private String sourceMetastoreUrl;
+
+    private final TestContext targetContext = new TestContext();
+    private String targetMetastoreUrl;
+
+
+    @BeforeClass
+    public void setUp() throws Exception {
+        TestContext.cleanupStore();
+
+        Map<String, String> overlay = sourceContext.getUniqueOverlay();
+        String sourceFilePath = sourceContext.overlayParametersOverTemplate("/table/primary-cluster.xml", overlay);
+        sourceContext.setCluster(sourceFilePath);
+
+        final Cluster sourceCluster = sourceContext.getCluster().getCluster();
+        String sourceStorageUrl = ClusterHelper.getStorageUrl(sourceCluster);
+
+        // copyTestDataToHDFS
+        final String sourcePath = sourceStorageUrl + "/falcon/test/input/" + PARTITION_VALUE;
+        TestContext.copyResourceToHDFS("/apps/pig/data.txt", "data.txt", sourcePath);
+
+        sourceMetastoreUrl = ClusterHelper.getInterface(sourceCluster, Interfacetype.REGISTRY).getEndpoint();
+        setupHiveMetastore(sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME);
+        HiveTestUtils.loadData(sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME, sourcePath,
+                PARTITION_VALUE);
+
+        String targetFilePath = targetContext.overlayParametersOverTemplate("/table/bcp-cluster.xml", overlay);
+        targetContext.setCluster(targetFilePath);
+
+        final Cluster targetCluster = targetContext.getCluster().getCluster();
+        targetMetastoreUrl = ClusterHelper.getInterface(targetCluster, Interfacetype.REGISTRY).getEndpoint();
+        setupHiveMetastore(targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME);
+
+        // set up kahadb to be sent as part of workflows
+        StartupProperties.get().setProperty("libext.paths", "./target/libext");
+        String libext = ClusterHelper.getLocation(targetCluster, "working") + "/libext";
+        String targetStorageUrl = ClusterHelper.getStorageUrl(targetCluster);
+        TestContext.copyOozieShareLibsToHDFS("./target/libext", targetStorageUrl + libext);
+    }
+
+    private void setupHiveMetastore(String metastoreUrl, String databaseName,
+                                    String tableName) throws Exception {
+        cleanupHiveMetastore(metastoreUrl, databaseName, tableName);
+
+        HiveTestUtils.createDatabase(metastoreUrl, databaseName);
+        final List<String> partitionKeys = Arrays.asList("ds");
+        HiveTestUtils.createTable(metastoreUrl, databaseName, tableName, partitionKeys);
+        // todo this is a kludge to work around hive's limitations
+        HiveTestUtils.alterTable(metastoreUrl, databaseName, tableName);
+    }
+
+    @AfterClass
+    public void tearDown() throws Exception {
+        TestContext.executeWithURL("entity -delete -type feed -name customer-table-replicating-feed");
+        TestContext.executeWithURL("entity -delete -type cluster -name primary-cluster");
+        TestContext.executeWithURL("entity -delete -type cluster -name bcp-cluster");
+
+        cleanupHiveMetastore(sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME);
+        cleanupHiveMetastore(targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME);
+
+        cleanupStagingDirs(sourceContext.getCluster().getCluster(), SOURCE_DATABASE_NAME);
+        cleanupStagingDirs(targetContext.getCluster().getCluster(), TARGET_DATABASE_NAME);
+    }
+
+    private void cleanupHiveMetastore(String metastoreUrl, String databaseName, String tableName) throws Exception {
+        HiveTestUtils.dropTable(metastoreUrl, databaseName, tableName);
+        HiveTestUtils.dropDatabase(metastoreUrl, databaseName);
+    }
+
+    private void cleanupStagingDirs(Cluster cluster, String databaseName) throws IOException {
+        FileSystem fs = FileSystem.get(ClusterHelper.getConfiguration(cluster));
+        String stagingDir = "/apps/falcon/staging/"
+                + "FALCON_FEED_REPLICATION_customer-table-replicating-feed_primary-cluster/"
+                + databaseName;
+        fs.delete(new Path(stagingDir), true);
+    }
+
+    @Test (enabled = false)
+    public void testTableReplication() throws Exception {
+        final String feedName = "customer-table-replicating-feed";
+        final Map<String, String> overlay = sourceContext.getUniqueOverlay();
+        String filePath = sourceContext.overlayParametersOverTemplate("/table/primary-cluster.xml", overlay);
+        Assert.assertEquals(0, TestContext.executeWithURL("entity -submit -type cluster -file " + filePath));
+
+        filePath = targetContext.overlayParametersOverTemplate("/table/bcp-cluster.xml", overlay);
+        Assert.assertEquals(0, TestContext.executeWithURL("entity -submit -type cluster -file " + filePath));
+
+        HCatPartition sourcePartition = HiveTestUtils.getPartition(
+                sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME, "ds", PARTITION_VALUE);
+        Assert.assertNotNull(sourcePartition);
+
+        filePath = sourceContext.overlayParametersOverTemplate("/table/customer-table-replicating-feed.xml", overlay);
+        Assert.assertEquals(0, TestContext.executeWithURL("entity -submitAndSchedule -type feed -file " + filePath));
+
+        // wait until the workflow job completes
+        WorkflowJob jobInfo = OozieTestUtils.getWorkflowJob(targetContext.getCluster().getCluster(),
+                OozieClient.FILTER_NAME + "=FALCON_FEED_REPLICATION_" + feedName);
+        Assert.assertEquals(WorkflowJob.Status.SUCCEEDED, jobInfo.getStatus());
+
+        // verify if the partition on the target exists
+        HCatPartition targetPartition = HiveTestUtils.getPartition(
+                targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME, "ds", PARTITION_VALUE);
+        Assert.assertNotNull(targetPartition);
+
+        InstancesResult response = targetContext.getService().path("api/instance/running/feed/" + feedName)
+                .header("Remote-User", "guest")
+                .accept(MediaType.APPLICATION_JSON)
+                .get(InstancesResult.class);
+        Assert.assertEquals(APIResult.Status.SUCCEEDED, response.getStatus());
+    }
+}
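
To actually run this IT, both the class-level @Test(enabled = false) and the method-level one
on testTableReplication() must be flipped, and the Oozie sharelib for HCatalog must already be
on HDFS (FALCON-139). The flow is then: submit both clusters, verify the source partition
exists, submit-and-schedule the replicating feed, wait for the Oozie replication workflow to
reach SUCCEEDED, and finally assert that partition ds='2013-09-24-00' has appeared on the
target table and that the instance API reports success.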

http://git-wip-us.apache.org/repos/asf/incubator-falcon/blob/353574c6/webapp/src/test/java/org/apache/falcon/resource/TestContext.java
----------------------------------------------------------------------
diff --git a/webapp/src/test/java/org/apache/falcon/resource/TestContext.java b/webapp/src/test/java/org/apache/falcon/resource/TestContext.java
index af6f22c..3d22207 100644
--- a/webapp/src/test/java/org/apache/falcon/resource/TestContext.java
+++ b/webapp/src/test/java/org/apache/falcon/resource/TestContext.java
@@ -395,51 +395,6 @@ public class TestContext {
         return false;
     }
 
-    /*
-    public WorkflowJob getWorkflowJob(String filter) throws Exception {
-        OozieClient ozClient = OozieClientFactory.get(cluster.getCluster());
-
-        List<WorkflowJob> jobs;
-        while (true) {
-            jobs = ozClient.getJobsInfo(filter);
-            System.out.println("jobs = " + jobs);
-            if (jobs.size() > 0) {
-                break;
-            } else {
-                Thread.sleep(1000);
-            }
-        }
-
-        WorkflowJob jobInfo = jobs.get(0);
-        while (true) {
-            if (!(jobInfo.getStatus() == WorkflowJob.Status.RUNNING
-                    || jobInfo.getStatus() == WorkflowJob.Status.PREP)) {
-                break;
-            } else {
-                Thread.sleep(1000);
-                jobInfo = ozClient.getJobInfo(jobInfo.getId());
-                System.out.println("jobInfo = " + jobInfo);
-            }
-        }
-
-        return jobInfo;
-    }
-
-    public Path getOozieLogPath(WorkflowJob jobInfo) throws Exception {
-        Path stagingPath = new Path(ClusterHelper.getLocation(cluster.getCluster(), "staging"),
-                EntityUtil.getStagingPath(cluster.getCluster()) + "/../logs");
-        final Path logPath = new Path(ClusterHelper.getStorageUrl(cluster.getCluster()), stagingPath);
-        LogMover.main(new String[]{"-workflowEngineUrl",
-                ClusterHelper.getOozieUrl(cluster.getCluster()),
-                "-subflowId", jobInfo.getId(), "-runId", "1",
-                "-logDir", logPath.toString() + "/job-2012-04-21-00-00",
-                "-status", "SUCCEEDED", "-entityType", "process",
-                "-userWorkflowEngine", "pig",});
-
-        return new Path(logPath, "job-2012-04-21-00-00/001/oozie.log");
-    }
-    */
-
     public Map<String, String> getUniqueOverlay() throws FalconException {
         Map<String, String> overlay = new HashMap<String, String>();
         long time = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/incubator-falcon/blob/353574c6/webapp/src/test/java/org/apache/falcon/util/HiveTestUtils.java
----------------------------------------------------------------------
diff --git a/webapp/src/test/java/org/apache/falcon/util/HiveTestUtils.java b/webapp/src/test/java/org/apache/falcon/util/HiveTestUtils.java
index db8ee01..4cad063 100644
--- a/webapp/src/test/java/org/apache/falcon/util/HiveTestUtils.java
+++ b/webapp/src/test/java/org/apache/falcon/util/HiveTestUtils.java
@@ -77,7 +77,7 @@ public final class HiveTestUtils {
     }
 
     public static void createExternalTable(String metaStoreUrl, String databaseName, String tableName,
-                                    List<String> partitionKeys, String externalLocation) throws Exception {
+                                           List<String> partitionKeys, String externalLocation) throws Exception {
         ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
         cols.add(new HCatFieldSchema("id", HCatFieldSchema.Type.INT, "id comment"));
         cols.add(new HCatFieldSchema("value", HCatFieldSchema.Type.STRING, "value comment"));
@@ -101,12 +101,27 @@ public final class HiveTestUtils {
         client.createTable(tableDesc);
     }
 
+    public static void alterTable(String metaStoreUrl, String databaseName,
+                                  String tableName) throws Exception {
+        StringBuilder alterTableDdl = new StringBuilder();
+        alterTableDdl
+                .append(" alter table ")
+                .append(tableName)
+                .append(" set fileformat ")
+                .append(" inputformat 'org.apache.hadoop.mapred.TextInputFormat' ")
+                .append(" outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'");
+
+        startSessionState(metaStoreUrl);
+        execHiveDDL("use " + databaseName);
+        execHiveDDL(alterTableDdl.toString());
+    }
+
     public static void loadData(String metaStoreUrl, String databaseName, String tableName,
                                 String path, String partition) throws Exception {
         StringBuilder alterTableDdl = new StringBuilder();
         alterTableDdl
                 .append(" load data inpath ")
-                .append(" '").append(path).append(partition).append("' ")
+                .append(" '").append(path).append("' ")
                 .append(" into table ")
                 .append(tableName)
                 .append(" partition ").append(" (ds='").append(partition).append("') ");
@@ -142,7 +157,7 @@ public final class HiveTestUtils {
         System.out.println("response.getErrorMessage() = " + response.getErrorMessage());
         System.out.println("response.getSQLState() = " + response.getSQLState());
 
-        if (response.getResponseCode() == 1) {
+        if (response.getResponseCode() > 0) {
             throw new Exception(response.getErrorMessage());
         }
     }
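
With these changes, alterTable() and loadData() build HiveQL strings that execHiveDDL() runs
in the current session. For the test's source table they come out as (whitespace condensed,
hdfs:// prefix elided):

    alter table customer_raw set fileformat
        inputformat 'org.apache.hadoop.mapred.TextInputFormat'
        outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat';
    load data inpath '.../falcon/test/input/2013-09-24-00'
        into table customer_raw partition (ds='2013-09-24-00');

The loadData() fix drops the extra .append(partition) because its caller now stages data under
a path that already ends in the partition value (sourcePath is built as ".../input/" +
PARTITION_VALUE), so appending it again would have doubled the final path segment. The
response check is also widened from == 1 to > 0, since any positive response code from Hive
signals failure.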

http://git-wip-us.apache.org/repos/asf/incubator-falcon/blob/353574c6/webapp/src/test/java/org/apache/falcon/util/OozieTestUtils.java
----------------------------------------------------------------------
diff --git a/webapp/src/test/java/org/apache/falcon/util/OozieTestUtils.java b/webapp/src/test/java/org/apache/falcon/util/OozieTestUtils.java
new file mode 100644
index 0000000..690fb6b
--- /dev/null
+++ b/webapp/src/test/java/org/apache/falcon/util/OozieTestUtils.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.util;
+
+import org.apache.falcon.entity.ClusterHelper;
+import org.apache.falcon.entity.EntityUtil;
+import org.apache.falcon.entity.v0.cluster.Cluster;
+import org.apache.falcon.logging.LogMover;
+import org.apache.falcon.workflow.engine.OozieClientFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.oozie.client.OozieClient;
+import org.apache.oozie.client.WorkflowJob;
+
+import java.util.List;
+
+/**
+ * Oozie Utility class for integration-tests.
+ */
+public final class OozieTestUtils {
+
+    private OozieTestUtils() {
+    }
+
+    public static WorkflowJob getWorkflowJob(Cluster cluster, String filter) throws Exception {
+        OozieClient ozClient = OozieClientFactory.get(cluster);
+
+        List<WorkflowJob> jobs;
+        while (true) {
+            jobs = ozClient.getJobsInfo(filter);
+            System.out.println("jobs = " + jobs);
+            if (jobs.size() > 0) {
+                break;
+            } else {
+                Thread.sleep(1000);
+            }
+        }
+
+        WorkflowJob jobInfo = jobs.get(0);
+        while (true) {
+            if (!(jobInfo.getStatus() == WorkflowJob.Status.RUNNING
+                    || jobInfo.getStatus() == WorkflowJob.Status.PREP)) {
+                break;
+            } else {
+                Thread.sleep(1000);
+                jobInfo = ozClient.getJobInfo(jobInfo.getId());
+                System.out.println("jobInfo = " + jobInfo);
+            }
+        }
+
+        return jobInfo;
+    }
+
+    public static Path getOozieLogPath(Cluster cluster, WorkflowJob jobInfo) throws Exception {
+
+        Path stagingPath = new Path(ClusterHelper.getLocation(cluster, "staging"),
+                EntityUtil.getStagingPath(cluster) + "/../logs");
+        final Path logPath = new Path(ClusterHelper.getStorageUrl(cluster), stagingPath);
+        LogMover.main(new String[] {
+            "-workflowEngineUrl", ClusterHelper.getOozieUrl(cluster),
+            "-subflowId", jobInfo.getId(), "-runId", "1",
+            "-logDir", logPath.toString() + "/job-2012-04-21-00-00",
+            "-status", "SUCCEEDED", "-entityType", "process",
+            "-userWorkflowEngine", "pig",
+        });
+
+        return new Path(logPath, "job-2012-04-21-00-00/001/oozie.log");
+    }
+}
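
These are the two helpers deleted above from TestContext, where they sat as dead commented-out
code; they return here as live code with the Cluster passed in explicitly instead of read from
instance state. Usage, as in TableStorageFeedReplicationIT.testTableReplication():

    WorkflowJob jobInfo = OozieTestUtils.getWorkflowJob(cluster,
            OozieClient.FILTER_NAME + "=FALCON_FEED_REPLICATION_" + feedName);
    Assert.assertEquals(WorkflowJob.Status.SUCCEEDED, jobInfo.getStatus());

Note that both polling loops in getWorkflowJob() spin indefinitely on a one-second sleep; a
workflow that never matches the filter will hang the caller, as there is no timeout.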

http://git-wip-us.apache.org/repos/asf/incubator-falcon/blob/353574c6/webapp/src/test/resources/table/bcp-cluster.xml
----------------------------------------------------------------------
diff --git a/webapp/src/test/resources/table/bcp-cluster.xml b/webapp/src/test/resources/table/bcp-cluster.xml
new file mode 100644
index 0000000..4c6f6c6
--- /dev/null
+++ b/webapp/src/test/resources/table/bcp-cluster.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+<!--
+    BCP cluster configuration for demo vm
+  -->
+<cluster colo="east-coast" description="BCP Cluster"
+         name="bcp-cluster"
+         xmlns="uri:falcon:cluster:0.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+    <interfaces>
+        <interface type="readonly" endpoint="hftp://localhost:20070"
+                   version="1.1.1" />
+
+        <interface type="write" endpoint="hdfs://localhost:20020"
+                   version="1.1.1" />
+
+        <interface type="execute" endpoint="localhost:20300"
+                   version="1.1.1" />
+
+        <interface type="workflow" endpoint="http://localhost:11020/oozie/"
+                   version="3.3.0" />
+
+        <interface type="registry" endpoint="thrift://localhost:29083"
+                   version="0.11.0" />
+
+        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
+                   version="5.4.3" />
+    </interfaces>
+
+    <locations>
+        <location name="staging" path="/apps/falcon/staging" />
+        <location name="temp" path="/tmp" />
+        <location name="working" path="/apps/falcon/working" />
+    </locations>
+
+</cluster>
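
The registry interface is the Hive metastore Thrift endpoint; the IT resolves it with
ClusterHelper.getInterface(cluster, Interfacetype.REGISTRY).getEndpoint() and hands it to
HiveTestUtils. This BCP cluster otherwise mirrors primary-cluster.xml below, differing only in
its port numbers while sharing the same ActiveMQ broker on 61616.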

http://git-wip-us.apache.org/repos/asf/incubator-falcon/blob/353574c6/webapp/src/test/resources/table/customer-table-replicating-feed.xml
----------------------------------------------------------------------
diff --git a/webapp/src/test/resources/table/customer-table-replicating-feed.xml b/webapp/src/test/resources/table/customer-table-replicating-feed.xml
new file mode 100644
index 0000000..c1a48a3
--- /dev/null
+++ b/webapp/src/test/resources/table/customer-table-replicating-feed.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+<!--
+    Replicating Hourly customer table from primary to secondary cluster.
+  -->
+<feed description="Replicating customer table feed" name="customer-table-replicating-feed"
+      xmlns="uri:falcon:feed:0.1">
+
+    <frequency>hours(1)</frequency>
+    <timezone>UTC</timezone>
+
+    <clusters>
+        <cluster name="primary-cluster" type="source">
+            <validity start="2013-09-24T00:00Z" end="2013-10-26T00:00Z"/>
+            <retention limit="hours(2)" action="delete"/>
+        </cluster>
+        <cluster name="bcp-cluster" type="target">
+            <validity start="2013-09-24T00:00Z" end="2013-10-26T00:00Z"/>
+            <retention limit="days(30)" action="delete"/>
+
+            <table uri="catalog:tgt_demo_db:customer_bcp#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
+        </cluster>
+    </clusters>
+
+    <table uri="catalog:src_demo_db:customer_raw#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
+
+    <ACL owner="seetharam" group="users" permission="0755"/>
+    <schema location="" provider="hcatalog"/>
+
+</feed>
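
The table URIs use Falcon's catalog storage notation, catalog:<database>:<table>#<partition-spec>,
with the partition value assembled from EL time variables. For the instance the IT exercises
(PARTITION_VALUE = "2013-09-24-00"), the source URI materializes as:

    catalog:src_demo_db:customer_raw#ds=2013-09-24-00

which is exactly the partition the test loads on the source and later asserts exists on
customer_bcp on the target.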

http://git-wip-us.apache.org/repos/asf/incubator-falcon/blob/353574c6/webapp/src/test/resources/table/primary-cluster.xml
----------------------------------------------------------------------
diff --git a/webapp/src/test/resources/table/primary-cluster.xml b/webapp/src/test/resources/table/primary-cluster.xml
new file mode 100644
index 0000000..38abd98
--- /dev/null
+++ b/webapp/src/test/resources/table/primary-cluster.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+<!--
+    Primary cluster configuration for demo vm
+  -->
+<cluster colo="west-coast" description="Primary Cluster"
+         name="primary-cluster"
+         xmlns="uri:falcon:cluster:0.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+    <interfaces>
+        <interface type="readonly" endpoint="hftp://localhost:10070"
+                   version="1.1.1" />
+
+        <interface type="write" endpoint="hdfs://localhost:10020"
+                   version="1.1.1" />
+
+        <interface type="execute" endpoint="localhost:10300"
+                   version="1.1.1" />
+
+        <interface type="workflow" endpoint="http://localhost:11010/oozie/"
+                   version="3.3.0" />
+
+        <interface type="registry" endpoint="thrift://localhost:19083"
+                   version="0.11.0" />
+
+        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
+                   version="5.4.3" />
+    </interfaces>
+
+    <locations>
+        <location name="staging" path="/apps/falcon/staging" />
+        <location name="temp" path="/tmp" />
+        <location name="working" path="/apps/falcon/working" />
+    </locations>
+
+</cluster>

