atlas-commits mailing list archives

From mad...@apache.org
Subject [05/25] incubator-atlas git commit: ATLAS-1898: initial commit of ODF
Date Wed, 28 Jun 2017 05:57:18 GMT
http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/TestZookeeper.java
----------------------------------------------------------------------
diff --git a/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/TestZookeeper.java b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/TestZookeeper.java
new file mode 100755
index 0000000..9650bd6
--- /dev/null
+++ b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/TestZookeeper.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.store.zookeeper34.test;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.BindException;
+import java.net.DatagramSocket;
+import java.net.ServerSocket;
+import java.rmi.NotBoundException;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.ZooKeeper.States;
+import org.apache.zookeeper.server.ServerConfig;
+import org.apache.zookeeper.server.ZooKeeperServerMain;
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
+
+import org.apache.atlas.odf.core.Utils;
+
+public class TestZookeeper {
+
+	public TestZookeeper() {
+	}
+
+	public void start() {
+		try {
+			startZookeeper();
+		} catch (Exception e) {
+			e.printStackTrace();
+			throw new RuntimeException(e);
+		}
+	}
+
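+	/** Recursively deletes the given file or directory. Returns true if everything could be deleted; throws FileNotFoundException if the path does not exist. */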
+	public static boolean deleteRecursive(File path) throws FileNotFoundException {
+		if (!path.exists()) {
+			throw new FileNotFoundException(path.getAbsolutePath());
+		}
+		boolean ret = true;
+		if (path.isDirectory()) {
+			for (File f : path.listFiles()) {
+				ret = ret && deleteRecursive(f);
+			}
+		}
+		return ret && path.delete();
+	}
+
+	static Thread zookeeperThread = null;
+	static Object lockObject = new Object();
+	static ZooKeeperServerMainWithShutdown zooKeeperServer = null;
+
+	boolean cleanData = true; // all data is cleaned at server start !!
+
+	Logger logger = Logger.getLogger(TestZookeeper.class.getName());
+
+	void log(String s) {
+		logger.info(s);
+	}
+
+	int zookeeperStartupTime = 10000;
+
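+	/** ZooKeeperServerMain.shutdown() is protected; this subclass exposes it so the embedded server can be stopped explicitly. */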
+	static class ZooKeeperServerMainWithShutdown extends ZooKeeperServerMain {
+		public void shutdown() {
+			super.shutdown();
+		}
+	}
+
+	private void startZookeeper() throws Exception {
+		log("Starting zookeeper");
+
+		final Properties zkProps = Utils.readConfigProperties("org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties");
+		log("zookeeper properties: " + zkProps);
+		if (cleanData) {
+			String dataDir = zkProps.getProperty("dataDir");
+			log("Removing all data from zookeeper data dir " + dataDir);
+			File dir = new File(dataDir);
+			if (dir.exists()) {
+				if (!deleteRecursive(dir)) {
+					throw new IOException("Could not delete directory " + dataDir);
+				}
+			}
+		}
+		final ZooKeeperServerMainWithShutdown zk = new ZooKeeperServerMainWithShutdown();
+		final ServerConfig serverConfig = new ServerConfig();
+		log("Loading zookeeper config...");
+		QuorumPeerConfig zkConfig = new QuorumPeerConfig();
+		zkConfig.parseProperties(zkProps);
+		serverConfig.readFrom(zkConfig);
+		final String zkPort = (String) zkProps.get("clientPort");
+
+		Runnable zookeeperStarter = new Runnable() {
+
+			@Override
+			public void run() {
+				try {
+					log("Now starting Zookeeper with API...");
+					zk.runFromConfig(serverConfig);
+				} catch (BindException ex) {
+					log("Embedded zookeeper could not be started, port is already in use. Trying to use external zookeeper");
+					ZooKeeper zK = null;
+					try {
+						zK = new ZooKeeper("localhost:" + zkPort, 5000, null);
+						if (zK.getState().equals(States.CONNECTED)) {
+							log("Using existing zookeeper running on port " + zkPort);
+							return;
+						} else {
+							throw new NotBoundException();
+						}
+					} catch (Exception zkEx) {
+						throw new RuntimeException("Could not connect to zookeeper on port " + zkPort + ". Please close all applications listening on this port.");
+					} finally {
+						if (zK != null) {
+							try {
+								zK.close();
+							} catch (InterruptedException e) {
+								logger.log(Level.WARNING, "An error occurred closing the zk connection", e);
+							}
+						}
+					}
+				} catch (Exception e) {
+					e.printStackTrace();
+					throw new RuntimeException(e);
+				}
+
+			}
+		};
+
+		zookeeperThread = new Thread(zookeeperStarter);
+		zookeeperThread.setDaemon(true);
+		zookeeperThread.start();
+		log("Zookeeper start initiated, waiting " + (zookeeperStartupTime / 1000) + "s...");
+		Thread.sleep(zookeeperStartupTime);
+		zooKeeperServer = zk;
+		log("Zookeeper started");
+
+	}
+
+	public boolean isRunning() {
+		return zooKeeperServer != null;
+	}
+
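+	/** Returns true if both a TCP server socket and a UDP datagram socket can be bound to the given port, i.e. the port is free. */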
+	boolean isPortAvailable(int port) {
+		ServerSocket ss = null;
+		DatagramSocket ds = null;
+		try {
+			ss = new ServerSocket(port);
+			ss.setReuseAddress(true);
+			ds = new DatagramSocket(port);
+			ds.setReuseAddress(true);
+			return true;
+		} catch (IOException e) {
+		} finally {
+			if (ds != null) {
+				ds.close();
+			}
+
+			if (ss != null) {
+				try {
+					ss.close();
+				} catch (IOException e) {
+				}
+			}
+		}
+
+		return false;
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/ZookeeperConfigurationStorageTest.java
----------------------------------------------------------------------
diff --git a/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/ZookeeperConfigurationStorageTest.java b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/ZookeeperConfigurationStorageTest.java
new file mode 100755
index 0000000..1db55f2
--- /dev/null
+++ b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/ZookeeperConfigurationStorageTest.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.store.zookeeper34.test;
+
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+import org.apache.atlas.odf.core.store.zookeeper34.ZookeeperConfigurationStorage;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * This test uses the real storage implementation, so a running ZooKeeper instance is required.
+ */
+public class ZookeeperConfigurationStorageTest {
+	@BeforeClass
+	public static void setup() {
+		new TestZookeeper().start();
+	}
+
+	@Test
+	public void testStoreInZookeeper() {
+		ZookeeperConfigurationStorage store = new ZookeeperConfigurationStorage() {
+
+			@Override
+			public String getZookeeperConfigPath() {
+				return "/odf/testconfig";
+			}
+			
+		};
+		ConfigContainer container = new ConfigContainer();
+		ODFSettings odfConfig = new ODFSettings();
+		String instanceId = "my_test_id";
+		odfConfig.setInstanceId(instanceId);
+		container.setOdf(odfConfig);
+		store.storeConfig(container);
+
+		ConfigContainer updatedContainer = store.getConfig(null);
+		Assert.assertEquals(instanceId, updatedContainer.getOdf().getInstanceId());
+		store.clearCache();
+		
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-store/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
----------------------------------------------------------------------
diff --git a/odf/odf-store/src/test/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-store/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
new file mode 100755
index 0000000..2a5f331
--- /dev/null
+++ b/odf/odf-store/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
@@ -0,0 +1,16 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+## USE for TESTs only
+
+DiscoveryServiceQueueManager=MockQueueManager

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/.gitignore
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/.gitignore b/odf/odf-test-env/.gitignore
new file mode 100755
index 0000000..2045ff3
--- /dev/null
+++ b/odf/odf-test-env/.gitignore
@@ -0,0 +1,5 @@
+target
+.settings
+.classpath
+.project
+.DS_Store

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/pom.xml
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/pom.xml b/odf/odf-test-env/pom.xml
new file mode 100755
index 0000000..a37ed22
--- /dev/null
+++ b/odf/odf-test-env/pom.xml
@@ -0,0 +1,142 @@
+<?xml version="1.0"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+	xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-test-env</artifactId>
+	<name>odf-test-env</name>
+	<url>http://maven.apache.org</url>
+	<properties>
+		<!-- specify versions of components to be downloaded -->
+		<jetty.version>9.2.10.v20150310</jetty.version>
+		<kafka.version>0.10.0.0</kafka.version>
+		<scala.version>2.11</scala.version>
+		<spark.version>2.1.0</spark.version>
+		<jetty.port>58081</jetty.port>
+	</properties>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-web</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>war</type>
+		</dependency>
+	</dependencies>
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-compiler-plugin</artifactId>
+				<executions>
+					<execution>
+						<id>default-compile</id>
+						<phase>compile</phase>
+						<goals>
+							<goal>compile</goal>
+						</goals>
+						<configuration>
+							<skipMain>true</skipMain>
+							<!-- do not compile anything -->
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<version>2.19</version>
+				<configuration>
+					<skipTests>true</skipTests>
+					<!-- do not run tests -->
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-antrun-plugin</artifactId>
+				<version>1.8</version>
+				<executions>
+					<execution>
+						<id>prepare-atlas</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>run</goal>
+						</goals>
+						<configuration>
+							<target>
+								<property name="atlas-unpack-dir" value="${project.build.directory}/downloads" />
+								<property name="atlas.version" value="${atlas.version}" />
+								<ant antfile="../odf-atlas/build_atlas.xml" target="prepare-atlas"></ant>
+							</target>
+						</configuration>
+					</execution>
+					<execution>
+						<id>prepare-components</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>run</goal>
+						</goals>
+						<configuration>
+							<target>
+								<property name="unpack-dir" value="${project.build.directory}/downloads" />
+								<property name="jetty.version" value="${jetty.version}" />
+								<property name="jetty.port" value="${jetty.port}" />
+								<property name="kafka.version" value="${kafka.version}" />
+								<property name="scala.version" value="${scala.version}" />
+								<property name="project.basedir" value="${project.basedir}"/>
+								<ant antfile="prepare_components.xml" target="default"></ant>
+							</target>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-assembly-plugin</artifactId>
+				<configuration>
+					<descriptor>src/assembly/bin.xml</descriptor>
+					<finalName>odf-test-env-${project.version}</finalName>
+				</configuration>
+				<executions>
+					<execution>
+						<id>create-distribution</id>
+						<phase>package</phase>
+						<goals>
+							<goal>single</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<artifactId>maven-jar-plugin</artifactId>
+				<version>2.3.1</version>
+				<executions>
+					<execution>
+						<id>default-jar</id>
+						<!-- do not create default-jar -->
+						<phase>none</phase>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/prepare_components.xml
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/prepare_components.xml b/odf/odf-test-env/prepare_components.xml
new file mode 100755
index 0000000..a6a733b
--- /dev/null
+++ b/odf/odf-test-env/prepare_components.xml
@@ -0,0 +1,169 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project name="prepare_components">
+
+	<!-- Property is provided by pom.xml -->
+	<!-- <property name="jetty.version" value="" /> -->
+	<!-- <property name="kafka.version" value="" /> -->
+	<!-- <property name="scala.version" value="" /> -->
+
+	<dirname property="script.basedir" file="${ant.file.prepare_components}" />
+
+	<property name="jetty-dir" value="jetty-distribution-${jetty.version}" />
+	<property name="kafka-dir" value="kafka_${scala.version}-${kafka.version}" />
+	<property name="spark-dir" value="spark-${spark.version}-bin-hadoop2.7" />
+
+	<property name="jetty-archive" value="/tmp/${jetty-dir}.zip" />
+	<property name="kafka-archive" value="/tmp/${kafka-dir}.tar.gz" />
+	<property name="spark-archive" value="/tmp/${spark-dir}.tar.gz" />
+
+	<condition property="jetty-zip-not-found">
+		<not>
+			<available file="${jetty-archive}">
+			</available>
+		</not>
+	</condition>
+
+	<condition property="kafka-zip-not-found">
+		<not>
+			<available file="${kafka-archive}">
+			</available>
+		</not>
+	</condition>
+
+	<condition property="spark-zip-not-found">
+		<not>
+			<available file="${spark-archive}">
+			</available>
+		</not>
+	</condition>
+
+	<condition property="jetty-unpacked">
+	   <available file="${unpack-dir}/${jetty-dir}/bin/jetty.sh"/>
+    </condition>
+
+	<condition property="kafka-unpacked">
+	   <available file="${unpack-dir}/${kafka-dir}/bin/kafka-server-start.sh"/>
+    </condition>
+
+	<condition property="spark-unpacked">
+	   <available file="${unpack-dir}/${spark-dir}/sbin/start-master.sh"/>
+    </condition>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="download-jetty" if="jetty-zip-not-found">
+		<echo message="Downloading Jetty. Depending on your network this can last up to 20 (yes, twenty) minutes." />
+		<get verbose="true" src="https://repo1.maven.org/maven2/org/eclipse/jetty/jetty-distribution/${jetty.version}/jetty-distribution-${jetty.version}.zip" dest="${jetty-archive}" />
+		<echo message="Jetty downloaded" />
+	</target>
+
+	<target name="download-kafka" if="kafka-zip-not-found">
+		<echo message="Downloading Kafka. Depending on your network this can last up to 20 (yes, twenty) minutes." />
+		<get verbose="true" src="http://ftp-stud.hs-esslingen.de/pub/Mirrors/ftp.apache.org/dist/kafka/${kafka.version}/kafka_${scala.version}-${kafka.version}.tgz" dest="${kafka-archive}" />
+		<echo message="Kafka downloaded" />
+	</target>
+
+	<target name="download-spark" if="spark-zip-not-found">
+		<echo message="Downloading Spark. Depending on your network this can last up to 20 (yes, twenty) minutes." />
+		<get verbose="true" src="http://d3kbcqa49mib13.cloudfront.net/spark-${spark.version}-bin-hadoop2.7.tgz" dest="${spark-archive}" />
+		<echo message="Spark downloaded" />
+	</target>
+
+	<target name="unzip-jetty" unless="jetty-unpacked">
+		<antcall target="download-jetty"/>
+		<echo message="Installing Jetty test instance" />
+		<echo message="Deleting ${unpack-dir}/${jetty-dir}" />
+		<delete dir="${unpack-dir}/${jetty-dir}" />
+		<echo message="deleted" />
+		<unzip src="${jetty-archive}" dest="${unpack-dir}" />
+		<!-- Create Jetty base folder -->
+		<mkdir dir="${unpack-dir}/odfjettybase"/>
+		<!-- Generate Jetty base configuration files -->
+		<java dir="${unpack-dir}/odfjettybase" classname="org.eclipse.jetty.start.Main" fork="true">
+			<arg value="--add-to-startd=https,ssl,deploy,plus"/>
+			<classpath>
+				<pathelement location="${unpack-dir}/${jetty-dir}/start.jar"/>
+				<pathelement path="${unpack-dir}/${jetty-dir}"/>
+				<pathelement path="${java.class.path}"/>
+			</classpath>
+			<jvmarg value="-Djetty.home=${unpack-dir}/${jetty-dir}"/>
+			<jvmarg value="-Djetty.base=${unpack-dir}/odfjettybase"/>
+		</java>
+		<!-- Update Jetty port number -->
+		<replace file="${unpack-dir}/odfjettybase/start.d/https.ini" token="https.port=8443" value="https.port=${jetty.port}"/>
+	</target>
+
+	<target name="unzip-kafka" unless="kafka-unpacked">
+		<antcall target="download-kafka"/>
+		<echo message="Installing Kafka test instance" />
+		<echo message="Deleting ${unpack-dir}/${kafka-dir}" />
+		<delete dir="${unpack-dir}/${kafka-dir}" />
+		<echo message="deleted" />
+	    <untar src="${kafka-archive}" dest="${unpack-dir}" compression="gzip" />
+
+		<!-- remove -loggc command line argument in scripts because they don't exist in the IBM JVM -->
+		<replace file="${unpack-dir}/kafka_${scala.version}-${kafka.version}/bin/kafka-server-start.sh" token="-loggc" value=""/>
+		<replace file="${unpack-dir}/kafka_${scala.version}-${kafka.version}/bin/zookeeper-server-start.sh" token="-loggc" value=""/>
+	</target>
+
+	<target name="unzip-spark" unless="spark-unpacked">
+		<antcall target="download-spark"/>
+		<echo message="Installing Spark test instance" />
+		<echo message="Deleting ${unpack-dir}/${spark-dir}" />
+		<delete dir="${unpack-dir}/${spark-dir}" />
+		<echo message="deleted" />
+	    <untar src="${spark-archive}" dest="${unpack-dir}" compression="gzip" />
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="enable-jetty-basic-authentication">
+		<echo message="Enabling jetty basic authentication..." />
+		<echo message="Updating jetty.xml file..." />
+		<replace file="${unpack-dir}/${jetty-dir}/etc/jetty.xml">
+			<!-- See corresponding config in web.xml file of SDP webapp -->
+			<replacetoken><![CDATA[</Configure>]]></replacetoken>
+			<replacevalue>
+				<![CDATA[
+	<Call name="addBean">
+		<Arg>
+			<New class="org.eclipse.jetty.security.HashLoginService">
+				<Set name="name">ODF Realm</Set>
+				<Set name="config"><SystemProperty name="jetty.home" default="."/>/etc/realm.properties</Set>
+			</New>
+		</Arg>
+	</Call>
+</Configure>
+				]]>
+			</replacevalue>
+		</replace>
+		<echo message="Copying credentials file..." />
+		<copy file="${script.basedir}/../jettyconfig/realm.properties" tofile="${unpack-dir}/${jetty-dir}/etc/realm.properties" overwrite="true"/>
+		<echo message="Jetty basic authentication has been enabled." />
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="default">
+		<mkdir dir="${unpack-dir}"/>
+		<antcall target="unzip-jetty"/>
+		<antcall target="enable-jetty-basic-authentication"/>
+		<antcall target="unzip-kafka"/>
+		<antcall target="unzip-spark"/>
+	</target>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/assembly/bin.xml b/odf/odf-test-env/src/assembly/bin.xml
new file mode 100755
index 0000000..b5731a7
--- /dev/null
+++ b/odf/odf-test-env/src/assembly/bin.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<assembly
+	xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
+	<id>bin</id>
+	<formats>
+		<format>zip</format>
+	</formats>
+	<fileSets>
+		<fileSet>
+			<outputDirectory>/</outputDirectory>
+			<directory>target/downloads</directory>
+			<excludes>
+				<exclude>*.zip</exclude>
+				<exclude>*.gz</exclude>
+				<exclude>**/zookeeper.properties</exclude>
+				<exclude>**/server.properties</exclude>
+			</excludes>
+			<fileMode>0755</fileMode>
+		</fileSet>
+		<fileSet>
+			<outputDirectory>/</outputDirectory>
+			<directory>src/main/scripts</directory>
+			<fileMode>0755</fileMode>
+			<excludes>
+			   <exclude>**/jenkins-*.sh</exclude>
+			</excludes>
+		</fileSet>
+		<fileSet>
+			<outputDirectory>/kafka_${scala.version}-${kafka.version}/config</outputDirectory>
+			<directory>src/main/config</directory>
+			<includes>
+				<include>*.properties</include>
+			</includes>
+		</fileSet>
+		<fileSet>
+			<directory>../odf-doc/target/site</directory>
+			<outputDirectory>/odf-documentation</outputDirectory>
+		</fileSet>
+	</fileSets>
+	<files>
+		<file>
+			<source>../odf-doc/src/site/markdown/test-env.md</source>
+			<outputDirectory>/</outputDirectory>
+			<destName>README.md</destName>
+		</file>
+	</files>
+	<dependencySets>
+		<dependencySet>
+			<outputDirectory>/odfjettybase/webapps</outputDirectory>
+			<includes>
+				<include>*:war:*</include>
+			</includes>
+			<excludes>
+				<exclude>*:jar:*</exclude>
+			</excludes>
+		</dependencySet>
+	</dependencySets>
+</assembly>

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/config/server.properties
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/config/server.properties b/odf/odf-test-env/src/main/config/server.properties
new file mode 100755
index 0000000..1f2a406
--- /dev/null
+++ b/odf/odf-test-env/src/main/config/server.properties
@@ -0,0 +1,134 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=0
+
+############################# Socket Server Settings #############################
+
+# The port the socket server listens on
+port=59092
+
+# Hostname the broker will bind to. If not set, the server will bind to all interfaces
+#host.name=localhost
+
+# Hostname the broker will advertise to producers and consumers. If not set, it uses the
+# value for "host.name" if configured.  Otherwise, it will use the value returned from
+# java.net.InetAddress.getCanonicalHostName().
+#advertised.host.name=<hostname routable by clients>
+
+# The port to publish to ZooKeeper for clients to use. If this is not set,
+# it will publish the same port that the broker binds to.
+#advertised.port=<port accessible by clients>
+
+# The number of threads handling network requests
+num.network.threads=3
+ 
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=102400
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=102400
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/tmp/odftestenv-kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk. 
+# There are a few important trade-offs here:
+#    1. Durability: Unflushed data may be lost if you are not using replication.
+#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
+# segments don't drop below log.retention.bytes.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according 
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=localhost:52181
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=6000

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/config/zookeeper.properties
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/config/zookeeper.properties b/odf/odf-test-env/src/main/config/zookeeper.properties
new file mode 100755
index 0000000..5f4d7e0
--- /dev/null
+++ b/odf/odf-test-env/src/main/config/zookeeper.properties
@@ -0,0 +1,33 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# the directory where the snapshot is stored.
+dataDir=/tmp/odftestenv-zookeeper
+# the port at which the clients will connect
+clientPort=52181
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/scripts/clean_atlas.bat
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/scripts/clean_atlas.bat b/odf/odf-test-env/src/main/scripts/clean_atlas.bat
new file mode 100755
index 0000000..84c2449
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/clean_atlas.bat
@@ -0,0 +1,22 @@
+REM
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM
+REM   http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+setlocal
+
+REM you should not have to change anything below this line ;-)
+
+set TESTENVDIR=%~dp0
+set ATLAS_HOME=%TESTENVDIR%apache-atlas-0.7-incubating-release
+
+echo Delete atlas data
+del /F /S /Q "%ATLAS_HOME%\data"

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/scripts/clean_atlas.sh
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/scripts/clean_atlas.sh b/odf/odf-test-env/src/main/scripts/clean_atlas.sh
new file mode 100755
index 0000000..4eb3b1d
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/clean_atlas.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# You should not have to change anything below this line ;-)
+export BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
+
+export ATLAS_HOME=$BASEDIR/apache-atlas-0.7-incubating-release
+
+echo Delete atlas data
+rm -rf $ATLAS_HOME/data

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/scripts/deploy-odf-war.bat
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/scripts/deploy-odf-war.bat b/odf/odf-test-env/src/main/scripts/deploy-odf-war.bat
new file mode 100755
index 0000000..92561ad
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/deploy-odf-war.bat
@@ -0,0 +1,24 @@
+REM 
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM
+REM   http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+setlocal
+
+REM set ODF_GIT_DIR to the root project of your ODF Git project (i.e. where the top pom.xml resides)
+set ODF_GIT_DIR=c:\git\open-discovery-framework
+
+
+REM you should not have to change anything below this line ;-)
+
+set TESTENVDIR=%~dp0
+
+copy /Y %ODF_GIT_DIR%\odf-web\target\odf-web-1.2.0-SNAPSHOT.war %TESTENVDIR%\odfjettybase\webapps

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/scripts/deploy-odf-war.sh
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/scripts/deploy-odf-war.sh b/odf/odf-test-env/src/main/scripts/deploy-odf-war.sh
new file mode 100755
index 0000000..732515a
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/deploy-odf-war.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set ODF_GIT_DIR to the root project of your ODF Git project (i.e. where the top pom.xml resides)
+export ODF_GIT_DIR=~/git/open-discovery-framework
+
+# You should not have to change anything below this line ;-)
+export BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
+cp $ODF_GIT_DIR/odf-web/target/odf-web-1.2.0-SNAPSHOT.war $BASEDIR/odfjettybase/webapps

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/scripts/download-install-odf-testenv.sh
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/scripts/download-install-odf-testenv.sh b/odf/odf-test-env/src/main/scripts/download-install-odf-testenv.sh
new file mode 100755
index 0000000..e3f6c52
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/download-install-odf-testenv.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Script to download, start, and configure the ODF test environment.
+# JenkinsBuildNumber refers to the build number of the job Open-Discovery-Framework, see here:
+# https://shared-discovery-platform-jenkins.swg-devops.com:8443/job/Open-Discovery-Framework
+#
+# Usage: download-install-odf-testenv.sh [<JenkinsBuildNumber> <Directory> ]
+#        Default values:
+#             <JenkinsBuildNumber>: lastSuccessfulBuild
+#             <Directory>: ~/odf-test-env
+#
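+# Example (illustrative):
+#   ./download-install-odf-testenv.sh lastSuccessfulBuild ~/odf-test-env
+#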
+
+JENKINSBUILDNUMBER=$1
+if [ -z "$JENKINSBUILDNUMBER" ]; then
+   JENKINSBUILDNUMBER=lastSuccessfulBuild
+   echo Jenkins build number not provided, using default $JENKINSBUILDNUMBER
+fi
+
+TESTENVDIR=$2
+if [ -z "$TESTENVDIR" ]; then
+   TESTENVDIR=~/odf-test-env
+   echo Target directory not provided, using default $TESTENVDIR
+fi
+
+# hidden third parameter taking the jenkins job name
+JENKINSJOB=$3
+if [ -z "$JENKINSJOB" ]; then
+   JENKINSJOB=Open-Discovery-Framework
+   echo Jenkins job not provided, using default $JENKINSJOB
+fi
+
+echo Downloading test env to directory $TESTENVDIR, Jenkins build number: $JENKINSBUILDNUMBER
+
+
+TESTENVVERSION=1.2.0-SNAPSHOT
+TESTENVZIP=/tmp/odf-test-env.zip
+FULLHOSTNAME=`hostname -f`
+
+
+echo Downloading ODF test env
+curl https://shared-discovery-platform-jenkins.swg-devops.com:8443/job/$JENKINSJOB/$JENKINSBUILDNUMBER/artifact/odf-test-env/target/odf-test-env-$TESTENVVERSION-bin.zip --output $TESTENVZIP
+
+echo Stopping test env if it exists...
+$TESTENVDIR/odf-test-env-$TESTENVVERSION/odftestenv.sh stop
+sleep 1
+echo Test env stopped
+
+echo Removing existing test env directory...
+rm -rf $TESTENVDIR/odf-test-env-$TESTENVVERSION
+echo Existing test env directory removed
+
+echo Unpacking $TESTENVZIP to $TESTENVDIR
+mkdir -p $TESTENVDIR
+unzip -q $TESTENVZIP -d $TESTENVDIR
+
+$TESTENVDIR/odf-test-env-$TESTENVVERSION/odftestenv.sh cleanall
+
+echo ODF test env installed and started
+echo "Point your browser to https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT to check it out"

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/scripts/jenkins-manage-testenv.sh
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/scripts/jenkins-manage-testenv.sh b/odf/odf-test-env/src/main/scripts/jenkins-manage-testenv.sh
new file mode 100755
index 0000000..bdb1428
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/jenkins-manage-testenv.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is the script used in the job definition of our Jenkins job Manage-Install-ODF-Testenv
+# The original can be found in git: odf-test-env/src/main/scripts/jenkins-manage-testenv.sh
+#
+# The Jenkins job should have the following parameters:
+#
+# 1. nodelabel: Label parameter. Default: odftestenv
+#
+# 2. action: Choice parameter with these choices: start, stop, cleanall, cleanconfig, cleanmetadata, install
+# Action description:
+#Available actions are:
+#<ul>
+#  <li>install: Remove the existing and install a new test environment build.
+#    Installs the most recent successful build by default. To change which build is used
+#    set the parameters <em>buildnumber</em> and <em>job</em> accordingly.</li>
+#  <li>start: (re)start the test environment</li>
+#  <li>stop:  stop the test environment</li>
+#  <li>cleanall: (re)starts with clean configuration and clean metadata</li>
+#  <li>cleanconfig   (re)starts with clean configuration</li>
+#  <li>cleanmetadata (re)starts with clean metadata</li>
+#</ul>
+#
+# 3. jenkinsjob: Choice parameter with choices: Shared-Discovery-Platform, Shared-Discovery-Platform-Parameters
+#
+# 4. buildnumber: String parameter with default: lastSuccessfulBuild
+#
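+# Example invocation outside Jenkins (illustrative; Jenkins normally passes these job parameters as environment variables):
+#   action=install buildnumber=lastSuccessfulBuild jenkinsjob=Shared-Discovery-Platform ./jenkins-manage-testenv.sh
+#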
+
+echo Managing ODF test environment with parameters: action = $action, buildnumber = $buildnumber, jenkinsjob = $jenkinsjob
+
+if [ "$action" = "install" ]; then
+  ODFTESTENVTARGETDIR=/home/atlasadmin/odf-test-env
+  OUTPUTFILE=/tmp/download-install-odf-testenv.sh
+
+  if [ "$buildnumber" = "" ]; then
+    buildnumber=lastSuccessfulBuild
+  fi
+
+  if [ "$jenkinsjob" = "" ]; then
+    jenkinsjob=Shared-Discovery-Platform
+  fi
+
+  echo Downloading build number $buildnumber
+  curl https://shared-discovery-platform-jenkins.swg-devops.com:8443/job/$jenkinsjob/$buildnumber/artifact/odf-test-env/src/main/scripts/download-install-odf-testenv.sh --output $OUTPUTFILE
+
+  echo Running installer script on directory $ODFTESTENVTARGETDIR with build number $buildnumber
+  chmod 755 $OUTPUTFILE
+  export BUILD_ID=dontletjenkinskillme
+  echo Running command $OUTPUTFILE $buildnumber $ODFTESTENVTARGETDIR $jenkinsjob
+  $OUTPUTFILE $buildnumber $ODFTESTENVTARGETDIR $jenkinsjob
+else
+  TESTENVDIR=~/odf-test-env/odf-test-env-1.2.0-SNAPSHOT
+  export BUILD_ID=dontletjenkinskillme
+
+  $TESTENVDIR/odftestenv.sh $action
+fi

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/scripts/odftestenv.sh
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/scripts/odftestenv.sh b/odf/odf-test-env/src/main/scripts/odftestenv.sh
new file mode 100755
index 0000000..94d08f3
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/odftestenv.sh
@@ -0,0 +1,232 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# You should not have to change anything below this line ;-)
+###############################################################
+
+#############################################
+## Check that java and python are available
+
+
+if [ "x$JAVA_HOME" == "x" ]; then
+  echo "JAVA_HOME is not set, using standard java on path"
+  JAVAEXE=$(which java)
+else
+  echo "JAVA_HOME is set to $JAVA_HOME"
+  JAVAEXE=$JAVA_HOME/bin/java
+fi
+
+if [ ! -x $JAVAEXE ]; then
+   echo "Java executable $JAVAEXE could not be found. Set JAVA_HOME accordingly or make sure that java is in your path".
+   exit 1
+fi
+
+echo "Using java: $JAVAEXE"
+
+
+PYTHON27EXE=python
+PYTHONVERSION=`$PYTHON27EXE --version 2>&1`
+if [[ ! $PYTHONVERSION == *2.7.* ]]; then
+   echo "Warning: Python command is not version 2.7. Starting / stopping Atlas might not work properly"
+fi
+
+
+###############################################
+## Set some variables
+
+BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
+FULLHOSTNAME=`hostname -f`
+
+ATLAS_HOME=$BASEDIR/apache-atlas-0.7-incubating-release
+ATLAS_PORT=21453
+ATLAS_URL=https://localhost:$ATLAS_PORT
+ATLAS_USER=admin
+ATLAS_PASSWORD=UR0+HOiApXG9B8SNpKN5ww==
+
+ZK_DATADIR=/tmp/odftestenv-zookeeper
+KAFKA_DATADIR=/tmp/odftestenv-kafka-logs
+
+# export KAFKA_OPTS so that it is picked up by the Kafka and Zookeeper start scripts. It can be used as a marker to search for those processes.
+KILLMARKER=thisisanodftestenvprocess
+export KAFKA_OPTS="-D$KILLMARKER=true"
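+# stopTestEnv below greps for this $KILLMARKER property in the process list to find and kill exactly these processes.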
+KAFKA_HOME=$BASEDIR/kafka_2.11-0.10.0.0
+SPARK_HOME=$BASEDIR/spark-2.1.0-bin-hadoop2.7
+
+JETTY_BASE=$BASEDIR/odfjettybase
+JETTY_HOME=$BASEDIR/jetty-distribution-9.2.10.v20150310
+
+##########################################
+## Copy required files
+
+if [ "$(uname)" == "Darwin" ]; then
+	cp $ATLAS_HOME/conf/atlas-application.properties_mac $ATLAS_HOME/conf/atlas-application.properties
+else
+	cp $ATLAS_HOME/conf/atlas-application.properties_linux $ATLAS_HOME/conf/atlas-application.properties
+fi
+
+##########################################
+## Functions
+
+function waitSeconds {
+   echo "     Waiting for $1 seconds..."
+   sleep $1
+}
+
+function cleanMetadata {
+	echo Removing Atlas data...
+	rm -rf $ATLAS_HOME/data
+	rm -rf $ATLAS_HOME/logs
+	echo Atlas data removed
+}
+
+function cleanConfig {
+	echo Removing Zookeeper and Kafka data...
+	rm -rf $KAFKA_DATADIR
+    rm -rf $ZK_DATADIR
+	echo Zookeeper and Kafka data removed.
+}
+
+function reconfigureODF {
+	echo Configuring ODF...
+    JSON='{ "sparkConfig": { "clusterMasterUrl": "'$SPARK_MASTER'" } }'
+    echo Updating config to $JSON
+    curl -H "Content-Type: application/json" -X PUT -d "$JSON" -k -u sdp:admin4sdp https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT/odf/api/v1/settings
+    echo ODF configured.
+}
+
+function healthCheck {
+    echo Running ODF health check
+    curl -X GET -k -u sdp:admin4sdp https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT/odf/api/v1/engine/health
+    echo Health check finished
+}
+
+function startTestEnv {
+   echo Starting ODF test env
+   if [ -d "$ZK_DATADIR" ]; then
+      echo zookeeper data exists
+   fi
+
+   echo "Starting Zookeeper"
+   nohup $KAFKA_HOME/bin/zookeeper-server-start.sh $KAFKA_HOME/config/zookeeper.properties &> $BASEDIR/nohupzookeeper.out &
+   waitSeconds 5
+   echo "Starting Kafka"
+   nohup $KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties &> $BASEDIR/nohupkafka.out &
+   waitSeconds 5
+   if [[ $(unzip -v $JETTY_BASE/webapps/odf-web-1.2.0-SNAPSHOT.war | grep odf-atlas-) ]]; then
+     echo "Starting Atlas"
+     nohup $PYTHON27EXE $ATLAS_HOME/bin/atlas_start.py -port $ATLAS_PORT &> $BASEDIR/nohupatlas.out &
+     waitSeconds 30
+   else
+     echo "Not starting Atlas because ODF was built without it."
+   fi
+   echo "Starting Spark master"
+   cd $SPARK_HOME
+   nohup sbin/start-master.sh &> $BASEDIR/nohupspark.out &
+   waitSeconds 5
+   SPARK_MASTER=$(curl http://localhost:8080 | awk '/ Spark Master at/{print $NF}')
+   echo "Spark master URL: $SPARK_MASTER"
+   echo "Starting Spark slave"
+   nohup sbin/start-slave.sh $SPARK_MASTER &> $BASEDIR/nohupspark.out &
+   waitSeconds 5
+   echo "Starting ODF on Jetty"
+   cd $JETTY_BASE
+   nohup $JAVAEXE -Dodf.zookeeper.connect=localhost:52181 -Datlas.url=$ATLAS_URL -Datlas.user=$ATLAS_USER -Datlas.password=$ATLAS_PASSWORD -Dorg.eclipse.jetty.servlet.LEVEL=ALL -jar $JETTY_HOME/start.jar STOP.PORT=53000 STOP.KEY=STOP &> $BASEDIR/nohupjetty.out &
+   waitSeconds 10
+
+   healthCheck
+   reconfigureODF
+
+   echo "ODF test env started on https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT"
+}
+
+function stopTestEnv {
+   echo Stopping ODF test env ...
+   echo Stopping kafka and zookeeper...
+   PROCESSNUM=`ps aux | grep $KILLMARKER | grep -v grep | wc | awk '{print $1}'`
+   if [ $PROCESSNUM -gt 0 ]; then
+      echo Killing $PROCESSNUM Kafka / ZK processes
+      kill -9 $(ps aux | grep $KILLMARKER | grep -v grep | awk '{print $2}')
+   else
+      echo No Kafka / Zookeeper processes found
+   fi
+   waitSeconds 3
+   echo Kafka and Zookeeper stopped
+   echo Stopping Atlas...
+   $PYTHON27EXE $ATLAS_HOME/bin/atlas_stop.py
+   waitSeconds 5
+   echo Atlas stopped
+   echo Stopping Spark...
+   cd $SPARK_HOME
+   SPARK_MASTER=$(curl http://localhost:8080 | awk '/ Spark Master at/{print $NF}')
+   sbin/stop-slave.sh $SPARK_MASTER
+   sbin/stop-master.sh
+   waitSeconds 5
+   echo Spark stopped
+   echo Stopping Jetty...
+   cd $JETTY_BASE
+   $JAVAEXE -jar $JETTY_HOME/start.jar STOP.PORT=53000 STOP.KEY=STOP --stop
+   waitSeconds 5
+   echo Jetty stopped
+   echo ODF test env stopped
+}
+
+
+function usageAndExit {
+  echo "Usage: $0 start|stop|cleanconfig|cleanmetadata|cleanall"
+  echo "Manage the ODF test environment"
+  echo "Options:"
+  echo "         start         (re)start"
+  echo "         stop          stop"
+  echo "         cleanall      (re)starts with clean configuration and clean metadata"
+  echo "         cleanconfig   (re)starts with clean configuration"
+  echo "         cleanmetadata (re)starts with clean metadata"
+  exit 1;
+}
+
+###############################################
+## main script
+
+if [ -z "$1" ]; then
+   usageAndExit
+elif [ "$1" = "start" ]; then
+   echo "(Re) starting test env..."
+   stopTestEnv
+   echo "-------------------------------------"
+   startTestEnv
+   echo "Test env restarted"
+elif [ "$1" = "stop" ]; then
+   stopTestEnv
+elif [ "$1" = "cleanconfig" ]; then
+   echo "(Re) starting test env with clean configuration..."
+   stopTestEnv
+   cleanConfig
+   startTestEnv
+   echo "(Re)started test env with clean configuration"
+elif [ "$1" = "cleanmetadata" ]; then
+   echo "(Re) starting test env with clean metadata..."
+   stopTestEnv
+   cleanMetadata
+   startTestEnv
+   echo "(Re)started test env with clean metadata"
+elif [ "$1" = "cleanall" ]; then
+   echo "(Re) starting test env with clean configuration and metadata..."
+   stopTestEnv
+   cleanConfig
+   cleanMetadata
+   startTestEnv
+   echo "(Re)started test env with clean configuration and metadata"
+else
+   usageAndExit
+fi

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/scripts/start-odf-testenv.bat
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/scripts/start-odf-testenv.bat b/odf/odf-test-env/src/main/scripts/start-odf-testenv.bat
new file mode 100755
index 0000000..db442e0
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/start-odf-testenv.bat
@@ -0,0 +1,57 @@
+REM
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM
+REM   http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+setlocal
+
+set JAVAEXE=%JAVA_HOME%\bin\java.exe
+set PYTHON27EXE=python
+
+
+REM you should not have to change anything below this line ;-)
+
+set TESTENVDIR=%~dp0
+set JETTY_HOME=%TESTENVDIR%jetty-distribution-9.2.10.v20150310
+set KAFKA_PACKAGE_DIR=%TESTENVDIR%kafka_2.11-0.10.0.0
+set ATLAS_HOME=%TESTENVDIR%apache-atlas-0.7-incubating-release
+
+echo Delete logs
+del /F /S /Q "C:\tmp\odftestenv-kafka-logs"
+del /F /S /Q "C:\tmp\odftestenv-zookeeper"
+
+echo Copy required files
+xcopy %ATLAS_HOME%\conf\atlas-application.properties_windows %ATLAS_HOME%\conf\atlas-application.properties /Y
+
+REM Workaround for issue #94 (Location of keystore files is hardcoded in Atlas config)
+if not exist "C:\tmp\apache-atlas-0.7-incubating-release\conf" (mkdir "C:\tmp\apache-atlas-0.7-incubating-release\conf")
+xcopy %ATLAS_HOME%\conf\keystore_ibmjdk.jceks C:\tmp\apache-atlas-0.7-incubating-release\conf /Y
+xcopy %ATLAS_HOME%\conf\keystore_ibmjdk.jks C:\tmp\apache-atlas-0.7-incubating-release\conf /Y	
+
+echo Start zookeeper:
+start "Zookeeper" %KAFKA_PACKAGE_DIR%\bin\windows\zookeeper-server-start.bat %KAFKA_PACKAGE_DIR%\config\zookeeper.properties
+
+timeout 5 /NOBREAK
+
+echo Start kafka:
+start "Kafka" %KAFKA_PACKAGE_DIR%\bin\windows\kafka-server-start.bat %KAFKA_PACKAGE_DIR%\config\server.properties
+
+timeout 5 /NOBREAK
+
+echo Start Atlas
+start "Stop Atlas" %PYTHON27EXE% %ATLAS_HOME%\bin\atlas_stop.py
+start "Start Atlas" %PYTHON27EXE% %ATLAS_HOME%\bin\atlas_start.py -port 21443
+
+echo Start jetty
+set JETTY_BASE=%TESTENVDIR%odfjettybase
+rem set JETTY_BASE=%TESTENVDIR%base2
+cd %JETTY_BASE%
+start "Jetty" %JAVAEXE% -Dodf.zookeeper.connect=localhost:52181 -Datlas.url=https://localhost:21443 -Datlas.user=admin -Datlas.password=UR0+HOiApXG9B8SNpKN5ww== -Dodf.logspec=ALL,/tmp/odf-test-env-trace.log -jar %JETTY_HOME%\start.jar

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/scripts/start-odf-testenv.sh
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/scripts/start-odf-testenv.sh b/odf/odf-test-env/src/main/scripts/start-odf-testenv.sh
new file mode 100755
index 0000000..664b5a9
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/start-odf-testenv.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+export JAVAEXE=java
+export PYTHON27EXE=python
+
+# You should not have to change anything below this line ;-)
+export BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
+
+export JETTY_HOME=$BASEDIR/jetty-distribution-9.2.10.v20150310
+export KAFKA_PACKAGE_DIR=$BASEDIR/kafka_2.11-0.10.0.0
+export ATLAS_HOME=$BASEDIR/apache-atlas-0.7-incubating-release
+
+echo Delete logs
+rm -rf /tmp/odftestenv-kafka-logs
+rm -rf /tmp/odftestenv-zookeeper
+
+echo Copy required files
+if [ "$(uname)" == "Darwin" ]; then
+	cp $ATLAS_HOME/conf/atlas-application.properties_mac $ATLAS_HOME/conf/atlas-application.properties
+else
+	cp $ATLAS_HOME/conf/atlas-application.properties_linux $ATLAS_HOME/conf/atlas-application.properties
+fi
+
+echo Start zookeeper:
+$KAFKA_PACKAGE_DIR/bin/zookeeper-server-start.sh $KAFKA_PACKAGE_DIR/config/zookeeper.properties &
+
+sleep 5
+
+echo Start kafka:
+$KAFKA_PACKAGE_DIR/bin/kafka-server-start.sh $KAFKA_PACKAGE_DIR/config/server.properties &
+
+sleep 5
+
+echo Stop and restart Atlas
+$PYTHON27EXE $ATLAS_HOME/bin/atlas_stop.py
+$PYTHON27EXE $ATLAS_HOME/bin/atlas_start.py -port 21443
+
+echo Start jetty
+export JETTY_BASE=$BASEDIR/odfjettybase
+cd $JETTY_BASE
+$JAVAEXE -Dodf.zookeeper.connect=localhost:52181 -Dorg.eclipse.jetty.servlet.LEVEL=ALL -jar $JETTY_HOME/start.jar &

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-test-env/src/main/scripts/stop-odf-testenv.sh
----------------------------------------------------------------------
diff --git a/odf/odf-test-env/src/main/scripts/stop-odf-testenv.sh b/odf/odf-test-env/src/main/scripts/stop-odf-testenv.sh
new file mode 100755
index 0000000..6f974b9
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/stop-odf-testenv.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+echo Stopping all processes of the odf-test-env...
+kill -9 $(ps aux | grep 'odf-test-env' | grep -v 'download-install' | grep -v 'stop-odf-testenv' | awk '{print $2}')

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-web/.gitignore
----------------------------------------------------------------------
diff --git a/odf/odf-web/.gitignore b/odf/odf-web/.gitignore
new file mode 100755
index 0000000..7322e4f
--- /dev/null
+++ b/odf/odf-web/.gitignore
@@ -0,0 +1,24 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+.settings
+target
+.classpath
+.project
+.factorypath
+.externalToolBuilders
+build
+build/**
+node_modules
+node_modules/**
+.DS_Store

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-web/download_swagger-ui.xml
----------------------------------------------------------------------
diff --git a/odf/odf-web/download_swagger-ui.xml b/odf/odf-web/download_swagger-ui.xml
new file mode 100755
index 0000000..74ef82d
--- /dev/null
+++ b/odf/odf-web/download_swagger-ui.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project name="odf-download-swagger-ui">
+
+	<property name="swagger-dir" value="swagger-ui-${swagger.version}" />
+	<!-- download swagger ui directly from the web:
+	<property name="swagger-download" value="https://github.com/swagger-api/swagger-ui/archive/v${swagger.version}.tar.gz" />
+	<property name="swagger-archive" value="${unpack-dir}/${swagger-dir}.tar.gz" />
+	-->
+	<!-- download swagger ui from box: -->
+	<property name="swagger-download" value="https://ibm.box.com/shared/static/13cb0nobufykaxvrnezjf2fbtf0hpfn7.gz" />
+	<property name="swagger-archive" value="${unpack-dir}/swagger-ui-2.1.4.tar.gz" />
+
+	<condition property="swagger-zip-not-found">
+		<not>
+			<available file="${swagger-archive}">
+			</available>
+		</not>
+	</condition>
+
+	<condition property="swagger-unpacked">
+		<available file="${unpack-dir}/${swagger-dir}/dist" type="dir" />
+	</condition>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="download-swagger-ui" if="swagger-zip-not-found">
+		<echo message="Downloading Swagger..." />
+		<get verbose="true" src="${swagger-download}" dest="${swagger-archive}" />
+		<echo message="Swagger downloaded" />
+	</target>
+
+	<target name="unzip-swagger" unless="swagger-unpacked">
+		<antcall target="download-swagger-ui"/>
+		<echo message="Installing Swagger" />
+		<echo message="Deleting ${unpack-dir}/${swagger-dir}" />
+		<delete dir="${unpack-dir}/${swagger-dir}" />
+		<echo message="Deleted" />
+		<untar src="${swagger-archive}" dest="${unpack-dir}" compression="gzip" />
+		<!-- <unzip src="${swagger-archive}" dest="${unpack-dir}" /> -->
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="default">
+		<mkdir dir="${unpack-dir}"/>
+		<antcall target="unzip-swagger"/>
+	</target>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-web/package.json
----------------------------------------------------------------------
diff --git a/odf/odf-web/package.json b/odf/odf-web/package.json
new file mode 100755
index 0000000..1fe599b
--- /dev/null
+++ b/odf/odf-web/package.json
@@ -0,0 +1,30 @@
+{
+  "name": "odf-web",
+  "version": "1.2.0-SNAPSHOT",
+  "main": "index.html",
+  "dependencies": {
+    "bootstrap": "^3.3.6",
+    "d3": "^3.5.12",
+    "react": "^0.14.6",
+    "jquery": "^2.2.0",
+    "react-addons-linked-state-mixin": "^0.14.6",
+    "react-bootstrap": "^0.28.2",
+    "react-dom": "^0.14.6",
+    "react-d3-components": "^0.6.1",
+    "bootstrap-material-design" : "^0.5.7",
+    "roboto-font": "^0.1.0"
+  },
+  "devDependencies": {    
+  	"webpack": "^1.12.11",
+  	"imports-loader": "^0.6.5",
+    "babel-core": "^6.4.0",
+    "babel-preset-es2015": "^6.3.13",
+    "babel-loader": "^6.2.1",
+    "babel-preset-react": "^6.3.13",
+    "url-loader": "^0.5.7",
+    "css-loader": "^0.23.1",
+    "style-loader": "^0.13.0"
+  },
+  "author": "IBM",
+  "license": "ISC"
+}

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-web/pom.xml
----------------------------------------------------------------------
diff --git a/odf/odf-web/pom.xml b/odf/odf-web/pom.xml
new file mode 100755
index 0000000..df0b702
--- /dev/null
+++ b/odf/odf-web/pom.xml
@@ -0,0 +1,441 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-web</artifactId>
+	<packaging>war</packaging>
+	<properties>
+		<!-- specify versions of components to be downloaded -->
+		<swagger.version>2.1.4</swagger.version>
+		<swagger.base.path>/${project.artifactId}-${project.version}/odf/api/v1</swagger.base.path>
+	</properties>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-api</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>runtime</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.glassfish.jersey.core</groupId>
+			<artifactId>jersey-server</artifactId>
+			<version>2.22.2</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>javax.ws.rs</groupId>
+			<artifactId>jsr311-api</artifactId>
+			<version>1.1.1</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>javax.servlet</groupId>
+			<artifactId>servlet-api</artifactId>
+			<version>2.5</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>io.swagger</groupId>
+			<artifactId>swagger-jaxrs</artifactId>
+			<version>1.5.9</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-doc</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>war</type>
+			<scope>runtime</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-spark</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>runtime</scope>
+			<exclusions>
+				<!-- Exclude this dependency to avoid the following error when running the jetty-maven-plugin:
+				 "A required class was missing while executing org.eclipse.jetty:jetty-maven-plugin:9.2.14.v20151106:start: com/sun/jersey/spi/inject/InjectableProvider" -->
+				<exclusion>
+					<groupId>org.apache.hadoop</groupId>
+					<artifactId>hadoop-hdfs</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+		<!-- Required for compatibility with Spark cluster (must use same version) -->
+		<dependency>
+			<groupId>org.apache.commons</groupId>
+			<artifactId>commons-lang3</artifactId>
+			<version>3.5</version>
+			<scope>runtime</scope>
+		</dependency>
+		<dependency>
+			<groupId>junit</groupId>
+			<artifactId>junit</artifactId>
+			<version>4.12</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-messaging</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>runtime</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-messaging</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-store</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>runtime</scope>
+		</dependency>
+	</dependencies>
+
+	<repositories>
+		<repository>
+			<id>iis-central</id>
+			<name>Archiva Managed Maven Repository</name>
+			<url>http://iis-repo.swg.usma.ibm.com:8080/archiva/repository/all/</url>
+		</repository>
+	</repositories>
+
+	<profiles>
+		<profile>
+			<id>atlas</id>
+			<dependencies>
+				<dependency>
+					<groupId>org.apache.atlas.odf</groupId>
+					<artifactId>odf-atlas</artifactId>
+					<version>1.2.0-SNAPSHOT</version>
+					<scope>runtime</scope>
+				</dependency>
+			</dependencies>
+		</profile>
+		<profile>
+			<id>jenkinsbuild</id>
+			<properties>
+				<cf.password>${env.CFPASSWORD}</cf.password> <!-- Take cf.password from environment variable when running in Jenkins so that the password doesn't appear in the log -->
+			</properties>
+		</profile>
+		<profile>
+			<id>integration-tests</id>
+			<activation>
+				<property>
+					<name>reduced-tests</name>
+					<value>!true</value>
+				</property>
+			</activation>
+			<build>
+				<plugins>
+					<plugin>
+						<groupId>org.apache.maven.plugins</groupId>
+						<artifactId>maven-failsafe-plugin</artifactId>
+						<version>2.19</version>
+						<configuration>
+							<systemPropertyVariables>
+								<!-- we always use the embedded Kafka in our integration tests -->
+								<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
+								<odf.test.base.url>${odf.test.base.url}</odf.test.base.url>
+								<odf.test.webapp.url>${odf.test.webapp.url}</odf.test.webapp.url>
+								<odf.test.user>${odf.test.user}</odf.test.user>
+								<odf.test.password>${odf.test.password}</odf.test.password>
+								<odf.logspec>${odf.integrationtest.logspec}.client</odf.logspec>
+								<!-- The atlas configuration properties are only required when the "atlas" profile is activated -->
+								<atlas.url>${atlas.url}</atlas.url>
+								<atlas.user>${atlas.user}</atlas.user>
+								<atlas.password>${atlas.password}</atlas.password>
+							</systemPropertyVariables>
+							<includes>
+								<include>**/integrationtest/**</include>
+							</includes>
+						</configuration>
+						<executions>
+							<execution>
+								<id>integration-test</id>
+								<goals>
+									<goal>integration-test</goal>
+								</goals>
+							</execution>
+							<execution>
+								<id>verify</id>
+								<goals>
+									<goal>verify</goal>
+								</goals>
+							</execution>
+						</executions>
+					</plugin>
+				</plugins>
+			</build>
+		</profile>
+	</profiles>
+
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<version>2.19</version>
+				<configuration>
+					<systemPropertyVariables>
+						<odf.logspec>${odf.unittest.logspec}</odf.logspec>
+						<odf.build.project.name>${project.name}</odf.build.project.name>
+					</systemPropertyVariables>
+					<excludes>
+						<exclude>**/integrationtest/**</exclude>
+					</excludes>
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.eclipse.jetty</groupId>
+				<artifactId>jetty-maven-plugin</artifactId>
+				<version>9.2.14.v20151106</version>
+				<configuration>
+					<jettyXml>${project.parent.basedir}/jettyconfig/jetty.xml,${project.parent.basedir}/jettyconfig/jetty-ssl.xml,${project.parent.basedir}/jettyconfig/jetty-https.xml</jettyXml>
+					<scanIntervalSeconds>10</scanIntervalSeconds>
+					<stopPort>8005</stopPort>
+					<stopKey>STOP</stopKey>
+					<systemProperties>
+						<systemProperty>
+							<name>odf.zookeeper.connect</name>
+							<value>${testZookeepeConnectionString}</value>
+						</systemProperty>
+						<systemProperty>
+							<name>odf.logspec</name>
+							<value>${odf.integrationtest.logspec}.jettyserver</value>
+						</systemProperty>
+						<systemProperty>
+							<name>jetty.config.dir</name>
+							<value>${project.parent.basedir}/target/jettyconfig</value>
+						</systemProperty>
+						<systemProperty>
+							<name>atlas.url</name>
+							<value>${atlas.url}</value>
+						</systemProperty>
+						<systemProperty>
+							<name>atlas.user</name>
+							<value>${atlas.user}</value>
+						</systemProperty>
+						<systemProperty>
+							<name>atlas.password</name>
+							<value>${atlas.password}</value>
+						</systemProperty>
+					</systemProperties>
+				</configuration>
+				<executions>
+					<execution>
+						<id>start-jetty</id>
+						<phase>pre-integration-test</phase>
+						<goals>
+							<goal>start</goal>
+						</goals>
+						<configuration>
+							<scanIntervalSeconds>0</scanIntervalSeconds>
+							<daemon>true</daemon>
+						</configuration>
+					</execution>
+					<execution>
+						<id>stop-jetty</id>
+						<phase>post-integration-test</phase>
+						<goals>
+							<goal>stop</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>com.github.eirslett</groupId>
+				<artifactId>frontend-maven-plugin</artifactId>
+				<version>0.0.27</version>
+				<configuration>
+					<installDirectory>build</installDirectory>
+				</configuration>
+
+				<executions>
+					<execution>
+						<id>install node and npm</id>
+						<goals>
+							<goal>install-node-and-npm</goal>
+						</goals>
+						<configuration>
+							<nodeVersion>v0.12.2</nodeVersion>
+							<npmVersion>2.7.6</npmVersion>
+						</configuration>
+					</execution>
+					<execution>
+						<id>npm install</id>
+						<goals>
+							<goal>npm</goal>
+						</goals>
+						<configuration>
+							<arguments>install</arguments>
+						</configuration>
+					</execution>
+					<execution>
+						<id>webpack build</id>
+						<goals>
+							<goal>webpack</goal>
+						</goals>
+						<configuration>
+							<!-- change to -p for production mode -->
+							<arguments>-d</arguments>
+						</configuration>
+					</execution>
+					<!-- <execution> <id>npm-list-packages</id> <goals> <goal>npm</goal>
+						</goals> <phase>validate</phase> <configuration> <arguments>ls depth=0</arguments>
+						</configuration> </execution> -->
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-war-plugin</artifactId>
+				<version>2.4</version>
+				<configuration>
+					<failOnMissingWebXml>false</failOnMissingWebXml>
+					<packagingExcludes>**/scripts/**</packagingExcludes>
+					<overlays>
+						<overlay>
+							<!-- define here which files you want to take over from the odf-doc
+								war. -->
+							<groupId>org.apache.atlas.odf</groupId>
+							<artifactId>odf-doc</artifactId>
+							<excludes>
+								<exclude>WEB-INF/web.xml</exclude>
+							</excludes>
+							<includes>
+								<include>doc/**</include>
+							</includes>
+						</overlay>
+					</overlays>
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-antrun-plugin</artifactId>
+				<version>1.8</version>
+				<executions>
+					<execution>
+						<inherited>false</inherited>
+						<id>prepare-embedded-jetty</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>run</goal>
+						</goals>
+						<configuration>
+							<target>
+								<ant antfile="../prepare_embedded_jetty.xml" target="prepare-jetty-config" />
+							</target>
+						</configuration>
+					</execution>
+					<execution>
+						<id>prepare-components</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>run</goal>
+						</goals>
+						<configuration>
+							<target>
+								<property name="unpack-dir" value="${project.build.directory}/downloads" />
+								<property name="swagger.version" value="${swagger.version}" />
+								<ant antfile="download_swagger-ui.xml" target="default"></ant>
+							</target>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<artifactId>maven-resources-plugin</artifactId>
+				<version>2.6</version>
+				<executions>
+					<execution>
+						<id>copy-resources</id>
+						<phase>process-resources</phase>
+						<goals>
+							<goal>copy-resources</goal>
+						</goals>
+						<configuration>
+							<outputDirectory>${project.build.directory}/${project.artifactId}-${project.version}/swagger</outputDirectory>
+							<resources>
+								<resource>
+									<directory>${project.build.directory}/downloads/swagger-ui-${swagger.version}/dist</directory>
+									<filtering>false</filtering>
+									<excludes>
+										<exclude>index.html</exclude>
+									</excludes>
+								</resource>
+							</resources>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>com.github.kongchen</groupId>
+				<artifactId>swagger-maven-plugin</artifactId>
+				<version>3.1.1</version>
+				<configuration>
+					<apiSources>
+						<apiSource>
+							<springmvc>false</springmvc>
+							<locations>org.apache.atlas.odf.admin.rest.resources</locations>
+							<schemes>https</schemes>
+							<basePath>${swagger.base.path}</basePath>
+							<info>
+								<title>Open Discovery Framework</title>
+								<version>v1</version>
+								<description>
+									API reference
+								</description>
+							</info>
+							<swaggerDirectory>${project.build.directory}/${project.artifactId}-${project.version}/swagger</swaggerDirectory>
+							<swaggerApiReader>com.wordnik.swagger.jaxrs.reader.DefaultJaxrsApiReader</swaggerApiReader>
+						</apiSource>
+					</apiSources>
+				</configuration>
+				<executions>
+					<execution>
+						<phase>compile</phase>
+						<goals>
+							<goal>generate</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
+</project>
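
The swagger-maven-plugin configured above scans the package org.apache.atlas.odf.admin.rest.resources and writes the resulting API description into the swagger directory of the exploded war. As a purely hypothetical sketch (none of the real resource classes are shown in this hunk), a JAX-RS resource in that package would typically be annotated roughly like this to show up in the generated swagger.json:

package org.apache.atlas.odf.admin.rest.resources;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;

import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;

// Hypothetical resource, used only to illustrate how annotated JAX-RS classes
// are picked up by the swagger-maven-plugin; it is not part of this commit.
@Path("/example")
@Api(value = "/example")
public class ExampleDocumentedResource {

	@GET
	@ApiOperation(value = "Return a trivial status document")
	public Response getStatus() {
		return Response.ok("{\"status\": \"ok\"}").build();
	}
}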

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/log/LoggingHandler.java
----------------------------------------------------------------------
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/log/LoggingHandler.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/log/LoggingHandler.java
new file mode 100755
index 0000000..89756cc
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/log/LoggingHandler.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.log;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.logging.Handler;
+import java.util.logging.Level;
+import java.util.logging.LogRecord;
+import java.util.logging.SimpleFormatter;
+
+public class LoggingHandler extends Handler {
+
+	private static final int LOG_CACHE_SIZE = 1000;
+	private static List<LogRecord> cachedLogs = Collections.synchronizedList(new ArrayList<LogRecord>());
+
+	@Override
+	public void publish(LogRecord record) {
+		cachedLogs.add(record);
+		if (cachedLogs.size() >= LOG_CACHE_SIZE) {
+			cachedLogs.remove(0);
+		}
+	}
+
+	@Override
+	public void flush() {
+		cachedLogs.clear();
+	}
+
+	@Override
+	public void close() throws SecurityException {
+		cachedLogs.clear();
+	}
+
+	public List<LogRecord> getCachedLog() {
+		return new ArrayList<LogRecord>(cachedLogs);
+	}
+
+	public String getFormattedCachedLog(Integer numberOfLogs, Level logLevel) {
+		final List<LogRecord> cachedLog = getCachedLog();
+		StringBuilder lg = new StringBuilder();
+		final SimpleFormatter simpleFormatter = new SimpleFormatter();
+		if (numberOfLogs != null) {
+			for (int no = numberOfLogs; no > 0; no--) {
+				if (no <= cachedLog.size()) { // make sure the index below stays within the cached list
+					final LogRecord record = cachedLog.get(cachedLog.size() - no);
+					if (record.getLevel().intValue() >= logLevel.intValue()) {
+						lg.append(simpleFormatter.format(record));
+					}
+				}
+			}
+		} else {
+			for (LogRecord record : cachedLog) {
+				lg.append(simpleFormatter.format(record));
+			}
+		}
+		return lg.toString();
+	}
+}
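
LoggingHandler keeps the most recent records (up to LOG_CACHE_SIZE) in a static in-memory list so that they can later be rendered as text via getFormattedCachedLog. It only sees records once it has been attached to a java.util.logging logger; the following minimal sketch, which is illustration only and not part of this commit, shows one way to exercise it:

import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.atlas.odf.admin.log.LoggingHandler;

public class LoggingHandlerDemo {
	public static void main(String[] args) {
		// attach the caching handler to the root logger so that every record reaches it
		LoggingHandler handler = new LoggingHandler();
		handler.setLevel(Level.ALL);
		Logger.getLogger("").addHandler(handler);

		Logger.getLogger(LoggingHandlerDemo.class.getName()).info("hello from the demo");

		// render the most recent 100 cached records at level INFO or above
		System.out.println(handler.getFormattedCachedLog(100, Level.INFO));
	}
}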

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/ODFAdminApp.java
----------------------------------------------------------------------
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/ODFAdminApp.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/ODFAdminApp.java
new file mode 100755
index 0000000..b51da36
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/ODFAdminApp.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.rest;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.ws.rs.core.Application;
+
+import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider;
+import org.apache.atlas.odf.admin.rest.resources.AnalysesResource;
+import org.apache.atlas.odf.admin.rest.resources.AnnotationsResource;
+import org.apache.atlas.odf.admin.rest.resources.DiscoveryServicesResource;
+import org.apache.atlas.odf.admin.rest.resources.EngineResource;
+import org.apache.atlas.odf.admin.rest.resources.ImportResource;
+import org.apache.atlas.odf.admin.rest.resources.MetadataResource;
+import org.apache.atlas.odf.admin.rest.resources.SettingsResource;
+
+public class ODFAdminApp extends Application {
+	@Override
+	public Set<Class<?>> getClasses() {
+		Set<Class<?>> classes = new HashSet<Class<?>>();
+		classes.add(AnalysesResource.class);
+		classes.add(SettingsResource.class);
+		classes.add(EngineResource.class);
+		classes.add(MetadataResource.class);
+		classes.add(AnnotationsResource.class);
+		classes.add(DiscoveryServicesResource.class);
+		classes.add(ImportResource.class);
+		return classes;
+	}
+
+	@Override
+	public Set<Object> getSingletons() {
+		Set<Object> set = new HashSet<Object>();
+		set.add(new JacksonJsonProvider());
+		return set;
+	}
+}
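
An Application subclass like this only becomes active once it is registered with the servlet container, typically either through a servlet mapping in web.xml or via an @ApplicationPath annotation. The actual wiring used by odf-web is not visible in this hunk; the variant below is a sketch only, and the chosen path is an assumption loosely derived from the swagger base path configured in the pom:

package org.apache.atlas.odf.admin.rest;

import javax.ws.rs.ApplicationPath;

// Hypothetical annotation-based registration, not part of this commit;
// the path is an assumption for illustration purposes.
@ApplicationPath("/odf/api/v1")
public class AnnotatedODFAdminApp extends ODFAdminApp {
}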

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/6d19e129/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/RestUtils.java
----------------------------------------------------------------------
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/RestUtils.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/RestUtils.java
new file mode 100755
index 0000000..ed9010d
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/RestUtils.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.rest;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+public class RestUtils {
+	public static Response createErrorResponse(Throwable t) {
+		StringWriter sw = new StringWriter();
+		PrintWriter pw = new PrintWriter(sw);
+		t.printStackTrace(pw);
+		return createErrorResponse(sw.toString());
+	}
+
+	public static Response createErrorResponse(String msg) {
+		Logger logger = Logger.getLogger(RestUtils.class.getName());
+		logger.log(Level.WARNING, "An unknown exception was thrown: ''{0}''", msg);
+		String errorMsg = "{ \"error\": \"An unknown exception occurred\"}";
+		try {
+			JSONObject errorJSON = new JSONObject();
+			errorJSON.put("error", msg);
+			errorMsg = errorJSON.write();
+		} catch (JSONException e) {
+			// do nothing, should never happen
+		}
+		return Response.status(Status.BAD_REQUEST).entity(errorMsg).build();
+	}
+}
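
RestUtils centralizes the translation of server-side failures into an HTTP 400 response whose body is a JSON object of the form { "error": "..." }. A resource method would typically wrap its work as sketched below; the resource itself is hypothetical and only the error-handling pattern is the point:

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;

import org.apache.atlas.odf.admin.rest.RestUtils;

// Hypothetical resource, not part of this commit.
@Path("/demo")
public class DemoResource {

	@GET
	public Response getSomething() {
		try {
			return Response.ok("{\"status\": \"ok\"}").build();
		} catch (RuntimeException exc) {
			// turns the exception into HTTP 400 with the stack trace in the "error" field
			return RestUtils.createErrorResponse(exc);
		}
	}
}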

