eagle-commits mailing list archives

From mw@apache.org
Subject incubator-eagle git commit: Fix test build failure caused by the Kafka version and remove unused files
Date Fri, 08 Jul 2016 07:02:14 GMT
Repository: incubator-eagle
Updated Branches:
  refs/heads/develop 160f674cd -> 0b77d947a


Fix test build failure caused by the Kafka version and remove unused files

Author: jinhuwu <jinhuwu@ebay.com>

Closes #255 from wujinhu/develop.


Project: http://git-wip-us.apache.org/repos/asf/incubator-eagle/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-eagle/commit/0b77d947
Tree: http://git-wip-us.apache.org/repos/asf/incubator-eagle/tree/0b77d947
Diff: http://git-wip-us.apache.org/repos/asf/incubator-eagle/diff/0b77d947

Branch: refs/heads/develop
Commit: 0b77d947a4ebdb996d837414c9e3eaa8b0f06093
Parents: 160f674
Author: jinhuwu <wujinhu920@126.com>
Authored: Fri Jul 8 15:01:36 2016 +0800
Committer: anyway1021 <mw@apache.org>
Committed: Fri Jul 8 15:01:36 2016 +0800

----------------------------------------------------------------------
 .../eagle/correlation/meta/LocalKafkaTest.java  |  11 +-
 .../eagle/alert/engine/e2e/Integration1.java    |   6 +-
 eagle-jpm/eagle-jpa-spark-history/pom.xml       |  66 ---
 eagle-jpm/eagle-jpa-spark-running/pom.xml       |  66 ---
 .../src/main/resources/core-site.xml            | 497 -------------------
 .../src/main/resources/hdfs-site.xml            | 449 -----------------
 6 files changed, 6 insertions(+), 1089 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0b77d947/eagle-core/eagle-alert-parent/eagle-alert/alert-common/src/test/java/org/apache/eagle/correlation/meta/LocalKafkaTest.java
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-alert-parent/eagle-alert/alert-common/src/test/java/org/apache/eagle/correlation/meta/LocalKafkaTest.java b/eagle-core/eagle-alert-parent/eagle-alert/alert-common/src/test/java/org/apache/eagle/correlation/meta/LocalKafkaTest.java
index 3857649..d8bd942 100644
--- a/eagle-core/eagle-alert-parent/eagle-alert/alert-common/src/test/java/org/apache/eagle/correlation/meta/LocalKafkaTest.java
+++ b/eagle-core/eagle-alert-parent/eagle-alert/alert-common/src/test/java/org/apache/eagle/correlation/meta/LocalKafkaTest.java
@@ -16,18 +16,15 @@ package org.apache.eagle.correlation.meta;
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-import java.util.Properties;
-
 import kafka.admin.AdminUtils;
-import kafka.admin.RackAwareMode;
 import kafka.utils.ZKStringSerializer$;
-import kafka.utils.ZkUtils;
-
 import org.I0Itec.zkclient.ZkClient;
 import org.I0Itec.zkclient.ZkConnection;
 import org.apache.eagle.alert.utils.KafkaEmbedded;
 import org.junit.Ignore;
 
+import java.util.Properties;
+
 /**
  * @since Jun 3, 2016
  *
@@ -59,8 +56,8 @@ public class LocalKafkaTest {
         ZkClient zkClient = new ZkClient("localhost:2181", 10000, 10000, ZKStringSerializer$.MODULE$);
         Properties topicConfiguration = new Properties();
         ZkConnection zkConnection = new ZkConnection("localhost:2181");
-        ZkUtils zkUtils = new ZkUtils(zkClient, zkConnection, false);
-        AdminUtils.createTopic(zkUtils, topic, 1, 1, topicConfiguration, RackAwareMode.Disabled$.MODULE$);
+//        ZkUtils zkUtils = new ZkUtils(zkClient, zkConnection, false);
+        AdminUtils.createTopic(zkClient, topic, 1, 1, topicConfiguration);
     }
 
 }
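The hunk above drops the ZkUtils/RackAwareMode call path and goes back to the ZkClient-based AdminUtils.createTopic overload from pre-0.9 Kafka clients, presumably the version the build actually resolves. A minimal, self-contained sketch of that restored call style follows; the connection string and topic name are placeholders, not values from the commit:

    import java.util.Properties;

    import kafka.admin.AdminUtils;
    import kafka.utils.ZKStringSerializer$;
    import org.I0Itec.zkclient.ZkClient;

    // Sketch assuming a Kafka 0.8.x client on the classpath and ZooKeeper at
    // localhost:2181; "demo-topic" is a hypothetical topic name.
    public class CreateTopicOldApiSketch {
        public static void main(String[] args) {
            ZkClient zkClient = new ZkClient("localhost:2181", 10000, 10000, ZKStringSerializer$.MODULE$);
            try {
                // 0.8-era signature: (ZkClient, topic, partitions, replicationFactor, topicConfig).
                AdminUtils.createTopic(zkClient, "demo-topic", 1, 1, new Properties());
            } finally {
                zkClient.close();
            }
        }
    }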

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0b77d947/eagle-core/eagle-alert-parent/eagle-alert/alert-engine/src/test/java/org/apache/eagle/alert/engine/e2e/Integration1.java
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-alert-parent/eagle-alert/alert-engine/src/test/java/org/apache/eagle/alert/engine/e2e/Integration1.java b/eagle-core/eagle-alert-parent/eagle-alert/alert-engine/src/test/java/org/apache/eagle/alert/engine/e2e/Integration1.java
index 927dfd7..ac07d19 100644
--- a/eagle-core/eagle-alert-parent/eagle-alert/alert-engine/src/test/java/org/apache/eagle/alert/engine/e2e/Integration1.java
+++ b/eagle-core/eagle-alert-parent/eagle-alert/alert-engine/src/test/java/org/apache/eagle/alert/engine/e2e/Integration1.java
@@ -23,9 +23,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ThreadFactory;
 
 import kafka.admin.AdminUtils;
-import kafka.admin.RackAwareMode;
 import kafka.utils.ZKStringSerializer$;
-import kafka.utils.ZkUtils;
 
 import org.I0Itec.zkclient.ZkClient;
 import org.I0Itec.zkclient.ZkConnection;
@@ -158,8 +156,8 @@ public class Integration1 {
         ZkClient zkClient = new ZkClient(zkconfig.zkQuorum, 10000, 10000, ZKStringSerializer$.MODULE$);
         Properties topicConfiguration = new Properties();
         ZkConnection zkConnection = new ZkConnection(zkconfig.zkQuorum);
-        ZkUtils zkUtils = new ZkUtils(zkClient, zkConnection, false);
-        AdminUtils.createTopic(zkUtils, topic, 1, 1, topicConfiguration, RackAwareMode.Disabled$.MODULE$);
+//        ZkUtils zkUtils = new ZkUtils(zkClient, zkConnection, false);
+        AdminUtils.createTopic(zkClient, topic, 1, 1, topicConfiguration);// RackAwareMode.Disabled$.MODULE$);
     }
 
     public static void proactive_schedule(Config config) throws Exception {
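
The same reversion is applied in this hunk. For contrast, the removed call shape only compiles against newer Kafka clients (0.10-era), where AdminUtils.createTopic takes a ZkUtils handle and a RackAwareMode. A hedged sketch of that newer style, with placeholder quorum and topic values:

    import java.util.Properties;

    import kafka.admin.AdminUtils;
    import kafka.admin.RackAwareMode;
    import kafka.utils.ZKStringSerializer$;
    import kafka.utils.ZkUtils;
    import org.I0Itec.zkclient.ZkClient;
    import org.I0Itec.zkclient.ZkConnection;

    // Sketch assuming a Kafka 0.10-era client; ZkUtils and RackAwareMode do not
    // exist in 0.8.x, which is why these tests stopped compiling.
    public class CreateTopicNewApiSketch {
        public static void main(String[] args) {
            ZkClient zkClient = new ZkClient("localhost:2181", 10000, 10000, ZKStringSerializer$.MODULE$);
            ZkConnection zkConnection = new ZkConnection("localhost:2181");
            ZkUtils zkUtils = new ZkUtils(zkClient, zkConnection, false);
            try {
                AdminUtils.createTopic(zkUtils, "demo-topic", 1, 1, new Properties(),
                        RackAwareMode.Disabled$.MODULE$);
            } finally {
                zkClient.close();
            }
        }
    }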

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0b77d947/eagle-jpm/eagle-jpa-spark-history/pom.xml
----------------------------------------------------------------------
diff --git a/eagle-jpm/eagle-jpa-spark-history/pom.xml b/eagle-jpm/eagle-jpa-spark-history/pom.xml
deleted file mode 100644
index cc293b6..0000000
--- a/eagle-jpm/eagle-jpa-spark-history/pom.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one or more
-  ~ contributor license agreements.  See the NOTICE file distributed with
-  ~ this work for additional information regarding copyright ownership.
-  ~ The ASF licenses this file to You under the Apache License, Version 2.0
-  ~ (the "License"); you may not use this file except in compliance with
-  ~ the License.  You may obtain a copy of the License at
-  ~
-  ~    http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-
-<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.eagle</groupId>
-    <artifactId>eagle-jpm-parent</artifactId>
-    <version>0.3.0-incubating</version>
-    <relativePath>../pom.xml</relativePath>
-  </parent>
-  <artifactId>eagle-jpm-spark-history</artifactId>
-  <name>eagle-jpm-spark-history</name>
-  <url>http://maven.apache.org</url>
-  <dependencies>
-  	<dependency>
-  		<groupId>org.slf4j</groupId>
-  		<artifactId>slf4j-api</artifactId>
-  	</dependency>
-  	<dependency>
-  		<groupId>org.apache.eagle</groupId>
-  		<artifactId>eagle-stream-process-api</artifactId>
-        <version>${project.version}</version>
-  	</dependency>
-      <dependency>
-          <groupId>org.apache.eagle</groupId>
-          <artifactId>eagle-stream-process-base</artifactId>
-          <version>${project.version}</version>
-      </dependency>
-  	<dependency>
-  		<groupId>org.apache.eagle</groupId>
-  		<artifactId>eagle-job-common</artifactId>
-  		<version>${project.version}</version>
-  	</dependency>  	  	
-  	<dependency>
-		<groupId>org.jsoup</groupId>
-		<artifactId>jsoup</artifactId>
-	</dependency>
-  	<dependency>
-  		<groupId>org.apache.storm</groupId>
-  		<artifactId>storm-core</artifactId>
-  		<exclusions>
-      		<exclusion>
-      			<groupId>ch.qos.logback</groupId>
-        		<artifactId>logback-classic</artifactId>
-      		</exclusion>
-      	</exclusions> 
-  	</dependency>
-  </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0b77d947/eagle-jpm/eagle-jpa-spark-running/pom.xml
----------------------------------------------------------------------
diff --git a/eagle-jpm/eagle-jpa-spark-running/pom.xml b/eagle-jpm/eagle-jpa-spark-running/pom.xml
deleted file mode 100644
index 42c476a..0000000
--- a/eagle-jpm/eagle-jpa-spark-running/pom.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one or more
-  ~ contributor license agreements.  See the NOTICE file distributed with
-  ~ this work for additional information regarding copyright ownership.
-  ~ The ASF licenses this file to You under the Apache License, Version 2.0
-  ~ (the "License"); you may not use this file except in compliance with
-  ~ the License.  You may obtain a copy of the License at
-  ~
-  ~    http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-
-<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.eagle</groupId>
-    <artifactId>eagle-jpm-parent</artifactId>
-    <version>0.3.0-incubating</version>
-    <relativePath>../pom.xml</relativePath>
-  </parent>
-  <artifactId>eagle-jpm-spark-running</artifactId>
-  <name>eagle-jpm-spark-running</name>
-  <url>http://maven.apache.org</url>
-  <dependencies>
-  	<dependency>
-  		<groupId>org.slf4j</groupId>
-  		<artifactId>slf4j-api</artifactId>
-  	</dependency>
-  	<dependency>
-  		<groupId>org.apache.eagle</groupId>
-  		<artifactId>eagle-stream-process-api</artifactId>
-        <version>${project.version}</version>
-  	</dependency>
-      <dependency>
-          <groupId>org.apache.eagle</groupId>
-          <artifactId>eagle-stream-process-base</artifactId>
-          <version>${project.version}</version>
-      </dependency>
-  	<dependency>
-  		<groupId>org.apache.eagle</groupId>
-  		<artifactId>eagle-job-common</artifactId>
-  		<version>${project.version}</version>
-  	</dependency>  	  	
-  	<dependency>
-		<groupId>org.jsoup</groupId>
-		<artifactId>jsoup</artifactId>
-	</dependency>
-  	<dependency>
-  		<groupId>org.apache.storm</groupId>
-  		<artifactId>storm-core</artifactId>
-  		<exclusions>
-      		<exclusion>
-      			<groupId>ch.qos.logback</groupId>
-        		<artifactId>logback-classic</artifactId>
-      		</exclusion>
-      	</exclusions> 
-  	</dependency>
-  </dependencies>
-</project>
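
Both deleted poms above were standalone module descriptors; the diff changes no parent aggregator, which is consistent with the modules never having been wired into the eagle-jpm build. Purely for illustration, had eagle-jpm/pom.xml listed them, hypothetical entries like these would also have needed deleting:

    <!-- Hypothetical <modules> entries; nothing like this appears in the diff. -->
    <modules>
      <module>eagle-jpa-spark-history</module>
      <module>eagle-jpa-spark-running</module>
    </modules>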

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0b77d947/eagle-jpm/eagle-jpm-mr-history/src/main/resources/core-site.xml
----------------------------------------------------------------------
diff --git a/eagle-jpm/eagle-jpm-mr-history/src/main/resources/core-site.xml b/eagle-jpm/eagle-jpm-mr-history/src/main/resources/core-site.xml
deleted file mode 100644
index 11e8486..0000000
--- a/eagle-jpm/eagle-jpm-mr-history/src/main/resources/core-site.xml
+++ /dev/null
@@ -1,497 +0,0 @@
-<?xml version="1.0"?>
-<!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more
-	~ contributor license agreements. See the NOTICE file distributed with ~
-	this work for additional information regarding copyright ownership. ~ The
-	ASF licenses this file to You under the Apache License, Version 2.0 ~ (the
-	"License"); you may not use this file except in compliance with ~ the License.
-	You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0
-	~ ~ Unless required by applicable law or agreed to in writing, software ~
-	distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT
-	WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the
-	License for the specific language governing permissions and ~ limitations
-	under the License. -->
-
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-<property>
-  <description>If users connect through a SOCKS proxy, we don't
-   want their SocketFactory settings interfering with the socket
-   factory associated with the actual daemons.</description>
-   <name>hadoop.rpc.socket.factory.class.default</name>
-   <value>org.apache.hadoop.net.StandardSocketFactory</value>
-</property>
-
-<property>
-  <name>hadoop.tmp.dir</name>
-  <value>/tmp/hadoop/hadoop-${user.name}</value>
-  <description>A base for other temporary directories.</description>
-</property>
-
-<property>
-  <name>hadoop.rpc.socket.factory.class.ClientProtocol</name>
-  <value></value>
-</property>
-
-<property>
-  <name>hadoop.rpc.socket.factory.class.JobSubmissionProtocol</name>
-  <value></value>
-</property>
-              
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.SnappyCodec</value>
-  </property>
-
-  <!-- LZO: see http://www.facebook.com/notes/cloudera/hadoop-at-twitter-part-1-splittable-lzo-compression/178581952002 -->
-  <property>
-      <name>io.compression.codec.lzo.class</name>
-      <value>com.hadoop.compression.lzo.LzoCodec</value>
-  </property>
-
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.defaultFS</name>
-    <!-- cluster variant -->
-    <value>hdfs://apollo-phx-nn-ha</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <description>Topology script</description>
-    <name>net.topology.script.file.name</name>
-    <value>/apache/hadoop/etc/hadoop/topology</value>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>480</value>
-    <description>Number of minutes between trash checkpoints.
-                 If zero, the trash feature is disabled.
-    </description>
-  </property>	
-
-  <!-- mobius-proxyagent impersonation configurations -->
-<property>
-  <name>hadoop.proxyuser.mobius-proxyagent.groups</name>
-  <value>hdmi-mm,hdmi-set,hdmi-research,hdmi-technology,hdmi-hadoopeng,hdmi-cs,hdmi-milo,hdmi-appdev,hdmi-siteanalytics,hdmi-prod,hdmi-others,hdmi-sdc,hdmi-finance,hdmi-est,hdmi-cci,hdmi-mptna,hdmi-xcom,hdmi-stu,hdmi-mobile</value>
-  <description>Allow user mobius-proxyagent to impersonate any members of the groups </description>
-</property>
-
-<property>
-    <name>hadoop.proxyuser.mobius-proxyagent.hosts</name>
-    <value>10.114.118.13,10.115.201.53</value>
-    <description>The mobius-proxyagent can connect from hosts to impersonate a user</description>
-</property>
-
-<property>
-      <name>hadoop.proxyuser.bridge_adm.groups</name>
-      <value>hdmi-mm,hdmi-set,hdmi-research,hdmi-technology,hdmi-hadoopeng,hdmi-cs,hdmi-milo,hdmi-appdev,hdmi-siteanalytics,hdmi-prod,hdmi-others,hdmi-sdc,hdmi-finance,hdmi-est,hdmi-cci,hdmi-mptna,hdmi-xcom,hdmi-stu,hdmi-mobile</value>
-      <description>Allow user bridge_adm (Teradata-Hadoop bridge) to impersonate any members of the groups </description>
-</property>
-
-<property>
-    <name>hadoop.proxyuser.bridge_adm.hosts</name>
-    <value>10.103.47.11,10.103.47.12,10.103.47.13,10.103.47.14,10.103.47.15,10.103.47.16,10.103.47.17,10.103.47.18,10.103.47.19,10.103.47.20,10.103.47.21,10.103.47.22,10.103.48.11,10.103.48.12,10.103.48.13,10.103.48.14,10.103.48.15,10.103.48.16,10.103.48.17,10.103.48.18,10.103.48.19,10.103.48.20,10.103.48.21,10.103.48.22,10.103.88.11,10.103.88.12,10.103.88.13,10.103.88.14,10.103.88.15,10.103.88.16,10.103.88.17,10.103.88.18,10.103.88.19,10.103.88.20,10.103.88.21,10.103.88.22,10.103.88.23,10.103.88.24,10.103.88.25,10.103.88.26,10.103.88.27,10.103.88.28,10.103.88.29,10.103.88.30,10.103.88.31,10.103.88.32,10.103.88.33,10.103.88.34,10.103.89.11,10.103.89.12,10.103.89.13,10.103.89.14,10.103.89.15,10.103.89.16,10.103.89.17,10.103.89.18,10.103.89.19,10.103.89.20,10.103.89.21,10.103.89.22,10.103.89.23,10.103.89.24,10.103.89.25,10.103.89.26,10.103.89.27,10.103.89.28,10.103.89.29,10.103.89.30,10.103.89.31,10.103.89.32,10.103.89.33,10.103.89.34,10.115.37.50,10.115.37.51,10.115.37.52,10.115.37.53,10.115.38.50,10.115.38.51,10.115.38.52,10.115.38.53,10.115.208.11,10.115.208.12,10.115.208.13,10.115.208.14,10.115.208.15,10.115.208.16,10.115.208.17,10.115.208.18,10.115.208.19,10.115.208.20,10.115.208.21,10.115.208.22,10.115.208.23,10.115.208.24,10.115.208.25,10.115.208.26,10.103.158.101,10.103.158.102,10.103.158.103,10.103.158.104,10.103.158.105,10.103.158.106,10.103.158.107,10.103.158.108,10.103.158.109,10.103.158.110,10.103.158.111,10.103.158.112,10.103.158.113,10.103.158.114,10.103.158.115,10.103.158.116</value>
-    <description>The bridge_adm user (Teradata-Hadoop bridge) can connect from hosts to impersonate a user</description>
-</property>
-
-<property>
-    <name>hadoop.proxyuser.hadoop.hosts</name>
-    <value>*</value>
-</property>
-
-<property>
-    <name>hadoop.proxyuser.hadoop.groups</name>
-    <value>*</value>
-</property>
-
-<property>
-   <name>hadoop.proxyuser.sg_adm.groups</name>
-   <value>hdmi-etl</value>
-   <description>Allow user sg_adm (HDMIT-4462) to impersonate any members of the groups </description>
-</property>
-
-<property>
-   <name>hadoop.proxyuser.sg_adm.hosts</name>
-   <value>*</value>
-   <description>The sg_adm user (HDMIT-4462) can connect from hosts to impersonate a user</description>
-</property>
-
-  <property>
-    <name>fs.inmemory.size.mb</name>
-    <value>256</value>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
-<property>
-  <name>hadoop.proxyuser.hive.groups</name>
-  <value>*</value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.hive.hosts</name>
-  <value>*</value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.oozie.groups</name>
-  <value>*</value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.oozie.hosts</name>
-  <value>phxaishdc9en0007-be.phx.ebay.com</value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<!-- BEGIN security configuration -->
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>kerberos</value>
-    <!-- A value of "simple" would  disable security. -->
-  </property>
-  
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>true</value>
-  </property>
-
-  <!-- Setting to ShellBasedUnixGroupsMapping to override the default of 
-       JniBasedUnixGroupsMappingWithFallback.  See HWX case 00006991 -->
-  <property>
-    <name>hadoop.security.group.mapping</name>
-    <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
-  </property>
-
-  <property>
-    <name>hadoop.http.filter.initializers</name>
-    <value>org.apache.hadoop.security.AuthenticationFilterInitializer</value>
-  </property>
-
-<!-- BEGIN hadoop.http.authentication properties --> 
-  <property>
-    <name>hadoop.http.authentication.type</name>
-    <value>org.apache.hadoop.security.authentication.server.CompositeAuthenticationHandler</value>
-  </property>
-
-  <property>
-    <name>hadoop.http.authentication.token.validity</name>
-    <value>36000</value>
-    <!-- in seconds -->
-  </property>
-
-  <property>
-    <name>hadoop.http.authentication.signature.secret.file</name>
-    <value>/etc/hadoop/http_auth_secret</value>
-  </property>
-
-  <property>
-    <name>hadoop.http.authentication.cookie.domain</name>
-    <value>ebay.com</value>
-  </property>
-
-  <property>
-    <name>hadoop.http.authentication.pingFederate.config.file</name>
-    <value>/etc/hadoop/pingfederate-agent-config.txt</value>
-  </property>
-
-  <property>
-    <name>hadoop.http.authentication.pingFederate.url</name>
-    <value>https://sso.corp.ebay.com/sp/startSSO.ping?PartnerIdpId=eBayHadoop</value>
-  </property>
-
-  <property>
-    <name>hadoop.http.authentication.pingFederate.anonymous.allowed</name>
-    <value>true</value>
-  </property>
-
-<!-- BEGIN properties enabled per HDP-2.1.3 upgrade -->
-
-  <property>
-    <name>hadoop.http.authentication.composite.handlers</name>
-    <value>org.apache.hadoop.security.authentication.server.PingFederateAuthenticationHandler,kerberos,anonymous</value>
-  </property>
-
-  <property>
-    <name>hadoop.http.authentication.composite.default-non-browser-handler-type</name>
-    <value>kerberos</value>
-  </property>
-
-  <property>
-    <name>hadoop.http.authentication.kerberos.keytab</name>
-    <value>/etc/hadoop/hadoop.keytab</value>
-  </property>
-
-  <property>
-    <name>hadoop.http.authentication.kerberos.principal</name>
-    <value>*</value>
-  </property>
-
-<!-- END properties enabled per HDP-2.1.3 upgrade -->
-
-<!-- END hadoop.http.authentication properties --> 
-
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>
-        RULE:[1:$1]
-        RULE:[2:$1]
-        DEFAULT
-    </value>
-  </property>
-
-  <property>
-    <name>kerberos.multiplerealm.supported</name>
-    <value>true</value>
-  </property>
-  
-  <property>
-    <name>kerberos.multiplerealm.realms</name>
-    <value>CORP.EBAY.COM</value>
-  </property>
-
-<!--SSL SUPPORT -->
-
-<property>
-  <name>hadoop.ssl.keystores.factory.class</name>
-    <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
-    <description>
-          The keystores factory to use for retrieving certificates.
-    </description>
-</property>
-
-<property>
-  <name>hadoop.ssl.require.client.cert</name>
-  <value>false</value>
-  <description>Whether client certificates are required</description>
-</property>
-
-<property>
-  <name>hadoop.ssl.hostname.verifier</name>
-  <value>ALLOW_ALL</value>
-  <description>
-    The hostname verifier to provide for HttpsURLConnections.
-    Valid values are: DEFAULT, STRICT, STRICT_I6, DEFAULT_AND_LOCALHOST and
-    ALLOW_ALL
-  </description>
-</property>
-
-<property>
-  <name>hadoop.ssl.server.conf</name>
-  <value>ssl-server.xml</value>
-  <description>
-    Resource file from which ssl server keystore information will be extracted.
-    This file is looked up in the classpath, typically it should be in Hadoop
-    conf/ directory.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.ssl.client.conf</name>
-  <value>ssl-client.xml</value>
-  <description>
-    Resource file from which ssl client keystore information will be extracted
-    This file is looked up in the classpath, typically it should be in Hadoop
-    conf/ directory.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.ssl.enabled</name>
-  <value>false</value>
-  <description>
-    Whether to use SSL for the HTTP endpoints. If set to true, the
-    NameNode, DataNode, ResourceManager, NodeManager, HistoryServer and
-    MapReduceAppMaster web UIs will be served over HTTPS instead HTTP.
-  </description>
-</property>
-
-<!-- User Group Resolution -->
-
-<property>
-    <name>hadoop.security.groups.cache.secs</name>
-    <value>3600</value>
-</property>
-
-<!-- END security configuration -->
-
-
-
-<!-- BEGIN properties enabled per HDP-2.1.3 upgrade -->
-
-<!-- BEGIN Quality of Service -->
-
-  <property>
-    <name>ipc.8020.callqueue.impl</name>
-    <value>com.ebay.hadoop.ipc.FairCallQueue</value>
-  </property>
-
-  <property>
-    <name>ipc.8020.identity-provider.impl</name>
-    <value>com.ebay.hadoop.ipc.EbayUserIdentityProvider</value>
-  </property>
-
-  <property>
-    <name>ipc.8020.faircallqueue.rpc-scheduler</name>
-    <value>com.ebay.hadoop.ipc.DecayRpcScheduler</value>
-  </property>
-
-  <property>
-    <name>ipc.8020.faircallqueue.priority-levels</name>
-    <value>10</value>
-  </property>
-
-  <property>
-    <name>ipc.8020.faircallqueue.decay-scheduler.thresholds</name>
-   <!-- <value>1,2,7,10,20,30,40,50,60</value> -->
-    <value>1,2,3,5,8,13,20,35,50</value>
-  </property>
-
-  <property>
-    <name>ipc.8020.faircallqueue.decay-scheduler.period-ms</name>
-    <value>1000</value>
-  </property>
-
-  <property>
-    <name>ipc.8020.faircallqueue.multiplexer.weights</name>
-   <!-- <value>10,5,3,2,1,1,1,1,1,1</value> -->
-     <value>80,30,25,20,17,12,6,3,2,1</value>
-  </property>
-
-<!-- END Quality of Service -->
-
-
-
-<!-- BEGIN Selective Encryption --> 
-<!-- disabled per HADP-6065 - miguenther - 26 August 2014 
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value>authentication,privacy</value>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>hadoop.security.saslproperties.resolver.class</name>
-    <value>org.apache.hadoop.security.WhitelistBasedResolver</value>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>hadoop.security.sasl.variablewhitelist.enable</name>
-    <value>true</value>
-    <final>true</final>
-  </property>
--->
-<!-- END Selective Encryption -->
-
-
-<!-- END properties enabled per HDP-2.1.3 upgrade -->
-
-<property>
-  <name>ha.zookeeper.quorum</name>
-  <value>apollo-phx-zk-1.vip.ebay.com:2181,apollo-phx-zk-2.vip.ebay.com:2181,apollo-phx-zk-3.vip.ebay.com:2181,apollo-phx-zk-4.vip.ebay.com:2181,apollo-phx-zk-5.vip.ebay.com:2181</value>
-</property>
-
-<!-- NEW QOP Proposed configs below - Same as Ares Tiffany Sept 01, 2015 -->
-<property>
-    <name>hadoop.rpc.protection</name>
-    <value>authentication,privacy</value>
-</property>
-
-  <property>
-      <name>hadoop.security.saslproperties.resolver.class</name>
-      <value>org.apache.hadoop.security.WhitelistBasedResolver</value>
-  </property>
-
-  <property>
-      <name>hadoop.security.sasl.fixedwhitelist.file</name>
-      <value>/etc/hadoop/fixedwhitelist</value>
-  </property>
-
-  <property>
-      <name>hadoop.security.sasl.variablewhitelist.enable</name>
-      <value>true</value>
-  </property>
-
-  <property>
-      <name>hadoop.security.sasl.variablewhitelist.file</name>
-      <value>/etc/hadoop/whitelist</value>
-  </property>
-
-  <property>
-        <name>hadoop.security.sasl.variablewhitelist.cache.secs</name>
-        <value>3600</value>
-  </property>
-<!-- END NEW QOP Proposed configs below - Same as Ares Tiffany Sept 01, 2015 -->
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0b77d947/eagle-jpm/eagle-jpm-mr-history/src/main/resources/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/eagle-jpm/eagle-jpm-mr-history/src/main/resources/hdfs-site.xml b/eagle-jpm/eagle-jpm-mr-history/src/main/resources/hdfs-site.xml
deleted file mode 100644
index 52ba754..0000000
--- a/eagle-jpm/eagle-jpm-mr-history/src/main/resources/hdfs-site.xml
+++ /dev/null
@@ -1,449 +0,0 @@
-<?xml version="1.0"?>
-<!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more
-	~ contributor license agreements. See the NOTICE file distributed with ~
-	this work for additional information regarding copyright ownership. ~ The
-	ASF licenses this file to You under the Apache License, Version 2.0 ~ (the
-	"License"); you may not use this file except in compliance with ~ the License.
-	You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0
-	~ ~ Unless required by applicable law or agreed to in writing, software ~
-	distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT
-	WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the
-	License for the specific language governing permissions and ~ limitations
-	under the License. -->
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <!-- The directories for NN, DN and SNN configs -->
-
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <value>/hadoop/nn1/1</value>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/hadoop/1/data,/hadoop/2/data,/hadoop/3/data,/hadoop/4/data,/hadoop/5/data,/hadoop/6/data,/hadoop/7/data,/hadoop/8/data,/hadoop/9/data,/hadoop/10/data,/hadoop/11/data,/hadoop/12/data</value>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>900</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.decommission.interval</name>
-    <value>150</value>
-  </property>
-
-  <!-- The Nodes include and exclude -->
-
-  <property>
-    <name>dfs.hosts</name>
-    <!-- The files containing hosts allowed to connect to namenode -->
-    <value>/apache/hadoop/etc/hadoop/hosts</value>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <!-- The files containing hosts allowed to connect to namenode -->
-    <value>/apache/hadoop/etc/hadoop/hdfs-exclude</value>
-  </property>
-
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>3</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>10485760</value>
-  </property>
-
-  <property>
-    <!-- Amount of space which HDFS will refuse to use in bytes -->
-    <name>dfs.datanode.du.reserved</name>
-    <value>107374182400</value> <!-- 100 GB-->
-  </property>
-
-  <!-- RMERCHIA AISOPS159160 2012-09-25 -->
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>6</value>
-    <description>how frequently dn send a heartbeat.</description>
-  </property>
-
-  <!-- RMERCHIA AISOPS159160 2012-09-25  change to 6 hours on 2012-10-02 -->
-
-  <property>
-    <name>dfs.blockreport.intervalMsec</name>
-    <value>21600000</value>
-    <description>how frequently dn send a blockreport.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>1.0f</value>
-    <!-- Allows 10 blocks unreported out of 10,000,000 -->
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.safemode.extension</name>
-    <value>120000</value>
-    <!-- 2 minutes -->
-    <description> Determines extension of safe mode in milliseconds after the threshold level is reached. </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-  </property>
-
-  <property>
-    <name>dfs.blocksize</name>
-    <!-- 128mb (default 64m or 67108864) -->
-    <value>268435456</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>128</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.handler.count</name>
-    <value>50</value>
-  </property>
-
-  <!-- updated from 4k to 16k as part of HADP-6065 - miguenther - 26 august 2014 -->
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>16384</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.replication.max-streams</name>
-    <value>40</value>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hadoop</value>
-    <description>the user who is allowed to perform short circuit reads.</description>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.name.dir.restore</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>dfs.ls.limit</name>
-    <value>4096</value>
-  </property>
-
-  <!-- NameNode security config -->
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/etc/hadoop/hadoop.keytab</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.internal.spnego.principal</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/etc/hadoop/hadoop.keytab</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value>hadoop/_HOST@APD.EBAY.COM</value>
-    <!-- _HOST will be replaced by the domain name present in fs.default.name. It is better to use the actual host name  -->
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@APD.EBAY.COM,HTTP/apollo-hdfs.corp.ebay.com@CORP.EBAY.COM</value>
-  </property>
-
-  <!-- DataNode security config -->
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>700</value>
-  </property>
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:1004</value>
-  </property>
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:1006</value>
-  </property>
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/etc/hadoop/hadoop.keytab</value>
-  </property>
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>hadoop/_HOST@APD.EBAY.COM</value>
-    <!-- _HOST will be replaced by the frst domain name mapped to the ip -->
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdmi-hadoopeng</value>
-  </property>
-
-  <!-- HTTPS SUPPORT -->
-
-  <property>
-    <name>dfs.https.need.client.auth</name>
-    <value>false</value>
-    <description>Whether SSL client certificate authentication is required
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.server.keystore.resource</name>
-    <value>ssl-server.xml</value>
-    <description>Resource file from which ssl server keystore
-      information will be extracted
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.client.keystore.resource</name>
-    <value>ssl-client.xml</value>
-    <description>Resource file from which ssl client keystore
-      information will be extracted
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.https.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:1006</value>
-  </property>
-
-
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/run/hadoop-hdfs/dn</value>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-  </property>
-
-
-  <property>
-    <name>dfs.namenode.service.handler.count</name>
-    <value>55</value>
-  </property>
-
-
-
-
-  <!-- BEGIN properties enabled per HDP-2.1.3 upgrade -->
-
-  <property>
-    <name>dfs.namenode.acls.enabled</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>dfs.http.policy</name>
-    <value>HTTP_AND_HTTPS</value>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.filter</name>
-    <value>org.apache.hadoop.hdfs.web.TokenAuthFilter,authentication</value>
-  </property>
-
-  <!-- END properties enabled per HDP-2.1.3 upgrade -->
-
-
-  <!-- added as part of HAPD-6065 - miguenther 26 August 2014 -->
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>3</value>
-  </property>
-
-
-  <!-- Apollo PHX HA Configs -->
-  <property>
-    <name>dfs.nameservices</name>
-    <value>apollo-phx-nn-ha</value>
-    <description>Logical name for this new nameservice</description>
-  </property>
-
-  <property>
-    <name>dfs.ha.namenodes.apollo-phx-nn-ha</name>
-    <value>nn1,nn2</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.rpc-address.apollo-phx-nn-ha.nn1</name>
-    <value>apollo-phx-nn.vip.ebay.com:8020</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.rpc-address.apollo-phx-nn-ha.nn2</name>
-    <value>apollo-phx-nn-2.vip.ebay.com:8020</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.servicerpc-address.apollo-phx-nn-ha.nn1</name>
-    <value>apollo-phx-nn.vip.ebay.com:8030</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.servicerpc-address.apollo-phx-nn-ha.nn2</name>
-    <value>apollo-phx-nn-2.vip.ebay.com:8030</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.http-address.apollo-phx-nn-ha.nn1</name>
-    <value>apollo-phx-nn.vip.ebay.com:50080</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.http-address.apollo-phx-nn-ha.nn2</name>
-    <value>apollo-phx-nn-2.vip.ebay.com:50080</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.shared.edits.dir</name>
-    <value>qjournal://phxaishdc9en0010-be.phx.ebay.com:8485;phxaishdc9en0011-be.phx.ebay.com:8485;phxaishdc9en0012-be.phx.ebay.com:8485;phxaishdc9en0013-be.phx.ebay.com:8485;phxaishdc9en0014-be.phx.ebay.com:8485/apollo-phx-nn-ha</value>
-  </property>
-
-  <property>
-    <name>dfs.client.failover.proxy.provider.apollo-phx-nn-ha</name>
-    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-  </property>
-
-  <property>
-    <name>dfs.ha.fencing.methods</name>
-    <value>sshfence
-      shell(/bin/true)
-    </value>
-  </property>
-
-  <property>
-    <name>dfs.ha.fencing.ssh.private-key-files</name>
-    <value>/home/hadoop/.ssh/id_rsa</value>
-  </property>
-
-  <property>
-    <name>dfs.ha.fencing.ssh.connect-timeout</name>
-    <value>30000</value>
-  </property>
-
-  <property>
-    <name>dfs.ha.automatic-failover.enabled</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.edits.dir</name>
-    <value>/hadoop/qjm/apollo</value>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.kerberos.principal</name>
-    <value>hadoop/_HOST@APD.EBAY.COM</value>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/_HOST@APD.EBAY.COM</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.https-address.apollo-phx-nn-ha.nn2</name>
-    <value>apollo-phx-nn-2.vip.ebay.com:50070</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.https-address.apollo-phx-nn-ha.nn1</name>
-    <value>apollo-phx-nn.vip.ebay.com:50070</value>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.keytab.file</name>
-    <value>/etc/hadoop/hadoop.keytab</value>
-  </property>
-
-  <!-- Apollo HA Configs END -->
-
-  <!-- BEGIN Selective Encryption as in Ares - Sept 01, 2015 Tiffany -->
-  <property>
-    <name>dfs.encrypt.data.transfer</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>dfs.encrypt.data.transfer.algorithm</name>
-    <value>rc4</value>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.trustedchannel.resolver.class</name>
-    <value>org.apache.hadoop.hdfs.datatransfer.FlagListTrustedChannelResolver</value>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.datatransfer.client.encrypt</name>
-    <value>false</value>
-    <final>true</final>
-  </property>
-
-  <!-- END Selective Encryption as in Ares - Sept 01, 2015 Tiffany -->
-
-  <!-- Post Upgrade - improve performance - Oct 23, 2015 Tiffany -->
-  <property>
-    <name>dfs.client.block.write.locateFollowingBlock.retries</name>
-    <value>8</value>
-  </property>
-  <!-- END Post Upgrade - improve performance - Oct 23, 2015 Tiffany -->
-
-</configuration>


