eagle-commits mailing list archives

From h..@apache.org
Subject [3/6] incubator-eagle git commit: [EAGLE-482] Fix checkstyle configuration
Date Mon, 22 Aug 2016 07:24:52 GMT
http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a197eb02/eagle-dev/eclipse-scala-templates.xml
----------------------------------------------------------------------
diff --git a/eagle-dev/eclipse-scala-templates.xml b/eagle-dev/eclipse-scala-templates.xml
index 55ce31a..99448ee 100644
--- a/eagle-dev/eclipse-scala-templates.xml
+++ b/eagle-dev/eclipse-scala-templates.xml
@@ -1,117 +1,142 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?><templates><template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false" description="New App template for the new file wizard" enabled="true" id="org.scalaide.core.templates.wizards.app" name="wizard_app">/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-${package_name}
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<templates>
+    <template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false"
+              description="New App template for the new file wizard" enabled="true"
+              id="org.scalaide.core.templates.wizards.app" name="wizard_app">/*
+        * Licensed to the Apache Software Foundation (ASF) under one or more
+        * contributor license agreements. See the NOTICE file distributed with
+        * this work for additional information regarding copyright ownership.
+        * The ASF licenses this file to You under the Apache License, Version 2.0
+        * (the "License"); you may not use this file except in compliance with
+        * the License. You may obtain a copy of the License at
+        *
+        * http://www.apache.org/licenses/LICENSE-2.0
+        *
+        * Unless required by applicable law or agreed to in writing, software
+        * distributed under the License is distributed on an "AS IS" BASIS,
+        * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+        * See the License for the specific language governing permissions and
+        * limitations under the License.
+        */
+        ${package_name}
 
-object ${type_name} extends App {
-  ${cursor}
-}</template><template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false" description="New class template for the new file wizard" enabled="true" id="org.scalaide.core.templates.wizards.class" name="wizard_class">/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-${package_name}
+        object ${type_name} extends App {
+        ${cursor}
+        }
+    </template>
+    <template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false"
+              description="New class template for the new file wizard" enabled="true"
+              id="org.scalaide.core.templates.wizards.class" name="wizard_class">/*
+        * Licensed to the Apache Software Foundation (ASF) under one or more
+        * contributor license agreements. See the NOTICE file distributed with
+        * this work for additional information regarding copyright ownership.
+        * The ASF licenses this file to You under the Apache License, Version 2.0
+        * (the "License"); you may not use this file except in compliance with
+        * the License. You may obtain a copy of the License at
+        *
+        * http://www.apache.org/licenses/LICENSE-2.0
+        *
+        * Unless required by applicable law or agreed to in writing, software
+        * distributed under the License is distributed on an "AS IS" BASIS,
+        * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+        * See the License for the specific language governing permissions and
+        * limitations under the License.
+        */
+        ${package_name}
 
-class ${type_name} {
-  ${cursor}
-}</template><template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false" description="New empty file template for the new file wizard" enabled="true" id="org.scalaide.core.templates.wizards.empty" name="wizard_empty">/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- </template><template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false" description="New object template for the new file wizard" enabled="true" id="org.scalaide.core.templates.wizards.object" name="wizard_object">/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-${package_name}
+        class ${type_name} {
+        ${cursor}
+        }
+    </template>
+    <template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false"
+              description="New empty file template for the new file wizard" enabled="true"
+              id="org.scalaide.core.templates.wizards.empty" name="wizard_empty">/*
+        * Licensed to the Apache Software Foundation (ASF) under one or more
+        * contributor license agreements. See the NOTICE file distributed with
+        * this work for additional information regarding copyright ownership.
+        * The ASF licenses this file to You under the Apache License, Version 2.0
+        * (the "License"); you may not use this file except in compliance with
+        * the License. You may obtain a copy of the License at
+        *
+        * http://www.apache.org/licenses/LICENSE-2.0
+        *
+        * Unless required by applicable law or agreed to in writing, software
+        * distributed under the License is distributed on an "AS IS" BASIS,
+        * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+        * See the License for the specific language governing permissions and
+        * limitations under the License.
+        */
+    </template>
+    <template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false"
+              description="New object template for the new file wizard" enabled="true"
+              id="org.scalaide.core.templates.wizards.object" name="wizard_object">/*
+        * Licensed to the Apache Software Foundation (ASF) under one or more
+        * contributor license agreements. See the NOTICE file distributed with
+        * this work for additional information regarding copyright ownership.
+        * The ASF licenses this file to You under the Apache License, Version 2.0
+        * (the "License"); you may not use this file except in compliance with
+        * the License. You may obtain a copy of the License at
+        *
+        * http://www.apache.org/licenses/LICENSE-2.0
+        *
+        * Unless required by applicable law or agreed to in writing, software
+        * distributed under the License is distributed on an "AS IS" BASIS,
+        * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+        * See the License for the specific language governing permissions and
+        * limitations under the License.
+        */
+        ${package_name}
 
-object ${type_name} {
-  ${cursor}
-}</template><template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false" description="New package object template for the new file wizard" enabled="true" id="org.scalaide.core.templates.wizards.packageObject" name="wizard_package_object">/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-${package_name}
+        object ${type_name} {
+        ${cursor}
+        }
+    </template>
+    <template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false"
+              description="New package object template for the new file wizard" enabled="true"
+              id="org.scalaide.core.templates.wizards.packageObject" name="wizard_package_object">/*
+        * Licensed to the Apache Software Foundation (ASF) under one or more
+        * contributor license agreements. See the NOTICE file distributed with
+        * this work for additional information regarding copyright ownership.
+        * The ASF licenses this file to You under the Apache License, Version 2.0
+        * (the "License"); you may not use this file except in compliance with
+        * the License. You may obtain a copy of the License at
+        *
+        * http://www.apache.org/licenses/LICENSE-2.0
+        *
+        * Unless required by applicable law or agreed to in writing, software
+        * distributed under the License is distributed on an "AS IS" BASIS,
+        * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+        * See the License for the specific language governing permissions and
+        * limitations under the License.
+        */
+        ${package_name}
 
-package object ${type_name} {
-  ${cursor}
-}</template><template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false" description="New trait template for the new file wizard" enabled="true" id="org.scalaide.core.templates.wizards.trait" name="wizard_trait">/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-${package_name}
+        package object ${type_name} {
+        ${cursor}
+        }
+    </template>
+    <template autoinsert="true" context="org.scala-ide.sdt.core.templates" deleted="false"
+              description="New trait template for the new file wizard" enabled="true"
+              id="org.scalaide.core.templates.wizards.trait" name="wizard_trait">/*
+        * Licensed to the Apache Software Foundation (ASF) under one or more
+        * contributor license agreements. See the NOTICE file distributed with
+        * this work for additional information regarding copyright ownership.
+        * The ASF licenses this file to You under the Apache License, Version 2.0
+        * (the "License"); you may not use this file except in compliance with
+        * the License. You may obtain a copy of the License at
+        *
+        * http://www.apache.org/licenses/LICENSE-2.0
+        *
+        * Unless required by applicable law or agreed to in writing, software
+        * distributed under the License is distributed on an "AS IS" BASIS,
+        * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+        * See the License for the specific language governing permissions and
+        * limitations under the License.
+        */
+        ${package_name}
 
-trait ${type_name} {
-  ${cursor}
-}</template></templates>
\ No newline at end of file
+        trait ${type_name} {
+        ${cursor}
+        }
+    </template>
+</templates>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a197eb02/eagle-examples/eagle-app-example/src/test/resources/application.conf
----------------------------------------------------------------------
diff --git a/eagle-examples/eagle-app-example/src/test/resources/application.conf b/eagle-examples/eagle-app-example/src/test/resources/application.conf
index c7fdb8c..de9be89 100644
--- a/eagle-examples/eagle-app-example/src/test/resources/application.conf
+++ b/eagle-examples/eagle-app-example/src/test/resources/application.conf
@@ -14,56 +14,56 @@
 # limitations under the License.
 
 {
-	"coordinator" : {
-		"policiesPerBolt" : 5,
-		"boltParallelism" : 5,
-		"policyDefaultParallelism" : 5,
-		"boltLoadUpbound": 0.8,
-		"topologyLoadUpbound" : 0.8,
-		"numOfAlertBoltsPerTopology" : 5,
-		"zkConfig" : {
-			"zkQuorum" : "127.0.0.1:2181",
-			"zkRoot" : "/alert",
-			"zkSessionTimeoutMs" : 10000,
-			"connectionTimeoutMs" : 10000,
-			"zkRetryTimes" : 3,
-			"zkRetryInterval" : 3000
-		},
-		"metadataService" : {
-			"host" : "localhost",
-			"port" : 8080,
-			"context" : "/rest"
-		},
-		"metadataDynamicCheck" : {
-			"initDelayMillis" : 1000,
-			"delayMillis" : 30000
-		}
-	},
-	"metadata":{
-		"store": "org.apache.eagle.metadata.service.memory.MemoryMetadataStore"
-	},
-	"application":{
-		"sink":{
-			"type": "org.apache.eagle.app.sink.KafkaStreamSink",
-			"config": {
-				"kafkaBrokerHost" : "",
-				"kafkaZkConnection" : ""
-			}
-		},
-		"storm": {
-			"nimbusHost": "localhost"
-			"nimbusThriftPort": 6627
-		}
-	},
+  "coordinator": {
+    "policiesPerBolt": 5,
+    "boltParallelism": 5,
+    "policyDefaultParallelism": 5,
+    "boltLoadUpbound": 0.8,
+    "topologyLoadUpbound": 0.8,
+    "numOfAlertBoltsPerTopology": 5,
+    "zkConfig": {
+      "zkQuorum": "127.0.0.1:2181",
+      "zkRoot": "/alert",
+      "zkSessionTimeoutMs": 10000,
+      "connectionTimeoutMs": 10000,
+      "zkRetryTimes": 3,
+      "zkRetryInterval": 3000
+    },
+    "metadataService": {
+      "host": "localhost",
+      "port": 8080,
+      "context": "/rest"
+    },
+    "metadataDynamicCheck": {
+      "initDelayMillis": 1000,
+      "delayMillis": 30000
+    }
+  },
+  "metadata": {
+    "store": "org.apache.eagle.metadata.service.memory.MemoryMetadataStore"
+  },
+  "application": {
+    "sink": {
+      "type": "org.apache.eagle.app.sink.KafkaStreamSink",
+      "config": {
+        "kafkaBrokerHost": "",
+        "kafkaZkConnection": ""
+      }
+    },
+    "storm": {
+      "nimbusHost": "localhost"
+      "nimbusThriftPort": 6627
+    }
+  },
 
-	"appId": "unit_test_example_app"
-	"spoutNum": 3
-	"loaded": true
-	"mode":"LOCAL"
-	"dataSinkConfig": {
-		"topic" : "test_topic",
-		"brokerList" : "sandbox.hortonworks.com:6667",
-		"serializerClass" : "kafka.serializer.StringEncoder",
-		"keySerializerClass" : "kafka.serializer.StringEncoder"
-	}
+  "appId": "unit_test_example_app"
+  "spoutNum": 3
+  "loaded": true
+  "mode": "LOCAL"
+  "dataSinkConfig": {
+    "topic": "test_topic",
+    "brokerList": "sandbox.hortonworks.com:6667",
+    "serializerClass": "kafka.serializer.StringEncoder",
+    "keySerializerClass": "kafka.serializer.StringEncoder"
+  }
 }

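For reference: the file above is HOCON rather than strict JSON, which is why adjacent keys such as "nimbusHost" and "nimbusThriftPort" (and the top-level "appId"/"spoutNum" block) can appear without separating commas — HOCON treats a newline as a valid separator. A minimal sketch of reading a few of these values with the Typesafe Config library, the usual HOCON parser (key paths are taken from the diff above; the class name and printout are illustrative):

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class AppConfSketch {
    public static void main(String[] args) {
        // ConfigFactory.load() resolves application.conf from the classpath
        // and parses it as HOCON, so the comma-less keys above are legal.
        Config config = ConfigFactory.load();
        String nimbusHost = config.getString("application.storm.nimbusHost");
        int nimbusPort    = config.getInt("application.storm.nimbusThriftPort");
        String appId      = config.getString("appId");
        int spoutNum      = config.getInt("spoutNum");
        System.out.println(appId + " -> " + nimbusHost + ":" + nimbusPort
                + " (spouts=" + spoutNum + ")");
    }
}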
http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a197eb02/eagle-examples/eagle-cassandra-example/conf/cassandra-security-local.conf
----------------------------------------------------------------------
diff --git a/eagle-examples/eagle-cassandra-example/conf/cassandra-security-local.conf b/eagle-examples/eagle-cassandra-example/conf/cassandra-security-local.conf
index bbf6da6..56ce525 100644
--- a/eagle-examples/eagle-cassandra-example/conf/cassandra-security-local.conf
+++ b/eagle-examples/eagle-cassandra-example/conf/cassandra-security-local.conf
@@ -14,56 +14,56 @@
 # limitations under the License.
 
 {
-	"envContextConfig" : {
-		"env" : "storm",
-		"mode" : "local",
-		"topologyName" : "cassandrawQueryLogMonitorTopology",
-		"stormConfigFile" : "cassandraw-querlog-storm.yaml",
-		"parallelismConfig" : {
-			"cassandraQueryLogStream" : 1,
-			"cassandraQueryLogExecutor*" : 1
-		}
-	},
-	"dataSourceConfig": {
-		"topic" : "cassandra_querylog_local",
-		"zkConnection" : "localhost:2181",
-		"zkConnectionTimeoutMS" : 15000,
-		"consumerGroupId" : "eagle.consumer",
-		"fetchSize" : 1048586,
-		"deserializerClass" : "org.apache.eagle.datastream.storm.JsonMessageDeserializer",
-		"transactionZKServers" : "localhost",
-		"transactionZKPort" : 2181,
-		"transactionZKRoot" : "/consumers",
-		"transactionStateUpdateMS" : 2000
-	},
-	"alertExecutorConfigs" : {
-		"cassandraQueryLogExecutor" : {
-			"parallelism" : 1,
-			"partitioner" : "org.apache.eagle.policy.DefaultPolicyPartitioner"
-			"needValidation" : "true"
-		}
-	},
-	"eagleProps" : {
-		"site" : "sandbox",
-		"application": "cassandraQueryLog",
-		"dataJoinPollIntervalSec" : 30,
-		"mailHost" : "mailHost.com",
-		"mailSmtpPort":"25",
-		"mailDebug" : "true",
-		"balancePartitionEnabled" : true,
-		#"partitionRefreshIntervalInMin" : 60,
-		#"kafkaStatisticRangeInMin" : 60,
-		"eagleService": {
-			"host": "localhost",
-			"port": 9099,
-			"username": "admin",
-			"password": "secret"
-		}
-		"readHdfsUserCommandPatternFrom" : "file"
-	},
-	"dynamicConfigSource" : {
-		"enabled" : true,
-		"initDelayMillis" : 0,
-		"delayMillis" : 30000
-	}
+  "envContextConfig": {
+    "env": "storm",
+    "mode": "local",
+    "topologyName": "cassandrawQueryLogMonitorTopology",
+    "stormConfigFile": "cassandraw-querlog-storm.yaml",
+    "parallelismConfig": {
+      "cassandraQueryLogStream": 1,
+      "cassandraQueryLogExecutor*": 1
+    }
+  },
+  "dataSourceConfig": {
+    "topic": "cassandra_querylog_local",
+    "zkConnection": "localhost:2181",
+    "zkConnectionTimeoutMS": 15000,
+    "consumerGroupId": "eagle.consumer",
+    "fetchSize": 1048586,
+    "deserializerClass": "org.apache.eagle.datastream.storm.JsonMessageDeserializer",
+    "transactionZKServers": "localhost",
+    "transactionZKPort": 2181,
+    "transactionZKRoot": "/consumers",
+    "transactionStateUpdateMS": 2000
+  },
+  "alertExecutorConfigs": {
+    "cassandraQueryLogExecutor": {
+      "parallelism": 1,
+      "partitioner": "org.apache.eagle.policy.DefaultPolicyPartitioner"
+      "needValidation": "true"
+    }
+  },
+  "eagleProps": {
+    "site": "sandbox",
+    "application": "cassandraQueryLog",
+    "dataJoinPollIntervalSec": 30,
+    "mailHost": "mailHost.com",
+    "mailSmtpPort": "25",
+    "mailDebug": "true",
+    "balancePartitionEnabled": true,
+    #"partitionRefreshIntervalInMin" : 60,
+    #"kafkaStatisticRangeInMin" : 60,
+    "eagleService": {
+      "host": "localhost",
+      "port": 9099,
+      "username": "admin",
+      "password": "secret"
+    }
+    "readHdfsUserCommandPatternFrom": "file"
+  },
+  "dynamicConfigSource": {
+    "enabled": true,
+    "initDelayMillis": 0,
+    "delayMillis": 30000
+  }
 }
\ No newline at end of file

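Note that the two keys prefixed with "#" inside eagleProps are HOCON comments, not malformed JSON. A small sketch illustrating that behavior (the snippet and values are illustrative, lifted from the diff above):

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class HoconCommentSketch {
    public static void main(String[] args) {
        // '#' begins a comment in HOCON, so the disabled keys in eagleProps
        // above are ignored by the parser rather than rejected as bad JSON.
        Config c = ConfigFactory.parseString(
                "eagleProps {\n"
                + "  #\"partitionRefreshIntervalInMin\" : 60,\n"
                + "  \"mailSmtpPort\" : \"25\"\n"
                + "}");
        System.out.println(c.hasPath("eagleProps.partitionRefreshIntervalInMin")); // false
        System.out.println(c.getString("eagleProps.mailSmtpPort"));                // 25
    }
}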
http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a197eb02/eagle-examples/pom.xml
----------------------------------------------------------------------
diff --git a/eagle-examples/pom.xml b/eagle-examples/pom.xml
index 273e934..13428af 100644
--- a/eagle-examples/pom.xml
+++ b/eagle-examples/pom.xml
@@ -15,7 +15,8 @@
   ~ See the License for the specific language governing permissions and
   ~ limitations under the License.
   -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <parent>
         <artifactId>eagle-parent</artifactId>
         <groupId>org.apache.eagle</groupId>

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a197eb02/eagle-external/eagle-ambari/lib/EAGLE/configuration/application.conf
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-ambari/lib/EAGLE/configuration/application.conf b/eagle-external/eagle-ambari/lib/EAGLE/configuration/application.conf
index 9caef36..d311296 100644
--- a/eagle-external/eagle-ambari/lib/EAGLE/configuration/application.conf
+++ b/eagle-external/eagle-ambari/lib/EAGLE/configuration/application.conf
@@ -14,43 +14,43 @@
 # limitations under the License.
 
 {
-  "deployInstanceIdentifier" : {
+  "deployInstanceIdentifier": {
     "programId": "hdfsAuditLogMonitoring"
   },
-  "envContextConfig" : {
-    "env" : "storm",
-    "mode" : "cluster",
-    "topologyName" : "auditLogProcessTology",
-    "stormConfigFile" : "storm.yaml.1",
-    "parallelismConfig" : {
-      "msgConsumer" : 2,
-      "downsampling" : 3
+  "envContextConfig": {
+    "env": "storm",
+    "mode": "cluster",
+    "topologyName": "auditLogProcessTology",
+    "stormConfigFile": "storm.yaml.1",
+    "parallelismConfig": {
+      "msgConsumer": 2,
+      "downsampling": 3
     }
   },
   "dataSourceConfig": {
-    "flavor" : "stormkafka",
-    "topic" : "hdfs_audit_log",
-    "zkConnection" : "localhost:2181",
-    "zkConnectionTimeoutMS" : 15000,
-    "consumerGroupId" : "EagleConsumer",
-    "fetchSize" : 1048586,
-    "deserializerClass" : "eagle.security.auditlog.AuditLogKafkaDeserializer",
-    "transactionZKServers" : "localhost",
-    "transactionZKPort" : 2181,
-    "transactionZKRoot" : "/brokers/topics",
-    "transactionStateUpdateMS" : 2000
+    "flavor": "stormkafka",
+    "topic": "hdfs_audit_log",
+    "zkConnection": "localhost:2181",
+    "zkConnectionTimeoutMS": 15000,
+    "consumerGroupId": "EagleConsumer",
+    "fetchSize": 1048586,
+    "deserializerClass": "eagle.security.auditlog.AuditLogKafkaDeserializer",
+    "transactionZKServers": "localhost",
+    "transactionZKPort": 2181,
+    "transactionZKRoot": "/brokers/topics",
+    "transactionStateUpdateMS": 2000
   },
-  "alertExecutorConfigs" : {
-     "hdfsAuditLogAlertExecutor" : {
-       "parallelism" : 2,
-       "partitioner" : "eagle.alert.policy.DefaultPolicyPartitioner",
-       "sourceStreams" : ["hdfsAuditLogEventStream"]
-     }
+  "alertExecutorConfigs": {
+    "hdfsAuditLogAlertExecutor": {
+      "parallelism": 2,
+      "partitioner": "eagle.alert.policy.DefaultPolicyPartitioner",
+      "sourceStreams": ["hdfsAuditLogEventStream"]
+    }
   },
-  "eagleProps" : {
-  	"ipLocationLoadFrom" : "jmx",
-  	"namenodeUrl" : "https://dc1-nn.vip.xyz.com:50070",
-  	"ipLocationLoadIntervalSeconds" : "300",
+  "eagleProps": {
+    "ipLocationLoadFrom": "jmx",
+    "namenodeUrl": "https://dc1-nn.vip.xyz.com:50070",
+    "ipLocationLoadIntervalSeconds": "300",
     "eagleService": {
       "host": "localhost",
       "port": 9099,
@@ -58,20 +58,20 @@
       "password": "secret"
     }
   },
-  "metadataJoinConfigs" : {
-  	"ipZoneJoin" : {
-  	   "ipLocationLoadFrom" : "jmx",
-  	   "namenodeUrl" : "https://dc1-nn.vip.xyz.com:50070",
-  	   "ipLocationLoadIntervalSeconds" : "300"
-  	},
-  	"fileMetadataJoin" : {
-  	
-  	}
+  "metadataJoinConfigs": {
+    "ipZoneJoin": {
+      "ipLocationLoadFrom": "jmx",
+      "namenodeUrl": "https://dc1-nn.vip.xyz.com:50070",
+      "ipLocationLoadIntervalSeconds": "300"
+    },
+    "fileMetadataJoin": {
+
+    }
   },
-  "dynamicConfigSource" : {
-  	"enabled" : true,
-  	"initDelayMillis" : 0,
-  	"delayMillis" : 30000,
-  	"ignoreDeleteFromSource" : true
+  "dynamicConfigSource": {
+    "enabled": true,
+    "initDelayMillis": 0,
+    "delayMillis": 30000,
+    "ignoreDeleteFromSource": true
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a197eb02/eagle-external/eagle-ambari/lib/EAGLE/configuration/eagle-service.conf
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-ambari/lib/EAGLE/configuration/eagle-service.conf b/eagle-external/eagle-ambari/lib/EAGLE/configuration/eagle-service.conf
index cb374bd..590546f 100644
--- a/eagle-external/eagle-ambari/lib/EAGLE/configuration/eagle-service.conf
+++ b/eagle-external/eagle-ambari/lib/EAGLE/configuration/eagle-service.conf
@@ -13,13 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-env=test
+env = test
 
-eagle.storage.type=hbase
+eagle.storage.type = hbase
 
-table.name.prefixed.with.environment=false
+table.name.prefixed.with.environment = false
 
 ### begin hbase configuration
-hbase.zookeeper.quorum=127.0.0.1
-hbase.zookeeper.property.clientPort=2181
+hbase.zookeeper.quorum = 127.0.0.1
+hbase.zookeeper.property.clientPort = 2181
 ### end hbase configuration

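Whitespace around "=" is insignificant both to HOCON and to java.util.Properties, so adding spaces here is purely cosmetic. A quick illustrative sketch confirming the two spellings parse identically under Typesafe Config:

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class WhitespaceSketch {
    public static void main(String[] args) {
        // HOCON ignores whitespace around '=' (as does java.util.Properties),
        // so "key=value" and "key = value" are equivalent.
        Config dense  = ConfigFactory.parseString("eagle.storage.type=hbase");
        Config spaced = ConfigFactory.parseString("eagle.storage.type = hbase");
        System.out.println(dense.getString("eagle.storage.type")
                .equals(spaced.getString("eagle.storage.type"))); // true
    }
}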
http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a197eb02/eagle-external/eagle-ambari/lib/EAGLE/configuration/log4j.properties
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-ambari/lib/EAGLE/configuration/log4j.properties b/eagle-external/eagle-ambari/lib/EAGLE/configuration/log4j.properties
index 25331ab..76ea987 100644
--- a/eagle-external/eagle-ambari/lib/EAGLE/configuration/log4j.properties
+++ b/eagle-external/eagle-ambari/lib/EAGLE/configuration/log4j.properties
@@ -12,24 +12,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 log4j.rootLogger=INFO, stdout
-
- eagle.log.dir=../logs
- eagle.log.file=eagle.log
-
+eagle.log.dir=../logs
+eagle.log.file=eagle.log
 # standard output
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %p [%t] %c{2}[%L]: %m%n
-
 # Daily Rolling File Appender
- log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.DRFA.File=${eagle.log.dir}/${eagle.log.file}
- log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${eagle.log.dir}/${eagle.log.file}
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
 ## 30-day backup
 # log4j.appender.DRFA.MaxBackupIndex=30
- log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
 # Pattern format: Date LogLevel LoggerName LogMessage
 log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c{2}[%L]: %m%n
\ No newline at end of file

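This is a log4j 1.x properties file; java.util.Properties (which log4j 1.x uses to read this format) trims leading whitespace from keys, so the stray leading spaces removed above were tolerated, just untidy. A minimal sketch of applying the file programmatically, assuming the log4j 1.x PropertyConfigurator API (the path is illustrative):

import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;

public class Log4jSketch {
    public static void main(String[] args) {
        // Apply the log4j 1.x configuration shown above (path is illustrative);
        // this wires up the stdout ConsoleAppender and the DRFA daily file.
        PropertyConfigurator.configure("configuration/log4j.properties");
        Logger log = Logger.getLogger(Log4jSketch.class);
        log.info("eagle logging initialized");
    }
}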
http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a197eb02/eagle-external/eagle-ambari/lib/metainfo.xml
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-ambari/lib/metainfo.xml b/eagle-external/eagle-ambari/lib/metainfo.xml
index 0953c8f..bb5019c 100644
--- a/eagle-external/eagle-ambari/lib/metainfo.xml
+++ b/eagle-external/eagle-ambari/lib/metainfo.xml
@@ -17,11 +17,11 @@
 -->
 
 <metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>EAGLE</name>
-      <extends>common-services/EAGLE/0.0.1</extends>
-    </service>
-  </services>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>EAGLE</name>
+            <extends>common-services/EAGLE/0.0.1</extends>
+        </service>
+    </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a197eb02/eagle-external/eagle-docker/resource/eagle-multinode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/eagle-multinode.json b/eagle-external/eagle-docker/resource/eagle-multinode.json
index 6d6da67..4c1071a 100644
--- a/eagle-external/eagle-docker/resource/eagle-multinode.json
+++ b/eagle-external/eagle-docker/resource/eagle-multinode.json
@@ -1,5 +1,5 @@
 {
-"configurations": [
+  "configurations": [
     {
       "hdfs-site": {
         "dfs.permissions.enabled": "false"
@@ -10,17 +10,17 @@
       }
     },
     {
-       "hadoop-env": {
-          "properties" : {
-            "content" : "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is 1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT ${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n# Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored. /tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n# A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n# added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez
 -client/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
-	  }
+      "hadoop-env": {
+        "properties": {
+          "content": "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is 1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true 
 ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT ${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%
 M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_O
 PTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n# Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# expo
 rt HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored. /tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n# A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon processes.  See 'man 
 nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n# added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-cl
 ient/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
         }
+      }
     },
     {
       "hdfs-log4j": {
         "properties": {
-          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with this work for additional information\r\n# regarding copyright ownership.  The ASF licenses this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may not use this file except in compliance\r\n# with the License.  You may obtain a copy of the License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing,\r\n# software distributed under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for the\r\n# specific language governing permissions and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n# Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n# Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n# hdfs audit l
 ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=sandbox.eagle.incubator.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT
 .Layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n# mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\
 nlog4j.appender.RFA.MaxBackupIndex=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n# Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n# Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n# Event Counter Appender\r\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter
 =org.apache.hadoop.log.metrics.EventCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n# HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
-         }
+          "content": "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with this work for additional information\r\n# regarding copyright ownership.  The ASF licenses this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may not use this file except in compliance\r\n# with the License.  You may obtain a copy of the License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing,\r\n# software distributed under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for the\r\n# specific language governing permissions and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by system properties\r\n# To change daemon root logger use hadoop
 _root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n# Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n# Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlogg
 er above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\n
 #\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n# hdfs audit lo
 gging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=sandbox.eagle.incubator.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.
 Layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n# mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\n
 log4j.appender.RFA.MaxBackupIndex=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n# Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n# Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n# Event Counter Appender\r\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=
 org.apache.hadoop.log.metrics.EventCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n# HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
+        }
       }
     }
   ],
@@ -94,20 +94,20 @@
         {
           "name": "MYSQL_SERVER"
         },
-	{ 
-	  "name": "GANGLIA_SERVER"
-	},
-	{
-	  "name" : "DRPC_SERVER"
-	},
-	{
-	  "name" : "STORM_UI_SERVER"
-	},
-	{
-          "name" : "NIMBUS"
+        {
+          "name": "GANGLIA_SERVER"
+        },
+        {
+          "name": "DRPC_SERVER"
+        },
+        {
+          "name": "STORM_UI_SERVER"
+        },
+        {
+          "name": "NIMBUS"
         },
         {
-          "name" : "KAFKA_BROKER"
+          "name": "KAFKA_BROKER"
         }
       ],
       "cardinality": "1"
@@ -130,7 +130,7 @@
         {
           "name": "DATANODE"
         },
-        { 
+        {
           "name": "ZOOKEEPER_SERVER"
         },
         {
@@ -142,14 +142,14 @@
         {
           "name": "MAPREDUCE2_CLIENT"
         },
-	{
-	  "name" : "KAFKA_BROKER"
-	},
-	{ 
-	  "name": "GANGLIA_MONITOR"
-	},
         {
-          "name" : "SUPERVISOR"
+          "name": "KAFKA_BROKER"
+        },
+        {
+          "name": "GANGLIA_MONITOR"
+        },
+        {
+          "name": "SUPERVISOR"
         }
       ],
       "cardinality": "1"
@@ -159,5 +159,5 @@
     "blueprint_name": "hdp-multinode-eagle",
     "stack_name": "HDP",
     "stack_version": "2.2"
-}
+  }
 }
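
Since the hunks above only change indentation and spacing in eagle-multinode.json, a quick structural check that the reformatting is semantics-preserving is to parse both revisions and compare the decoded values — a minimal sketch in Python, assuming local copies of the old and new files (the paths here are hypothetical):

    import json

    def same_json(old_path, new_path):
        # json.load ignores insignificant whitespace, so deep equality of
        # the decoded values confirms a whitespace-only reformat.
        with open(old_path) as f:
            old = json.load(f)
        with open(new_path) as f:
            new = json.load(f)
        return old == new

    print(same_json("eagle-multinode.old.json", "eagle-multinode.new.json"))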

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a197eb02/eagle-external/eagle-docker/resource/eagle-singlenode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/eagle-singlenode.json b/eagle-external/eagle-docker/resource/eagle-singlenode.json
index a693c7d..d070689 100644
--- a/eagle-external/eagle-docker/resource/eagle-singlenode.json
+++ b/eagle-external/eagle-docker/resource/eagle-singlenode.json
@@ -1,5 +1,5 @@
 {
-"configurations": [
+  "configurations": [
     {
       "hdfs-site": {
         "dfs.permissions.enabled": "false"
@@ -10,17 +10,17 @@
       }
     },
     {
-       "hadoop-env": {
-          "properties" : {
-            "content" : "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is 1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT ${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n# Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored. /tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n# A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n# added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez
 -client/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
-	  }
+      "hadoop-env": {
+        "properties": {
+          "content": "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is 1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true 
 ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT ${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%
 M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_O
 PTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n# Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# expo
 rt HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored. /tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n# A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon processes.  See 'man 
 nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n# added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-cl
 ient/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
         }
+      }
     },
     {
       "hdfs-log4j": {
         "properties": {
-          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with this work for additional information\r\n# regarding copyright ownership.  The ASF licenses this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may not use this file except in compliance\r\n# with the License.  You may obtain a copy of the License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing,\r\n# software distributed under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for the\r\n# specific language governing permissions and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n# Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n# Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n# hdfs audit l
 ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=sandbox.eagle.incubator.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT
 .Layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n# mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\
 nlog4j.appender.RFA.MaxBackupIndex=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n# Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n# Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n# Event Counter Appender\r\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter
 =org.apache.hadoop.log.metrics.EventCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n# HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
-         }
+          "content": "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with this work for additional information\r\n# regarding copyright ownership.  The ASF licenses this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may not use this file except in compliance\r\n# with the License.  You may obtain a copy of the License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing,\r\n# software distributed under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for the\r\n# specific language governing permissions and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by system properties\r\n# To change daemon root logger use hadoop
 _root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n# Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n# Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlogg
 er above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\n
 #\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n# hdfs audit lo
 gging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=sandbox.eagle.incubator.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.
 Layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n# mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\n
 log4j.appender.RFA.MaxBackupIndex=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n# Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n# Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n# Event Counter Appender\r\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=
 org.apache.hadoop.log.metrics.EventCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n# HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
+        }
       }
     }
   ],
@@ -94,23 +94,28 @@
         {
           "name": "MYSQL_SERVER"
         },
-        { "name": "GANGLIA_SERVER"},
-
-	{ "name": "GANGLIA_MONITOR"},
-	
-	{ "name": "KAFKA_BROKER"},
         {
-          "name" : "DRPC_SERVER"
+          "name": "GANGLIA_SERVER"
         },
         {
-          "name" : "NIMBUS"
+          "name": "GANGLIA_MONITOR"
         },
         {
-          "name" : "STORM_UI_SERVER"
+          "name": "KAFKA_BROKER"
         },
-        { "name" : "SUPERVISOR"}
+        {
+          "name": "DRPC_SERVER"
+        },
+        {
+          "name": "NIMBUS"
+        },
+        {
+          "name": "STORM_UI_SERVER"
+        },
+        {
+          "name": "SUPERVISOR"
+        }
       ],
-
       "cardinality": "1"
     }
   ],
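
The blueprint above embeds entire config files (hadoop-env, hdfs-log4j) as single JSON strings with escaped \r\n line endings, which makes hunks like these hard to eyeball. One way to review them is to dump each embedded "content" blob back out as a plain file — a minimal sketch in Python, assuming a local copy of eagle-singlenode.json (the path and output names are illustrative):

    import json

    with open("eagle-singlenode.json") as f:
        blueprint = json.load(f)

    # Each entry in "configurations" wraps one config section; sections
    # such as hadoop-env and hdfs-log4j carry a whole file in
    # properties["content"], while others (e.g. hdfs-site) hold plain
    # key-value pairs and are skipped here.
    for entry in blueprint["configurations"]:
        for section, body in entry.items():
            content = body.get("properties", {}).get("content")
            if content:
                with open(section + ".txt", "w") as out:
                    out.write(content)  # json.load already unescaped \r\n
                print("wrote", section + ".txt")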

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a197eb02/eagle-external/eagle-docker/resource/serf/etc/ambari.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/serf/etc/ambari.json b/eagle-external/eagle-docker/resource/serf/etc/ambari.json
index 4409e84..6e41fb4 100644
--- a/eagle-external/eagle-docker/resource/serf/etc/ambari.json
+++ b/eagle-external/eagle-docker/resource/serf/etc/ambari.json
@@ -3,7 +3,7 @@
     "member-join=/usr/local/serf/handlers/ambari-bootstrap",
     "user:eagle=/usr/local/serf/handlers/eagle"
   ],
-  "tags" : {
+  "tags": {
     "ambari-agent": "true"
   }
 }


