eagle-commits mailing list archives

From h..@apache.org
Subject [10/20] incubator-eagle git commit: [EAGLE-53] Support boot and shell in eagle-docker
Date Mon, 30 Nov 2015 10:11:52 GMT
[EAGLE-53] Support boot and shell in eagle-docker


Project: http://git-wip-us.apache.org/repos/asf/incubator-eagle/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-eagle/commit/9f3d2721
Tree: http://git-wip-us.apache.org/repos/asf/incubator-eagle/tree/9f3d2721
Diff: http://git-wip-us.apache.org/repos/asf/incubator-eagle/diff/9f3d2721

Branch: refs/heads/master
Commit: 9f3d2721471279744bdafd183408b89be12cd224
Parents: a9d064a
Author: Hao Chen <hao@apache.org>
Authored: Sun Nov 29 21:11:10 2015 -0700
Committer: Hao Chen <hao@apache.org>
Committed: Sun Nov 29 21:11:10 2015 -0700

----------------------------------------------------------------------
 .gitignore                                      |  1 +
 eagle-external/eagle-docker/bin/eagle-docker.sh | 46 +++++++++++++++-----
 eagle-external/eagle-docker/bin/eagle-lib.sh    | 11 +++--
 .../eagle-docker/resource/eagle-multinode.json  |  2 +-
 .../eagle-docker/resource/eagle-singlenode.json |  2 +-
 5 files changed, 43 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
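
In practice the two new subcommands compose the existing steps: boot runs check_env, build, and deploy in sequence, and shell attaches a bash session to the running instance. A minimal usage sketch, assuming the script is invoked from a repository checkout:

    # build the sandbox image and deploy it in one step
    ./eagle-external/eagle-docker/bin/eagle-docker.sh boot

    # open a bash shell inside the default instance (eagle-sandbox)
    ./eagle-external/eagle-docker/bin/eagle-docker.sh shell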


http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/9f3d2721/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index e40eaef..fd68dd6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,6 +18,7 @@
 # See: https://github.com/github/gitignore/
 
 *.class
+*.out
 
 # Mobile Tools for Java (J2ME)
 .mtj.tmp/

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/9f3d2721/eagle-external/eagle-docker/bin/eagle-docker.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/bin/eagle-docker.sh b/eagle-external/eagle-docker/bin/eagle-docker.sh
index 5631d32..4faec18 100755
--- a/eagle-external/eagle-docker/bin/eagle-docker.sh
+++ b/eagle-external/eagle-docker/bin/eagle-docker.sh
@@ -17,10 +17,11 @@
 
 # NOTICE: This script is developed and maintained by the Apache Eagle community under the Apache Software Foundation; it is not an official Docker product or community script.
 
-EAGLE_DOCKER_VERSION=0.3.0-snapshot
-EAGLE_DOCKER_NAME=apacheeagle/standalone
+EAGLE_DOCKER_VERSION=latest
+EAGLE_DOCKER_NAME=apacheeagle/sandbox
+EAGLE_DOCKER_PREFIX=eagle-sandbox
 
-NODE_NUM=1
+export NODE_NUM=1
 
 cd `dirname $0`/../
 source bin/eagle-lib.sh	
@@ -53,6 +54,8 @@ function usage() {
 	echo "  stop            Stop eagle docker instance"
 	echo "  status          List eagle docker image and instance status"
 	echo "  clean           Clean docker image and instance"
+	echo "  shell           Execute bash in the eagle docker instance, default: eagle-sandbox"
+	echo "  boot            Bootstrap eagle docker by building and then deploying"
 	echo ""
 	echo "Options:"
 	echo "  --node [number]         Docker instances node number, default is 1"
@@ -61,14 +64,6 @@ function usage() {
 	exit 0
 }
 
-function deploy(){
-	check_env
-	if [ "$NODE_NUM" == "" ];then
-		NODE_NUM=1
-	fi 
-	eagle-deploy-cluster $NODE_NUM
-}
-
 function build(){
 	check_env
 	# ==========================================
@@ -163,6 +158,7 @@ function build(){
 	fi
 }
 
+
 function start(){
 	check_env
 	docker ps -a | grep $EAGLE_DOCKER_NAME 1>/dev/null 
@@ -232,6 +228,26 @@ function clean(){
 	echo "Cleaning Done."
 }
 
+function deploy(){
+	check_env
+	if [ "$NODE_NUM" == "" ];then
+		export NODE_NUM=1
+	fi 
+	echo "Deploying $NODE_NUM nodes"
+	eagle-deploy-cluster $NODE_NUM
+	
+	status
+}
+
+function boot(){
+	check_env
+	build
+	deploy
+}
+
+function exec_bash(){
+	docker exec -it $EAGLE_DOCKER_PREFIX bash
+}
 
 case $1 in
 "--node")
@@ -250,6 +266,10 @@ case $1 in
         build	
 	exit
 	;;
+"boot")
+        boot	
+	exit
+	;;
 "status")
         status 
 	exit
@@ -266,6 +286,10 @@ case $1 in
 	clean	
 	exit
 	;;
+"shell")	
+	exec_bash
+	exit
+	;;
 *)
 	usage
 	exit 1
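
For reference, the shell subcommand added above is a thin wrapper; with the defaults in this script it is equivalent to:

    docker exec -it eagle-sandbox bash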

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/9f3d2721/eagle-external/eagle-docker/bin/eagle-lib.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/bin/eagle-lib.sh b/eagle-external/eagle-docker/bin/eagle-lib.sh
index e133f7c..94481a0 100755
--- a/eagle-external/eagle-docker/bin/eagle-lib.sh
+++ b/eagle-external/eagle-docker/bin/eagle-lib.sh
@@ -14,12 +14,11 @@
 # limitations under the License.
 
 : ${VERSION:=latest}
-: ${IMAGE:="apacheeagle/standalone:${VERSION}"}
+: ${IMAGE:="apacheeagle/sandbox:${VERSION}"}
 
-# : ${NODE_PREFIX:=docker}
-# : ${AMBARI_SERVER_NAME:=${NODE_PREFIX}-master}
-: ${AMBARI_SERVER_NAME:=master}
-: ${MYDOMAIN:=eagle.apache.org}
+: ${NODE_PREFIX:=eagle-sandbox}
+: ${AMBARI_SERVER_NAME:=${NODE_PREFIX}}
+: ${MYDOMAIN:=apache.org}
 : ${DOCKER_OPTS:="--dns 127.0.0.1 --entrypoint /usr/local/serf/bin/start-serf-agent.sh -e KEYCHAIN=$KEYCHAIN --env EAGLE_SERVER_HOST=${AMBARI_SERVER_NAME}.${MYDOMAIN}"}
 : ${CLUSTER_SIZE:=1}
 : ${DEBUG:=1}
@@ -158,5 +157,5 @@ amb-start-node() {
     shift
     MORE_OPTIONS="$@"
   fi
-  run-command docker run $MORE_OPTIONS -e SERF_JOIN_IP=$AMBARI_SERVER_IP $DOCKER_OPTS --name ${NODE_PREFIX}$NUMBER -h ${NODE_PREFIX}${NUMBER}.$MYDOMAIN $IMAGE --log-level debug
+  run-command docker run $MORE_OPTIONS -e SERF_JOIN_IP=$AMBARI_SERVER_IP $DOCKER_OPTS --name ${NODE_PREFIX}_$NUMBER -h ${NODE_PREFIX}_${NUMBER}.$MYDOMAIN $IMAGE --log-level debug
 }
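
With the renamed defaults, worker containers started by amb-start-node are now created as eagle-sandbox_1, eagle-sandbox_2, and so on, with hostnames such as eagle-sandbox_1.apache.org. A sketch of a two-node deployment under these defaults (assuming --node is passed before the action, as the usage text suggests):

    ./eagle-external/eagle-docker/bin/eagle-docker.sh --node 2 deploy
    docker ps --format '{{.Names}}'    # names should carry the eagle-sandbox prefix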

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/9f3d2721/eagle-external/eagle-docker/resource/eagle-multinode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/eagle-multinode.json b/eagle-external/eagle-docker/resource/eagle-multinode.json
index 9e399ae..067ef85 100644
--- a/eagle-external/eagle-docker/resource/eagle-multinode.json
+++ b/eagle-external/eagle-docker/resource/eagle-multinode.json
@@ -19,7 +19,7 @@
     {
       "hdfs-log4j": {
         "properties": {
-          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under
one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with
this work for additional information\r\n# regarding copyright ownership.  The ASF licenses
this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may
not use this file except in compliance\r\n# with the License.  You may obtain a copy of the
License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required
by applicable law or agreed to in writing,\r\n# software distributed under the License is
distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND,
either express or implied.  See the License for the\r\n# specific language governing permissions
and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that
can be overridden by system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n#
Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger},
EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling
File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n#
Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601}
%-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n#
hdfs audit l
 ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
%p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=master.eagle.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.Con
 versionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n#
mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
%p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File
Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\nlog4j.appe
 nder.RFA.MaxBackupIndex=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
%-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L))
- %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n#
Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n#
Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n#
Event Counter Appender\r\n# Sends counts of logging messages at different severity levels
to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=org.apache
 .hadoop.log.metrics.EventCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n#
HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress
normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
+          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under
one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with
this work for additional information\r\n# regarding copyright ownership.  The ASF licenses
this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may
not use this file except in compliance\r\n# with the License.  You may obtain a copy of the
License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required
by applicable law or agreed to in writing,\r\n# software distributed under the License is
distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND,
either express or implied.  See the License for the\r\n# specific language governing permissions
and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that
can be overridden by system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n#
Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger},
EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling
File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n#
Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601}
%-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n#
hdfs audit l
 ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
%p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=eagle-sandbox.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.Co
 nversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n#
mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
%p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File
Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\nlog4j.app
 ender.RFA.MaxBackupIndex=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
%-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L))
- %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n#
Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n#
Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n#
Event Counter Appender\r\n# Sends counts of logging messages at different severity levels
to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=org.apach
 e.hadoop.log.metrics.EventCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n#
HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress
normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
          }
       }
     }
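
Amid the escaped log4j content above, the only functional change is the Kafka broker endpoint used by the HDFS audit-log appender, which now points at the sandbox host; the same one-line change is applied to eagle-singlenode.json below:

    log4j.appender.KAFKA_HDFS_AUDIT.BrokerList=eagle-sandbox.apache.org:6667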

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/9f3d2721/eagle-external/eagle-docker/resource/eagle-singlenode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/eagle-singlenode.json b/eagle-external/eagle-docker/resource/eagle-singlenode.json
index 1073107..8e1fa85 100644
--- a/eagle-external/eagle-docker/resource/eagle-singlenode.json
+++ b/eagle-external/eagle-docker/resource/eagle-singlenode.json
@@ -19,7 +19,7 @@
     {
       "hdfs-log4j": {
         "properties": {
-          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under
one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with
this work for additional information\r\n# regarding copyright ownership.  The ASF licenses
this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may
not use this file except in compliance\r\n# with the License.  You may obtain a copy of the
License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required
by applicable law or agreed to in writing,\r\n# software distributed under the License is
distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND,
either express or implied.  See the License for the\r\n# specific language governing permissions
and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that
can be overridden by system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n#
Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger},
EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling
File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n#
Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601}
%-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n#
hdfs audit l
 ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
%p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=master.eagle.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.Con
 versionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n#
mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
%p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File
Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\nlog4j.appe
 nder.RFA.MaxBackupIndex=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
%-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L))
- %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n#
Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n#
Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n#
Event Counter Appender\r\n# Sends counts of logging messages at different severity levels
to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=org.apache
 .hadoop.log.metrics.EventCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n#
HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress
normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
+          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under
one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with
this work for additional information\r\n# regarding copyright ownership.  The ASF licenses
this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may
not use this file except in compliance\r\n# with the License.  You may obtain a copy of the
License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required
by applicable law or agreed to in writing,\r\n# software distributed under the License is
distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND,
either express or implied.  See the License for the\r\n# specific language governing permissions
and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that
can be overridden by system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n#
Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger},
EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling
File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n#
Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601}
%-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
%p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n#
hdfs audit l
 ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
%p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=eagle-sandbox.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.Co
 nversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n#
mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
%p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File
Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\nlog4j.app
 ender.RFA.MaxBackupIndex=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
%-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L))
- %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n#
Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n#
Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n#
Event Counter Appender\r\n# Sends counts of logging messages at different severity levels
to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=org.apach
 e.hadoop.log.metrics.EventCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n#
HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress
normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
          }
       }
     }

