eagle-commits mailing list archives

From h..@apache.org
Subject [07/20] incubator-eagle git commit: [EAGLE-53] Refactor eagle docker structure and implement eagle-docker.sh wrapper scripts
Date Mon, 30 Nov 2015 10:11:49 GMT
[EAGLE-53] Refactor eagle docker structure and implement eagle-docker.sh wrapper scripts


Project: http://git-wip-us.apache.org/repos/asf/incubator-eagle/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-eagle/commit/dc5b5ab3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-eagle/tree/dc5b5ab3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-eagle/diff/dc5b5ab3

Branch: refs/heads/master
Commit: dc5b5ab38f830c942d9c22bd9bb0003dbc915839
Parents: be7a620
Author: Hao Chen <hao@apache.org>
Authored: Fri Nov 27 03:22:23 2015 -0700
Committer: Hao Chen <hao@apache.org>
Committed: Fri Nov 27 03:22:23 2015 -0700

----------------------------------------------------------------------
 eagle-external/eagle-docker/Dockerfile          |  17 +-
 eagle-external/eagle-docker/deploy-eagle.sh     |  85 ----------
 eagle-external/eagle-docker/eagle-functions     | 158 ------------------
 .../eagle-docker/eagle-multinode.json           | 163 -------------------
 .../eagle-docker/eagle-singlenode.json          | 122 --------------
 eagle-external/eagle-docker/httpd/.mkdir        |   0
 eagle-external/eagle-docker/install-cluster.sh  |  36 ----
 eagle-external/eagle-docker/resource/.gitignore |   1 +
 .../eagle-docker/resource/deploy-eagle.sh       |  85 ++++++++++
 .../eagle-docker/resource/eagle-multinode.json  | 163 +++++++++++++++++++
 .../eagle-docker/resource/eagle-singlenode.json | 122 ++++++++++++++
 .../eagle-docker/resource/httpd/.mkdir          |   0
 .../eagle-docker/resource/install-cluster.sh    |  36 ++++
 .../eagle-docker/resource/serf/etc/ambari.json  |   9 +
 .../eagle-docker/resource/serf/handlers/eagle   |  22 +++
 .../eagle-docker/resource/wait-for-eagle.sh     |  38 +++++
 .../eagle-docker/serf/etc/ambari.json           |   9 -
 eagle-external/eagle-docker/serf/handlers/eagle |  22 ---
 eagle-external/eagle-docker/wait-for-eagle.sh   |  38 -----
 19 files changed, 483 insertions(+), 643 deletions(-)
----------------------------------------------------------------------
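
[Editor's note] For orientation: this patch moves every container-side resource under eagle-external/eagle-docker/resource/, leaving the top level free for the eagle-docker.sh wrapper named in the subject (the wrapper itself lands elsewhere in this patch series). A minimal sketch of the kind of subcommand dispatch such a wrapper implies — the layout and subcommand name here are hypothetical; only eagle-deploy-cluster is taken from the eagle-functions helpers removed below:

    #!/bin/bash
    # Hypothetical wrapper sketch, not the committed eagle-docker.sh.
    cd "$(dirname "$0")"
    source ./eagle-functions   # assumes the helper functions remain sourceable

    case "$1" in
      deploy) shift; eagle-deploy-cluster "$@" ;;   # e.g.: ./eagle-docker.sh deploy 3
      *)      echo "usage: $0 deploy [cluster_size] [blueprint]" >&2; exit 1 ;;
    esac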


http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/Dockerfile
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/Dockerfile b/eagle-external/eagle-docker/Dockerfile
index f4ebc4e..0dd8a83 100644
--- a/eagle-external/eagle-docker/Dockerfile
+++ b/eagle-external/eagle-docker/Dockerfile
@@ -24,17 +24,14 @@ RUN curl -sL $EAGLE_DOWNLOAD_LINK | tar -xz -C /usr/hdp/current
 RUN cd /usr/hdp/current && ln -s ./eagle-0.1.0 eagle
 ENV EAGLE_HOME=/usr/hdp/current/eagle
 
-
 RUN yum install -y httpd ganglia ganglia-gmetad ganglia-gmond ganglia-web nagios kafka zookeeper storm hbase tez hadoop snappy snappy-devel hadoop-libhdfs ambari-log4j hive hive-hcatalog hive-webhcat webhcat-tar-hive webhcat-tar-pig mysql-connector-java mysql-server
 
-ADD serf /usr/local/serf
-
-ADD httpd /var/log/httpd
-
-ADD install-cluster.sh /tmp/
-ADD eagle-singlenode.json /tmp/
-ADD eagle-multinode.json /tmp/
-ADD wait-for-eagle.sh /tmp/
-ADD deploy-eagle.sh /usr/hdp/current/eagle/deploy.sh
+ADD resource/serf /usr/local/serf
+ADD resource/httpd /var/log/httpd
+ADD resource/install-cluster.sh /tmp/
+ADD resource/eagle-singlenode.json /tmp/
+ADD resource/eagle-multinode.json /tmp/
+ADD resource/wait-for-eagle.sh /tmp/
+ADD resource/deploy-eagle.sh /usr/hdp/current/eagle/deploy.sh
 
 EXPOSE 9099 8744
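
[Editor's note] The ADD sources above are now resolved under resource/ relative to the build context, so the image is built from the eagle-docker directory itself; roughly (the tag is the IMAGE default from eagle-functions):

    cd eagle-external/eagle-docker
    docker build -t apache/eagle:0.1.0 .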

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/deploy-eagle.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/deploy-eagle.sh b/eagle-external/eagle-docker/deploy-eagle.sh
deleted file mode 100755
index 834474c..0000000
--- a/eagle-external/eagle-docker/deploy-eagle.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -o pipefail  # trace ERR through pipes
-set -o errtrace  # trace ERR through 'time command' and other functions
-
-function error() {
-SCRIPT="$0"           # script name
-LASTLINE="$1"         # line of error occurrence
-LASTERR="$2"          # error code
-echo "ERROR exit from ${SCRIPT} : line ${LASTLINE} with exit code ${LASTERR}"
-exit 1
-}
-
-trap 'error ${LINENO} ${?}' ERR
-
-echo ""
-echo "Welcome to try Eagle"
-echo ""
-
-echo "Eagle home folder path is $EAGLE_HOME"
-cd $EAGLE_HOME
-
-
-#Initializing Eagle Service ...
-sh ./bin/eagle-service-init.sh
-
-sleep 10
-
-#Starting Eagle Service ...
-sh ./bin/eagle-service.sh start
-
-sleep 10
-
-echo "Creating kafka topics for eagle ... "
-KAFKA_HOME=/usr/hdp/current/kafka-broker
-EAGLE_ZOOKEEPER_QUORUM=$EAGLE_SERVER_HOST:2181
-topic=`${KAFKA_HOME}/bin/kafka-topics.sh --list --zookeeper $EAGLE_ZOOKEEPER_QUORUM --topic sandbox_hdfs_audit_log`
-if [ -z $topic ]; then
-        $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper $EAGLE_ZOOKEEPER_QUORUM --replication-factor 1 --partitions 1 --topic sandbox_hdfs_audit_log
-fi
-
-if [ $? = 0 ]; then
-echo "==> Create kafka topic successfully for eagle"
-else
-echo "==> Failed, exiting"
-exit 1
-fi
-
-EAGLE_NIMBUS_HOST=$EAGLE_SERVER_HOST
-EAGLE_SERVICE_HOST=$EAGLE_SERVER_HOST
-EAGLE_TOPOLOGY_JAR=`ls ${EAGLE_HOME}/lib/topology/eagle-topology-*-assembly.jar`
-
-${EAGLE_HOME}/bin/eagle-topology-init.sh
-[ $? != 0 ] && exit 1
-${EAGLE_HOME}/examples/sample-sensitivity-resource-create.sh
-[ $? != 0 ] && exit 1
-${EAGLE_HOME}/examples/sample-policy-create.sh
-[ $? != 0 ] && exit 1
-storm jar $EAGLE_TOPOLOGY_JAR eagle.security.auditlog.HdfsAuditLogProcessorMain -D config.file=${EAGLE_HOME}/conf/sandbox-hdfsAuditLog-application.conf  -D eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
-[ $? != 0 ] && exit 1
-storm jar $EAGLE_TOPOLOGY_JAR eagle.security.hive.jobrunning.HiveJobRunningMonitoringMain -D config.file=${EAGLE_HOME}/conf/sandbox-hiveQueryLog-application.conf  -D eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
-[ $? != 0 ] && exit 1
-storm jar $EAGLE_TOPOLOGY_JAR eagle.security.userprofile.UserProfileDetectionMain -D config.file=${EAGLE_HOME}/conf/sandbox-userprofile-topology.conf  -D eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
-[ $? != 0 ] && exit 1
-
-# TODO: More eagle start
-
-echo "Eagle is deployed successfully!"
-
-echo "Please visit http://<container_ip>:9099/eagle-service to play with Eagle!"

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/eagle-functions
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/eagle-functions b/eagle-external/eagle-docker/eagle-functions
deleted file mode 100644
index 708541e..0000000
--- a/eagle-external/eagle-docker/eagle-functions
+++ /dev/null
@@ -1,158 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-: ${NODE_PREFIX=eagle}
-: ${AMBARI_SERVER_NAME:=${NODE_PREFIX}-server}
-: ${MYDOMAIN:=apache.org}
-: ${IMAGE:="apache/eagle:0.1.0"}
-: ${DOCKER_OPTS:="--dns 127.0.0.1 --entrypoint /usr/local/serf/bin/start-serf-agent.sh -e KEYCHAIN=$KEYCHAIN --env EAGLE_SERVER_HOST=${AMBARI_SERVER_NAME}.${MYDOMAIN}"}
-: ${CLUSTER_SIZE:=1}
-: ${DEBUG:=1}
-: ${SLEEP_TIME:=2}
-: ${DRY_RUN:=false}
-run-command() {
-  CMD="$@"
-  if [ "$DRY_RUN" == "false" ]; then
-    debug "$CMD"
-    "$@"
-  else
-    debug [DRY_RUN] "$CMD"
-  fi
-}
-
-amb-clean() {
-  unset NODE_PREFIX AMBARI_SERVER_NAME MYDOMAIN IMAGE DOCKER_OPTS DEBUG SLEEP_TIME AMBARI_SERVER_IP DRY_RUN
-}
-
-get-ambari-server-ip() {
-  AMBARI_SERVER_IP=$(get-host-ip ${AMBARI_SERVER_NAME})
-}
-
-get-host-ip() {
-  HOST=$1
-  docker inspect --format="{{.NetworkSettings.IPAddress}}" ${HOST}
-}
-
-amb-members() {
-  get-ambari-server-ip
-  serf members --rpc-addr $(docker inspect --format "{{.NetworkSettings.IPAddress}}" ${AMBARI_SERVER_NAME}):7373
-}
-
-amb-settings() {
-  cat <<EOF
-  NODE_PREFIX=$NODE_PREFIX
-  MYDOMAIN=$MYDOMAIN
-  CLUSTER_SIZE=$CLUSTER_SIZE
-  AMBARI_SERVER_NAME=$AMBARI_SERVER_NAME
-  IMAGE=$IMAGE
-  DOCKER_OPTS=$DOCKER_OPTS
-  AMBARI_SERVER_IP=$AMBARI_SERVER_IP
-  DRY_RUN=$DRY_RUN
-EOF
-}
-
-debug() {
-  [ $DEBUG -gt 0 ] && echo [DEBUG] "$@" 1>&2
-}
-
-docker-ps() {
-  #docker ps|sed "s/ \{3,\}/#/g"|cut -d '#' -f 1,2,7|sed "s/#/\t/g"
-  docker inspect --format="{{.Name}} {{.NetworkSettings.IPAddress}} {{.Config.Image}} {{.Config.Entrypoint}} {{.Config.Cmd}}" $(docker ps -q)
-}
-
-docker-psa() {
-  #docker ps|sed "s/ \{3,\}/#/g"|cut -d '#' -f 1,2,7|sed "s/#/\t/g"
-  docker inspect --format="{{.Name}} {{.NetworkSettings.IPAddress}} {{.Config.Image}} {{.Config.Entrypoint}} {{.Config.Cmd}}" $(docker ps -qa)
-}
-
-amb-start-cluster() {
-  local act_cluster_size=$1
-  : ${act_cluster_size:=$CLUSTER_SIZE}
-  echo starting an ambari cluster with: $act_cluster_size nodes
-
-  amb-start-first
-  [ $act_cluster_size -gt 1 ] && for i in $(seq $((act_cluster_size - 1))); do
-    amb-start-node $i
-  done
-}
-
-_amb_run_shell() {
-  COMMAND=$1
-  : ${COMMAND:? required}
-  get-ambari-server-ip
-  NODES=$(docker inspect --format="{{.Config.Image}} {{.Name}}" $(docker ps -q)|grep $IMAGE|grep $NODE_PREFIX|wc -l|xargs)
-  run-command docker run -it --rm -e EXPECTED_HOST_COUNT=$NODES -e BLUEPRINT=$BLUEPRINT --link ${AMBARI_SERVER_NAME}:ambariserver --entrypoint /bin/sh $IMAGE -c $COMMAND
-}
-
-amb-shell() {
-  _amb_run_shell /tmp/ambari-shell.sh
-}
-
-eagle-deploy-cluster() {
-  local act_cluster_size=$1
-  : ${act_cluster_size:=$CLUSTER_SIZE}
-
-  if [ $# -gt 1 ]; then
-    BLUEPRINT=$2
-  else
-    [ $act_cluster_size -gt 1 ] && BLUEPRINT=hdp-multinode-eagle || BLUEPRINT=hdp-singlenode-eagle
-  fi
-
-  : ${BLUEPRINT:?" required (hdp-singlenode-eagle / hdp-multinode-eagle)"}
-
-  amb-start-cluster $act_cluster_size
-  _amb_run_shell /tmp/install-cluster.sh
-}
-
-amb-start-first() {
-  run-command docker run -P -d $DOCKER_OPTS --name $AMBARI_SERVER_NAME -h $AMBARI_SERVER_NAME.$MYDOMAIN --privileged=true $IMAGE --tag ambari-server=true
-}
-
-amb-copy-to-hdfs() {
-  get-ambari-server-ip
-  FILE_PATH=${1:?"usage: <FILE_PATH> <NEW_FILE_NAME_ON_HDFS> <HDFS_PATH>"}
-  FILE_NAME=${2:?"usage: <FILE_PATH> <NEW_FILE_NAME_ON_HDFS> <HDFS_PATH>"}
-  DIR=${3:?"usage: <FILE_PATH> <NEW_FILE_NAME_ON_HDFS> <HDFS_PATH>"}
-  amb-create-hdfs-dir $DIR
-  DATANODE=$(curl -si -X PUT "http://$AMBARI_SERVER_IP:50070/webhdfs/v1$DIR/$FILE_NAME?user.name=hdfs&op=CREATE" |grep Location | sed "s/\..*//; s@.*http://@@")
-  DATANODE_IP=$(get-host-ip $DATANODE)
-  curl -T $FILE_PATH "http://$DATANODE_IP:50075/webhdfs/v1$DIR/$FILE_NAME?op=CREATE&user.name=hdfs&overwrite=true&namenoderpcaddress=$AMBARI_SERVER_IP:8020"
-}
-
-amb-create-hdfs-dir() {
-  get-ambari-server-ip
-  DIR=$1
-  curl -X PUT "http://$AMBARI_SERVER_IP:50070/webhdfs/v1$DIR?user.name=hdfs&op=MKDIRS" > /dev/null 2>&1
-}
-
-amb-scp-to-first() {
-  get-ambari-server-ip
-  FILE_PATH=${1:?"usage: <FILE_PATH> <DESTINATION_PATH>"}
-  DEST_PATH=${2:?"usage: <FILE_PATH> <DESTINATION_PATH>"}
-  scp $FILE_PATH root@$AMBARI_SERVER_IP:$DEST_PATH
-}
-
-amb-start-node() {
-  get-ambari-server-ip
-  : ${AMBARI_SERVER_IP:?"AMBARI_SERVER_IP is needed"}
-  NUMBER=${1:?"please give a <NUMBER> parameter it will be used as node<NUMBER>"}
-  if [ $# -eq 1 ] ;then
-    MORE_OPTIONS="-d"
-  else
-    shift
-    MORE_OPTIONS="$@"
-  fi
-  run-command docker run $MORE_OPTIONS -e SERF_JOIN_IP=$AMBARI_SERVER_IP $DOCKER_OPTS --name ${NODE_PREFIX}$NUMBER -h ${NODE_PREFIX}${NUMBER}.$MYDOMAIN $IMAGE --log-level debug
-}
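
[Editor's note] These helpers disappear from the top level as part of the restructure. For reference, a typical session built from the definitions above (eagle-deploy-cluster takes an optional cluster size and blueprint name):

    source eagle-functions
    eagle-deploy-cluster 3 hdp-multinode-eagle   # start 3 nodes, install the blueprint
    amb-members                                  # list serf cluster members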

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/eagle-multinode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/eagle-multinode.json b/eagle-external/eagle-docker/eagle-multinode.json
deleted file mode 100644
index 1da2fb3..0000000
--- a/eagle-external/eagle-docker/eagle-multinode.json
+++ /dev/null
@@ -1,163 +0,0 @@
-{
-"configurations": [
-    {
-      "hdfs-site": {
-        "dfs.permissions.enabled": "false"
-      },
-      "hive-site": {
-        "javax.jdo.option.ConnectionUserName": "hive",
-        "javax.jdo.option.ConnectionPassword": "hive"
-      }
-    },
-    {
-       "hadoop-env": {
-          "properties" : {
-            "content" : "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is 1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT ${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n# Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored. /tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n# A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n# added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez
 -client/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
-	  }
-        }
-    },
-    {
-      "hdfs-log4j": {
-        "properties": {
-          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with this work for additional information\r\n# regarding copyright ownership.  The ASF licenses this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may not use this file except in compliance\r\n# with the License.  You may obtain a copy of the License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing,\r\n# software distributed under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for the\r\n# specific language governing permissions and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n# Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n# Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n# hdfs audit l
 ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=eagle-server.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.ConversionPattern=%d{ISO8
 601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n# mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\nlog4j.appender.RFA.MaxBackupInde
 x=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n# Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n# Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n# Event Counter Appender\r\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.Ev
 entCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n# HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
-         }
-      }
-    }
-  ],
-  "host_groups": [
-    {
-      "name": "master",
-      "components": [
-        {
-          "name": "APP_TIMELINE_SERVER"
-        },
-        {
-          "name": "HISTORYSERVER"
-        },
-        {
-          "name": "HBASE_REGIONSERVER"
-        },
-        {
-          "name": "HBASE_CLIENT"
-        },
-        {
-          "name": "WEBHCAT_SERVER"
-        },
-        {
-          "name": "HCAT"
-        },
-        {
-          "name": "NAMENODE"
-        },
-        {
-          "name": "AMBARI_SERVER"
-        },
-        {
-          "name": "HDFS_CLIENT"
-        },
-        {
-          "name": "HIVE_CLIENT"
-        },
-        {
-          "name": "NODEMANAGER"
-        },
-        {
-          "name": "DATANODE"
-        },
-        {
-          "name": "RESOURCEMANAGER"
-        },
-        {
-          "name": "ZOOKEEPER_SERVER"
-        },
-        {
-          "name": "ZOOKEEPER_CLIENT"
-        },
-        {
-          "name": "HBASE_MASTER"
-        },
-        {
-          "name": "HIVE_SERVER"
-        },
-        {
-          "name": "SECONDARY_NAMENODE"
-        },
-        {
-          "name": "HIVE_METASTORE"
-        },
-        {
-          "name": "YARN_CLIENT"
-        },
-        {
-          "name": "MAPREDUCE2_CLIENT"
-        },
-        {
-          "name": "MYSQL_SERVER"
-        },
-	{ 
-	  "name": "GANGLIA_SERVER"
-	},
-	{
-	  "name" : "DRPC_SERVER"
-	},
-	{
-	  "name" : "STORM_UI_SERVER"
-	},
-	{
-          "name" : "NIMBUS"
-        },
-        {
-          "name" : "KAFKA_BROKER"
-        }
-      ],
-      "cardinality": "1"
-    },
-    {
-      "name": "slave_1",
-      "components": [
-        {
-          "name": "HBASE_CLIENT"
-        },
-        {
-          "name": "HDFS_CLIENT"
-        },
-        {
-          "name": "HIVE_CLIENT"
-        },
-        {
-          "name": "NODEMANAGER"
-        },
-        {
-          "name": "DATANODE"
-        },
-        { 
-          "name": "ZOOKEEPER_SERVER"
-        },
-        {
-          "name": "ZOOKEEPER_CLIENT"
-        },
-        {
-          "name": "YARN_CLIENT"
-        },
-        {
-          "name": "MAPREDUCE2_CLIENT"
-        },
-	{
-	  "name" : "KAFKA_BROKER"
-	},
-	{ 
-	  "name": "GANGLIA_MONITOR"
-	},
-        {
-          "name" : "SUPERVISOR"
-        }
-      ],
-      "cardinality": "1"
-    }
-  ],
-  "Blueprints": {
-    "blueprint_name": "hdp-multinode-eagle",
-    "stack_name": "HDP",
-    "stack_version": "2.2"
-}
-}
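
[Editor's note] This blueprint (and the singlenode one below) is registered through ambari-shell in install-cluster.sh via "blueprint add --file /tmp/eagle-multinode.json". The equivalent call against Ambari's standard REST API — host and credentials here are placeholders — would be roughly:

    curl -u admin:admin -H "X-Requested-By: ambari" -X POST \
         -d @eagle-multinode.json \
         http://<ambari-host>:8080/api/v1/blueprints/hdp-multinode-eagle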

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/eagle-singlenode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/eagle-singlenode.json b/eagle-external/eagle-docker/eagle-singlenode.json
deleted file mode 100644
index 499b978..0000000
--- a/eagle-external/eagle-docker/eagle-singlenode.json
+++ /dev/null
@@ -1,122 +0,0 @@
-{
-"configurations": [
-    {
-      "hdfs-site": {
-        "dfs.permissions.enabled": "false"
-      },
-      "hive-site": {
-        "javax.jdo.option.ConnectionUserName": "hive",
-        "javax.jdo.option.ConnectionPassword": "hive"
-      }
-    },
-    {
-       "hadoop-env": {
-          "properties" : {
-            "content" : "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is 1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT ${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n# Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored. /tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n# A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n# added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez
 -client/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
-	  }
-        }
-    },
-    {
-      "hdfs-log4j": {
-        "properties": {
-          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with this work for additional information\r\n# regarding copyright ownership.  The ASF licenses this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may not use this file except in compliance\r\n# with the License.  You may obtain a copy of the License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing,\r\n# software distributed under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for the\r\n# specific language governing permissions and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n# Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n# Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n# hdfs audit l
 ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=eagle-server.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.ConversionPattern=%d{ISO8
 601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n# mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\nlog4j.appender.RFA.MaxBackupInde
 x=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n# Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n# Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n# Event Counter Appender\r\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.Ev
 entCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n# HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
-         }
-      }
-    }
-  ],
-  "host_groups": [
-    {
-      "name": "master",
-      "components": [
-        {
-          "name": "APP_TIMELINE_SERVER"
-        },
-        {
-          "name": "HISTORYSERVER"
-        },
-        {
-          "name": "HBASE_REGIONSERVER"
-        },
-        {
-          "name": "WEBHCAT_SERVER"
-        },
-        {
-          "name": "HCAT"
-        },
-        {
-          "name": "HBASE_CLIENT"
-        },
-        {
-          "name": "NAMENODE"
-        },
-        {
-          "name": "AMBARI_SERVER"
-        },
-        {
-          "name": "HDFS_CLIENT"
-        },
-        {
-          "name": "HIVE_CLIENT"
-        },
-        {
-          "name": "NODEMANAGER"
-        },
-        {
-          "name": "DATANODE"
-        },
-        {
-          "name": "RESOURCEMANAGER"
-        },
-        {
-          "name": "ZOOKEEPER_SERVER"
-        },
-        {
-          "name": "ZOOKEEPER_CLIENT"
-        },
-        {
-          "name": "HBASE_MASTER"
-        },
-        {
-          "name": "HIVE_SERVER"
-        },
-        {
-          "name": "SECONDARY_NAMENODE"
-        },
-        {
-          "name": "HIVE_METASTORE"
-        },
-        {
-          "name": "YARN_CLIENT"
-        },
-        {
-          "name": "MAPREDUCE2_CLIENT"
-        },
-        {
-          "name": "MYSQL_SERVER"
-        },
-        { "name": "GANGLIA_SERVER"},
-
-	{ "name": "GANGLIA_MONITOR"},
-	
-	{ "name": "KAFKA_BROKER"},
-        {
-          "name" : "DRPC_SERVER"
-        },
-        {
-          "name" : "NIMBUS"
-        },
-        {
-          "name" : "STORM_UI_SERVER"
-        },
-        { "name" : "SUPERVISOR"}
-      ],
-
-      "cardinality": "1"
-    }
-  ],
-  "Blueprints": {
-    "blueprint_name": "hdp-singlenode-eagle",
-    "stack_name": "HDP",
-    "stack_version": "2.2"
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/httpd/.mkdir
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/httpd/.mkdir b/eagle-external/eagle-docker/httpd/.mkdir
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/install-cluster.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/install-cluster.sh b/eagle-external/eagle-docker/install-cluster.sh
deleted file mode 100755
index 4acd384..0000000
--- a/eagle-external/eagle-docker/install-cluster.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-export PATH=/usr/jdk64/jdk1.7.0_67/bin:$PATH
-
-
-./ambari-shell.sh << EOF
-blueprint add --file /tmp/eagle-singlenode.json
-blueprint add --file /tmp/eagle-multinode.json
-cluster build --blueprint $BLUEPRINT
-cluster autoAssign
-cluster create --exitOnFinish true
-EOF
-
-clear
-
-SERF_RPC_ADDR=${AMBARISERVER_PORT_7373_TCP##*/}
-serf event --rpc-addr=$SERF_RPC_ADDR eagle
-
-./wait-for-eagle.sh
-
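[Editor's note] The serf lines above derive the RPC address from the environment variable Docker injects for a container linked as ambariserver, then fire the custom "eagle" event handled by serf/handlers/eagle. Worked through with a hypothetical address:

    AMBARISERVER_PORT_7373_TCP=tcp://172.17.0.2:7373   # injected by docker --link
    SERF_RPC_ADDR=${AMBARISERVER_PORT_7373_TCP##*/}    # strip through last '/': 172.17.0.2:7373
    serf event --rpc-addr=$SERF_RPC_ADDR eagle         # triggers the 'eagle' handler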

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/resource/.gitignore
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/.gitignore b/eagle-external/eagle-docker/resource/.gitignore
new file mode 100644
index 0000000..335ec95
--- /dev/null
+++ b/eagle-external/eagle-docker/resource/.gitignore
@@ -0,0 +1 @@
+*.tar.gz

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/resource/deploy-eagle.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/deploy-eagle.sh b/eagle-external/eagle-docker/resource/deploy-eagle.sh
new file mode 100755
index 0000000..834474c
--- /dev/null
+++ b/eagle-external/eagle-docker/resource/deploy-eagle.sh
@@ -0,0 +1,85 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o pipefail  # trace ERR through pipes
+set -o errtrace  # trace ERR through 'time command' and other functions
+
+function error() {
+SCRIPT="$0"           # script name
+LASTLINE="$1"         # line of error occurrence
+LASTERR="$2"          # error code
+echo "ERROR exit from ${SCRIPT} : line ${LASTLINE} with exit code ${LASTERR}"
+exit 1
+}
+
+trap 'error ${LINENO} ${?}' ERR
+
+echo ""
+echo "Welcome to try Eagle"
+echo ""
+
+echo "Eagle home folder path is $EAGLE_HOME"
+cd $EAGLE_HOME
+
+
+#Initializing Eagle Service ...
+sh ./bin/eagle-service-init.sh
+
+sleep 10
+
+#Starting Eagle Service ...
+sh ./bin/eagle-service.sh start
+
+sleep 10
+
+echo "Creating kafka topics for eagle ... "
+KAFKA_HOME=/usr/hdp/current/kafka-broker
+EAGLE_ZOOKEEPER_QUORUM=$EAGLE_SERVER_HOST:2181
+topic=`${KAFKA_HOME}/bin/kafka-topics.sh --list --zookeeper $EAGLE_ZOOKEEPER_QUORUM --topic sandbox_hdfs_audit_log`
+if [ -z $topic ]; then
+        $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper $EAGLE_ZOOKEEPER_QUORUM --replication-factor 1 --partitions 1 --topic sandbox_hdfs_audit_log
+fi
+
+if [ $? = 0 ]; then
+echo "==> Create kafka topic successfully for eagle"
+else
+echo "==> Failed, exiting"
+exit 1
+fi
+
+EAGLE_NIMBUS_HOST=$EAGLE_SERVER_HOST
+EAGLE_SERVICE_HOST=$EAGLE_SERVER_HOST
+EAGLE_TOPOLOGY_JAR=`ls ${EAGLE_HOME}/lib/topology/eagle-topology-*-assembly.jar`
+
+${EAGLE_HOME}/bin/eagle-topology-init.sh
+[ $? != 0 ] && exit 1
+${EAGLE_HOME}/examples/sample-sensitivity-resource-create.sh
+[ $? != 0 ] && exit 1
+${EAGLE_HOME}/examples/sample-policy-create.sh
+[ $? != 0 ] && exit 1
+storm jar $EAGLE_TOPOLOGY_JAR eagle.security.auditlog.HdfsAuditLogProcessorMain -D config.file=${EAGLE_HOME}/conf/sandbox-hdfsAuditLog-application.conf  -D eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
+[ $? != 0 ] && exit 1
+storm jar $EAGLE_TOPOLOGY_JAR eagle.security.hive.jobrunning.HiveJobRunningMonitoringMain -D config.file=${EAGLE_HOME}/conf/sandbox-hiveQueryLog-application.conf  -D eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
+[ $? != 0 ] && exit 1
+storm jar $EAGLE_TOPOLOGY_JAR eagle.security.userprofile.UserProfileDetectionMain -D config.file=${EAGLE_HOME}/conf/sandbox-userprofile-topology.conf  -D eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
+[ $? != 0 ] && exit 1
+
+# TODO: More eagle start
+
+echo "Eagle is deployed successfully!"
+
+echo "Please visit http://<container_ip>:9099/eagle-service to play with Eagle!"

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/resource/eagle-multinode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/eagle-multinode.json b/eagle-external/eagle-docker/resource/eagle-multinode.json
new file mode 100644
index 0000000..1da2fb3
--- /dev/null
+++ b/eagle-external/eagle-docker/resource/eagle-multinode.json
@@ -0,0 +1,163 @@
+{
+"configurations": [
+    {
+      "hdfs-site": {
+        "dfs.permissions.enabled": "false"
+      },
+      "hive-site": {
+        "javax.jdo.option.ConnectionUserName": "hive",
+        "javax.jdo.option.ConnectionPassword": "hive"
+      }
+    },
+    {
+       "hadoop-env": {
+          "properties" : {
+            "content" : "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is 1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT ${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n# Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored. /tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n# A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n# added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez
 -client/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
+          }
+        }
+    },
+    {
+      "hdfs-log4j": {
+        "properties": {
+          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with this work for additional information\r\n# regarding copyright ownership.  The ASF licenses this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may not use this file except in compliance\r\n# with the License.  You may obtain a copy of the License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing,\r\n# software distributed under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for the\r\n# specific language governing permissions and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n# Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n# Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n# hdfs audit l
 ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=eagle-server.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.ConversionPattern=%d{ISO8
 601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n# mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\nlog4j.appender.RFA.MaxBackupInde
 x=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n# Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n# Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n# Event Counter Appender\r\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.Ev
 entCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n# HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
+        }
+      }
+    }
+  ],
+  "host_groups": [
+    {
+      "name": "master",
+      "components": [
+        {
+          "name": "APP_TIMELINE_SERVER"
+        },
+        {
+          "name": "HISTORYSERVER"
+        },
+        {
+          "name": "HBASE_REGIONSERVER"
+        },
+        {
+          "name": "HBASE_CLIENT"
+        },
+        {
+          "name": "WEBHCAT_SERVER"
+        },
+        {
+          "name": "HCAT"
+        },
+        {
+          "name": "NAMENODE"
+        },
+        {
+          "name": "AMBARI_SERVER"
+        },
+        {
+          "name": "HDFS_CLIENT"
+        },
+        {
+          "name": "HIVE_CLIENT"
+        },
+        {
+          "name": "NODEMANAGER"
+        },
+        {
+          "name": "DATANODE"
+        },
+        {
+          "name": "RESOURCEMANAGER"
+        },
+        {
+          "name": "ZOOKEEPER_SERVER"
+        },
+        {
+          "name": "ZOOKEEPER_CLIENT"
+        },
+        {
+          "name": "HBASE_MASTER"
+        },
+        {
+          "name": "HIVE_SERVER"
+        },
+        {
+          "name": "SECONDARY_NAMENODE"
+        },
+        {
+          "name": "HIVE_METASTORE"
+        },
+        {
+          "name": "YARN_CLIENT"
+        },
+        {
+          "name": "MAPREDUCE2_CLIENT"
+        },
+        {
+          "name": "MYSQL_SERVER"
+        },
+        {
+          "name": "GANGLIA_SERVER"
+        },
+        {
+          "name": "DRPC_SERVER"
+        },
+        {
+          "name": "STORM_UI_SERVER"
+        },
+        {
+          "name": "NIMBUS"
+        },
+        {
+          "name": "KAFKA_BROKER"
+        }
+      ],
+      "cardinality": "1"
+    },
+    {
+      "name": "slave_1",
+      "components": [
+        {
+          "name": "HBASE_CLIENT"
+        },
+        {
+          "name": "HDFS_CLIENT"
+        },
+        {
+          "name": "HIVE_CLIENT"
+        },
+        {
+          "name": "NODEMANAGER"
+        },
+        {
+          "name": "DATANODE"
+        },
+        {
+          "name": "ZOOKEEPER_SERVER"
+        },
+        {
+          "name": "ZOOKEEPER_CLIENT"
+        },
+        {
+          "name": "YARN_CLIENT"
+        },
+        {
+          "name": "MAPREDUCE2_CLIENT"
+        },
+        {
+          "name": "KAFKA_BROKER"
+        },
+        {
+          "name": "GANGLIA_MONITOR"
+        },
+        {
+          "name": "SUPERVISOR"
+        }
+      ],
+      "cardinality": "1"
+    }
+  ],
+  "Blueprints": {
+    "blueprint_name": "hdp-multinode-eagle",
+    "stack_name": "HDP",
+    "stack_version": "2.2"
+  }
+}
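
For reference, a blueprint like the one above is normally registered with
Ambari over its REST API before a cluster is built from it. A minimal
sketch, assuming an Ambari server reachable at ambari-server:8080 with the
default admin/admin credentials (host and credentials are illustrative):

    # Register the blueprint under the name given in its "blueprint_name"
    # field; Ambari requires the X-Requested-By header on POSTs.
    curl -u admin:admin -H "X-Requested-By: ambari" \
      -X POST -d @eagle-multinode.json \
      http://ambari-server:8080/api/v1/blueprints/hdp-multinode-eagle

In this patch the same registration happens indirectly through ambari-shell
in install-cluster.sh below.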

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/resource/eagle-singlenode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/eagle-singlenode.json b/eagle-external/eagle-docker/resource/eagle-singlenode.json
new file mode 100644
index 0000000..499b978
--- /dev/null
+++ b/eagle-external/eagle-docker/resource/eagle-singlenode.json
@@ -0,0 +1,122 @@
+{
+"configurations": [
+    {
+      "hdfs-site": {
+        "dfs.permissions.enabled": "false"
+      },
+      "hive-site": {
+        "javax.jdo.option.ConnectionUserName": "hive",
+        "javax.jdo.option.ConnectionPassword": "hive"
+      }
+    },
+    {
+       "hadoop-env": {
+          "properties" : {
+            "content" : "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is 1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT ${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n# Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored. /tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n# History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n# A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n# added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez
 -client/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
+          }
+        }
+    },
+    {
+      "hdfs-log4j": {
+        "properties": {
+          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation (ASF) under one\r\n# or more contributor license agreements.  See the NOTICE file\r\n# distributed with this work for additional information\r\n# regarding copyright ownership.  The ASF licenses this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may not use this file except in compliance\r\n# with the License.  You may obtain a copy of the License at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing,\r\n# software distributed under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for the\r\n# specific language governing permissions and limitations\r\n# under the License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n# Define the root logger to the system property \"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# Daily Rolling File Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 30-day backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n# Pattern format: Date LogLevel LoggerName LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n# Debugging Pattern format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n# hdfs audit l
 ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=eagle-server.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.ConversionPattern=%d{ISO8
 601} %p %c{2}: %m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n# mapred audit logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# Rolling File Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n# Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\nlog4j.appender.RFA.MaxBackupInde
 x=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n# Jets3t library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n# Null Appender\r\n# Trap security logger on the hadoop client side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n# Event Counter Appender\r\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.Ev
 entCounter\r\n\r\n# Removes \"deprecated\" messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n# HDFS block state change log from block manager\r\n#\r\n# Uncomment the following to suppress normal block state change\r\n# messages from BlockManager in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
+        }
+      }
+    }
+  ],
+  "host_groups": [
+    {
+      "name": "master",
+      "components": [
+        {
+          "name": "APP_TIMELINE_SERVER"
+        },
+        {
+          "name": "HISTORYSERVER"
+        },
+        {
+          "name": "HBASE_REGIONSERVER"
+        },
+        {
+          "name": "WEBHCAT_SERVER"
+        },
+        {
+          "name": "HCAT"
+        },
+        {
+          "name": "HBASE_CLIENT"
+        },
+        {
+          "name": "NAMENODE"
+        },
+        {
+          "name": "AMBARI_SERVER"
+        },
+        {
+          "name": "HDFS_CLIENT"
+        },
+        {
+          "name": "HIVE_CLIENT"
+        },
+        {
+          "name": "NODEMANAGER"
+        },
+        {
+          "name": "DATANODE"
+        },
+        {
+          "name": "RESOURCEMANAGER"
+        },
+        {
+          "name": "ZOOKEEPER_SERVER"
+        },
+        {
+          "name": "ZOOKEEPER_CLIENT"
+        },
+        {
+          "name": "HBASE_MASTER"
+        },
+        {
+          "name": "HIVE_SERVER"
+        },
+        {
+          "name": "SECONDARY_NAMENODE"
+        },
+        {
+          "name": "HIVE_METASTORE"
+        },
+        {
+          "name": "YARN_CLIENT"
+        },
+        {
+          "name": "MAPREDUCE2_CLIENT"
+        },
+        {
+          "name": "MYSQL_SERVER"
+        },
+        { "name": "GANGLIA_SERVER"},
+
+	{ "name": "GANGLIA_MONITOR"},
+	
+	{ "name": "KAFKA_BROKER"},
+        {
+          "name" : "DRPC_SERVER"
+        },
+        {
+          "name" : "NIMBUS"
+        },
+        {
+          "name" : "STORM_UI_SERVER"
+        },
+        { "name" : "SUPERVISOR"}
+      ],
+
+      "cardinality": "1"
+    }
+  ],
+  "Blueprints": {
+    "blueprint_name": "hdp-singlenode-eagle",
+    "stack_name": "HDP",
+    "stack_version": "2.2"
+  }
+}
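
Both blueprints wire the NameNode's audit log into Eagle's ingestion path:
hadoop-env puts the eagle log4jkafka jars on HADOOP_CLASSPATH, and
hdfs-log4j attaches the KAFKA_HDFS_AUDIT appender, which produces audit
events to the Kafka topic sandbox_hdfs_audit_log. A quick way to confirm
events are flowing, sketched with the 0.8-era console consumer shipped in
HDP (the path and ZooKeeper address are assumptions for a single-node
container):

    # Tail the topic the KAFKA_HDFS_AUDIT appender writes to; each HDFS
    # operation on the NameNode should appear as one line.
    /usr/hdp/current/kafka-broker/bin/kafka-console-consumer.sh \
      --zookeeper localhost:2181 --topic sandbox_hdfs_audit_log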

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/resource/httpd/.mkdir
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/httpd/.mkdir b/eagle-external/eagle-docker/resource/httpd/.mkdir
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/resource/install-cluster.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/install-cluster.sh b/eagle-external/eagle-docker/resource/install-cluster.sh
new file mode 100755
index 0000000..4acd384
--- /dev/null
+++ b/eagle-external/eagle-docker/resource/install-cluster.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+export PATH=/usr/jdk64/jdk1.7.0_67/bin:$PATH
+
+
+./ambari-shell.sh << EOF
+blueprint add --file /tmp/eagle-singlenode.json
+blueprint add --file /tmp/eagle-multinode.json
+cluster build --blueprint $BLUEPRINT
+cluster autoAssign
+cluster create --exitOnFinish true
+EOF
+
+clear
+
+SERF_RPC_ADDR=${AMBARISERVER_PORT_7373_TCP##*/}
+serf event --rpc-addr=$SERF_RPC_ADDR eagle
+
+./wait-for-eagle.sh
+
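
The SERF_RPC_ADDR assignment above leans on a Docker link environment
variable of the form tcp://<ip>:<port>; the ${VAR##*/} expansion removes
the longest prefix ending in "/", leaving just host:port for serf's
--rpc-addr flag. A standalone sketch (the address is illustrative):

    # Docker injects link variables such as AMBARISERVER_PORT_7373_TCP.
    AMBARISERVER_PORT_7373_TCP="tcp://172.17.0.2:7373"
    # "##*/" strips everything up to and including the last "/".
    SERF_RPC_ADDR=${AMBARISERVER_PORT_7373_TCP##*/}
    echo "$SERF_RPC_ADDR"   # prints 172.17.0.2:7373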

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/resource/serf/etc/ambari.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/serf/etc/ambari.json b/eagle-external/eagle-docker/resource/serf/etc/ambari.json
new file mode 100644
index 0000000..4409e84
--- /dev/null
+++ b/eagle-external/eagle-docker/resource/serf/etc/ambari.json
@@ -0,0 +1,9 @@
+{
+  "event_handlers": [
+    "member-join=/usr/local/serf/handlers/ambari-bootstrap",
+    "user:eagle=/usr/local/serf/handlers/eagle"
+  ],
+  "tags" : {
+    "ambari-agent": "true"
+  }
+}
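
The "user:eagle=..." entry maps the custom serf user event named "eagle"
(the one fired by install-cluster.sh) to the handler script; serf hands
event context to handlers through SERF_* environment variables. A minimal
handler sketch under those assumptions:

    #!/bin/bash
    # Invoked by the serf agent; for user events SERF_EVENT is "user"
    # and SERF_USER_EVENT carries the event name ("eagle").
    echo "$(date) event=${SERF_EVENT} name=${SERF_USER_EVENT}" \
      >> /tmp/serf-events.log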

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/dc5b5ab3/eagle-external/eagle-docker/resource/serf/handlers/eagle
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/serf/handlers/eagle b/eagle-external/eagle-docker/resource/serf/handlers/eagle
new file mode 100755
index 0000000..7f7df4b
--- /dev/null
+++ b/eagle-external/eagle-docker/resource/serf/handlers/eagle
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# install Eagle on the Ambari server instance
+if [[ "$SERF_TAG_AMBARI_SERVER" == "true" ]] ;then
+  echo run eagle install script
+  nohup /usr/hdp/current/eagle/deploy.sh > /var/log/eagle-deploy.log
+fi
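
Since the "eagle" event is broadcast to every serf member, the
SERF_TAG_AMBARI_SERVER guard makes sure the deploy script runs only on the
node whose agent carries the ambari-server tag, e.g. an agent started
roughly like this (flags illustrative):

    # On the Ambari server container only: tag the agent and register
    # the handler so the broadcast event triggers the deploy there.
    serf agent -tag ambari-server=true \
      -event-handler user:eagle=/usr/local/serf/handlers/eagle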


