eagle-commits mailing list archives

From h..@apache.org
Subject [02/20] incubator-eagle git commit: eagle docker work with single-node and multiple-nodes
Date Mon, 30 Nov 2015 10:11:44 GMT
eagle docker work with single-node and multiple-nodes


Project: http://git-wip-us.apache.org/repos/asf/incubator-eagle/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-eagle/commit/ca6abfbe
Tree: http://git-wip-us.apache.org/repos/asf/incubator-eagle/tree/ca6abfbe
Diff: http://git-wip-us.apache.org/repos/asf/incubator-eagle/diff/ca6abfbe

Branch: refs/heads/master
Commit: ca6abfbe5c49a2a5713e94fa6655296a68866852
Parents: db5d61a
Author: qinzhaokun <qinzhaokun@gmail.com>
Authored: Wed Nov 25 18:43:07 2015 -0700
Committer: qinzhaokun <qinzhaokun@gmail.com>
Committed: Wed Nov 25 18:43:07 2015 -0700

----------------------------------------------------------------------
 eagle-external/eagle-docker/Dockerfile            | 14 +++++++-------
 eagle-external/eagle-docker/README.md             |  2 +-
 eagle-external/eagle-docker/deploy-eagle.sh       | 12 ++++++------
 eagle-external/eagle-docker/eagle-functions       |  4 ++--
 eagle-external/eagle-docker/eagle-multinode.json  |  2 +-
 eagle-external/eagle-docker/eagle-singlenode.json |  2 +-
 eagle-external/eagle-docker/install-cluster.sh    |  1 -
 eagle-external/eagle-docker/serf/handlers/eagle   |  2 +-
 eagle-external/eagle-docker/wait-for-eagle.sh     |  2 +-
 9 files changed, 20 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/ca6abfbe/eagle-external/eagle-docker/Dockerfile
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/Dockerfile b/eagle-external/eagle-docker/Dockerfile
index 12ebe0f..024921d 100644
--- a/eagle-external/eagle-docker/Dockerfile
+++ b/eagle-external/eagle-docker/Dockerfile
@@ -1,24 +1,24 @@
 FROM sequenceiq/ambari:1.7.0
 
-MAINTAINER Zqin
+MAINTAINER dev@eagle.incubator.apache.org
 
-ENV EAGLE_DOWNLOAD_LINK http://66.211.190.194/eagle-0.1.0.tar.gz
+ENV EAGLE_DOWNLOAD_LINK http://10.65.246.34:8081/view/Eagle-Security/job/Eagle-SCM/ws/eagle-assembly/target/eagle-0.1.0-bin.tar.gz
 
-RUN curl -sL $EAGLE_DOWNLOAD_LINK | tar -xz -C /usr/local/
-RUN cd /usr/local && ln -s ./eagle-0.1.0 eagle
-ENV EAGLE_HOME=/usr/local/eagle
+RUN curl -sL $EAGLE_DOWNLOAD_LINK | tar -xz -C /usr/hdp/current
+RUN cd /usr/hdp/current && ln -s ./eagle-0.1.0 eagle
+ENV EAGLE_HOME=/usr/hdp/current/eagle
 
 
 RUN yum install -y httpd ganglia ganglia-gmetad ganglia-gmond ganglia-web nagios kafka zookeeper storm hbase tez hadoop snappy snappy-devel hadoop-libhdfs ambari-log4j hive hive-hcatalog hive-webhcat webhcat-tar-hive webhcat-tar-pig mysql-connector-java mysql-server
 
 ADD serf /usr/local/serf
 
-RUN mkdir -p /var/log/httpd
+ADD httpd /var/log/httpd
 
 ADD install-cluster.sh /tmp/
 ADD eagle-singlenode.json /tmp/
 ADD eagle-multinode.json /tmp/
 ADD wait-for-eagle.sh /tmp/
-ADD deploy-eagle.sh /usr/local/eagle/deploy.sh
+ADD deploy-eagle.sh /usr/hdp/current/eagle/deploy.sh
 
 EXPOSE 9099 8744
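
For reference, a minimal sketch of building the image from this Dockerfile (the apache/eagle:0.1.0 tag matches the new default in eagle-functions; note that EAGLE_DOWNLOAD_LINK now points at an internal Jenkins host, so the tarball URL will likely need to be edited or mirrored before the build succeeds outside that network):

    cd eagle-external/eagle-docker
    docker build -t apache/eagle:0.1.0 .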

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/ca6abfbe/eagle-external/eagle-docker/README.md
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/README.md b/eagle-external/eagle-docker/README.md
index 09b8747..c2203d0 100644
--- a/eagle-external/eagle-docker/README.md
+++ b/eagle-external/eagle-docker/README.md
@@ -39,7 +39,7 @@ And this project is to build apache/eagle images and provide eagle-functions to
 
 5. **Start to use Eagle**: Congratulations! You are able to start using Eagle now. Please open the Eagle UI at the following address (username: ADMIN, password: secret by default)
 
-        http://{{container_ip}}:9099  
+        http://{{container_ip}}:9099/eagle-service  
 
 6. **Manage Eagle Cluster**: This step covers how to manage the Eagle cluster, though it is not a must-have at the start. Eagle docker depends on Ambari to manage the cluster infrastructure of Eagle. Following are some helpful links:
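
Once the server container is running, a quick way to resolve container_ip and confirm the UI endpoint answers (a sketch; eagle-server is the container name implied by the NODE_PREFIX and AMBARI_SERVER_NAME defaults in eagle-functions):

    CONTAINER_IP=$(docker inspect --format '{{.NetworkSettings.IPAddress}}' eagle-server)
    curl -s -o /dev/null -w '%{http_code}\n' "http://${CONTAINER_IP}:9099/eagle-service"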
 

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/ca6abfbe/eagle-external/eagle-docker/deploy-eagle.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/deploy-eagle.sh b/eagle-external/eagle-docker/deploy-eagle.sh
index 2b78fde..8d9b029 100755
--- a/eagle-external/eagle-docker/deploy-eagle.sh
+++ b/eagle-external/eagle-docker/deploy-eagle.sh
@@ -21,19 +21,19 @@ echo "Eagle home folder path is $EAGLE_HOME"
 cd $EAGLE_HOME
 
 
-echo "Initializing Eagle Service ..."
+#Initializing Eagle Service ...
 sh ./bin/eagle-service-init.sh
 
 sleep 10
 
-echo "Starting Eagle Service ..."
+#Starting Eagle Service ...
 sh ./bin/eagle-service.sh start
 
 sleep 10
 
 echo "Creating kafka topics for eagle ... "
 KAFKA_HOME=/usr/hdp/current/kafka-broker
-EAGLE_ZOOKEEPER_QUORUM=localhost:2181
+EAGLE_ZOOKEEPER_QUORUM=$EAGLE_SERVER_HOST:2181
 topic=`${KAFKA_HOME}/bin/kafka-topics.sh --list --zookeeper $EAGLE_ZOOKEEPER_QUORUM --topic sandbox_hdfs_audit_log`
 if [ -z $topic ]; then
         $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper $EAGLE_ZOOKEEPER_QUORUM --replication-factor 1 --partitions 1 --topic sandbox_hdfs_audit_log
@@ -46,8 +46,8 @@ echo "==> Failed, exiting"
 exit 1
 fi
 
-EAGLE_NIMBUS_HOST=eagle-server.apache.org
-EAGLE_SERVICE_HOST=eagle-server.apache.org
+EAGLE_NIMBUS_HOST=$EAGLE_SERVER_HOST
+EAGLE_SERVICE_HOST=$EAGLE_SERVER_HOST
 EAGLE_TOPOLOGY_JAR=`ls ${EAGLE_HOME}/lib/topology/eagle-topology-*-assembly.jar`
 
 ${EAGLE_HOME}/bin/eagle-topology-init.sh
@@ -67,4 +67,4 @@ storm jar $EAGLE_TOPOLOGY_JAR eagle.security.userprofile.UserProfileDetectionMai
 
 echo "Eagle is deployed successfully!"
 
-echo "Please visit http://<your_sandbox_ip>:9099 to play with Eagle!"
+echo "Please visit http://<container_ip>:9099/eagle-service to play with Eagle!"

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/ca6abfbe/eagle-external/eagle-docker/eagle-functions
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/eagle-functions b/eagle-external/eagle-docker/eagle-functions
index 3895add..430a78a 100644
--- a/eagle-external/eagle-docker/eagle-functions
+++ b/eagle-external/eagle-docker/eagle-functions
@@ -1,8 +1,8 @@
 : ${NODE_PREFIX=eagle}
 : ${AMBARI_SERVER_NAME:=${NODE_PREFIX}-server}
 : ${MYDOMAIN:=apache.org}
-: ${IMAGE:="apache/eagle:latest"}
-: ${DOCKER_OPTS:="--dns 127.0.0.1 --entrypoint /usr/local/serf/bin/start-serf-agent.sh -e KEYCHAIN=$KEYCHAIN"}
+: ${IMAGE:="apache/eagle:0.1.0"}
+: ${DOCKER_OPTS:="--dns 127.0.0.1 --entrypoint /usr/local/serf/bin/start-serf-agent.sh -e KEYCHAIN=$KEYCHAIN --env EAGLE_SERVER_HOST=${AMBARI_SERVER_NAME}.${MYDOMAIN}"}
 : ${CLUSTER_SIZE:=1}
 : ${DEBUG:=1}
 : ${SLEEP_TIME:=2}
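
The ": ${VAR:=default}" lines assign each variable only if it is unset, so any of these defaults can be overridden from the calling shell before the file is sourced, for example:

    # Ask for a three-node cluster instead of the default single node
    # (how the nodes are split between server and agents is defined by
    # the deploy functions later in eagle-functions).
    export CLUSTER_SIZE=3
    source eagle-external/eagle-docker/eagle-functions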

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/ca6abfbe/eagle-external/eagle-docker/eagle-multinode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/eagle-multinode.json b/eagle-external/eagle-docker/eagle-multinode.json
index b1735b3..1da2fb3 100644
--- a/eagle-external/eagle-docker/eagle-multinode.json
+++ b/eagle-external/eagle-docker/eagle-multinode.json
@@ -12,7 +12,7 @@
     {
        "hadoop-env": {
           "properties" : {
-            "content" : "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The
only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running
a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly
defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport
JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport
HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{#
this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport
JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is
1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n#
Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}}
-XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date
+'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT
${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}}
-XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date
+'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}}
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA
${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}}
-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log
-XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date
+'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT
${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The
following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m
-XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode
as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n#
Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n#
Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n#
History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n#
Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should
be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n#
Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large
clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service
them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored.
/tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n#
A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n#
The scheduling priority for daemon processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add
libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor
jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n#
Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/local/eagle/lib/log4jkafka/lib/*\r\n\r\n#
added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [
-d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a
symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-clien
 t/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION
$HADOOP_OPTS\"\r\n"
+            "content" : "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The
only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running
a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly
defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport
JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport
HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{#
this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport
JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is
1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n#
Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}}
-XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date
+'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT
${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}}
-XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date
+'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}}
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA
${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}}
-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log
-XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date
+'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT
${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The
following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m
-XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode
as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n#
Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n#
Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n#
History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n#
Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should
be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n#
Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large
clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service
them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored.
/tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n#
A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n#
The scheduling priority for daemon processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add
libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor
jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n#
Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n#
added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [
-d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a
symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez
 -client/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION
$HADOOP_OPTS\"\r\n"
 	  }
         }
     },

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/ca6abfbe/eagle-external/eagle-docker/eagle-singlenode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/eagle-singlenode.json b/eagle-external/eagle-docker/eagle-singlenode.json
index 4c2c6e8..499b978 100644
--- a/eagle-external/eagle-docker/eagle-singlenode.json
+++ b/eagle-external/eagle-docker/eagle-singlenode.json
@@ -12,7 +12,7 @@
     {
        "hadoop-env": {
           "properties" : {
-            "content" : "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The
only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running
a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly
defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport
JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport
HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{#
this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport
JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is
1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n#
Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}}
-XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date
+'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT
${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}}
-XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date
+'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}}
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA
${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}}
-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log
-XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date
+'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT
${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The
following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m
-XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode
as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n#
Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n#
Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n#
History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n#
Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should
be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n#
Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large
clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service
them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored.
/tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n#
A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n#
The scheduling priority for daemon processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add
libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor
jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n#
Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/local/eagle/lib/log4jkafka/lib/*\r\n\r\n#
added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [
-d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a
symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-clien
 t/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION
$HADOOP_OPTS\"\r\n"
+            "content" : "\r\n# Set Hadoop-specific environment variables here.\r\n\r\n# The
only required environment variable is JAVA_HOME.  All others are\r\n# optional.  When running
a distributed configuration it is best to\r\n# set JAVA_HOME in this file, so that it is correctly
defined on\r\n# remote nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport
JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop home directory\r\nexport
HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration Directory\r\n\r\n{#
this is different for HDP1 #}\r\n# Path to jsvc required by secure HDP 2.0 datanode\r\nexport
JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. Default is
1000.\r\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n#
Extra Java runtime options.  Empty by default.\r\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS when specified\r\nexport
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}}
-XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date
+'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT
${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}}
-XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date
+'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}}
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA
${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}}
-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log
-XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date
+'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT
${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The
following applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m
-XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode
as after dropping privileges\r\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n#
Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n#
Where log files are stored.  $HADOOP_HOME/logs by default.\r\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n#
History server logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n#
Where log files are stored in the secure data environment.\r\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop code should
be rsync'd from.  Unset by default.\r\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n#
Seconds to sleep between slave commands.  Unset by default.  This\r\n# can be useful in large
clusters, where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can service
them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where pid files are stored.
/tmp by default.\r\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
History server pid\r\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n#
A string representing this instance of hadoop. $USER by default.\r\nexport HADOOP_IDENT_STRING=$USER\r\n\r\n#
The scheduling priority for daemon processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add
libraries required by mysql connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required by oracle connector\r\nfor
jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n#
Add libraries required by nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n#
added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\r\n  if [
-d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned RPMs, the tez-client will be a
symlink to the current folder of tez in HDP.\r\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez
 -client/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 2.0\r\nexport
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION
$HADOOP_OPTS\"\r\n"
 	  }
         }
     },

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/ca6abfbe/eagle-external/eagle-docker/install-cluster.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/install-cluster.sh b/eagle-external/eagle-docker/install-cluster.sh
index eb730ae..74fe21f 100755
--- a/eagle-external/eagle-docker/install-cluster.sh
+++ b/eagle-external/eagle-docker/install-cluster.sh
@@ -17,6 +17,5 @@ clear
 SERF_RPC_ADDR=${AMBARISERVER_PORT_7373_TCP##*/}
 serf event --rpc-addr=$SERF_RPC_ADDR eagle
 
-echo "eagle environment is setted up successfully"
 ./wait-for-eagle.sh
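
For reference, the ##*/ expansion above strips the scheme from the docker-links variable, leaving the host:port that serf's RPC client expects (example value assumed):

    AMBARISERVER_PORT_7373_TCP=tcp://172.17.0.2:7373    # injected by docker --link
    SERF_RPC_ADDR=${AMBARISERVER_PORT_7373_TCP##*/}     # -> 172.17.0.2:7373
    serf event --rpc-addr=$SERF_RPC_ADDR eagle          # broadcast the "eagle" user event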
 

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/ca6abfbe/eagle-external/eagle-docker/serf/handlers/eagle
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/serf/handlers/eagle b/eagle-external/eagle-docker/serf/handlers/eagle
index ce07c67..fef860a 100755
--- a/eagle-external/eagle-docker/serf/handlers/eagle
+++ b/eagle-external/eagle-docker/serf/handlers/eagle
@@ -3,5 +3,5 @@
 # install Eagle on the Ambari server instance
 if [[ "$SERF_TAG_AMBARI_SERVER" == "true" ]] ;then
   echo run eagle install script
-  nohup /usr/local/eagle/deploy.sh > /var/log/eagle-deploy.log
+  nohup /usr/hdp/current/eagle/deploy.sh > /var/log/eagle-deploy.log
 fi
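
The SERF_TAG_AMBARI_SERVER guard works because serf exports an agent's tags to its event handlers as SERF_TAG_* environment variables; the tag itself is assumed to be set when the agent starts (a sketch, the actual invocation lives in start-serf-agent.sh):

    serf agent -tag ambari_server=true -event-handler=/usr/local/serf/handlers/eagle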

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/ca6abfbe/eagle-external/eagle-docker/wait-for-eagle.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/wait-for-eagle.sh b/eagle-external/eagle-docker/wait-for-eagle.sh
index 39909a3..8449a6d 100755
--- a/eagle-external/eagle-docker/wait-for-eagle.sh
+++ b/eagle-external/eagle-docker/wait-for-eagle.sh
@@ -20,4 +20,4 @@ while ! get-server-state | grep 200 &>/dev/null ; do
   sleep $SLEEP
 done
 [ $DEBUG -gt 0 ] && echo
-debug eagle web started: $EAGLE_HOST:9099/eagle
\ No newline at end of file
+debug eagle web started: $EAGLE_HOST:9099/eagle-service
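
The loop above waits until get-server-state reports HTTP 200; assuming get-server-state wraps a curl status probe, a standalone equivalent of the whole wait is:

    while [ "$(curl -s -o /dev/null -w '%{http_code}' http://$EAGLE_HOST:9099/eagle-service)" != "200" ]; do
      sleep $SLEEP
    done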

