chukwa-dev mailing list archives

From ey...@apache.org
Subject [1/9] chukwa git commit: CHUKWA-776. Added Docker support. (Eric Yang)
Date Sun, 13 Sep 2015 20:27:54 GMT
Repository: chukwa
Updated Branches:
  refs/heads/master a31164908 -> 6e9e7899d


CHUKWA-776. Added Docker support.  (Eric Yang)


Project: http://git-wip-us.apache.org/repos/asf/chukwa/repo
Commit: http://git-wip-us.apache.org/repos/asf/chukwa/commit/112d3bbd
Tree: http://git-wip-us.apache.org/repos/asf/chukwa/tree/112d3bbd
Diff: http://git-wip-us.apache.org/repos/asf/chukwa/diff/112d3bbd

Branch: refs/heads/master
Commit: 112d3bbd652dcdb15b40bdcdc9b01398019e6eff
Parents: a311649
Author: Eric Yang <eyang@apache.org>
Authored: Sun Aug 23 20:16:20 2015 -0700
Committer: Eric Yang <eyang@apache.org>
Committed: Sun Aug 23 20:19:07 2015 -0700

----------------------------------------------------------------------
 CHANGES.txt                           |   2 +
 contrib/docker/Dockerfile             |  45 +++++++++
 contrib/docker/README                 |  14 +++
 contrib/docker/hadoop/core-site.xml   |  28 ++++++
 contrib/docker/hadoop/hadoop-env.sh   | 101 ++++++++++++++++++++
 contrib/docker/hadoop/mapred-site.xml |  28 ++++++
 contrib/docker/hadoop/yarn-env.sh     | 124 ++++++++++++++++++++++++
 contrib/docker/hadoop/yarn-site.xml   |  22 +++++
 contrib/docker/hbase/hbase-env.sh     | 146 +++++++++++++++++++++++++++++
 contrib/docker/hbase/hbase-site.xml   |  47 ++++++++++
 contrib/docker/setup-image.sh         | 120 ++++++++++++++++++++++++
 contrib/docker/start-all.sh           |  40 ++++++++
 12 files changed, 717 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 32f42af..ca9eefa 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -4,6 +4,8 @@ Trunk (unreleased changes)
 
   NEW FEATURES
 
+    CHUKWA-776. Added Docker support.  (Eric Yang)
+
     CHUKWA-772. Added ChukwaParquetWriter.  (Eric Yang)
 
     CHUKWA-756. Added ajax-solr UI for log search.  (Eric Yang)

http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/contrib/docker/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/docker/Dockerfile b/contrib/docker/Dockerfile
new file mode 100644
index 0000000..371d39e
--- /dev/null
+++ b/contrib/docker/Dockerfile
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+FROM centos:6
+MAINTAINER Apache
+RUN yum install -y tar wget bind-utils ntp java-1.7.0-openjdk which openssh-server openssh-clients
+RUN mkdir -p /opt/apache
+RUN wget https://www.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz
+RUN wget https://www.apache.org/dist/hadoop/common/hadoop-2.7.1/hadoop-2.7.1.tar.gz 
+RUN wget https://www.apache.org/dist/hbase/1.1.1/hbase-1.1.1-bin.tar.gz
+RUN wget https://www.apache.org/dist/lucene/solr/4.10.4/solr-4.10.4.tgz
+ADD chukwa-0.7.0.tar.gz /opt/apache/
+RUN tar xf zookeeper-3.4.6.tar.gz -C /opt/apache
+RUN tar xf hadoop-2.7.1.tar.gz -C /opt/apache
+RUN tar xf hbase-1.1.1-bin.tar.gz -C /opt/apache
+RUN tar xf solr-4.10.4.tgz -C /opt/apache
+RUN rm -f zookeeper-*.tar.gz hadoop-*.tar.gz hbase-*.tar.gz solr-*.tgz
+RUN ln -s /opt/apache/zookeeper-* /opt/apache/zookeeper
+RUN ln -s /opt/apache/hadoop-* /opt/apache/hadoop
+RUN ln -s /opt/apache/hbase-* /opt/apache/hbase
+RUN ln -s /opt/apache/solr-* /opt/apache/solr
+RUN ln -s /opt/apache/chukwa-* /opt/apache/chukwa
+RUN cp -f /opt/apache/chukwa/etc/chukwa/hadoop-log4j.properties /opt/apache/hadoop/etc/hadoop/log4j.properties
+RUN cp -f /opt/apache/chukwa/etc/chukwa/hadoop-metrics2.properties /opt/apache/hadoop/etc/hadoop/hadoop-metrics2.properties
+RUN cp -f /opt/apache/chukwa/etc/chukwa/hadoop-metrics2-hbase.properties /opt/apache/hbase/conf/hadoop-metrics2-hbase.properties
+RUN cp -f /opt/apache/chukwa/etc/chukwa/hbase-log4j.properties /opt/apache/hbase/conf/log4j.properties
+ADD hadoop/* /opt/apache/hadoop/etc/hadoop/
+ADD hbase/* /opt/apache/hbase/conf/
+ADD start-all.sh /etc/start-all.sh
+ADD setup-image.sh /
+RUN bash setup-image.sh
+RUN rm -f /setup-image.sh
+EXPOSE 4080 50070 8088 16010 7574
+CMD ["/etc/start-all.sh"]
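
Note: each RUN instruction above creates a separate image layer, which is why the README below suggests docker-squash to shrink the final image. A possible (untested) alternative sketch is to chain download, extraction, and cleanup in a single RUN so the removed tarball never persists in an intermediate layer, for example:

  RUN wget https://www.apache.org/dist/hadoop/common/hadoop-2.7.1/hadoop-2.7.1.tar.gz && \
      tar xf hadoop-2.7.1.tar.gz -C /opt/apache && \
      rm -f hadoop-2.7.1.tar.gz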

http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/contrib/docker/README
----------------------------------------------------------------------
diff --git a/contrib/docker/README b/contrib/docker/README
new file mode 100644
index 0000000..e090edf
--- /dev/null
+++ b/contrib/docker/README
@@ -0,0 +1,14 @@
+This file contains instructions on how to build a Docker image for Chukwa.
+
+- Copy the Chukwa binary tarball to the same directory as this README file.
+- Run the docker build procedure:
+
+  docker build -t chukwa .
+
+- Trim down the size of the Docker image using docker-squash:
+
+  docker save [id] | sudo /usr/local/bin/docker-squash -t chukwa/chukwa | docker load
+
+- The Chukwa image can be launched using:
+
+  docker run -itP -p 4080:4080 -p 50070:50070 -p 8088:8088 -p 60010:60010 chukwa/chukwa
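
A minimal end-to-end sketch of the steps above, assuming the Chukwa 0.7.0 binary tarball has already been produced by the project's build (the source path below is hypothetical; adjust it to wherever the tarball was generated):

  cp /path/to/chukwa-0.7.0.tar.gz contrib/docker/
  cd contrib/docker
  docker build -t chukwa .
  docker run -itP -p 4080:4080 -p 50070:50070 -p 8088:8088 chukwa

The docker-squash step is optional; without it, run the image under the tag given to docker build (chukwa) rather than chukwa/chukwa.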

http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/contrib/docker/hadoop/core-site.xml
----------------------------------------------------------------------
diff --git a/contrib/docker/hadoop/core-site.xml b/contrib/docker/hadoop/core-site.xml
new file mode 100644
index 0000000..c278dfd
--- /dev/null
+++ b/contrib/docker/hadoop/core-site.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://localhost:9000</value>
+    </property>
+    <property>
+        <name>dfs.replication</name>
+        <value>1</value>
+    </property>
+</configuration>
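
With fs.defaultFS set as above, HDFS clients inside the container resolve the NameNode at hdfs://localhost:9000. A hedged verification sketch, using the same paths and su pattern as setup-image.sh in this commit:

  su - hdfs -c '/opt/apache/hadoop/bin/hadoop fs -ls hdfs://localhost:9000/'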

http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/contrib/docker/hadoop/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/contrib/docker/hadoop/hadoop-env.sh b/contrib/docker/hadoop/hadoop-env.sh
new file mode 100644
index 0000000..593a5f7
--- /dev/null
+++ b/contrib/docker/hadoop/hadoop-env.sh
@@ -0,0 +1,101 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.
+export JAVA_HOME=/usr/lib/jvm/jre
+
+export HADOOP_LOG_DIR=/var/log/hadoop
+export HADOOP_PID_DIR=/var/run/hadoop
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol.  Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+  if [ "$HADOOP_CLASSPATH" ]; then
+    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+  else
+    export HADOOP_CLASSPATH=$f
+  fi
+done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="$HADOOP_OPTS -Djava.security.krb5.realm= -Djava.security.krb5.kdc= -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-Xmx256m -Dhadoop.log.port=9096 -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
+export HADOOP_DATANODE_OPTS="-Dhadoop.log.port=9098 -Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-Xmx256m -Dhadoop.log.port=9097 -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol.  This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by 
+#       the user that will run the hadoop daemons.  Otherwise there is the
+#       potential for a symlink attack.
+export HADOOP_PID_DIR=${HADOOP_PID_DIR}
+export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER

http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/contrib/docker/hadoop/mapred-site.xml
----------------------------------------------------------------------
diff --git a/contrib/docker/hadoop/mapred-site.xml b/contrib/docker/hadoop/mapred-site.xml
new file mode 100644
index 0000000..007a9ec
--- /dev/null
+++ b/contrib/docker/hadoop/mapred-site.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+    <property>
+        <name>mapreduce.framework.name</name>
+        <value>yarn</value>
+    </property>
+    <property>
+        <name>yarn.nodemanager.aux-services</name>
+        <value>mapreduce_shuffle</value>
+    </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/contrib/docker/hadoop/yarn-env.sh
----------------------------------------------------------------------
diff --git a/contrib/docker/hadoop/yarn-env.sh b/contrib/docker/hadoop/yarn-env.sh
new file mode 100644
index 0000000..d4da29a
--- /dev/null
+++ b/contrib/docker/hadoop/yarn-env.sh
@@ -0,0 +1,124 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+export YARN_LOG_DIR=/var/log/hadoop
+
+# some Java parameters
+export JAVA_HOME=/usr/lib/jvm/jre
+
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+  
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m 
+
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+YARN_HEAPSIZE=256
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_RESOURCEMANAGER_HEAPSIZE=1000
+
+# Specify the max Heapsize for the timeline server using a numerical value
+# in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_TIMELINESERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_TIMELINESERVER_HEAPSIZE=1000
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+export YARN_RESOURCEMANAGER_OPTS="-Dhadoop.log.port=9099"
+
+# Node Manager specific parameters
+
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_NODEMANAGER_HEAPSIZE=1000
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+export YARN_NODEMANAGER_OPTS="-Dhadoop.log.port=9100"
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+  YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi  
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+
+

http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/contrib/docker/hadoop/yarn-site.xml
----------------------------------------------------------------------
diff --git a/contrib/docker/hadoop/yarn-site.xml b/contrib/docker/hadoop/yarn-site.xml
new file mode 100644
index 0000000..b61d427
--- /dev/null
+++ b/contrib/docker/hadoop/yarn-site.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+<!-- Site specific YARN configuration properties -->
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/contrib/docker/hbase/hbase-env.sh
----------------------------------------------------------------------
diff --git a/contrib/docker/hbase/hbase-env.sh b/contrib/docker/hbase/hbase-env.sh
new file mode 100644
index 0000000..47a77de
--- /dev/null
+++ b/contrib/docker/hbase/hbase-env.sh
@@ -0,0 +1,146 @@
+#
+#/**
+# * Copyright 2007 The Apache Software Foundation
+# *
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set environment variables here.
+
+# This script sets variables multiple times over the course of starting an hbase process,
+# so try to keep things idempotent unless you want to take an even deeper look
+# into the startup scripts (bin/hbase, etc.)
+
+# The java implementation to use.  Java 1.7+ required.
+export JAVA_HOME=/usr/lib/jvm/jre
+
+export HADOOP_CONF_DIR=/opt/apache/hadoop/etc/hadoop
+
+# Extra Java CLASSPATH elements.  Optional.
+# export HBASE_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HBASE_HEAPSIZE=256
+
+# Uncomment below if you intend to use off heap cache.
+# export HBASE_OFFHEAPSIZE=1000
+
+# For example, to allocate 8G of offheap, set the value to 8G:
+# export HBASE_OFFHEAPSIZE=8G
+
+# Extra Java runtime options.
+# Below are what we set by default.  May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC -Dhbase.security.log.port=9105"
+
+# Uncomment one of the below three options to enable java garbage collection logging for the server-side processes.
+
+# This enables basic gc logging to the .out file.
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
+
+# This enables basic gc logging to its own file.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR.
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
+
+# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR.
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
+
+# Uncomment one of the below three options to enable java garbage collection logging for the client processes.
+
+# This enables basic gc logging to the .out file.
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
+
+# This enables basic gc logging to its own file.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR.
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
+
+# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR.
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
+
+# See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations
+# needed setting up off-heap block caching. 
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# NOTE: HBase provides an alternative JMX implementation to fix the random ports issue, please see JMX
+# section in HBase Reference Guide for instructions.
+
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+#export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101"
+#export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
+#export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+#export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+#export HBASE_REST_OPTS="$HBASE_REST_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10105"
+
+# File naming hosts on which HRegionServers will run.  $HBASE_HOME/conf/regionservers by default.
+# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers
+
+# Uncomment and adjust to keep all the Region Server pages mapped to be memory resident
+#HBASE_REGIONSERVER_MLOCK=true
+#HBASE_REGIONSERVER_UID="hbase"
+
+# File naming hosts on which backup HMaster will run.  $HBASE_HOME/conf/backup-masters by default.
+# export HBASE_BACKUP_MASTERS=${HBASE_HOME}/conf/backup-masters
+
+# Extra ssh options.  Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored.  $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR=/var/log/hbase
+
+# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers 
+# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
+# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
+# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+# export HBASE_PID_DIR=/var/hadoop/pids
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the
+# RFA appender. Please refer to the log4j.properties file to see more details on this appender.
+# In case one needs to do log rolling on a date change, one should set the environment property
+# HBASE_ROOT_LOGGER to "<DESIRED_LOG LEVEL>,DRFA".
+# For example:
+# HBASE_ROOT_LOGGER=INFO,DRFA
+# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as
+# DRFA doesn't put any cap on the log size. Please refer to HBase-5655 for more context.
+
+# Port number to stream hbase log to Chukwa
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Dhbase.log.port=9106"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Dhbase.log.port=9107"
+export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Dhbase.log.port=9108"
+export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Dhbase.log.port=9109"
+export HBASE_REST_OPTS="$HBASE_REST_OPTS -Dhbase.log.port=9110"

http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/contrib/docker/hbase/hbase-site.xml
----------------------------------------------------------------------
diff --git a/contrib/docker/hbase/hbase-site.xml b/contrib/docker/hbase/hbase-site.xml
new file mode 100644
index 0000000..1287e44
--- /dev/null
+++ b/contrib/docker/hbase/hbase-site.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://localhost:9000/hbase</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+      The port at which the clients will connect.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+  </property>
+  <property >
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.dataDir</name>
+    <value>/var/lib/zoo</value>
+  </property>
+</configuration>
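
hbase.rootdir above points at the same hdfs://localhost:9000 namespace configured in core-site.xml, and the clientPort matches the default in the zoo.cfg generated by setup-image.sh. A hedged sketch for checking the wiring from inside a running container:

  su - hdfs -c '/opt/apache/hadoop/bin/hadoop fs -ls /hbase'
  su - hbase -c 'echo "status" | /opt/apache/hbase/bin/hbase shell'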

http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/contrib/docker/setup-image.sh
----------------------------------------------------------------------
diff --git a/contrib/docker/setup-image.sh b/contrib/docker/setup-image.sh
new file mode 100755
index 0000000..b2b23f5
--- /dev/null
+++ b/contrib/docker/setup-image.sh
@@ -0,0 +1,120 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Initialize Users
+groupadd -g 123 hadoop
+groupadd -g 201 hdfs
+useradd -u 200 -g 123 zookeeper
+useradd -u 201 -g 201 -G 123 hdfs
+useradd -u 202 -g 123 yarn
+useradd -u 203 -g 123 hbase
+useradd -u 204 -g 123 solr
+useradd -u 205 -g 123 chukwa
+
+# Global SSH configuration
+echo StrictHostKeyChecking no >> /etc/ssh/ssh_config
+
+# Setup SSH configuration for service accounts
+su - hdfs -c 'cat /dev/zero | ssh-keygen -t dsa -N ""'
+su - yarn -c 'cat /dev/zero | ssh-keygen -t dsa -N ""'
+su - hbase -c 'cat /dev/zero | ssh-keygen -t dsa -N ""'
+su - chukwa -c 'cat /dev/zero | ssh-keygen -t dsa -N ""'
+cp /home/hdfs/.ssh/id_dsa.pub /home/hdfs/.ssh/authorized_keys
+cp /home/yarn/.ssh/id_dsa.pub /home/yarn/.ssh/authorized_keys
+cp /home/hbase/.ssh/id_dsa.pub /home/hbase/.ssh/authorized_keys
+cp /home/chukwa/.ssh/id_dsa.pub /home/chukwa/.ssh/authorized_keys
+chown hdfs:hdfs /home/hdfs/.ssh/authorized_keys
+chown yarn:hadoop /home/yarn/.ssh/authorized_keys
+chown hbase:hadoop /home/hbase/.ssh/authorized_keys
+chown chukwa:hadoop /home/chukwa/.ssh/authorized_keys
+
+# Create symlinks for current version
+ln -s /opt/apache/zookeeper-* /opt/apache/zookeeper
+ln -s /opt/apache/hadoop-* /opt/apache/hadoop
+ln -s /opt/apache/hbase-* /opt/apache/hbase
+ln -s /opt/apache/solr-* /opt/apache/solr
+ln -s /opt/apache/chukwa-* /opt/apache/chukwa
+ln -s /opt/apache/chukwa/share/chukwa/lib/json-simple-*.jar /opt/apache/hadoop/share/hadoop/common/lib/json-simple.jar
+ln -s /opt/apache/chukwa/share/chukwa/chukwa-*-client.jar /opt/apache/hadoop/share/hadoop/common/lib/chukwa-client.jar
+ln -s /opt/apache/chukwa/share/chukwa/lib/json-simple-*.jar /opt/apache/hbase/lib/json-simple.jar
+ln -s /opt/apache/chukwa/share/chukwa/chukwa-*-client.jar /opt/apache/hbase/lib/chukwa-client.jar
+ln -s /opt/apache/zookeeper/conf /etc/zookeeper
+ln -s /opt/apache/hadoop/etc/hadoop /etc/hadoop
+ln -s /opt/apache/hbase/conf /etc/hbase
+ln -s /opt/apache/chukwa/etc/chukwa /etc/chukwa
+
+# ZooKeeper configuration
+cat /opt/apache/zookeeper/conf/zoo_sample.cfg | \
+sed -e 's:/tmp/zoo:/var/lib/zoo:' > /opt/apache/zookeeper/conf/zoo.cfg
+
+# Configure Solr with Chukwa configuration
+mv -f /opt/apache/chukwa/etc/solr/logs /opt/apache/solr/example/solr/logs
+
+# Adjust Chukwa configuration for container paths
+cat /opt/apache/chukwa/etc/chukwa/chukwa-env.sh | \
+sed -e 's:\${JAVA_HOME}:/usr/lib/jvm/jre:' | \
+sed -e 's:\${HBASE_CONF_DIR}:/opt/apache/hbase/conf:' | \
+sed -e 's:\${HADOOP_CONF_DIR}:/opt/apache/hadoop/etc/hadoop:' | \
+sed -e 's:/tmp/chukwa/pidDir:/var/run/chukwa:' | \
+sed -e 's:/tmp/chukwa/log:/var/log/chukwa:' > /tmp/chukwa-env.sh
+cp -f /tmp/chukwa-env.sh /opt/apache/chukwa/etc/chukwa/chukwa-env.sh
+rm -f /tmp/chukwa-env.sh
+cp -f /opt/apache/chukwa/etc/chukwa/hadoop-log4j.properties /opt/apache/hadoop/etc/hadoop/log4j.properties
+cp -f /opt/apache/chukwa/etc/chukwa/hadoop-metrics2.properties /opt/apache/hadoop/etc/hadoop/hadoop-metrics2.properties
+cp -f /opt/apache/chukwa/etc/chukwa/hadoop-metrics2-hbase.properties /opt/apache/hbase/conf/hadoop-metrics2-hbase.properties
+cp -f /opt/apache/chukwa/etc/chukwa/hbase-log4j.properties /opt/apache/hbase/conf/log4j.properties
+
+# Initialize Service file permissions
+mkdir -p /var/lib/zookeeper
+mkdir -p /var/lib/hdfs
+mkdir -p /var/lib/yarn
+mkdir -p /var/lib/chukwa
+mkdir -p /var/lib/solr
+mkdir -p /var/run/hadoop
+mkdir -p /var/run/hbase
+mkdir -p /var/run/chukwa
+mkdir -p /var/run/solr
+mkdir -p /var/log/hadoop
+mkdir -p /var/log/hbase
+mkdir -p /var/log/chukwa
+
+chown -R zookeeper:hadoop /opt/apache/zookeeper* /var/lib/zookeeper
+chmod 775 /var/run/hadoop
+chmod 775 /var/log/hadoop
+chown -R hdfs:hadoop /opt/apache/hadoop* /var/lib/hdfs /var/run/hadoop /var/log/hadoop
+chown -R hbase:hadoop /opt/apache/hbase* /var/run/hbase /var/log/hbase
+chown -R solr:hadoop /opt/apache/solr* /var/run/solr
+chown -R chukwa:hadoop /opt/apache/chukwa* /var/lib/chukwa /var/run/chukwa /var/log/chukwa
+
+# format HDFS
+export JAVA_HOME=/usr/lib/jvm/jre
+export HADOOP_CONF_DIR=/opt/apache/hadoop/etc/hadoop
+export HBASE_CONF_DIR=/opt/apache/hbase/conf
+export CHUKWA_CONF_DIR=/opt/apache/chukwa/etc/chukwa
+service sshd start
+su - hdfs -c '/opt/apache/hadoop/bin/hadoop namenode -format'
+su - hdfs -c '/opt/apache/hadoop/sbin/start-all.sh'
+su - zookeeper -c '/opt/apache/zookeeper/bin/zkServer.sh start'
+SAFE_MODE=`su - hdfs -c '/opt/apache/hadoop/bin/hadoop dfsadmin -safemode get 2>/dev/null'`
+while [ "$SAFE_MODE" == "Safe mode is ON" ]; do
+  SAFE_MODE=`su - hdfs -c '/opt/apache/hadoop/bin/hadoop dfsadmin -safemode get 2>/dev/null'`
+  sleep 3
+done
+su - hdfs -c '/opt/apache/hadoop/bin/hadoop fs -mkdir /hbase >/dev/null 2>&1'
+su - hdfs -c '/opt/apache/hadoop/bin/hadoop fs -chown hbase:hadoop /hbase >/dev/null 2>&1'
+su - hbase -c '/opt/apache/hbase/bin/start-hbase.sh'
+sleep 5
+su - hbase -c '/opt/apache/hbase/bin/hbase shell < /opt/apache/chukwa/etc/chukwa/hbase.schema'
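
setup-image.sh runs at image build time: it formats HDFS, waits for the NameNode to leave safe mode, creates /hbase, starts HBase, and loads the Chukwa schema. A hedged sketch for confirming the schema load (the exact table names come from the hbase.schema file shipped with Chukwa, so treat the output as illustrative):

  su - hbase -c 'echo "list" | /opt/apache/hbase/bin/hbase shell'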

http://git-wip-us.apache.org/repos/asf/chukwa/blob/112d3bbd/contrib/docker/start-all.sh
----------------------------------------------------------------------
diff --git a/contrib/docker/start-all.sh b/contrib/docker/start-all.sh
new file mode 100755
index 0000000..4cfe827
--- /dev/null
+++ b/contrib/docker/start-all.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+export PATH=${PATH}:/opt/apache/hadoop/bin:/opt/apache/hbase/bin
+export JAVA_HOME=/usr/lib/jvm/jre
+export HADOOP_CONF_DIR=/opt/apache/hadoop/etc/hadoop
+export HBASE_CONF_DIR=/opt/apache/hbase/conf
+export CHUKWA_CONF_DIR=/opt/apache/chukwa/etc/chukwa
+service sshd start
+su - zookeeper -c '/opt/apache/zookeeper/bin/zkServer.sh start'
+su - solr -c 'cd /opt/apache/solr/example; /usr/lib/jvm/jre/bin/java -Dbootstrap_confdir=/opt/apache/solr/example/solr/logs/conf -Dcollection.configName=myconf -Djetty.port=7574 -DzkHost=localhost:2181 -jar /opt/apache/solr/example/start.jar >/dev/null 2>&1 &'
+su - hdfs -c '/opt/apache/hadoop/sbin/start-dfs.sh >/dev/null 2>&1'
+su - yarn -c '/opt/apache/hadoop/sbin/start-yarn.sh >/dev/null 2>&1'
+SAFE_MODE=`su - hdfs -c '/opt/apache/hadoop/bin/hadoop dfsadmin -safemode get 2>/dev/null'`
+while [ "$SAFE_MODE" == "Safe mode is ON" ]; do
+  SAFE_MODE=`su - hdfs -c '/opt/apache/hadoop/bin/hadoop dfsadmin -safemode get 2>/dev/null'`
+  sleep 3
+done
+su - hbase -c '/opt/apache/hbase/bin/start-hbase.sh >/dev/null 2>&1'
+su - chukwa -c '/opt/apache/chukwa/sbin/start-chukwa.sh'
+echo
+echo "Chukwa Docker container is ready."
+echo "Use web browser to visit http://`hostname -f`:4080/ for demo."
+echo "Username: admin, password: admin"
+echo
+echo "Enjoy!"
+bash
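
Once start-all.sh has brought everything up inside "docker run", the exposed web UIs can be spot-checked from the host. A hedged sketch, assuming the host-side port mappings from the README (curl simply reports the HTTP status code):

  curl -s -o /dev/null -w "%{http_code}\n" http://localhost:4080/    # Chukwa web UI
  curl -s -o /dev/null -w "%{http_code}\n" http://localhost:50070/   # HDFS NameNode UI
  curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8088/    # YARN ResourceManager UI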

