chukwa-dev mailing list archives

From ey...@apache.org
Subject chukwa git commit: CHUKWA-783 Update documentation within the Quickstart to ease publication
Date Sat, 26 Sep 2015 22:19:04 GMT
Repository: chukwa
Updated Branches:
  refs/heads/master d3fba3fac -> 0d9f40360


CHUKWA-783 Update documentation within the Quickstart to ease publication


Project: http://git-wip-us.apache.org/repos/asf/chukwa/repo
Commit: http://git-wip-us.apache.org/repos/asf/chukwa/commit/0d9f4036
Tree: http://git-wip-us.apache.org/repos/asf/chukwa/tree/0d9f4036
Diff: http://git-wip-us.apache.org/repos/asf/chukwa/diff/0d9f4036

Branch: refs/heads/master
Commit: 0d9f40360edff12f0ee25aa461aa8a6063cde5fa
Parents: d3fba3f
Author: Eric Yang <eyang@apache.org>
Authored: Sat Sep 26 15:17:42 2015 -0700
Committer: Eric Yang <eyang@apache.org>
Committed: Sat Sep 26 15:17:42 2015 -0700

----------------------------------------------------------------------
 pom.xml                               |  40 ++++----
 src/site/apt/Quick_Start_Guide.apt    | 153 -----------------------------
 src/site/apt/Quick_Start_Guide.apt.vm | 153 +++++++++++++++++++++++++++++
 3 files changed, 173 insertions(+), 173 deletions(-)
----------------------------------------------------------------------
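
The commit is effectively a rename of the Quick Start source from .apt to .apt.vm plus a consolidation of dependency versions into pom.xml properties. For readers following along in a local clone, the full change can be inspected from the commit hash above; a minimal sketch, assuming the chukwa repository is cloned and this commit has been fetched:

  git show --stat 0d9f40360edff12f0ee25aa461aa8a6063cde5fa
  git show 0d9f40360edff12f0ee25aa461aa8a6063cde5fa -- pom.xml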


http://git-wip-us.apache.org/repos/asf/chukwa/blob/0d9f4036/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 79c6c39..f8f5b45 100644
--- a/pom.xml
+++ b/pom.xml
@@ -45,6 +45,10 @@
         <TODO_DEMUX_FS_INMEMORY_SIZE_MB>64</TODO_DEMUX_FS_INMEMORY_SIZE_MB>
         <TODO_DEMUX_IO_SORT_FACTOR>10</TODO_DEMUX_IO_SORT_FACTOR>
         <CHUKWA_DIR>/chukwa</CHUKWA_DIR>
+        <json-simpleVersion>1.1</json-simpleVersion>
+        <zookeeperVersion>3.4.5</zookeeperVersion>
+        <hbaseVersion>1.0.0</hbaseVersion>
+        <hadoopVersion>2.6.0</hadoopVersion>
         <!-- <JAVA_HOME>${java.home}</JAVA_HOME> -->
     </properties>
 
@@ -147,7 +151,7 @@
           <dependency>
             <groupId>com.googlecode.json-simple</groupId>
             <artifactId>json-simple</artifactId>
-            <version>1.1</version>
+            <version>${json-simpleVersion}</version>
           </dependency>
           <dependency>
             <groupId>org.apache.activemq</groupId>
@@ -262,7 +266,7 @@
           <dependency>
             <groupId>org.apache.zookeeper</groupId>
             <artifactId>zookeeper</artifactId>
-            <version>3.4.5</version>
+            <version>${zookeeperVersion}</version>
           </dependency>
           <dependency>
             <groupId>com.sun.jersey</groupId>
@@ -945,15 +949,11 @@
                     <name>!hbase.profile</name>
                 </property>
             </activation>
-            <properties>
-                <hbase.version>1.0.0</hbase.version>
-                <hadoop.version>2.6.0</hadoop.version>
-            </properties>
             <dependencies>
                 <dependency>
                     <groupId>org.apache.hbase</groupId>
                     <artifactId>hbase-common</artifactId>
-                    <version>${hbase.version}</version>
+                    <version>${hbaseVersion}</version>
                     <exclusions>
                         <exclusion>
                             <groupId>com.sun.jersey</groupId>
@@ -1000,7 +1000,7 @@
                 <dependency>
                     <groupId>org.apache.hbase</groupId>
                     <artifactId>hbase-server</artifactId>
-                    <version>${hbase.version}</version>
+                    <version>${hbaseVersion}</version>
                     <exclusions>
                         <exclusion>
                             <groupId>org.mortbay.jetty</groupId>
@@ -1040,14 +1040,14 @@
                     <groupId>org.apache.hbase</groupId>
                     <artifactId>hbase-common</artifactId>
                     <classifier>tests</classifier>
-                    <version>${hbase.version}</version>
+                    <version>${hbaseVersion}</version>
                     <scope>test</scope>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hbase</groupId>
                     <artifactId>hbase-server</artifactId>
                     <classifier>tests</classifier>
-                    <version>${hbase.version}</version>
+                    <version>${hbaseVersion}</version>
                     <scope>test</scope>
                     <exclusions>
                         <exclusion>
@@ -1076,20 +1076,20 @@
                     <groupId>org.apache.hbase</groupId>
                     <artifactId>hbase-hadoop-compat</artifactId>
                     <classifier>tests</classifier>
-                    <version>${hbase.version}</version>
+                    <version>${hbaseVersion}</version>
                     <scope>test</scope>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hbase</groupId>
                     <artifactId>hbase-hadoop2-compat</artifactId>
                     <classifier>tests</classifier>
-                    <version>${hbase.version}</version>
+                    <version>${hbaseVersion}</version>
                     <scope>test</scope>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-common</artifactId>
-                    <version>${hadoop.version}</version>
+                    <version>${hadoopVersion}</version>
                     <exclusions>
                         <exclusion>
                             <groupId>javax.servlet.jsp</groupId>
@@ -1112,38 +1112,38 @@
                 <dependency>
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-auth</artifactId>
-                    <version>${hadoop.version}</version>
+                    <version>${hadoopVersion}</version>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-client</artifactId>
-                    <version>${hadoop.version}</version>
+                    <version>${hadoopVersion}</version>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-mapreduce-client-core</artifactId>
-                    <version>${hadoop.version}</version>
+                    <version>${hadoopVersion}</version>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-yarn-common</artifactId>
-                    <version>${hadoop.version}</version>
+                    <version>${hadoopVersion}</version>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-hdfs</artifactId>
-                    <version>${hadoop.version}</version>
+                    <version>${hadoopVersion}</version>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-yarn-server-tests</artifactId>
-                    <version>${hadoop.version}</version>
+                    <version>${hadoopVersion}</version>
                     <scope>test</scope>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hadoop</groupId>
 	            <artifactId>hadoop-minicluster</artifactId>
-                    <version>${hadoop.version}</version>
+                    <version>${hadoopVersion}</version>
                 </dependency>
             </dependencies>
         </profile>
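
The hunks above replace the hard-coded json-simple, ZooKeeper, HBase and Hadoop versions with top-level camel-case properties (hbaseVersion rather than hbase.version), which the Velocity-processed Quick_Start_Guide.apt.vm below can interpolate directly; dotted property names are awkward to reference from Velocity templates. One way to confirm the substitution is to render the site and inspect the generated page; a minimal sketch, assuming the standard maven-site-plugin output location under target/site:

  mvn clean site
  grep -E 'ZooKeeper|HBase|Hadoop' target/site/Quick_Start_Guide.html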

http://git-wip-us.apache.org/repos/asf/chukwa/blob/0d9f4036/src/site/apt/Quick_Start_Guide.apt
----------------------------------------------------------------------
diff --git a/src/site/apt/Quick_Start_Guide.apt b/src/site/apt/Quick_Start_Guide.apt
deleted file mode 100644
index 2def29b..0000000
--- a/src/site/apt/Quick_Start_Guide.apt
+++ /dev/null
@@ -1,153 +0,0 @@
-~~ Licensed to the Apache Software Foundation (ASF) under one or more
-~~ contributor license agreements.  See the NOTICE file distributed with
-~~ this work for additional information regarding copyright ownership.
-~~ The ASF licenses this file to You under the Apache License, Version 2.0
-~~ (the "License"); you may not use this file except in compliance with
-~~ the License.  You may obtain a copy of the License at
-~~
-~~     http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-~~
-Chukwa Quick Start Guide
-
-Purpose
-
-  Chukwa is a system for large-scale reliable log collection and processing with Hadoop. The Chukwa design overview discusses the overall architecture of Chukwa. You should read that document before this one. The purpose of this document is to help you install and configure Chukwa.
-
-
-Pre-requisites
-
-  Chukwa should work on any POSIX platform, but GNU/Linux is the only production platform that has been tested extensively. Chukwa has also been used successfully on Mac OS X, which several members of the Chukwa team use for development.
-
-  The only absolute software requirements are Java 1.6 or better, ZooKeeper 3.4.5, HBase 1.0.0 and Hadoop 2.6.0.
-
-  The Chukwa cluster management scripts rely on ssh; these scripts, however, are not required if you have some alternate mechanism for starting and stopping daemons.
-
-
-Installing Chukwa
-
-  A minimal Chukwa deployment has five components:
-
-  * A Hadoop and HBase cluster on which Chukwa will process data (referred to as the Chukwa cluster).
-  
-  * One or more agent processes that send monitoring data to HBase. The nodes with active agent processes are referred to as the monitored source nodes.
-  
-  * A Solr Cloud cluster in which Chukwa will store indexed log files.
-
-  * A data analytics script that summarizes Hadoop cluster health.
-
-  * HICC, the Chukwa visualization tool.
-
-[]
-
-[./images/chukwa_architecture.png] Chukwa 0.6.0 Architecture 
-
-First Steps
-
-  * Obtain a copy of Chukwa. You can find the latest release on the Chukwa release page.
-
-  * Un-tar the release, via tar xzf.
-
-  * Make sure a copy of Chukwa is available on each node being monitored.
-
-  * We refer to the directory containing Chukwa as CHUKWA_HOME. It may be useful to set CHUKWA_HOME explicitly in your environment for ease of use.
-
-Setting Up Chukwa Cluster
-
-* Configure Hadoop and HBase
-
-  [[1]] Copy Chukwa files to Hadoop and HBase directories:
-
----
-cp $CHUKWA_CONF_DIR/hadoop-log4j.properties $HADOOP_CONF_DIR/log4j.properties
-cp $CHUKWA_HOME/etc/chukwa/hadoop-metrics2.properties $HADOOP_CONF_DIR/hadoop-metrics2.properties
-cp $CHUKWA_HOME/share/chukwa/chukwa-0.7.0-client.jar $HADOOP_HOME/share/hadoop/common/lib
-cp $CHUKWA_HOME/share/chukwa/lib/json-simple-1.1.jar $HADOOP_HOME/share/hadoop/common/lib
-cp $CHUKWA_CONF_DIR/hbase-log4j.properties $HBASE_CONF_DIR/log4j.properties
-cp $CHUKWA_HOME/etc/chukwa/hadoop-metrics2-hbase.properties $HBASE_CONF_DIR/hadoop-metrics2-hbase.properties
-cp $CHUKWA_HOME/share/chukwa/chukwa-0.7.0-client.jar $HBASE_HOME/lib
-cp $CHUKWA_HOME/share/chukwa/lib/json-simple-1.1.jar $HBASE_HOME/lib
----  
-
-  [[2]] Restart your Hadoop Cluster. General Hadoop configuration is available at: {{{http://hadoop.apache.org/common/docs/current/cluster_setup.html}Hadoop Configuration}}
-  
-  [[3]] Make sure HBase is started. General HBASE configuration is available at: {{{http://hbase.apache.org/docs/current/api/overview-summary.html#overview_description}HBase Configuration}}
-  
-  [[4]] After Hadoop and HBase are started, run:
-
----
-bin/hbase shell < CHUKWA_HOME/etc/chukwa/hbase.schema
----
-
-  This procedure initializes the default Chukwa HBase schema.
-
-* Configuring And Starting Chukwa Agent
-
-  [[1]] Edit CHUKWA_HOME/etc/chukwa/chukwa-env.sh. Make sure that JAVA_HOME, HADOOP_CONF_DIR, and HBASE_CONF_DIR are set correctly.
-
-  [[2]] Edit CHUKWA_HOME/etc/chukwa/chukwa-agent-conf.xml. Make sure that solr.cloud.address is set correctly.
-
-  [[3]] In CHUKWA_HOME, run:
-
----
-sbin/chukwa-daemon.sh start agent
----
-
-* Setup Solr to index Service log files
-
-  [[1]] Start Solr with Chukwa Solr configuration:
-
----
-java -Dbootstrap_confdir=$CHUKWA_HOME/etc/solr/logs/conf -Dcollection.configName=myconf -Djetty.port=7574 -DzkHost=localhost:2181 -jar start
----
-
-* Setup Cluster Aggregation Script
-
-  For data analytics with Apache Pig, some additional environment setup is required. Apache Pig does not use the same environment variable names as Hadoop; therefore, make sure the following environment variables are set up correctly:
-
-  [[1]] Download and setup Apache Pig 0.9.1.
-
-  [[2]] Define Apache Pig class path:
-
----
-export PIG_CLASSPATH=$HADOOP_CONF_DIR:$HBASE_CONF_DIR
----
-
-  [[3]] Create a jar file of HBASE_CONF_DIR, run:
-
----
-jar cf $CHUKWA_HOME/hbase-env.jar $HBASE_CONF_DIR
----
-
-  [[4]] Setup a cron job or Hudson job for analytics script to run periodically:
-
----
-pig -Dpig.additional.jars=${HBASE_HOME}/hbase-0.90.4.jar:${HBASE_HOME}/lib/zookeeper-3.3.2.jar:${PIG_PATH}/pig.jar:${CHUKWA_HOME}/hbase-env.jar ${CHUKWA_HOME}/script/pig/ClusterSummary.pig
----
-
-* Start HICC
-
-  The Hadoop Infrastructure Care Center (HICC) is the Chukwa web user interface. 
-
-  [[1]] To start HICC, do the following:
-
----
-sbin/chukwa-daemon.sh start hicc
----
-
-* Data Visualization
-
-  [[1]] Once the web container with HICC has been started, point your favorite browser to:
-
----
-http://<server>:4080/hicc/
----
-  
-  [[2]] The default user name and password are "admin" without quotes.
-  
-  [[3]] Metrics data collected by the Chukwa agent will be browsable through the Graph Explorer widget.

http://git-wip-us.apache.org/repos/asf/chukwa/blob/0d9f4036/src/site/apt/Quick_Start_Guide.apt.vm
----------------------------------------------------------------------
diff --git a/src/site/apt/Quick_Start_Guide.apt.vm b/src/site/apt/Quick_Start_Guide.apt.vm
new file mode 100644
index 0000000..0019d1f
--- /dev/null
+++ b/src/site/apt/Quick_Start_Guide.apt.vm
@@ -0,0 +1,153 @@
+~~ Licensed to the Apache Software Foundation (ASF) under one or more
+~~ contributor license agreements.  See the NOTICE file distributed with
+~~ this work for additional information regarding copyright ownership.
+~~ The ASF licenses this file to You under the Apache License, Version 2.0
+~~ (the "License"); you may not use this file except in compliance with
+~~ the License.  You may obtain a copy of the License at
+~~
+~~     http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+~~
+Chukwa Quick Start Guide
+
+Purpose
+
+  Chukwa is a system for large-scale reliable log collection and processing with Hadoop. The Chukwa design overview discusses the overall architecture of Chukwa. You should read that document before this one. The purpose of this document is to help you install and configure Chukwa.
+
+
+Pre-requisites
+
+  Chukwa should work on any POSIX platform, but GNU/Linux is the only production platform that has been tested extensively. Chukwa has also been used successfully on Mac OS X, which several members of the Chukwa team use for development.
+
+  The only absolute software requirements are Java 1.6 or better, ZooKeeper {{${zookeeperVersion}}}, HBase {{${hbaseVersion}}} and Hadoop {{${hadoopVersion}}}.
+
+  The Chukwa cluster management scripts rely on ssh; these scripts, however, are not required if you have some alternate mechanism for starting and stopping daemons.
+
+
+Installing Chukwa
+
+  A minimal Chukwa deployment has five components:
+
+  * A Hadoop and HBase cluster on which Chukwa will process data (referred to as the Chukwa cluster).
+  
+  * One or more agent processes that send monitoring data to HBase. The nodes with active agent processes are referred to as the monitored source nodes.
+  
+  * A Solr Cloud cluster in which Chukwa will store indexed log files.
+
+  * A data analytics script that summarizes Hadoop cluster health.
+
+  * HICC, the Chukwa visualization tool.
+
+[]
+
+[./images/chukwa_architecture.png] Chukwa ${VERSION} Architecture 
+
+First Steps
+
+  * Obtain a copy of Chukwa. You can find the latest release on the Chukwa {{{http://www.apache.org/dyn/closer.cgi/chukwa/}release page}} (or alternatively check the source code out from SCM).
+
+  * Un-tar the release, via tar xzf.
+
+  * Make sure a copy of Chukwa is available on each node being monitored.
+
+  * We refer to the directory containing Chukwa as CHUKWA_HOME. It may be useful to set CHUKWA_HOME explicitly in your environment for ease of use.
+
+Setting Up Chukwa Cluster
+
+* Configure Hadoop and HBase
+
+  [[1]] Copy Chukwa files to Hadoop and HBase directories:
+
+---
+cp $CHUKWA_HOME/etc/chukwa/hadoop-log4j.properties $HADOOP_CONF_DIR/log4j.properties
+cp $CHUKWA_HOME/etc/chukwa/hadoop-metrics2.properties $HADOOP_CONF_DIR/hadoop-metrics2.properties
+cp $CHUKWA_HOME/share/chukwa/chukwa-${VERSION}-client.jar $HADOOP_HOME/share/hadoop/common/lib
+cp $CHUKWA_HOME/share/chukwa/lib/json-simple-${json-simpleVersion}.jar $HADOOP_HOME/share/hadoop/common/lib
+cp $CHUKWA_HOME/etc/chukwa/hbase-log4j.properties $HBASE_CONF_DIR/log4j.properties
+cp $CHUKWA_HOME/etc/chukwa/hadoop-metrics2-hbase.properties $HBASE_CONF_DIR/hadoop-metrics2-hbase.properties
+cp $CHUKWA_HOME/share/chukwa/chukwa-${VERSION}-client.jar $HBASE_HOME/lib
+cp $CHUKWA_HOME/share/chukwa/lib/json-simple-${json-simpleVersion}.jar $HBASE_HOME/lib
+---  
+
+  [[2]] Restart your Hadoop Cluster. General Hadoop configuration is available at: {{{http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html}Hadoop Configuration}}. <<N.B.>> You may see some additional logging messages at this stage that look as if errors are present. These messages show up because the log4j socket appender writes warn messages to stderr when it is unable to stream logs to a log4j socket server. If the Chukwa agent is started with socket adaptors prior to Hadoop and HBase, those messages will not show up. For the time being, do not worry about these messages; they will disappear once the Chukwa agent is started with socket adaptors.
+  
+  [[3]] Make sure HBase is started. General HBASE configuration is available at: {{{http://hbase.apache.org/book.html#configuration}HBase Configuration}}
+  
+  [[4]] After Hadoop and HBase are started, run:
+
+---
+bin/hbase shell < $CHUKWA_HOME/etc/chukwa/hbase.schema
+---
+
+  This procedure initializes the default Chukwa HBase schema.
+
+* Configuring And Starting Chukwa Agent
+
+  [[1]] Edit CHUKWA_HOME/etc/chukwa/chukwa-env.sh. Make sure that JAVA_HOME, HADOOP_CONF_DIR, and HBASE_CONF_DIR are set correctly.
+
+  [[2]] Edit CHUKWA_HOME/etc/chukwa/chukwa-agent-conf.xml. Make sure that solr.cloud.address is set correctly.
+
+  [[3]] In CHUKWA_HOME, run:
+
+---
+sbin/chukwa-daemon.sh start agent
+---
+
+* Setup Solr to index Service log files
+
+  [[1]] Start Solr with Chukwa Solr configuration:
+
+---
+java -Dbootstrap_confdir=$CHUKWA_HOME/etc/solr/logs/conf -Dcollection.configName=myconf -Djetty.port=7574 -DzkHost=localhost:2181 -jar start
+---
+
+* Setup Cluster Aggregation Script
+
+  For data analytics with Apache Pig, some additional environment setup is required. Apache Pig does not use the same environment variable names as Hadoop; therefore, make sure the following environment variables are set up correctly:
+
+  [[1]] Download and setup Apache Pig 0.9.1.
+
+  [[2]] Define Apache Pig class path:
+
+---
+export PIG_CLASSPATH=$HADOOP_CONF_DIR:$HBASE_CONF_DIR
+---
+
+  [[3]] Create a jar file of HBASE_CONF_DIR, run:
+
+---
+jar cf $CHUKWA_HOME/hbase-env.jar $HBASE_CONF_DIR
+---
+
+  [[4]] Setup a cron job or Hudson job for analytics script to run periodically:
+
+---
+pig -Dpig.additional.jars=${HBASE_HOME}/hbase-${hbaseVersion}.jar:${HBASE_HOME}/lib/zookeeper-${zookeeperVersion}.jar:${PIG_PATH}/pig.jar:${CHUKWA_HOME}/hbase-env.jar ${CHUKWA_HOME}/script/pig/ClusterSummary.pig
+---
+
+* Start HICC
+
+  The Hadoop Infrastructure Care Center (HICC) is the Chukwa web user interface. 
+
+  [[1]] To start HICC, do the following:
+
+---
+sbin/chukwa-daemon.sh start hicc
+---
+
+* Data Visualization
+
+  [[1]] Once the web container with HICC has been started, point your favorite browser to:
+
+---
+http://<server>:4080/hicc/
+---
+  
+  [[2]] The default user name and password are "admin" without quotes.
+  
+  [[3]] Metrics data collected by the Chukwa agent will be browsable through the Graph Explorer widget.
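
For anyone working through the steps in the new guide, a few quick smoke tests help confirm each stage. This is a hedged sketch rather than part of the commit: the HBase and HICC checks use only what the guide states, while the agent check assumes the agent's telnet control interface is listening on its default port (9093); adjust to your configuration.

  # List HBase tables; the Chukwa tables created by hbase.schema should appear
  echo "list" | $HBASE_HOME/bin/hbase shell

  # Ask the agent control interface for its active adaptors (port is an assumption)
  printf 'list\nclose\n' | nc localhost 9093

  # Check that HICC answers on the port given in the guide
  curl -I http://localhost:4080/hicc/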

