hadoop-common-commits mailing list archives

From: cdoug...@apache.org
Subject: svn commit: r723855 [4/23] - in /hadoop/core/trunk: ./ src/contrib/ src/contrib/chukwa/ src/contrib/chukwa/bin/ src/contrib/chukwa/conf/ src/contrib/chukwa/docs/ src/contrib/chukwa/docs/paper/ src/contrib/chukwa/hadoop-packaging/ src/contrib/chukwa/lib...
Date: Fri, 05 Dec 2008 20:30:21 GMT
Added: hadoop/core/trunk/src/contrib/chukwa/lib/jstl-LICENSE.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/lib/jstl-LICENSE.txt?rev=723855&view=auto
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/lib/jstl-LICENSE.txt (added)
+++ hadoop/core/trunk/src/contrib/chukwa/lib/jstl-LICENSE.txt Fri Dec  5 12:30:14 2008
@@ -0,0 +1,201 @@
+ Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

Added: hadoop/core/trunk/src/contrib/chukwa/lib/jstl-README.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/lib/jstl-README.txt?rev=723855&view=auto
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/lib/jstl-README.txt (added)
+++ hadoop/core/trunk/src/contrib/chukwa/lib/jstl-README.txt Fri Dec  5 12:30:14 2008
@@ -0,0 +1,344 @@
+---------------------------------------------------------------------------
+Standard Tag Library 1.1 -- SOURCE DISTRIBUTION
+---------------------------------------------------------------------------
+Thanks for downloading the source code of the Standard tag library, 
+an implementation of the JavaServer Pages(tm) (JSP)
+Standard Tag Library (JSTL).
+
+JSTL is an effort of the Java Community Process (JCP) and
+comes out of the JSR-052 expert group. For more information on JSTL,
+please go to http://java.sun.com/products/jstl.
+
+---------------------------------------------------------------------------
+BUILD ENVIRONMENT SETUP
+
+---
+J2SE (Java 2 Standard Edition)
+
+Download and install Sun's distribution of J2SE 1.4.2 (or higher) 
+for your operating system platform. J2SE can be found at 
+http://java.sun.com/j2se.
+
+Sun's distribution of J2SE 1.4.2 includes many of the libraries that
+standard 1.1 depends on. However, please note that standard 1.1 will
+compile and run on previous versions of J2SE (from 1.3 up to 1.4.1) as
+long as the standard 1.1 dependencies not included in these versions
+      of the J2SE are properly set up.  See section 'LIBRARY DEPENDENCIES'
+for details.
+
+  - Set a JAVA_HOME environment variable to point at the directory 
+    where J2SE is installed.
+  - Add the JAVA_HOME/bin directory to your PATH.
+
+---
+Ant
+
+Download and install version 1.5 or higher of the Jakarta Ant Project
+distribution. Ant can be found at http://ant.apache.org.
+
+  - Set the ANT_HOME environment variable to point at your Ant 
+    distribution directory.
+  - Add the ANT_HOME/bin directory to your PATH.
+
+---------------------------------------------------------------------------
+LIBRARY DEPENDENCIES
+
+This version of the Standard Tag Library has the following compile-time
+dependencies:
+
+   1. Dependencies not included in J2SE:
+      - Servlet 2.4
+      - JSP 2.0
+
+   2. Dependencies included in newer J2SEs (1.4.2 and higher)
+      - JAXP 1.2 
+      - Xalan 2.5 
+      - JDBC Standard Extension 2.0
+
+Since all of the dependencies in (2) are included in Sun's
+distribution of J2SE 1.4.2 (or higher), it is the J2SE version of
+choice for compiling and running the standard tag library.
+
+If you still need the jar files for the dependencies listed in (2), 
+instructions on where you can get these jar files are included below.
+
+---
+build.properties
+
+- Copy the file standard/build_sample_standard.properties to build.properties.
+
+- Edit build.properties and make the following modifications:
+    - Set the "base.dir" property in build.properties to the base directory
+      of your 'standard' distribution. It must be an absolute path.
+    - Set the jar file properties to the absolute path and filename 
+      for the jar files required to build the standard tag library
+      (see below).
+
+---
+Servlet 2.4 and JSP 2.0
+
+Download and install the Servlet 2.4 and JSP 2.0 APIs.  The jar files
+for these APIs may be found in distributions of jakarta-servlet-api-5-bin
+and jakarta-jsp-api-5-bin at http://cvs.apache.org/builds/jakarta-tomcat-5/nightly.  
+They are also included in Tomcat 5, available at http://jakarta.apache.org/tomcat.
+
+Set the following properties in build.properties to the
+file paths of the jars:
+  - servlet24.jar
+  - jsp20.jar
+
+---
+JAXP 1.2
+[required only if building with versions of J2SE that do not include
+the JAXP APIs, such as J2SE 1.3]
+
+The JAXP 1.2 jar files can be obtained in the Java Web Services
+Developer Pack (JWSDP) available at 
+http://java.sun.com/products/jwsdp.
+
+Set the following properties in build.properties to the
+file paths of the jars:
+  - jaxp-api.jar
+  - dom.jar
+  - sax.jar
+  - xercesImpl.jar
+
+---
+Xalan 2.5
+[required only if building with J2SE 1.3 up to J2SE 1.4.1]
+
+The Xalan jar file can be obtained in the Java Web Services
+Developer Pack (JWSDP) available at 
+http://java.sun.com/products/jwsdp, as well as from 
+Apache at http://xml.apache.org/xalan-j.
+
+Set the "xalan.jar" property in build.properties to the
+jar file of Xalan.
+
+If using JDK 1.3, put xalan.jar in Ant's lib directory so that the
+XSLT transformations used to build the documentation run properly.
+
+---
+JDBC Standard Extension 2.0
+[required only if building with J2SE 1.3]
+
+The JDBC 2.0 Optional Package can be obtained from:
+http://java.sun.com/products/jdbc/download.html
+
+Set the "jdbc2_0-stdext.jar" property in build.properties to the
+JDBC 2.0 Standard Extensions jar file path.
+
+---------------------------------------------------------------------------
+Building the Standard tag library
+
+To build the distribution, set your current directory to the 'standard'
+directory into which you unpacked the distribution.
+
+Build 'standard' by executing ant in a shell. Some common build targets
+include:
+
+       > ant         <-- builds the intermediate form of the library,
+                         documentation, and example targets
+       > ant dist    <-- builds all the distribution targets
+       > ant clean   <-- deletes intermediate results so that targets can
+                         be rebuilt from scratch.
+
+Two directory hierarchies are created to contain the results of the
+build:
+
+{base.dir}/
+    build/           <-- Contains intermediate form results of
+                         building standard custom library
+    dist/            <-- Contains the files that will be included
+                         in the binary distribution of the
+                         standard project
+
+The following directory and files are created when doing a build:
+
+   * build/standard - Location of all directories and files built for the 
+     standard taglib.
+   * build/standard/standard - Results of the build process
+     (classes, jar files, tlds)
+   * build/standard/standard-doc/ - Files used to create the
+     standard-doc.war file
+   * build/standard/standard-examples/ - Files used to create the 
+     standard-examples.war file.
+
+The following directory and files are created when doing a distribution
+build:
+
+   * dist/standard/ - Location of all files built for a binary
+     distribution of the taglib.
+   * dist/standard/README - Information on using the binary distribution
+     of the standard taglib.
+   * dist/standard/javadoc/ - The javadocs
+   * dist/standard/lib/ - The standard jar files: jstl.jar and
+     standard.jar
+   * dist/standard/tld/ - Directory with the Tag Lib Descriptors for 
+     the tag library.
+   * dist/standard/standard-doc.war - Tag Library documentation
+     war file.
+   * dist/standard/standard-examples.war - Tag Library examples
+     war file.
+
+---------------------------------------------------------------------------
+USING THE STANDARD TAG LIBRARY
+
+See the README file of the binary distribution you have built with these
+instructions.
+
+---------------------------------------------------------------------------
+
+UNIT TESTING THE STANDARD TAG LIBRARY
+
+---------------------------------------------------------------------------
+OVERVIEW OF THE UNIT TEST FRAMEWORK
+
+The Unit test directory hierarchy should mimic the RI implementation
+hierarchy. This way, you have a mapping between the unit test and the
+file that it covers.
+
+If there's no appropriate mapping, you can put the test in the general
+test directory:
+
+<JSTL_HOME>/test/org/apache/taglibs/standard 
+
+Tests should be written using the JUnit naming conventions; test file
+names start with "Test" ("Test*.java").
+
+Here is the Unit test directory hierarchy:
+
+  <JSTL_HOME>/
+    test/
+      conf/                            <-- unit test configuration files
+      web/                             <-- unit test web application files
+        WEB-INF/
+      org/
+        apache/
+          taglibs/
+            standard/
+              extra/
+                spath/
+              functions/
+              lang/
+                jstl/
+              resources/
+              tag/
+                common/
+                  core/
+                  fmt/
+                  sql/
+                  xml/
+                el/
+                  core/
+                  fmt/
+                  sql/
+                  xml/
+                rt/
+                  core/
+                  fmt/
+                  sql/
+                  xml/
+              tei/
+              tlv/
+
+
+---------------------------------------------------------------------------
+UNIT TEST BUILD ENVIRONMENT SETUP
+
+In order to build the Standard Unit Test Framework, you will need to set the
+following properties and download the corresponding implementation files.
+See "build_sample_standard.properties" for the definitions:
+
+        build.dir                     Base directory into which we are building
+                                      the components.
+        build.classes                 Base JSTL classes directory 
+
+        jstl.jar                      JSTL API jar file
+        standard.jar                  JSTL implementation jar file
+
+        servlet24.jar                 Servlet jar
+        jsp20.jar                     JSP jar
+
+        junit.jar                     JUnit jar
+
+        cactus.jar                    Cactus jar 
+        cactus.ant.jar                Cactus custom Ant tasks jar
+        aspectjrt.jar                 Cactus AspectJ Language Support jar
+        httpclient.jar                Cactus httpclient jar
+        commons-logging.jar           Cactus logging jar
+
+        tomcat.home                   Home directory where Tomcat 
+                                      is installed
+        tomcat.webapps                Tomcat webapps directory
+        username                      Tomcat username with manager role
+                                      privileges
+        password                      password for the user with manager
+                                      privileges
+
+        web.server.host               hostname for the running webserver
+        web.server.port               port for the running webserver
+
+        compile.debug                 debug mode for compilation
+        compile.deprecation           deprecation mode for compilation
+        compile.optimize              optimization mode for compilation
+
+---------------------------------------------------------------------------
+BUILDING THE UNIT TESTS
+
+Some common unit test build targets include:
+    > ant test                       <-- Build, deploy and run all the tests
+    > ant run.cactus                 <-- Build, deploy and run the Cactus tests
+    > ant run.junit                  <-- Build, deploy and run the Junit tests
+
+To execute the build-tests.xml unit test targets directly, you can do the
+following:
+
+    > ant -f build-tests.xml <target>
+
+Some examples of targets available in the build-tests.xml file include:
+
+    > ant -f build-tests.xml props    <-- Print out test environment properties
+    > ant -f build-tests.xml undeploy <-- Manually undeploy the Cactus 
+                                          Tests web application
+    > ant -f build-tests.xml deploy   <-- Manually deploy the Cactus Test 
+                                          web application
+
+The following directories will be created when building the Unit Tests:
+    build/
+      standard/
+        standard/
+          test/
+            WEB-INF/
+              classes/
+                ...
+              lib/
+            org/
+                ...                  
+
+A Unit Test web application war file will also be created. It is located here:
+    build/standard/standard/jstl_test.war
+
+---------------------------------------------------------------------------
+UNIT TEST RUNTIME SETUP
+
+You will need to have the Tomcat manager administration application enabled in
+order to run the tests. The Unit Test build file uses the Tomcat manager to
+automatically deploy and undeploy the Cactus tests.
+
+The following runtime properties need to be properly set in order to
+successfully run the unit tests:
+
+        tomcat.webapps                Tomcat webapps directory
+        username                      Tomcat username with manager role
+                                      privileges
+        password                      password for the user with manager
+                                      privileges
+
+        web.server.host               hostname for the running webserver
+        web.server.port               port for the running webserver
+
+The Tomcat manager application may sometimes get into an unstable state after
+many web application deployments and undeployments. If the Cactus tests start
+failing for unknown reasons, stop your web container and clean out your work
+directory before restarting.
+---------------------------------------------------------------------------
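
The README above requires unit tests to follow the JUnit naming convention, with file names starting with "Test". A minimal sketch of such a test is shown below; the class name and assertion are hypothetical, and it assumes the JUnit 3-style TestCase API provided by junit.jar, not an actual test from the distribution.

    import junit.framework.TestCase;

    // Hypothetical sketch only: illustrates the Test*.java naming convention
    // and the JUnit 3 structure assumed by the unit test build.
    public class TestNamingConventionExample extends TestCase {

        // JUnit 3 discovers test methods by the "test" prefix.
        public void testTrivialAssertion() {
            assertEquals("arithmetic sanity check", 4, 2 + 2);
        }
    }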

Modified: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/Chunk.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/Chunk.java?rev=723855&r1=723854&r2=723855&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/Chunk.java (original)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/Chunk.java Fri Dec  5 12:30:14 2008
@@ -93,13 +93,13 @@
   public int getSerializedSizeEstimate();
   
 /**
- * @return tags from this chunk.
+ * @return the name of the cluster that this chunk comes from.
  * 
  */
   public String getTags();  
   
   /**
-   * Set tags to this chunk.
+   * Set the name of the cluster that this chunk comes from.
    * 
    */
     public void setTags(String tags);  
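
The javadoc change above pins down the meaning of a chunk's tag string: it names the cluster the data came from. The helper below is a hedged usage sketch of that contract; the class and method are hypothetical and not part of this patch.

    import org.apache.hadoop.chukwa.Chunk;

    // Hypothetical helper, not part of this patch: illustrates that the tag
    // string on a Chunk is expected to carry the originating cluster name.
    public class ChunkTagExample {
        public static void labelWithCluster(Chunk chunk, String clusterName) {
            chunk.setTags(clusterName);        // record which cluster produced the data
            String recorded = chunk.getTags(); // read the cluster name back
            if (!clusterName.equals(recorded)) {
                throw new IllegalStateException("tags did not round-trip");
            }
        }
    }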

Modified: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/ChunkImpl.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/ChunkImpl.java?rev=723855&r1=723854&r2=723855&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/ChunkImpl.java (original)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/ChunkImpl.java Fri Dec  5 12:30:14 2008
@@ -30,7 +30,7 @@
 public class ChunkImpl implements org.apache.hadoop.io.Writable, Chunk 
 {
   public static int PROTOCOL_VERSION=1;
-	
+  
   private String source = "";
   private String application = "";
   private String dataType = "";
@@ -38,10 +38,10 @@
   private byte[] data = null;
   private int[] recordEndOffsets;
   private int protocolVersion=1;
-  private long seqID;
-  
   private String debuggingInfo="";
+  
   private transient Adaptor initiator;
+  long seqID;
   
   ChunkImpl() {
     this.tags = ChukwaAgent.getTags();

Modified: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/conf/ChukwaConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/conf/ChukwaConfiguration.java?rev=723855&r1=723854&r2=723855&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/conf/ChukwaConfiguration.java (original)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/conf/ChukwaConfiguration.java Fri Dec  5 12:30:14 2008
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.chukwa.conf;
 
+import java.io.File;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Logger;
@@ -32,16 +34,30 @@
 	public ChukwaConfiguration(boolean loadDefaults) {
 		super();
 		if (loadDefaults) {
-		  String chukwaHome = System.getenv("CHUKWA_HOME");
-		  if (chukwaHome == null)
-		    chukwaHome = ".";
-		  log.info("chukwaHome is " + chukwaHome);
-		  
-			super.addResource(new Path(chukwaHome + "/conf/chukwa-collector-conf.xml"));
-			log.debug("added chukwa-collector-conf.xml to ChukwaConfiguration");
+
+			String chukwaHome = System.getenv("CHUKWA_HOME");
+			if (chukwaHome == null){
+				chukwaHome = ".";
+			}
+
+			if(!chukwaHome.endsWith("/"))
+			{  chukwaHome = chukwaHome + File.separator; }	
+			String chukwaConf = System.getenv("CHUKWA_CONF_DIR");
+			if (chukwaConf == null)
+			{  chukwaConf = chukwaHome + "conf" + File.separator; }
+
+			log.info("chukwaConf is " + chukwaConf);
+
+		  super.addResource(new Path(chukwaConf + "/chukwa-collector-conf.xml"));
+		  log.debug("added chukwa-collector-conf.xml to ChukwaConfiguration");
+
+		  super.addResource(new Path(chukwaConf + "/chukwa-agent-conf.xml"));
+		  log.debug("added chukwa-agent-conf.xml to ChukwaConfiguration");
+
+		  super.addResource(new Path(chukwaConf + "/hadoop-site.xml"));
+		  log.debug("added hadoop-site.xml to ChukwaConfiguration");
+
 			
-			super.addResource(new Path(chukwaHome + "/conf/chukwa-agent-conf.xml"));
-			log.debug("added chukwa-agent-conf.xml to ChukwaConfiguration");
 		}
 	}
 

Added: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/Aggregator.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/Aggregator.java?rev=723855&view=auto
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/Aggregator.java (added)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/Aggregator.java Fri Dec  5 12:30:14 2008
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.database;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.util.Calendar;
+import java.util.HashMap;
+import java.util.Iterator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+import org.apache.hadoop.chukwa.util.PidFile;
+
+public class Aggregator {
+	private static DatabaseConfig dbc = null;
+
+	private static Log log = LogFactory.getLog(Aggregator.class);
+	private long current = 0;
+    private static PidFile loader=null;
+
+	public Aggregator() {
+
+		dbc = new DatabaseConfig();
+		Calendar now = Calendar.getInstance();
+		current = now.getTimeInMillis();
+	}
+
+	public HashMap<String,String> findMacros(String query) {
+		boolean add=false;
+		HashMap<String,String> macroList = new HashMap<String,String>();
+		String macro="";
+	    for(int i=0;i<query.length();i++) {
+	    	if(query.charAt(i)==']') {
+	    		add=false;
+	    		if(!macroList.containsKey(macro)) {
+		    		String subString = computeMacro(macro);
+		    		macroList.put(macro,subString);	    			
+	    		}
+	    		macro="";
+	    	}
+	    	if(add) {
+	    		macro=macro+query.charAt(i);
+	    	}
+	    	if(query.charAt(i)=='[') {
+	    		add=true;
+	    	}
+	    }
+	    return macroList;
+	}
+
+	public String computeMacro(String macro) {
+		if(macro.indexOf("avg(")==0) {
+			String meta="";
+			String[] table = dbc.findTableName(macro.substring(4,macro.indexOf(")")), current, current);
+			try {
+				String cluster = System.getProperty("CLUSTER");
+				if(cluster==null) {
+					cluster="unknown";
+				}
+				DatabaseWriter db = new DatabaseWriter(cluster);
+
+			    String query = "select * from "+table[0]+" order by timestamp desc limit 1";
+	            log.debug("Query: "+query);
+	            ResultSet rs = db.query(query);
+	            if(rs==null) {
+	          	    throw new SQLException("Table is undefined.");
+	            }
+	            ResultSetMetaData rmeta = rs.getMetaData();
+	            if(rs.next()) {
+	            	boolean first=true;
+	                for(int i=1;i<=rmeta.getColumnCount();i++) {
+	                	if(!first) {
+	                		meta=meta+",";
+	                	}
+		                if(rmeta.getColumnType(i)==java.sql.Types.VARCHAR) {
+		                	meta=meta+"count("+rmeta.getColumnName(i)+") as "+rmeta.getColumnName(i);
+		                	first=false;
+		                } else if(rmeta.getColumnType(i)==java.sql.Types.DOUBLE || 
+		                		  rmeta.getColumnType(i)==java.sql.Types.INTEGER || 
+		                		  rmeta.getColumnType(i)==java.sql.Types.FLOAT) {
+		                	meta=meta+"avg("+rmeta.getColumnName(i)+")";
+		                	first=false;
+		                } else if(rmeta.getColumnType(i)==java.sql.Types.TIMESTAMP) {
+		                	// Skip the column
+		                } else {
+		                	meta=meta+"avg("+rmeta.getColumnName(i)+")";
+		                	first=false;		                	
+		                }
+		            }
+	            }
+			} catch(SQLException ex) {
+				log.error(ex);
+			}
+			return meta;
+		} else if(macro.indexOf("now")==0) {
+			return DatabaseWriter.formatTimeStamp(current);
+		} else if(macro.indexOf("past_hour")==0) {
+			return DatabaseWriter.formatTimeStamp(current-3600*1000L);
+		}
+		String[] tableList = dbc.findTableName(macro,current,current);
+		return tableList[0];
+	}
+
+	public static String getContents(File aFile) {
+        StringBuffer contents = new StringBuffer();    
+        try {
+        	BufferedReader input =  new BufferedReader(new FileReader(aFile));
+        	try {
+        		String line = null; //not declared within while loop
+        		while (( line = input.readLine()) != null){
+        			contents.append(line);
+        			contents.append(System.getProperty("line.separator"));
+        		}
+        	} finally {
+        		input.close();
+        	}
+        } catch (IOException ex){
+        	ex.printStackTrace();
+        }    
+        return contents.toString();
+    }
+
+	public void process(String table, String query) {
+		ResultSet rs = null;
+	    long start = current;
+	    long end = current;
+        
+		String cluster = System.getProperty("CLUSTER");
+		if(cluster==null) {
+			cluster="unknown";
+		}
+	    DatabaseWriter db = new DatabaseWriter(cluster);
+			    // Find the last aggregated value from table
+			    String[] tmpList = dbc.findTableName(table,start,end);
+			    String timeTest = "select timestamp from "+tmpList[0]+" order by timestamp desc limit 1";
+			    try {
+					rs = db.query(timeTest);
+				    while(rs.next()) {
+				    	start=rs.getTimestamp(1).getTime();
+				    	end=start;
+				    }
+			    } catch (SQLException e) {
+					// TODO Auto-generated catch block
+					e.printStackTrace();
+				}
+			    // Transform table names
+                HashMap<String, String> macroList = findMacros(query);
+                Iterator<String> macroKeys = macroList.keySet().iterator();
+                while(macroKeys.hasNext()) {
+                	String mkey = macroKeys.next();
+                	log.debug("replacing:"+mkey+" with "+macroList.get(mkey));
+			    	query = query.replace("["+mkey+"]", macroList.get(mkey));
+                }
+				log.info(query);
+                db.execute(query);
+            db.close();
+	}
+
+    public static void main(String[] args) {
+        loader=new PidFile(System.getProperty("CLUSTER")+"Aggregator");
+    	dbc = new DatabaseConfig();    	
+    	String queries = Aggregator.getContents(new File(System.getenv("CHUKWA_CONF_DIR")+File.separator+"aggregator.sql"));
+    	String[] query = queries.split("\n");
+    	for(int i=0;i<query.length;i++) {
+    		    int startOffset = query[i].indexOf("[")+1;
+    		    int endOffset = query[i].indexOf("]");
+    		    if(query[i].equals("")) {
+    		    } else if(startOffset==0 || endOffset==-1) {
+    		    	log.error("Unable to extract table name from query:"+query[i]);
+    		    } else if(query[i].indexOf("#")==0) {
+    		    	log.debug("skipping: "+query[i]);
+    		    } else {
+    		    	String table = query[i].substring(startOffset, endOffset);
+    		    	Aggregator dba = new Aggregator();
+    		    	dba.process(table, query[i]);
+    		    }
+        }
+        loader.clean();
+    }
+
+}
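
Aggregator.main() reads aggregator.sql from CHUKWA_CONF_DIR and, for every non-empty, non-comment line, substitutes the square-bracket macros before executing the query: [now] and [past_hour] resolve to timestamps and a bare [table] macro resolves to the partitioned table for the current time. The class below is a self-contained sketch of that bracket scan and substitution; the sample query and the substituted values are made up for illustration.

    import java.util.HashMap;
    import java.util.Map;

    // Self-contained sketch of the "[macro]" scan performed by findMacros()
    // and the replacement done in process(); all values are hypothetical.
    public class MacroSubstitutionSketch {
        public static void main(String[] args) {
            String query = "replace into [system_metrics] (select * from [system_metrics]"
                         + " where Timestamp >= '[past_hour]' and Timestamp <= '[now]')";
            Map<String, String> macros = new HashMap<String, String>();
            StringBuilder current = new StringBuilder();
            boolean collecting = false;
            for (int i = 0; i < query.length(); i++) {
                char c = query.charAt(i);
                if (c == ']') {                        // macro closed: resolve it once
                    collecting = false;
                    String key = current.toString();
                    if (!macros.containsKey(key)) {
                        macros.put(key, fakeComputeMacro(key));
                    }
                    current.setLength(0);
                } else if (collecting) {
                    current.append(c);
                }
                if (c == '[') {                        // macro opens on the next character
                    collecting = true;
                }
            }
            for (Map.Entry<String, String> e : macros.entrySet()) {
                query = query.replace("[" + e.getKey() + "]", e.getValue());
            }
            System.out.println(query);
        }

        // Stand-in for Aggregator.computeMacro(), which consults DatabaseConfig
        // and the database; here the values are hard-coded.
        private static String fakeComputeMacro(String macro) {
            if (macro.equals("now")) {
                return "2008-12-05 12:30:14";
            } else if (macro.equals("past_hour")) {
                return "2008-12-05 11:30:14";
            }
            return macro + "_2031_week";               // pretend partitioned table name
        }
    }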

Added: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/Consolidator.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/Consolidator.java?rev=723855&view=auto
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/Consolidator.java (added)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/Consolidator.java Fri Dec  5 12:30:14 2008
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.database;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.inputtools.mdl.DataConfig;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+import org.apache.hadoop.chukwa.util.ExceptionUtil;
+import org.apache.hadoop.chukwa.util.PidFile;
+
+import java.sql.SQLException;
+import java.sql.ResultSet;
+import java.util.Calendar;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.sql.ResultSetMetaData;
+import java.text.SimpleDateFormat;
+
+public class Consolidator extends Thread {
+	private DatabaseConfig dbc = new DatabaseConfig();
+
+	private static Log log = LogFactory.getLog(Consolidator.class);
+	private String table = null;
+	private int[] intervals;
+    private static PidFile loader=null;
+
+	public Consolidator(String table, String intervalString) {
+		super(table);
+		try {
+			int i=0;
+			String[] temp = intervalString.split("\\,");
+			intervals = new int[temp.length];
+			for(String s: temp) {
+			    intervals[i]=Integer.parseInt(s);
+			    i++;
+			}
+			this.table = table;
+		} catch (NumberFormatException ex) {
+			log.error("Unable to parse summary interval");
+		}		
+	}
+	public void run() {
+		ResultSet rs = null;
+		String[] columns;
+		int[] columnsType;
+		String groupBy = "";
+        
+		for(int interval : intervals) {
+			// Start reducing from beginning of time;
+			Calendar aYearAgo = Calendar.getInstance();
+			aYearAgo.set(2008, 0, 1, 0, 0, 0);
+
+			long start = aYearAgo.getTimeInMillis();  //starting from 2008/01/01
+			long end = start + (interval*60000);
+			log.debug("start time: "+start);
+			log.debug("end time: "+end);
+			Calendar now = Calendar.getInstance();
+			String cluster = System.getProperty("CLUSTER");
+			if(cluster==null) {
+				cluster="unknown";
+			}
+			DatabaseWriter db = new DatabaseWriter(cluster);
+			String fields = null;
+			String dateclause = null;
+			boolean emptyPrimeKey = false;
+			log.info("Consolidate for "+interval+" minutes interval.");
+			
+			String[] tmpTable = dbc.findTableName(this.table, start, end);
+			String table = tmpTable[0];
+			String sumTable="";
+			if(interval==5) {
+				long partition=now.getTime().getTime() / DatabaseConfig.WEEK;
+				StringBuilder stringBuilder = new StringBuilder();
+				stringBuilder.append(this.table);
+				stringBuilder.append("_");
+				stringBuilder.append(partition);
+				stringBuilder.append("_week");
+				table=stringBuilder.toString();
+				long partition2=now.getTime().getTime() / DatabaseConfig.MONTH;
+				sumTable =this.table+"_"+partition2+"_month";
+			} else if(interval==30) {
+				long partition=now.getTime().getTime() / DatabaseConfig.MONTH;
+				table=this.table+"_"+partition+"_month";				
+				long partition2=now.getTime().getTime() / DatabaseConfig.QUARTER;
+				sumTable =this.table+"_"+partition2+"_month";
+			} else if(interval==180) {
+				long partition=now.getTime().getTime() / DatabaseConfig.QUARTER;
+				table=this.table+"_"+partition+"_quarter";
+				long partition2=now.getTime().getTime() / DatabaseConfig.YEAR;
+				sumTable =this.table+"_"+partition2+"_month";
+			} else if(interval==720) {
+				long partition=now.getTime().getTime() / DatabaseConfig.YEAR;
+				table=this.table+"_"+partition+"_year";
+				long partition2=now.getTime().getTime() / DatabaseConfig.DECADE;
+				sumTable =this.table+"_"+partition2+"_month";
+			}
+			// Find the most recent entry
+			try {
+			    String query = "select * from "+sumTable+" order by timestamp desc limit 1";
+	            log.debug("Query: "+query);
+	            rs = db.query(query);
+	            if(rs==null) {
+	          	    throw new SQLException("Table is undefined.");
+	            }
+	            ResultSetMetaData rmeta = rs.getMetaData();
+	            boolean empty=true;
+	            if(rs.next()) {
+	                for(int i=1;i<=rmeta.getColumnCount();i++) {
+		                if(rmeta.getColumnName(i).toLowerCase().equals("timestamp")) {
+		            	    start = rs.getTimestamp(i).getTime();
+		                }
+	                }
+	                empty=false;
+	            }
+	            if(empty) {
+	              	throw new SQLException("Table is empty.");
+	            }
+                end = start + (interval*60000);
+			} catch (SQLException ex) {
+			    try {
+				    String query = "select * from "+table+" order by timestamp limit 1";
+		            log.debug("Query: "+query);
+	                rs = db.query(query);
+	                if(rs.next()) {
+	    	            ResultSetMetaData rmeta = rs.getMetaData();
+	    	            for(int i=1;i<=rmeta.getColumnCount();i++) {
+	    	                if(rmeta.getColumnName(i).toLowerCase().equals("timestamp")) {
+	    	                	start = rs.getTimestamp(i).getTime();
+	    	                }
+	    	            }
+				    }
+                    end = start + (interval*60000);
+				} catch(SQLException ex2) {
+				    log.error("Unable to determine starting point in table: "+this.table);
+					log.error("SQL Error:"+ExceptionUtil.getStackTrace(ex2));
+					return;
+				}
+			}
+			try {
+                ResultSetMetaData rmeta = rs.getMetaData();
+                int col = rmeta.getColumnCount();
+                columns = new String[col];
+                columnsType = new int[col];
+                for(int i=1;i<=col;i++) {
+            	    columns[i-1]=rmeta.getColumnName(i);
+              	    columnsType[i-1]=rmeta.getColumnType(i);
+                }
+
+		        for(int i=0;i<columns.length;i++) {
+		    	    if(i==0) {
+		    		    fields=columns[i];
+	    	            if(columnsType[i]==java.sql.Types.VARCHAR) {
+	    	            	if(groupBy.equals("")) {
+	    	            	    groupBy = " group by "+columns[i];
+	    	            	} else {
+		    	            	groupBy = groupBy+","+columns[i];	    	            		
+	    	            	}
+	    	            }
+		    	    } else {
+		    		    if(columnsType[i]==java.sql.Types.VARCHAR || columnsType[i]==java.sql.Types.TIMESTAMP) {
+		    	            fields=fields+","+columns[i];
+		    	            if(columnsType[i]==java.sql.Types.VARCHAR) {
+		    	            	if(groupBy.equals("")) {
+		    	            	    groupBy = " group by "+columns[i];
+		    	            	} else {
+		    	            	    groupBy = groupBy+","+columns[i];		    	            		
+		    	            	}
+		    	            }
+		    		    } else {
+		    	            fields=fields+",AVG("+columns[i]+") as "+columns[i];
+		    		    }
+		    	    }
+		        }
+			} catch(SQLException ex) {
+			  	log.error("SQL Error:"+ExceptionUtil.getStackTrace(ex));
+			  	return;
+			}
+            if(groupBy.equals("")) {
+            	emptyPrimeKey = true;
+            }
+			long previousStart = start;
+			long partition = 0;
+			String timeWindowType="week";
+        	while(end < now.getTimeInMillis()-(interval*2*60000)) {
+			    // Select new data sample for the given intervals
+			    if(interval == 5) {
+			    	timeWindowType="month";
+					partition = start / DatabaseConfig.MONTH;
+			    } else if(interval == 30) {
+			    	timeWindowType="quarter";
+					partition = start / DatabaseConfig.QUARTER;
+			    } else if(interval == 180) {
+			    	timeWindowType="year";
+					partition = start / DatabaseConfig.YEAR;
+			    } else if(interval == 720) {
+			    	timeWindowType="decade";
+					partition = start / DatabaseConfig.DECADE;
+			    }
+	            SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+			    String startS = formatter.format(start);
+			    String endS = formatter.format(end);
+			    dateclause = "Timestamp >= '"+startS+"' and Timestamp <= '"+endS+"'";
+			    if(emptyPrimeKey) {
+			    	groupBy = " group by FLOOR(UNIX_TIMESTAMP(TimeStamp)/"+interval*60+")";
+			    }
+				String query = "replace into "+this.table+"_"+partition+"_"+timeWindowType+" (select "+fields+" from "+table+" where "+dateclause+groupBy+")";
+				log.debug(query);
+                db.execute(query);
+        		if(previousStart == start) {
+        			start = start + (interval*60000);
+        			end = start + (interval*60000);
+            		previousStart = start;
+        		}
+        	}
+            db.close();
+		}
+	}
+
+    public static void main(String[] args) {
+        DataConfig mdl = new DataConfig();
+        loader=new PidFile(System.getProperty("CLUSTER")+"Consolidator");
+        HashMap<String, String> tableNames = (HashMap<String, String>) mdl.startWith("consolidator.table.");
+        try {
+                Iterator<String> ti = (tableNames.keySet()).iterator();
+                while(ti.hasNext()) {
+                        String table = ti.next();
+                String interval=mdl.get(table);
+                table = table.substring(19);
+                        log.info("Summarizing table:"+table);
+                Consolidator dbc = new Consolidator(table, interval);
+                dbc.run();
+                }
+        } catch (NullPointerException e) {
+                log.error("Unable to summarize database.");
+                log.error("Error:"+ExceptionUtil.getStackTrace(e));
+        }
+        loader.clean();
+    }
+}
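
The consolidated rows are written into partitioned tables named <table>_<partition>_<period>, where the partition number is simply the timestamp divided by the period length (the WEEK/MONTH/QUARTER/YEAR constants defined in DatabaseConfig, added later in this message). A small sketch of that naming scheme follows; the base table name and timestamp are made up.

    // Sketch of the "<table>_<partition>_<period>" naming used by Consolidator;
    // the period constants mirror those in DatabaseConfig.
    public class PartitionNameSketch {
        static final long WEEK  = 7L * 24 * 60 * 60 * 1000;
        static final long MONTH = 30L * 24 * 60 * 60 * 1000;

        static String partitionedName(String base, long timestampMillis, long period, String suffix) {
            long partition = timestampMillis / period;    // e.g. epoch millis / WEEK
            return base + "_" + partition + "_" + suffix;
        }

        public static void main(String[] args) {
            long ts = 1228500000000L;                     // roughly 2008-12-05 in epoch millis
            System.out.println(partitionedName("system_metrics", ts, WEEK, "week"));   // ..._2031_week
            System.out.println(partitionedName("system_metrics", ts, MONTH, "month")); // ..._473_month
        }
    }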

Added: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/DataExpiration.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/DataExpiration.java?rev=723855&view=auto
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/DataExpiration.java (added)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/DataExpiration.java Fri Dec  5 12:30:14 2008
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.database;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+public class DataExpiration {
+	private static DatabaseConfig dbc = null;
+	private static Log log = LogFactory.getLog(DataExpiration.class);		
+	public DataExpiration() {
+    	if(dbc==null) {
+    	    dbc = new DatabaseConfig();
+    	}
+    }
+	public void dropTables(long start, long end) {
+		String cluster = System.getProperty("CLUSTER");
+		if(cluster==null) {
+			cluster="unknown";
+		}
+		DatabaseWriter dbw = new DatabaseWriter(cluster);
+		try {
+			HashMap<String, String> dbNames = dbc.startWith("report.db.name.");
+			Iterator<String> ki = dbNames.keySet().iterator();
+			while(ki.hasNext()) {
+				String name = ki.next();
+				String tableName = dbNames.get(name);
+				String[] tableList = dbc.findTableName(tableName, start, end);
+				for(String tl : tableList) {
+					log.debug("table name: "+tableList[0]);
+					try {
+						String[] parts = tl.split("_");
+						int partition = Integer.parseInt(parts[parts.length-2]);
+						String table = "";
+						for(int i=0;i<parts.length-2;i++) {
+							if(i!=0) {
+								table=table+"_";
+							}
+							table=table+parts[i];
+						}
+						partition=partition-3;
+						String dropPartition="drop table if exists "+table+"_"+partition+"_"+parts[parts.length-1];
+						dbw.execute(dropPartition);
+						partition--;
+						dropPartition="drop table if exists "+table+"_"+partition+"_"+parts[parts.length-1];
+						dbw.execute(dropPartition);
+					} catch(NumberFormatException e) {
+						log.error("Error in parsing table partition number, skipping table:"+tableList[0]);
+					} catch(ArrayIndexOutOfBoundsException e) {
+						log.debug("Skipping table:"+tableList[0]+", because it has no partition configuration.");
+					}
+				}
+			}
+		} catch(Exception e) {
+			e.printStackTrace();
+		}		
+	}
+	
+	public static void usage() {
+		System.out.println("DataExpiration usage:");
+		System.out.println("java -jar chukwa-core.jar org.apache.hadoop.chukwa.database.DataExpiration <date> <time window size>");
+		System.out.println("     date format: YYYY-MM-DD");
+		System.out.println("     time window size: 7, 30, 91, 365");		
+	}
+	
+	public static void main(String[] args) {
+		DataExpiration de = new DataExpiration();
+		long now = (new Date()).getTime();
+		long start = now;
+		long end = now;
+		if(args.length==2) {
+			SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
+			try {
+				start = sdf.parse(args[0]).getTime();				
+				end = start + (Long.parseLong(args[1])*1440*60*1000L);
+				de.dropTables(start, end);				
+			} catch(Exception e) {
+				usage();
+			}
+		} else {
+			usage();
+		}
+    }
+}
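
dropTables() walks the partitioned table names that fall inside the requested window, peels the partition number out of each name (the second-to-last underscore-separated token), and drops the partitions three and four steps behind it. The class below is a standalone sketch of that name parsing and of the statement it produces; the input table name is made up.

    // Standalone sketch of the name parsing in DataExpiration.dropTables().
    public class ExpirationSketch {
        public static void main(String[] args) {
            String tl = "system_metrics_2031_week";
            String[] parts = tl.split("_");
            int partition = Integer.parseInt(parts[parts.length - 2]);    // 2031
            StringBuilder base = new StringBuilder();
            for (int i = 0; i < parts.length - 2; i++) {
                if (i != 0) {
                    base.append("_");
                }
                base.append(parts[i]);                                    // "system_metrics"
            }
            partition = partition - 3;                                    // expire older partitions
            String drop = "drop table if exists " + base + "_" + partition
                        + "_" + parts[parts.length - 1];
            System.out.println(drop);   // drop table if exists system_metrics_2028_week
        }
    }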

Added: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/DatabaseConfig.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/DatabaseConfig.java?rev=723855&view=auto
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/DatabaseConfig.java (added)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/DatabaseConfig.java Fri Dec  5 12:30:14 2008
@@ -0,0 +1,244 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.database;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import java.util.*;
+
+public class DatabaseConfig {
+    private Configuration config = null;
+	public final static long CENTURY=36500*24*60*60*1000L;
+    public final static long DECADE=3650*24*60*60*1000L;
+    public final static long YEAR=365*24*60*60*1000L;
+    public final static long QUARTER=91250*24*60*60L;
+    public final static long MONTH=30*24*60*60*1000L;
+	public final static long WEEK=7*24*60*60*1000L;
+	public final static long DAY=24*60*60*1000L;
+
+    public DatabaseConfig(String path) {
+        Path fileResource = new Path(path);
+        config = new Configuration();
+        config.addResource(fileResource);
+    }
+    public DatabaseConfig() {
+        Path fileResource = new Path(System.getenv("DATACONFIG"));
+        config = new Configuration();
+        config.addResource(fileResource);
+    }
+
+    public String get(String key) {
+        return config.get(key);
+    }
+    public void put(String key, String value) {
+        this.config.set(key, value);
+    }
+    public Iterator<?> iterator() {
+        return this.config.iterator();
+    }
+    public HashMap<String, String> startWith(String key) {
+        HashMap<String, String> transformer = new HashMap<String, String>();
+        Iterator<?> entries = config.iterator();
+        while(entries.hasNext()) {
+           String entry = entries.next().toString();
+           if(entry.startsWith(key)) {
+               String[] metrics = entry.split("=");
+               transformer.put(metrics[0],metrics[1]);
+           }
+        }
+        return transformer;
+    }    
+    public String[] findTableName(String tableName, long start, long end) {
+    	String[] tableNames = null;
+    	String tableType = "_week";
+		long now = (new Date()).getTime();
+		long timeWindow = end - start;
+		long partitionSize=WEEK;
+		boolean fallback=true;
+		
+		if(config.get("consolidator.table."+tableName)==null) {
+			tableNames = new String[1];
+			tableNames[0]=tableName;
+			return tableNames;
+		}
+		
+		if(timeWindow<=0) {
+			timeWindow=1;			
+		}
+		if(timeWindow > DECADE) {
+			tableType = "_century";
+			partitionSize=CENTURY;
+		} else if(timeWindow > YEAR) {
+			tableType = "_decade";
+			partitionSize=DECADE;
+		} else if(timeWindow > QUARTER) {
+			tableType = "_year";
+			partitionSize=YEAR;			
+		} else if(timeWindow > MONTH) {
+			tableType = "_quarter";
+			partitionSize=QUARTER;
+		} else if(timeWindow > WEEK) {
+			tableType = "_month";
+			partitionSize=MONTH;
+		} else {
+			tableType = "_week";
+			partitionSize=WEEK;
+		}
+
+		long currentPartition = now / partitionSize;
+		long startPartition = start / partitionSize;
+		long endPartition = end / partitionSize;
+		while(fallback && partitionSize!=CENTURY*100) {
+			// Check whether the start date is far in the past relative to the current time.  If so, fall back to down-sampled data.
+			if(startPartition + 2 < currentPartition) {
+				fallback=true;
+			    if(partitionSize==DAY) {
+				    tableType = "_week";
+				    partitionSize=WEEK;
+			    } else if(partitionSize==WEEK) {
+				    tableType = "_month";
+				    partitionSize=MONTH;
+			    } else if(partitionSize==MONTH) {
+				    tableType = "_year";
+				    partitionSize=YEAR;
+			    } else if(partitionSize==YEAR) {
+					tableType = "_decade";
+					partitionSize=DECADE;				
+				} else if(partitionSize==DECADE) {
+					tableType = "_century";
+					partitionSize=CENTURY;
+				} else {
+					partitionSize=100*CENTURY;
+				}
+				currentPartition = now / partitionSize;
+				startPartition = start / partitionSize;
+				endPartition = end / partitionSize;
+			} else {
+				fallback=false;
+			}
+		}
+
+		if(startPartition!=endPartition) {
+			int delta = (int) (endPartition-startPartition);
+			tableNames=new String[delta+1];
+			for(int i=0;i<=delta;i++) {
+				long partition = startPartition+(long)i;
+				tableNames[i]=tableName+"_"+partition+tableType;
+			}
+		} else {
+			tableNames=new String[1];
+			tableNames[0]=tableName+"_"+startPartition+tableType;
+		}
+    	return tableNames;
+    }
+    public String[] findTableNameForCharts(String tableName, long start, long end) {
+    	String[] tableNames = null;
+    	String tableType = "_week";
+		long now = (new Date()).getTime();
+		long timeWindow = end - start;
+		if(timeWindow>60*60*1000) {
+		    timeWindow = timeWindow + 1;
+		}
+		long partitionSize=WEEK;
+		boolean fallback=true;
+		
+		if(config.get("consolidator.table."+tableName)==null) {
+			tableNames = new String[1];
+			tableNames[0]=tableName;
+			return tableNames;
+		}
+		
+		if(timeWindow<=0) {
+			timeWindow=1;			
+		}
+		if(timeWindow > YEAR) {
+			tableType = "_century";
+			partitionSize=CENTURY;			
+		} else if(timeWindow > QUARTER) {
+			tableType = "_century";
+			partitionSize=CENTURY;
+		} else if(timeWindow > MONTH) {
+			tableType = "_decade";
+			partitionSize=DECADE;
+		} else if(timeWindow > WEEK) {
+			tableType = "_year";
+			partitionSize=YEAR;
+		} else if(timeWindow > DAY) {
+			tableType = "_quarter";
+			partitionSize=QUARTER;
+		} else if(timeWindow > 60*60*1000) {
+			tableType = "_month";
+			partitionSize=MONTH;			
+		} else {
+			tableType = "_week";
+			partitionSize = WEEK;
+		}
+
+		long currentPartition = now / partitionSize;
+		long startPartition = start / partitionSize;
+		long endPartition = end / partitionSize;
+		while(fallback && partitionSize!=CENTURY) { // CENTURY is the coarsest chart granularity
+			// Check whether the start date is far in the past relative to the current time.  If so, fall back to down-sampled data.
+			if(startPartition + 2 < currentPartition) {
+				fallback=true;
+			    if(partitionSize==DAY) {
+				    tableType = "_month";
+				    partitionSize=MONTH;
+			    } else if(partitionSize==WEEK) {
+				    tableType = "_quarter";
+				    partitionSize=QUARTER;
+			    } else if(partitionSize==MONTH) {
+				    tableType = "_year";
+				    partitionSize=YEAR;
+			    } else if(partitionSize==YEAR) {
+					tableType = "_decade";
+					partitionSize=DECADE;				
+				} else {
+					partitionSize=CENTURY;
+				}
+				currentPartition = now / partitionSize;
+				startPartition = start / partitionSize;
+				endPartition = end / partitionSize;
+			} else {
+				fallback=false;
+			}
+		}
+
+		if(startPartition!=endPartition) {
+			int delta = (int) (endPartition-startPartition);
+			tableNames=new String[delta+1];
+			for(int i=0;i<=delta;i++) {
+				long partition = startPartition+(long)i;
+				tableNames[i]=tableName+"_"+partition+tableType;
+			}
+		} else {
+			tableNames=new String[1];
+			tableNames[0]=tableName+"_"+startPartition+tableType;
+		}
+    	return tableNames;
+    }
+    
+    public static void main(String[] args) {
+    	DatabaseConfig dbc = new DatabaseConfig();
+    	String[] names = dbc.findTableName("system_metrics",1216140020000L,1218645620000L);
+    	for(String n: names) {
+    		System.out.println("name:"+n);
+    	}
+    }
+}
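
A minimal sketch of how the partition lookup above is typically driven; this assumes the DATACONFIG environment variable points at a data config that defines consolidator.table.system_metrics (without that property findTableName simply returns the unpartitioned table name):

    DatabaseConfig dbc = new DatabaseConfig();
    long end = System.currentTimeMillis();
    long start = end - 2 * DatabaseConfig.DAY;   // two-day window
    // A window this small stays in the weekly partitions, so this prints one or
    // two names of the form system_metrics_<partition>_week.
    for (String name : dbc.findTableName("system_metrics", start, end)) {
        System.out.println(name);
    }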

Added: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/MetricsAggregation.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/MetricsAggregation.java?rev=723855&view=auto
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/MetricsAggregation.java (added)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/MetricsAggregation.java Fri Dec  5 12:30:14 2008
@@ -0,0 +1,159 @@
+package org.apache.hadoop.chukwa.database;
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class MetricsAggregation
+{
+	 private static Log log = LogFactory.getLog(MetricsAggregation.class);
+	 private static Connection conn = null;    
+     private static Statement stmt = null; 
+     private static ResultSet rs = null; 
+     private static DatabaseConfig mdlConfig;
+     
+	/**
+	 * @param args
+	 * @throws SQLException 
+	 */
+	public static void main(String[] args) throws SQLException
+	{
+	       mdlConfig = new DatabaseConfig();
+		
+	       // Connect to the database
+	       String jdbc_url = System.getenv("JDBC_URL_PREFIX")+mdlConfig.get("jdbc.host")+"/"+mdlConfig.get("jdbc.db");
+	       if(mdlConfig.get("jdbc.user")!=null) {
+	           jdbc_url = jdbc_url + "?user=" + mdlConfig.get("jdbc.user");
+	           if(mdlConfig.get("jdbc.password")!=null) {
+	               jdbc_url = jdbc_url + "&password=" + mdlConfig.get("jdbc.password");
+	           }
+	       }
+	       try {
+	           // The newInstance() call is a workaround for some
+	           // broken Java implementations
+                   String jdbcDriver = System.getenv("JDBC_DRIVER");
+	           Class.forName(jdbcDriver).newInstance();
+	           log.info("Initialized JDBC URL: "+jdbc_url);
+	       } catch (Exception ex) {
+	           // handle the error
+	    	   ex.printStackTrace();
+	           log.error(ex,ex);
+	       }
+	       try {
+	           conn = DriverManager.getConnection(jdbc_url);
+	       } catch (SQLException ex) 
+	       {
+	    	   ex.printStackTrace();
+	           log.error(ex,ex);
+	       }      
+	       
+	       // get the latest timestamp for aggregation on this table
+		   // Start = latest
+	       
+	      
+	       
+	       SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+	        
+	       long start = System.currentTimeMillis() - (1000*60*60*24);
+	       long end = System.currentTimeMillis() - (1000*60*10);
+	       // retrieve metadata for cluster_system_metrics
+	       DatabaseConfig dbConf = new DatabaseConfig();
+	       String[] tables = dbConf.findTableName("cluster_system_metrics_2018_week", start, end);
+	       for(String table: tables)
+	       {
+	    	   System.out.println("Table to aggregate per Ts: " + table);
+	    	   stmt = conn.createStatement();
+	    	   rs = stmt.executeQuery("select table_ts from aggregation_admin_table where table_name=\"" 
+	    			   + table + "\"");
+			   if (rs.next())
+			   {
+				   start = rs.getLong(1);
+			   }
+			   else
+			   {
+				   start = 0;
+			   }
+			   
+			   end = start + (1000*60*60*1); // do 1 hour aggregation max 
+			   long now = System.currentTimeMillis();
+			   now = now - (1000*60*10); // wait for 10 minutes
+			   end = Math.min(now, end);
+		     
+			   // TODO REMOVE DEBUG ONLY!
+			   end = now;
+			   
+			   System.out.println("Start Date:" + new Date(start));
+			   System.out.println("End Date:" + new Date(end));
+			   
+		       DatabaseMetaData dbm = conn.getMetaData ();
+		       rs = dbm.getColumns ( null,null,table, null);
+		      	
+		       List<String> cols = new ArrayList<String>();
+		       while (rs.next ())
+		       {
+		          	String s = rs.getString (4); // 4 is column name, 5 data type etc. 
+		          	System.out.println ("Name: " + s);
+		          	int type = rs.getInt(5);
+		          	if (type == java.sql.Types.VARCHAR)
+		          	{
+		          		System.out.println("Type: Varchar " + type);
+		          	}
+		          	else
+		          	{
+		          		cols.add(s);
+		          		System.out.println("Type: Number " + type);
+		          	}
+		       }// end of while.
+		       
+		       // build insert into from select query
+		       String initTable = table.replace("cluster_", "");
+		       StringBuilder sb0 = new StringBuilder();
+		       StringBuilder sb = new StringBuilder();
+		       sb0.append("insert into ").append(table).append(" (");
+		       sb.append(" ( select ");
+		       for (int i=0;i<cols.size();i++)
+		       {
+		    	   sb0.append(cols.get(i));
+		    	   sb.append("avg(").append(cols.get(i)).append(") ");
+		    	   if (i< cols.size()-1)
+		    	   {
+		    		   sb0.append(",");
+		    		   sb.append(",");
+		    	   }
+		       }
+			   sb.append(" from ").append(initTable);
+			   sb.append(" where timestamp between \"");
+			   sb.append(formatter.format(start));
+			   sb.append("\" and \"").append(formatter.format(end));
+			   sb.append("\" group by timestamp  )");
+			  
+		        
+			   // close fields
+			   sb0.append(" )").append(sb);
+			   System.out.println(sb0.toString());
+			   
+			   // run query
+			   conn.setAutoCommit(false);
+			   stmt = conn.createStatement();
+			   stmt.execute(sb0.toString());
+			   
+			   // update last run
+			   stmt = conn.createStatement();
+			   stmt.execute("insert into aggregation_admin_table set table_ts=\"" +  formatter.format(end) +
+					   "\" where table_name=\"" + table + "\"");
+			   conn.commit();
+	       }
+	
+	}
+
+}
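
For reference, the statement printed by System.out.println(sb0.toString()) above comes out roughly as follows; the numeric columns cpu_user and mem_used and the timestamps are illustrative, and the line is wrapped here for readability:

    insert into cluster_system_metrics_2018_week (cpu_user,mem_used )
        ( select avg(cpu_user) ,avg(mem_used)  from system_metrics_2018_week
          where timestamp between "2008-12-05 11:00:00" and "2008-12-05 12:00:00" group by timestamp  )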

Added: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/TableCreator.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/TableCreator.java?rev=723855&view=auto
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/TableCreator.java (added)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/database/TableCreator.java Fri Dec  5 12:30:14 2008
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+package org.apache.hadoop.chukwa.database;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+public class TableCreator {
+	private static DatabaseConfig dbc = null;
+	private static Log log = LogFactory.getLog(TableCreator.class);		
+	public TableCreator() {
+    	if(dbc==null) {
+    	    dbc = new DatabaseConfig();
+    	}
+    }
+	public void createTables() {
+		long now = (new Date()).getTime();
+        createTables(now,now);		
+	}
+	public void createTables(long start, long end) {
+		String cluster = System.getProperty("CLUSTER");
+		if(cluster==null) {
+			cluster="unknown";
+		}
+		DatabaseWriter dbw = new DatabaseWriter(cluster);
+		try {
+			HashMap<String, String> dbNames = dbc.startWith("report.db.name.");
+			Iterator<String> ki = dbNames.keySet().iterator();
+			while(ki.hasNext()) {
+				String name = ki.next();
+				String tableName = dbNames.get(name);
+				String[] tableList = dbc.findTableName(tableName, start, end);
+				log.debug("table name: "+tableList[0]);
+				try {
+				    String[] parts = tableList[0].split("_");
+				    int partition = Integer.parseInt(parts[parts.length-2]);
+				    String table = "";
+				    for(int i=0;i<parts.length-2;i++) {
+				    	if(i!=0) {
+				    		table=table+"_";
+				    	}
+				    	table=table+parts[i];
+				    }
+				    String query = "show create table "+table+"_template;";
+				    ResultSet rs = dbw.query(query);
+                    while(rs.next()) {				    
+                    	log.debug("table schema: "+rs.getString(2));
+                    	query=rs.getString(2);
+                    	log.debug("template table name:"+table+"_template");
+                    	log.debug("replacing with table name:"+table+"_"+partition+"_"+parts[parts.length-1]);
+                    	log.debug("creating table: "+query);
+                    	String createPartition=query.replaceFirst(table+"_template", table+"_"+partition+"_"+parts[parts.length-1]);
+                    	createPartition=createPartition.replaceFirst("TABLE","TABLE IF NOT EXISTS");
+                    	dbw.execute(createPartition);
+                    	partition++;
+                    	createPartition=query.replaceFirst(table+"_template", table+"_"+partition+"_"+parts[parts.length-1]);
+                    	createPartition=createPartition.replaceFirst("TABLE","TABLE IF NOT EXISTS");
+                    	dbw.execute(createPartition);
+                    	partition++;
+                    	createPartition=query.replaceFirst(table+"_template", table+"_"+partition+"_"+parts[parts.length-1]);
+                    	createPartition=createPartition.replaceFirst("TABLE","TABLE IF NOT EXISTS");
+                    	dbw.execute(createPartition);
+                    }
+				} catch(NumberFormatException e) {
+					log.error("Error in parsing table partition number, skipping table:"+tableList[0]);
+				} catch(ArrayIndexOutOfBoundsException e) {
+					log.debug("Skipping table:"+tableList[0]+", because it has no partition configuration.");
+				} catch(SQLException e) {
+					// Swallow SQL errors so that a failure on one table does not stop creation of the remaining tables.
+				}
+			}
+		} catch(Exception e) {
+			e.printStackTrace();
+		}		
+	}
+
+	public static void usage() {
+		System.out.println("TableCreator usage:");
+		System.out.println("java -cp chukwa-core.jar org.apache.hadoop.chukwa.database.TableCreator <date> <time window size>");
+		System.out.println("     date format: YYYY-MM-DD");
+		System.out.println("     time window size: 7, 30, 91, 365, 3650");
+	}
+	
+	public static void main(String[] args) {
+		TableCreator tc = new TableCreator();
+        if(args.length==2) {
+        	try {
+        		SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
+        		long start = sdf.parse(args[0]).getTime();
+        		long end = start + (Long.parseLong(args[1])*1440*60*1000L);
+        		tc.createTables(start, end);
+        	} catch(Exception e) {
+        		System.out.println("Invalid date format or time window size.");
+        		e.printStackTrace();
+				usage();
+        	}
+		} else {
+			tc.createTables();
+		}
+
+    }
+}
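
For example, to pre-create the partition tables covering a one-week window starting on an illustrative date (this assumes DATACONFIG, the CLUSTER property, and the JDBC settings the class relies on are already in place; the classpath is abbreviated):

    java -cp chukwa-core.jar org.apache.hadoop.chukwa.database.TableCreator 2008-12-05 7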

Modified: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/ChunkQueue.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/ChunkQueue.java?rev=723855&r1=723854&r2=723855&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/ChunkQueue.java (original)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/ChunkQueue.java Fri Dec  5 12:30:14 2008
@@ -31,16 +31,16 @@
 {
   /**
    *  Add a chunk to the queue, blocking if queue is full.
-   * @param event
+   * @param chunk
    * @throws InterruptedException if thread is interrupted while blocking
    */
-	public void add(Chunk event) throws InterruptedException;
+	public void add(Chunk chunk) throws InterruptedException;
 	
 	/**
-	 * Return at least one, and no more than count, Chunks into events.
+	 * Add at least one, and no more than count, Chunks to the chunks list.
 	 * Blocks if queue is empty.
 	 */
-	public void collect(List<Chunk> events,int count) throws InterruptedException;
+	public void collect(List<Chunk> chunks,int count) throws InterruptedException;
 	
 	/**
 	 * Return an approximation of the number of chunks in the queue currently.
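
A caller-side sketch of the renamed interface; here queue is a ChunkQueue obtained elsewhere (DataFactory holds one) and incomingChunk is a Chunk produced by some adaptor, both assumptions for illustration:

    List<Chunk> chunks = new ArrayList<Chunk>();
    queue.add(incomingChunk);    // blocks while the queue is full
    queue.collect(chunks, 10);   // blocks until at least one chunk is available,
                                 // then returns up to 10 chunks in the list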

Modified: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/DataFactory.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/DataFactory.java?rev=723855&r1=723854&r2=723855&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/DataFactory.java (original)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/DataFactory.java Fri Dec  5 12:30:14 2008
@@ -31,6 +31,7 @@
 {
   static Logger log = Logger.getLogger(DataFactory.class);
 	static final int QUEUE_SIZE_KB = 10 * 1024;
+	static final String COLLECTORS_FILENAME = "collectors";
 	private static DataFactory dataFactory = null;
 	private ChunkQueue chunkQueue = new MemLimitQueue(QUEUE_SIZE_KB * 1024);
 
@@ -54,14 +55,28 @@
 	 * @return empty list if file does not exist
 	 * @throws IOException on other error
 	 */
-	public Iterator<String> getCollectors() throws IOException
+	public Iterator<String> getCollectorURLs() throws IOException
 	{
-	  String chukwaHome = System.getenv("CHUKWA_HOME");
-	  if (chukwaHome == null){
-	    chukwaHome = ".";
-	  }
-	  log.info("setting up collectors file: " + chukwaHome + "/conf/collectors");
-		File collectors = new File(chukwaHome + "/conf/collectors");
+		  String chukwaHome = System.getenv("CHUKWA_HOME");
+		  if (chukwaHome == null){
+			  chukwaHome = ".";
+		  }
+
+		  if(!chukwaHome.endsWith("/"))
+		  {  chukwaHome = chukwaHome + File.separator; }
+		  log.info("Config - System.getenv(\"CHUKWA_HOME\"): [" + chukwaHome + "]" );
+
+		  String chukwaConf = System.getenv("CHUKWA_CONF_DIR");    
+		  if (chukwaConf == null)
+		  {
+			  chukwaConf = chukwaHome + "conf" + File.separator;
+		  }
+
+		  log.info("Config - chukwa conf dir: [" + chukwaConf + "]" );
+		  
+	  log.info("setting up collectors file: " + chukwaConf + 
+	      File.separator + COLLECTORS_FILENAME);
+		File collectors = new File(chukwaConf + File.separator + COLLECTORS_FILENAME);
 		try{
 		  return new RetryListOfCollectors(collectors, 1000 * 15);//time is ms between tries
 		} catch(java.io.IOException e) {
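
The collectors file read here is expected to list the collector endpoints to try, one per line, for example (hosts and ports are assumptions):

    http://collector1.example.com:8080/
    http://collector2.example.com:8080/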

Modified: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/Adaptor.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/Adaptor.java?rev=723855&r1=723854&r2=723855&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/Adaptor.java (original)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/Adaptor.java Fri Dec  5 12:30:14 2008
@@ -44,7 +44,7 @@
    * @param offset the stream offset of the first byte sent by this adaptor
    * @throws AdaptorException
    */
-	public void start(String type, String status, long offset, ChunkReceiver dest) throws AdaptorException;
+	public void start(long adaptorID, String type, String status, long offset, ChunkReceiver dest) throws AdaptorException;
 	
 	/**
 	 * Return the adaptor's state
@@ -54,6 +54,12 @@
 	 */
 	public String getCurrentStatus() throws AdaptorException;
 	public String getType();
+	
+	/**
+	 * Return the stream name
+	 * @return Stream name as a string
+	 */
+	public String getStreamName();
 	/**
 	 * Signals this adaptor to come to an orderly stop.
 	 * The adaptor ought to push out all the data it can

Modified: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/ExecAdaptor.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/ExecAdaptor.java?rev=723855&r1=723854&r2=723855&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/ExecAdaptor.java (original)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/ExecAdaptor.java Fri Dec  5 12:30:14 2008
@@ -17,18 +17,32 @@
  */
 
 package org.apache.hadoop.chukwa.datacollection.adaptor;
-
 import org.apache.hadoop.chukwa.ChunkImpl;
 import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
 import org.apache.hadoop.chukwa.inputtools.plugin.ExecPlugin;
+import org.apache.log4j.Logger;
 import org.apache.log4j.helpers.ISO8601DateFormat;
 import org.json.JSONException;
 import org.json.JSONObject;
 import java.util.*;
 
+/**
+ * Runs a command inside chukwa.  Takes as params the interval 
+ * in seconds at which to run the command, and the path and args
+ * to execute.
+ * 
+ * Interval is optional, and defaults to 5 seconds.
+ * 
+ * Example usage:  
+ * add org.apache.hadoop.chukwa.datacollection.adaptor.ExecAdaptor Ps 2 /bin/ps aux 0
+ * 
+ */
 public class ExecAdaptor extends ExecPlugin implements Adaptor {
 
   static final boolean FAKE_LOG4J_HEADER = true;
+  static final boolean SPLIT_LINES = false;
+  protected long adaptorID = 0;
+  static Logger log =Logger.getLogger(ExecAdaptor.class);
    
   class RunToolTask extends TimerTask {
     public void run() {
@@ -56,15 +70,19 @@
           data = stdout.getBytes();
         }
  
-        ArrayList<Integer> carriageReturns = new  ArrayList<Integer>();
-        for(int i = 0; i < data.length ; ++i)
-          if(data[i] == '\n')
-            carriageReturns.add(i);
-        
         sendOffset += data.length;
         ChunkImpl c = new ChunkImpl(ExecAdaptor.this.type,
             "results from " + cmd, sendOffset , data, ExecAdaptor.this);
-        c.setRecordOffsets(carriageReturns);
+        
+        if(SPLIT_LINES) {
+          ArrayList<Integer> carriageReturns = new  ArrayList<Integer>();
+          for(int i = 0; i < data.length ; ++i)
+            if(data[i] == '\n')
+              carriageReturns.add(i);
+          
+          c.setRecordOffsets(carriageReturns);
+        }  // else the chunk keeps its default single record
+        
         dest.add(c);
       } catch(JSONException e ) {
         //FIXME: log this somewhere
@@ -89,9 +107,12 @@
   
   @Override
   public String getCurrentStatus() throws AdaptorException {
-    return cmd;
+    return type + " " + period + " " + cmd + " " + sendOffset;
   }
 
+  public String getStreamName() {
+	  return cmd;
+  }
   @Override
   public void hardStop() throws AdaptorException {
     super.stop();
@@ -110,10 +131,22 @@
   }
 
   @Override
-  public void start(String type, String status, long offset, ChunkReceiver dest)
-      throws AdaptorException
-  {
-    cmd = status;
+  public void start(long adaptorID, String type, String status, long offset, ChunkReceiver dest)
+      throws AdaptorException  {
+    
+    int spOffset = status.indexOf(' ');
+    if(spOffset > 0) {
+      try {
+        period = Integer.parseInt(status.substring(0, spOffset));
+        cmd = status.substring(spOffset + 1);
+      } catch(NumberFormatException e) {
+        log.warn("ExecAdaptor: sample interval " + status.substring(0, spOffset) + " can't be parsed");
+        cmd = status;
+      }
+    } else {
+      cmd = status;
+    }
+    this.adaptorID = adaptorID;
     this.type = type;
     this.dest = dest;
     this.sendOffset = offset;
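
With this change the adaptor's parameter string is an optional interval in seconds followed by the command, matching the class-level example above:

    2 /bin/ps aux      run /bin/ps aux every 2 seconds
    /bin/ps aux        no leading integer, so the default 5 second period is used

A first token that is not a number logs a warning and the whole string is treated as the command.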

Added: hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/FileAdaptor.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/FileAdaptor.java?rev=723855&view=auto
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/FileAdaptor.java (added)
+++ hadoop/core/trunk/src/contrib/chukwa/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/FileAdaptor.java Fri Dec  5 12:30:14 2008
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.datacollection.adaptor;
+
+import org.apache.hadoop.chukwa.ChunkImpl;
+import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
+import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
+import org.apache.hadoop.chukwa.util.RecordConstants; 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.log4j.Logger;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.ArrayList;
+
+
+/**
+ * FileAdaptor pushes a small file to the collector as a single chunk.
+ */
+public class FileAdaptor  implements Adaptor
+{
+
+	static Logger log;
+
+	protected static Configuration conf = null;
+	private int attempts = 0;
+	
+	File toWatch;
+	/**
+	 * next PHYSICAL offset to read
+	 */
+	protected long fileReadOffset;
+	protected String type;
+	private ChunkReceiver dest;
+	protected RandomAccessFile reader = null;
+	protected long adaptorID;
+	
+	/**
+	 * The logical offset of the first byte of the file
+	 */
+	private long offsetOfFirstByte = 0;
+	
+	static {
+		log =Logger.getLogger(FileAdaptor.class);
+	}
+
+	public void start(long adaptorID, String type, String params, long bytes, ChunkReceiver dest) {
+	    //in this case params = filename 
+		log.info("adaptor id: "+adaptorID+" started file adaptor on file " + params);
+		this.adaptorID = adaptorID;
+	    this.type = type;
+	    this.dest = dest;
+	    this.attempts = 0;
+			  
+	    String[] words = params.split(" ");
+	    if(words.length > 1) {
+	        offsetOfFirstByte = Long.parseLong(words[0]);
+	        toWatch = new File(params.substring(words[0].length() + 1));
+	    } else {
+	        toWatch = new File(params);
+	    }
+	    try {
+	        reader = new RandomAccessFile(toWatch, "r");
+	        long bufSize = toWatch.length();
+	        byte[] buf = new byte[(int) bufSize];
+	        reader.readFully(buf);       // read() may return short; readFully gets the whole file
+	        reader.close();
+	        long fileTime = toWatch.lastModified();
+	        extractRecords(dest, 0, buf, fileTime);
+	    } catch(Exception e) {
+	        e.printStackTrace();
+	    }
+		ChukwaAgent agent = ChukwaAgent.getAgent();
+		if (agent != null) {
+			agent.stopAdaptor(adaptorID, false);
+		} else {
+			log.info("Agent is null, running in default mode");
+		}
+		this.fileReadOffset= bytes;
+	}
+
+	/**
+	 * Do one last tail, and then stop
+	 * Stop the adaptor and return the current read offset
+	 */
+	public long shutdown() throws AdaptorException {
+	  hardStop();
+	  return fileReadOffset + offsetOfFirstByte;
+	}
+	/**
+	 * Stop the adaptor, effective immediately.
+	 */
+	public void hardStop() throws AdaptorException {
+	}
+
+	public String getStreamName() {
+		return toWatch.getPath();
+	}
+	
+  /**
+   * Extract records from a byte sequence
+   * @param eq the queue to stick the new chunk[s] in
+   * @param buffOffsetInFile the byte offset in the stream at which buf[] begins
+   * @param buf the byte buffer to extract records from
+   * @return the number of bytes processed
+   * @throws InterruptedException
+   */
+  protected int extractRecords(ChunkReceiver eq, long buffOffsetInFile, byte[] buf, long fileTime) throws InterruptedException {
+    ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(), buffOffsetInFile + buf.length,
+        buf, this);
+    String tags = chunk.getTags();
+    chunk.setTags(tags+" time=\""+fileTime+"\"");
+    eq.add(chunk);
+    return buf.length;
+  }
+
+  @Override
+  public String getType() {
+    return type;
+  }
+
+  @Override
+  public String getCurrentStatus() throws AdaptorException {
+    return type.trim() + " " + offsetOfFirstByte+ " " + toWatch.getPath() + " " + fileReadOffset;
+  }
+  
+}
\ No newline at end of file
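
Following the add-command form shown in the ExecAdaptor javadoc above, a FileAdaptor takes the file to push as its parameter, optionally preceded by the logical offset of the file's first byte; the adaptor type and path below are illustrative:

    add org.apache.hadoop.chukwa.datacollection.adaptor.FileAdaptor MyFileType /var/log/messages 0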


