chukwa-commits mailing list archives

From: asrab...@apache.org
Subject: svn commit: r752666 [1/16] - in /hadoop/chukwa/trunk: ./ src/java/org/apache/hadoop/chukwa/ src/java/org/apache/hadoop/chukwa/conf/ src/java/org/apache/hadoop/chukwa/database/ src/java/org/apache/hadoop/chukwa/datacollection/ src/java/org/apache/hadoop...
Date: Wed, 11 Mar 2009 22:39:32 GMT
Author: asrabkin
Date: Wed Mar 11 22:39:26 2009
New Revision: 752666

URL: http://svn.apache.org/viewvc?rev=752666&view=rev
Log:
CHUKWA-33.  Reformat the world to fit hadoop style.
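
The reformatting applies the standard Hadoop Java conventions visible throughout the diff below: two-space indentation instead of tabs, spaces around assignment and arithmetic operators, a blank line between members, and parameter lists wrapped at roughly 80 columns. As a minimal sketch of that target style (the class below is a hypothetical illustration for reference only, not code from this commit):

    // Hypothetical example; illustrates the Hadoop formatting conventions
    // applied by CHUKWA-33 (two-space indents, spaced operators, wrapped
    // parameter lists, blank lines between members).
    public class StyleExample {
      private long seqId;
      private String dataType;

      public StyleExample(final long seqId, final String dataType) {
        this.seqId = seqId;
        this.dataType = dataType;
      }

      public long getSeqId() {
        return seqId;
      }

      public void setSeqId(final long seqId) {
        this.seqId = seqId;
      }

      public String getDataType() {
        return dataType;
      }

      public void setDataType(final String dataType) {
        this.dataType = dataType;
      }
    }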

Modified:
    hadoop/chukwa/trunk/CHANGES.txt
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChukwaArchiveKey.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/Chunk.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChunkBuilder.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChunkImpl.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/conf/ChukwaConfiguration.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/database/Aggregator.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/database/Consolidator.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/database/DataExpiration.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/database/DatabaseConfig.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/database/MetricsAggregation.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/database/TableCreator.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/ChunkQueue.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/ChunkReceiver.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/DataFactory.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/Adaptor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/AdaptorException.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/ExecAdaptor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/FileAdaptor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/CharFileTailingAdaptorUTF8.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/CharFileTailingAdaptorUTF8NewLineEscaped.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/FileTailer.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/FileTailingAdaptor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/TerminatorThread.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/agent/AdaptorFactory.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/agent/AgentControlSocketListener.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/agent/ChukwaAgent.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/agent/MemLimitQueue.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/agent/WaitingQueue.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/collector/CollectorStub.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/collector/servlet/ServletCollector.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/collector/servlet/ServletDiagnostics.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/connector/Connector.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/connector/http/HttpConnector.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/controller/ChukwaAgentController.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/protocol/Protocol.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/sender/ChukwaHttpSender.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/sender/ChukwaSender.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/sender/RetryListOfCollectors.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/test/ConsoleOutConnector.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/test/FileTailerStressTest.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/test/SinkFileValidator.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/writer/ChukwaWriter.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/writer/ClientAck.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/writer/ConsoleWriter.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/writer/Dedup.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/writer/InMemoryWriter.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/writer/PipelineStageWriter.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/writer/PipelineableWriter.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/writer/SeqFileWriter.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/datacollection/writer/WriterException.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/archive/ChuckwaArchiveBuilder.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveBuilder.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveDailyOutputFormat.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveDailyPartitioner.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveDataTypeOutputFormat.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveDataTypePartitioner.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveHourlyOutputFormat.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveHourlyPartitioner.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveMerger.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveStreamNameOutputFormat.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveStreamNamePartitioner.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/database/DBException.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/database/DBPlugin.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/database/DatabaseLoader.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/database/MRJobCounters.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/database/MetricDataLoader.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/ChukwaRecordOutputFormat.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/ChukwaRecordPartitioner.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/DailyChukwaRecordRolling.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/Demux.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/HourlyChukwaRecordRolling.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/MoveOrMergeRecordFile.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/MoveToRepository.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/RecordMerger.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/TaggerPlugin.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/ChukwaOutputCollector.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/Util.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/AbstractProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/ChunkProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/ChunkSaver.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/DFInvalidRecord.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/DebugOutputProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/DefaultProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/Df.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/DuplicateProcessorException.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/HadoopLogProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/HadoopMetricsProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/Iostat.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/JobConfProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/JobLogHistoryProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/Log4jJobHistoryProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/MapProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/MapProcessorFactory.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/PbsInvalidEntry.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/PbsNodes.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/ProcessorFactory.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/Sar.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/SysLog.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/Top.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/Torque.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/TsProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/UnknownRecordTypeException.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/YWatch.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/YwatchInvalidEntry.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/DuplicateReduceProcessorException.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/IdentityReducer.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/JobLogHistoryReduceProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/MRJobReduceProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/ReduceProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/ReduceProcessorFactory.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/SystemMetrics.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/UnknownReduceTypeException.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/ChukwaRecord.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/ChukwaRecordJT.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/ChukwaRecordKey.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/ChukwaSearchResult.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/ChukwaSearchService.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/Record.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/RecordUtil.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/SearchResult.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/SearchService.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/Token.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/datasource/DataSource.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/datasource/DataSourceException.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/datasource/DataSourceFactory.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/datasource/DsDirectory.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/datasource/database/DatabaseDS.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/datasource/record/ChukwaDSInternalResult.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/datasource/record/ChukwaFileParser.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/datasource/record/ChukwaRecordDataSource.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/datasource/record/ChukwaSequenceFileParser.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/datasource/record/MalformedFileFormat.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/extraction/engine/datasource/record/RecordDS.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/hicc/Chart.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/hicc/ClusterConfig.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/hicc/ColorPicker.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/hicc/Config.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/hicc/DatasetMapper.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/hicc/Iframe.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/hicc/JSONLoader.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/hicc/TimeHandler.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/hicc/Views.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/hicc/ViewsTag.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/hicc/Workspace.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/ChukwaInputFormat.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/ChukwaTTInstru.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/log4j/ChukwaDailyRollingFileAppender.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/log4j/Log4JMetricsContext.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/log4j/OneLineLogLayout.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/mdl/DataConfig.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/mdl/ErStreamHandler.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/mdl/LoaderServer.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/mdl/TorqueDataLoader.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/mdl/TorqueInfoProcessor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/mdl/TorqueTimerTask.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/plugin/ExecPlugin.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/plugin/IPlugin.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/plugin/metrics/Exec.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/plugin/metrics/ExecHelper.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/plugin/nodeactivity/NodeActivityPlugin.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/inputtools/plugin/pbsnode/PbsNodePlugin.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/util/ClusterConfig.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/util/ConstRateAdaptor.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/util/DatabaseWriter.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/util/DumpArchive.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/util/DumpDataType.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/util/DumpRecord.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/util/ExceptionUtil.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/util/MaxRateSender.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/util/PidFile.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/util/RecordConstants.java
    hadoop/chukwa/trunk/src/java/org/apache/hadoop/mapred/ChukwaJobTrackerInstrumentation.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/ChunkImplTest.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/TestChunkBuilder.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/TempFileUtil.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/adaptor/ChukwaTestAdaptor.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/adaptor/TestExecAdaptor.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/TestCharFileTailingAdaptorUTF8.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/TestFileExpirationPolicy.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/TestFileTailingAdaptorBigRecord.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/TestFileTailingAdaptors.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/TestLogRotate.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/TestRawAdaptor.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/TestStartAtOffset.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/agent/TestAgent.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/agent/TestAgentConfig.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/agent/TestCmd.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/collector/CaptureWriter.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/collector/CollectorTest.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/connector/ChunkCatcherConnector.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/connector/TestFailedCollector.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/controller/TestAgentClient.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/sender/TestRetryListOfCollectors.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/datacollection/writer/TestClientAck.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/extraction/demux/TestDemux.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/ChukwaTestOutputCollector.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/TestAbtractProcessor.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/TestHadoopLogProcessor.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/hicc/TestChart.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/inputtools/TestInputFormat.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/util/TestRecordConsts.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/validationframework/ChukwaAgentToCollectorValidator.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/validationframework/DemuxDirectoryValidator.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/validationframework/interceptor/ChunkDumper.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/validationframework/interceptor/ChunkQueueInterceptor.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/validationframework/interceptor/SetupTestClasses.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/validationframework/util/DataOperations.java
    hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/validationframework/util/MD5.java

Modified: hadoop/chukwa/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/CHANGES.txt?rev=752666&r1=752665&r2=752666&view=diff
==============================================================================
--- hadoop/chukwa/trunk/CHANGES.txt (original)
+++ hadoop/chukwa/trunk/CHANGES.txt Wed Mar 11 22:39:26 2009
@@ -32,6 +32,8 @@
   OPTIMIZATIONS
 
   BUG FIXES
+
+    CHUKWA-33.  Reformat code to fit hadoop style.  (asrabkin)
  
     CHUKWA-8.  Remove deprecated conf files.  (eyang via asrabkin)
 

Modified: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChukwaArchiveKey.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChukwaArchiveKey.java?rev=752666&r1=752665&r2=752666&view=diff
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChukwaArchiveKey.java (original)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChukwaArchiveKey.java Wed Mar 11 22:39:26 2009
@@ -1,57 +1,72 @@
 // File generated by hadoop record compiler. Do not edit.
 package org.apache.hadoop.chukwa;
 
+
 public class ChukwaArchiveKey extends org.apache.hadoop.record.Record {
   private static final org.apache.hadoop.record.meta.RecordTypeInfo _rio_recTypeInfo;
   private static org.apache.hadoop.record.meta.RecordTypeInfo _rio_rtiFilter;
   private static int[] _rio_rtiFilterFields;
   static {
-    _rio_recTypeInfo = new org.apache.hadoop.record.meta.RecordTypeInfo("ChukwaArchiveKey");
-    _rio_recTypeInfo.addField("timePartition", org.apache.hadoop.record.meta.TypeID.LongTypeID);
-    _rio_recTypeInfo.addField("dataType", org.apache.hadoop.record.meta.TypeID.StringTypeID);
-    _rio_recTypeInfo.addField("streamName", org.apache.hadoop.record.meta.TypeID.StringTypeID);
-    _rio_recTypeInfo.addField("seqId", org.apache.hadoop.record.meta.TypeID.LongTypeID);
+    _rio_recTypeInfo = new org.apache.hadoop.record.meta.RecordTypeInfo(
+        "ChukwaArchiveKey");
+    _rio_recTypeInfo.addField("timePartition",
+        org.apache.hadoop.record.meta.TypeID.LongTypeID);
+    _rio_recTypeInfo.addField("dataType",
+        org.apache.hadoop.record.meta.TypeID.StringTypeID);
+    _rio_recTypeInfo.addField("streamName",
+        org.apache.hadoop.record.meta.TypeID.StringTypeID);
+    _rio_recTypeInfo.addField("seqId",
+        org.apache.hadoop.record.meta.TypeID.LongTypeID);
   }
-  
+
   private long timePartition;
   private String dataType;
   private String streamName;
   private long seqId;
-  public ChukwaArchiveKey() { }
-  public ChukwaArchiveKey(
-    final long timePartition,
-    final String dataType,
-    final String streamName,
-    final long seqId) {
+
+  public ChukwaArchiveKey() {
+  }
+
+  public ChukwaArchiveKey(final long timePartition, final String dataType,
+                          final String streamName, final long seqId) {
     this.timePartition = timePartition;
     this.dataType = dataType;
     this.streamName = streamName;
     this.seqId = seqId;
   }
+
   public static org.apache.hadoop.record.meta.RecordTypeInfo getTypeInfo() {
     return _rio_recTypeInfo;
   }
-  public static void setTypeFilter(org.apache.hadoop.record.meta.RecordTypeInfo rti) {
-    if (null == rti) return;
+
+  public static void setTypeFilter(
+      org.apache.hadoop.record.meta.RecordTypeInfo rti) {
+    if (null == rti)
+      return;
     _rio_rtiFilter = rti;
     _rio_rtiFilterFields = null;
   }
-  private static void setupRtiFields()
-  {
-    if (null == _rio_rtiFilter) return;
+
+  private static void setupRtiFields() {
+    if (null == _rio_rtiFilter)
+      return;
     // we may already have done this
-    if (null != _rio_rtiFilterFields) return;
+    if (null != _rio_rtiFilterFields)
+      return;
     int _rio_i, _rio_j;
-    _rio_rtiFilterFields = new int [_rio_rtiFilter.getFieldTypeInfos().size()];
-    for (_rio_i=0; _rio_i<_rio_rtiFilterFields.length; _rio_i++) {
+    _rio_rtiFilterFields = new int[_rio_rtiFilter.getFieldTypeInfos().size()];
+    for (_rio_i = 0; _rio_i < _rio_rtiFilterFields.length; _rio_i++) {
       _rio_rtiFilterFields[_rio_i] = 0;
     }
-    java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_itFilter = _rio_rtiFilter.getFieldTypeInfos().iterator();
-    _rio_i=0;
+    java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_itFilter = _rio_rtiFilter
+        .getFieldTypeInfos().iterator();
+    _rio_i = 0;
     while (_rio_itFilter.hasNext()) {
-      org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfoFilter = _rio_itFilter.next();
-      java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_it = _rio_recTypeInfo.getFieldTypeInfos().iterator();
-      _rio_j=1;
+      org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfoFilter = _rio_itFilter
+          .next();
+      java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_it = _rio_recTypeInfo
+          .getFieldTypeInfos().iterator();
+      _rio_j = 1;
       while (_rio_it.hasNext()) {
         org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfo = _rio_it.next();
         if (_rio_tInfo.equals(_rio_tInfoFilter)) {
@@ -63,50 +78,62 @@
       _rio_i++;
     }
   }
+
   public long getTimePartition() {
     return timePartition;
   }
+
   public void setTimePartition(final long timePartition) {
-    this.timePartition=timePartition;
+    this.timePartition = timePartition;
   }
+
   public String getDataType() {
     return dataType;
   }
+
   public void setDataType(final String dataType) {
-    this.dataType=dataType;
+    this.dataType = dataType;
   }
+
   public String getStreamName() {
     return streamName;
   }
+
   public void setStreamName(final String streamName) {
-    this.streamName=streamName;
+    this.streamName = streamName;
   }
+
   public long getSeqId() {
     return seqId;
   }
+
   public void setSeqId(final long seqId) {
-    this.seqId=seqId;
+    this.seqId = seqId;
   }
-  public void serialize(final org.apache.hadoop.record.RecordOutput _rio_a, final String _rio_tag)
-  throws java.io.IOException {
-    _rio_a.startRecord(this,_rio_tag);
-    _rio_a.writeLong(timePartition,"timePartition");
-    _rio_a.writeString(dataType,"dataType");
-    _rio_a.writeString(streamName,"streamName");
-    _rio_a.writeLong(seqId,"seqId");
-    _rio_a.endRecord(this,_rio_tag);
+
+  public void serialize(final org.apache.hadoop.record.RecordOutput _rio_a,
+      final String _rio_tag) throws java.io.IOException {
+    _rio_a.startRecord(this, _rio_tag);
+    _rio_a.writeLong(timePartition, "timePartition");
+    _rio_a.writeString(dataType, "dataType");
+    _rio_a.writeString(streamName, "streamName");
+    _rio_a.writeLong(seqId, "seqId");
+    _rio_a.endRecord(this, _rio_tag);
   }
-  private void deserializeWithoutFilter(final org.apache.hadoop.record.RecordInput _rio_a, final String _rio_tag)
-  throws java.io.IOException {
+
+  private void deserializeWithoutFilter(
+      final org.apache.hadoop.record.RecordInput _rio_a, final String _rio_tag)
+      throws java.io.IOException {
     _rio_a.startRecord(_rio_tag);
-    timePartition=_rio_a.readLong("timePartition");
-    dataType=_rio_a.readString("dataType");
-    streamName=_rio_a.readString("streamName");
-    seqId=_rio_a.readLong("seqId");
+    timePartition = _rio_a.readLong("timePartition");
+    dataType = _rio_a.readString("dataType");
+    streamName = _rio_a.readString("streamName");
+    seqId = _rio_a.readLong("seqId");
     _rio_a.endRecord(_rio_tag);
   }
-  public void deserialize(final org.apache.hadoop.record.RecordInput _rio_a, final String _rio_tag)
-  throws java.io.IOException {
+
+  public void deserialize(final org.apache.hadoop.record.RecordInput _rio_a,
+      final String _rio_tag) throws java.io.IOException {
     if (null == _rio_rtiFilter) {
       deserializeWithoutFilter(_rio_a, _rio_tag);
       return;
@@ -114,42 +141,48 @@
     // if we're here, we need to read based on version info
     _rio_a.startRecord(_rio_tag);
     setupRtiFields();
-    for (int _rio_i=0; _rio_i<_rio_rtiFilter.getFieldTypeInfos().size(); _rio_i++) {
+    for (int _rio_i = 0; _rio_i < _rio_rtiFilter.getFieldTypeInfos().size(); _rio_i++) {
       if (1 == _rio_rtiFilterFields[_rio_i]) {
-        timePartition=_rio_a.readLong("timePartition");
-      }
-      else if (2 == _rio_rtiFilterFields[_rio_i]) {
-        dataType=_rio_a.readString("dataType");
-      }
-      else if (3 == _rio_rtiFilterFields[_rio_i]) {
-        streamName=_rio_a.readString("streamName");
-      }
-      else if (4 == _rio_rtiFilterFields[_rio_i]) {
-        seqId=_rio_a.readLong("seqId");
-      }
-      else {
-        java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo> typeInfos = (java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo>)(_rio_rtiFilter.getFieldTypeInfos());
-        org.apache.hadoop.record.meta.Utils.skip(_rio_a, typeInfos.get(_rio_i).getFieldID(), typeInfos.get(_rio_i).getTypeID());
+        timePartition = _rio_a.readLong("timePartition");
+      } else if (2 == _rio_rtiFilterFields[_rio_i]) {
+        dataType = _rio_a.readString("dataType");
+      } else if (3 == _rio_rtiFilterFields[_rio_i]) {
+        streamName = _rio_a.readString("streamName");
+      } else if (4 == _rio_rtiFilterFields[_rio_i]) {
+        seqId = _rio_a.readLong("seqId");
+      } else {
+        java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo> typeInfos = (java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo>) (_rio_rtiFilter
+            .getFieldTypeInfos());
+        org.apache.hadoop.record.meta.Utils.skip(_rio_a, typeInfos.get(_rio_i)
+            .getFieldID(), typeInfos.get(_rio_i).getTypeID());
       }
     }
     _rio_a.endRecord(_rio_tag);
   }
-  public int compareTo (final Object _rio_peer_) throws ClassCastException {
+
+  public int compareTo(final Object _rio_peer_) throws ClassCastException {
     if (!(_rio_peer_ instanceof ChukwaArchiveKey)) {
       throw new ClassCastException("Comparing different types of records.");
     }
     ChukwaArchiveKey _rio_peer = (ChukwaArchiveKey) _rio_peer_;
     int _rio_ret = 0;
-    _rio_ret = (timePartition == _rio_peer.timePartition)? 0 :((timePartition<_rio_peer.timePartition)?-1:1);
-    if (_rio_ret != 0) return _rio_ret;
+    _rio_ret = (timePartition == _rio_peer.timePartition) ? 0
+        : ((timePartition < _rio_peer.timePartition) ? -1 : 1);
+    if (_rio_ret != 0)
+      return _rio_ret;
     _rio_ret = dataType.compareTo(_rio_peer.dataType);
-    if (_rio_ret != 0) return _rio_ret;
+    if (_rio_ret != 0)
+      return _rio_ret;
     _rio_ret = streamName.compareTo(_rio_peer.streamName);
-    if (_rio_ret != 0) return _rio_ret;
-    _rio_ret = (seqId == _rio_peer.seqId)? 0 :((seqId<_rio_peer.seqId)?-1:1);
-    if (_rio_ret != 0) return _rio_ret;
+    if (_rio_ret != 0)
+      return _rio_ret;
+    _rio_ret = (seqId == _rio_peer.seqId) ? 0 : ((seqId < _rio_peer.seqId) ? -1
+        : 1);
+    if (_rio_ret != 0)
+      return _rio_ret;
     return _rio_ret;
   }
+
   public boolean equals(final Object _rio_peer_) {
     if (!(_rio_peer_ instanceof ChukwaArchiveKey)) {
       return false;
@@ -159,16 +192,21 @@
     }
     ChukwaArchiveKey _rio_peer = (ChukwaArchiveKey) _rio_peer_;
     boolean _rio_ret = false;
-    _rio_ret = (timePartition==_rio_peer.timePartition);
-    if (!_rio_ret) return _rio_ret;
+    _rio_ret = (timePartition == _rio_peer.timePartition);
+    if (!_rio_ret)
+      return _rio_ret;
     _rio_ret = dataType.equals(_rio_peer.dataType);
-    if (!_rio_ret) return _rio_ret;
+    if (!_rio_ret)
+      return _rio_ret;
     _rio_ret = streamName.equals(_rio_peer.streamName);
-    if (!_rio_ret) return _rio_ret;
-    _rio_ret = (seqId==_rio_peer.seqId);
-    if (!_rio_ret) return _rio_ret;
+    if (!_rio_ret)
+      return _rio_ret;
+    _rio_ret = (seqId == _rio_peer.seqId);
+    if (!_rio_ret)
+      return _rio_ret;
     return _rio_ret;
   }
+
   public Object clone() throws CloneNotSupportedException {
     ChukwaArchiveKey _rio_other = new ChukwaArchiveKey();
     _rio_other.timePartition = this.timePartition;
@@ -177,110 +215,146 @@
     _rio_other.seqId = this.seqId;
     return _rio_other;
   }
+
   public int hashCode() {
     int _rio_result = 17;
     int _rio_ret;
-    _rio_ret = (int) (timePartition^(timePartition>>>32));
-    _rio_result = 37*_rio_result + _rio_ret;
+    _rio_ret = (int) (timePartition ^ (timePartition >>> 32));
+    _rio_result = 37 * _rio_result + _rio_ret;
     _rio_ret = dataType.hashCode();
-    _rio_result = 37*_rio_result + _rio_ret;
+    _rio_result = 37 * _rio_result + _rio_ret;
     _rio_ret = streamName.hashCode();
-    _rio_result = 37*_rio_result + _rio_ret;
-    _rio_ret = (int) (seqId^(seqId>>>32));
-    _rio_result = 37*_rio_result + _rio_ret;
+    _rio_result = 37 * _rio_result + _rio_ret;
+    _rio_ret = (int) (seqId ^ (seqId >>> 32));
+    _rio_result = 37 * _rio_result + _rio_ret;
     return _rio_result;
   }
+
   public static String signature() {
     return "LChukwaArchiveKey(lssl)";
   }
-  public static class Comparator extends org.apache.hadoop.record.RecordComparator {
+
+  public static class Comparator extends
+      org.apache.hadoop.record.RecordComparator {
     public Comparator() {
       super(ChukwaArchiveKey.class);
     }
+
     static public int slurpRaw(byte[] b, int s, int l) {
       try {
         int os = s;
         {
           long i = org.apache.hadoop.record.Utils.readVLong(b, s);
           int z = org.apache.hadoop.record.Utils.getVIntSize(i);
-          s+=z; l-=z;
+          s += z;
+          l -= z;
         }
         {
           int i = org.apache.hadoop.record.Utils.readVInt(b, s);
           int z = org.apache.hadoop.record.Utils.getVIntSize(i);
-          s+=(z+i); l-= (z+i);
+          s += (z + i);
+          l -= (z + i);
         }
         {
           int i = org.apache.hadoop.record.Utils.readVInt(b, s);
           int z = org.apache.hadoop.record.Utils.getVIntSize(i);
-          s+=(z+i); l-= (z+i);
+          s += (z + i);
+          l -= (z + i);
         }
         {
           long i = org.apache.hadoop.record.Utils.readVLong(b, s);
           int z = org.apache.hadoop.record.Utils.getVIntSize(i);
-          s+=z; l-=z;
+          s += z;
+          l -= z;
         }
         return (os - s);
-      } catch(java.io.IOException e) {
+      } catch (java.io.IOException e) {
         throw new RuntimeException(e);
       }
     }
-    static public int compareRaw(byte[] b1, int s1, int l1,
-                                   byte[] b2, int s2, int l2) {
+
+    static public int compareRaw(byte[] b1, int s1, int l1, byte[] b2, int s2,
+        int l2) {
       try {
         int os1 = s1;
         {
           long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
           long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
           if (i1 != i2) {
-            return ((i1-i2) < 0) ? -1 : 0;
+            return ((i1 - i2) < 0) ? -1 : 0;
           }
           int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
           int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
-          s1+=z1; s2+=z2; l1-=z1; l2-=z2;
+          s1 += z1;
+          s2 += z2;
+          l1 -= z1;
+          l2 -= z2;
         }
         {
           int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
           int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
           int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
           int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
-          s1+=z1; s2+=z2; l1-=z1; l2-=z2;
-          int r1 = org.apache.hadoop.record.Utils.compareBytes(b1,s1,i1,b2,s2,i2);
-          if (r1 != 0) { return (r1<0)?-1:0; }
-          s1+=i1; s2+=i2; l1-=i1; l1-=i2;
+          s1 += z1;
+          s2 += z2;
+          l1 -= z1;
+          l2 -= z2;
+          int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2,
+              s2, i2);
+          if (r1 != 0) {
+            return (r1 < 0) ? -1 : 0;
+          }
+          s1 += i1;
+          s2 += i2;
+          l1 -= i1;
+          l1 -= i2;
         }
         {
           int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
           int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
           int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
           int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
-          s1+=z1; s2+=z2; l1-=z1; l2-=z2;
-          int r1 = org.apache.hadoop.record.Utils.compareBytes(b1,s1,i1,b2,s2,i2);
-          if (r1 != 0) { return (r1<0)?-1:0; }
-          s1+=i1; s2+=i2; l1-=i1; l1-=i2;
+          s1 += z1;
+          s2 += z2;
+          l1 -= z1;
+          l2 -= z2;
+          int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2,
+              s2, i2);
+          if (r1 != 0) {
+            return (r1 < 0) ? -1 : 0;
+          }
+          s1 += i1;
+          s2 += i2;
+          l1 -= i1;
+          l1 -= i2;
         }
         {
           long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
           long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
           if (i1 != i2) {
-            return ((i1-i2) < 0) ? -1 : 0;
+            return ((i1 - i2) < 0) ? -1 : 0;
           }
           int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
           int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
-          s1+=z1; s2+=z2; l1-=z1; l2-=z2;
+          s1 += z1;
+          s2 += z2;
+          l1 -= z1;
+          l2 -= z2;
         }
         return (os1 - s1);
-      } catch(java.io.IOException e) {
+      } catch (java.io.IOException e) {
         throw new RuntimeException(e);
       }
     }
-    public int compare(byte[] b1, int s1, int l1,
-                         byte[] b2, int s2, int l2) {
-      int ret = compareRaw(b1,s1,l1,b2,s2,l2);
-      return (ret == -1)? -1 : ((ret==0)? 1 : 0);}
+
+    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
+      int ret = compareRaw(b1, s1, l1, b2, s2, l2);
+      return (ret == -1) ? -1 : ((ret == 0) ? 1 : 0);
+    }
   }
-  
+
   static {
-    org.apache.hadoop.record.RecordComparator.define(ChukwaArchiveKey.class, new Comparator());
+    org.apache.hadoop.record.RecordComparator.define(ChukwaArchiveKey.class,
+        new Comparator());
   }
 }

Modified: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/Chunk.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/Chunk.java?rev=752666&r1=752665&r2=752666&view=diff
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/Chunk.java (original)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/Chunk.java Wed Mar 11 22:39:26 2009
@@ -17,92 +17,104 @@
  */
 
 package org.apache.hadoop.chukwa;
+
+
 import java.io.DataOutput;
 import java.io.IOException;
-
 import org.apache.hadoop.chukwa.datacollection.adaptor.*;
 
 /**
  * A chunk is a sequence of bytes at a particular logical offset in a stream,
- * and containing one or more "records".
- *  Chunks have various metadata, such as source, format,
- * and pointers to record boundaries within the chunk.
+ * and containing one or more "records". Chunks have various metadata, such as
+ * source, format, and pointers to record boundaries within the chunk.
  * 
  */
 public interface Chunk {
-	
-//these conceptually are really network addresses
-	public String getSource();
-	public void setSource(String logSource);
-	
-	/**
-	 * Get the name of the stream that this Chunk is a chunk of
-	 * @return the name of this stream; e.g. file name
-	 */
-	public String getStreamName();
-	public void setStreamName(String streamName);
-	
-	public String getApplication();  
+
+  // these conceptually are really network addresses
+  public String getSource();
+
+  public void setSource(String logSource);
+
+  /**
+   * Get the name of the stream that this Chunk is a chunk of
+   * 
+   * @return the name of this stream; e.g. file name
+   */
+  public String getStreamName();
+
+  public void setStreamName(String streamName);
+
+  public String getApplication();
+
   public void setApplication(String a);
-	
-  //These describe the format of the data buffer
+
+  // These describe the format of the data buffer
   public String getDataType();
+
   public void setDataType(String t);
 
   /**
    * @return the user data in the chunk
    */
-	public byte[] getData();
-	/**
-	 * @param logEvent the user data in the chunk
-	 */
-	public void setData(byte[] logEvent);
-	
-	/**
-	 * get/set the <b>end</b> offsets of records in the buffer.
-	 * 
-	 * We use end, rather than start offsets, since the first start
-	 * offset is always 0, but the last end offset specifies how much of the buffer is valid.
-	 * 
-	 * More precisely, offsets[i] is the offset in the Chunk of the last byte of record i
-	 *  in this chunk.
-	 * @return a list of record end offsets
-	 */
-	public int[] getRecordOffsets();
-	public void setRecordOffsets(int[] offsets);
-	
-	/**
-	 * @return  the byte offset of the first byte not in this chunk.
-	 * 
-	 * We pick this convention so that subtracting sequence IDs yields length.
-	 */
-	public long getSeqID();
-	public void setSeqID(long l);
-
-	/**
-	 * Retrieve a reference to the adaptor that sent this event.
-	 * Used by LocalAgent and Connectors to deliver acks to the appropriate place.
-	 */
-	public Adaptor getInitiator();
-	
+  public byte[] getData();
+
   /**
-   * Estimate the size of this Chunk on the wire, assuming each char of metadata takes two bytes
-   * to serialize.  This is pessimistic.
+   * @param logEvent the user data in the chunk
+   */
+  public void setData(byte[] logEvent);
+
+  /**
+   * get/set the <b>end</b> offsets of records in the buffer.
+   * 
+   * We use end, rather than start offsets, since the first start offset is
+   * always 0, but the last end offset specifies how much of the buffer is
+   * valid.
+   * 
+   * More precisely, offsets[i] is the offset in the Chunk of the last byte of
+   * record i in this chunk.
+   * 
+   * @return a list of record end offsets
+   */
+  public int[] getRecordOffsets();
+
+  public void setRecordOffsets(int[] offsets);
+
+  /**
+   * @return the byte offset of the first byte not in this chunk.
+   * 
+   *         We pick this convention so that subtracting sequence IDs yields
+   *         length.
+   */
+  public long getSeqID();
+
+  public void setSeqID(long l);
+
+  /**
+   * Retrieve a reference to the adaptor that sent this event. Used by
+   * LocalAgent and Connectors to deliver acks to the appropriate place.
+   */
+  public Adaptor getInitiator();
+
+  /**
+   * Estimate the size of this Chunk on the wire, assuming each char of metadata
+   * takes two bytes to serialize. This is pessimistic.
+   * 
    * @return size in bytes that this Chunk might take once serialized.
    */
   public int getSerializedSizeEstimate();
-  
-/**
- * @return name of cluster that this chunk comes from.
- * 
- */
-  public String getTags();  
-  
+
+  /**
+   * @return name of cluster that this chunk comes from.
+   * 
+   */
+  public String getTags();
+
   /**
    * Set the name of the cluster that this chunk comes from.
    * 
    */
-    public void setTags(String tags);  
-  
+  public void setTags(String tags);
+
   public void write(DataOutput data) throws IOException;
 }

Modified: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChunkBuilder.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChunkBuilder.java?rev=752666&r1=752665&r2=752666&view=diff
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChunkBuilder.java (original)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChunkBuilder.java Wed Mar 11 22:39:26 2009
@@ -18,46 +18,46 @@
 
 package org.apache.hadoop.chukwa;
 
-import java.util.*;
 
+import java.util.*;
 import org.apache.hadoop.io.DataOutputBuffer;
 import java.io.*;
 
 /**
  * Right now, just handles record collection.
- *
+ * 
  */
 public class ChunkBuilder {
-  
+
   ArrayList<Integer> recOffsets = new ArrayList<Integer>();
   int lastRecOffset = -1;
   DataOutputBuffer buf = new DataOutputBuffer();
+
   /**
    * Adds the data in rec to an internal buffer; rec can be reused immediately.
+   * 
    * @param rec
    */
-  public void addRecord(byte[] rec)  {
+  public void addRecord(byte[] rec) {
     lastRecOffset = lastRecOffset + rec.length;
     recOffsets.add(lastRecOffset);
     try {
-    buf.write(rec);
-    } catch(IOException e) {
+      buf.write(rec);
+    } catch (IOException e) {
       throw new RuntimeException("buffer write failed.  Out of memory?", e);
     }
   }
-  
+
   public Chunk getChunk() {
     ChunkImpl c = new ChunkImpl();
     c.setData(buf.getData());
     c.setSeqID(buf.getLength());
     int[] offsets = new int[recOffsets.size()];
-    for(int i = 0; i < offsets.length; ++i)
+    for (int i = 0; i < offsets.length; ++i)
       offsets[i] = recOffsets.get(i);
     c.setRecordOffsets(offsets);
-    
+
     return c;
   }
-  
-  
 
 }

Modified: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChunkImpl.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChunkImpl.java?rev=752666&r1=752665&r2=752666&view=diff
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChunkImpl.java (original)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/ChunkImpl.java Wed Mar 11 22:39:26 2009
@@ -18,40 +18,40 @@
 
 package org.apache.hadoop.chukwa;
 
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
-
 import org.apache.hadoop.chukwa.datacollection.adaptor.Adaptor;
 import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
 
-public class ChunkImpl implements org.apache.hadoop.io.Writable, Chunk 
-{
-  public static int PROTOCOL_VERSION=1;
-  
+public class ChunkImpl implements org.apache.hadoop.io.Writable, Chunk {
+  public static int PROTOCOL_VERSION = 1;
+
   private String source = "";
   private String application = "";
   private String dataType = "";
   private String tags = "";
   private byte[] data = null;
   private int[] recordEndOffsets;
-  private int protocolVersion=1;
-  private String debuggingInfo="";
-  
+  private int protocolVersion = 1;
+  private String debuggingInfo = "";
+
   private transient Adaptor initiator;
   long seqID;
-  
+
   ChunkImpl() {
     this.tags = ChukwaAgent.getTags();
   }
-  
+
   public static ChunkImpl getBlankChunk() {
     return new ChunkImpl();
   }
-  
-  public ChunkImpl(String dataType, String streamName, long seq, byte[] data, Adaptor source) {
+
+  public ChunkImpl(String dataType, String streamName, long seq, byte[] data,
+                   Adaptor source) {
     this.seqID = seq;
     this.source = localHostAddr;
     this.tags = ChukwaAgent.getTags();
@@ -60,207 +60,211 @@
     this.data = data;
     this.initiator = source;
   }
-  
+
   /**
-   *  @see org.apache.hadoop.chukwa.Chunk#getData()
+   * @see org.apache.hadoop.chukwa.Chunk#getData()
    */
-  public byte[] getData()	{
-  	return data;
+  public byte[] getData() {
+    return data;
   }
-  
+
   /**
-   *  @see org.apache.hadoop.chukwa.Chunk#setData(byte[])
+   * @see org.apache.hadoop.chukwa.Chunk#setData(byte[])
    */
   public void setData(byte[] logEvent) {
-  	this.data = logEvent;
+    this.data = logEvent;
   }
-  
+
   /**
    * @see org.apache.hadoop.chukwa.Chunk#getStreamName()
    */
   public String getStreamName() {
-  	return application;
+    return application;
   }
-  
-  public void setStreamName(String logApplication)	{
-  	this.application = logApplication;
+
+  public void setStreamName(String logApplication) {
+    this.application = logApplication;
   }
-   
+
   public String getSource() {
     return source;
   }
-  
-  public void setSource(String logSource)	{
-  	this.source = logSource;
+
+  public void setSource(String logSource) {
+    this.source = logSource;
   }
-  
+
   public String getDebugInfo() {
-  	return debuggingInfo;
+    return debuggingInfo;
   }
-  
+
   public void setDebugInfo(String a) {
-  	this.debuggingInfo = a;
+    this.debuggingInfo = a;
   }
-  
+
   /**
    * @see org.apache.hadoop.chukwa.Chunk#getSeqID()
    */
-  public long getSeqID()  {
+  public long getSeqID() {
     return seqID;
   }
-  
+
   public void setSeqID(long l) {
-    seqID=l;
+    seqID = l;
   }
-  
+
   public int getProtocolVersion() {
-	  return protocolVersion;
+    return protocolVersion;
   }
-  
+
   public void setProtocolVersion(int pv) {
-	  this.protocolVersion = pv;
+    this.protocolVersion = pv;
   }
-  public String getApplication(){
+
+  public String getApplication() {
     return application;
   }
-  
-  public void setApplication(String a){
+
+  public void setApplication(String a) {
     application = a;
   }
-  
+
   public Adaptor getInitiator() {
     return initiator;
   }
-  
+
   public void setInitiator(Adaptor a) {
     initiator = a;
   }
-  
-  
+
   public void setLogSource() {
     source = localHostAddr;
   }
-  
+
   public int[] getRecordOffsets() {
 
-    if(recordEndOffsets == null)
-      recordEndOffsets = new int[] {data.length -1};
+    if (recordEndOffsets == null)
+      recordEndOffsets = new int[] { data.length - 1 };
     return recordEndOffsets;
   }
-  
+
   public void setRecordOffsets(int[] offsets) {
     recordEndOffsets = offsets;
   }
-  
+
   public String getDataType() {
     return dataType;
   }
-  
+
   public void setDataType(String t) {
     dataType = t;
   }
-  
+
   @Override
-  public void setTags(String tags)
-  {
-  	this.tags = tags;
-  }
-  
-/**
- * @see org.apache.hadoop.chukwa.Chunk#getTags()
- */
+  public void setTags(String tags) {
+    this.tags = tags;
+  }
+
+  /**
+   * @see org.apache.hadoop.chukwa.Chunk#getTags()
+   */
   public String getTags() {
     return tags;
   }
-  
+
   /**
    * @see org.apache.hadoop.io.Writable#readFields(java.io.DataInput)
    */
   public void readFields(DataInput in) throws IOException {
-	setProtocolVersion(in.readInt());
-	if(protocolVersion!=PROTOCOL_VERSION) {
-		throw new IOException("Protocol version mismatched, drop data.  source version: "+protocolVersion+", collector version:"+PROTOCOL_VERSION);
-	}
+    setProtocolVersion(in.readInt());
+    if (protocolVersion != PROTOCOL_VERSION) {
+      throw new IOException(
+          "Protocol version mismatched, drop data.  source version: "
+              + protocolVersion + ", collector version:" + PROTOCOL_VERSION);
+    }
     setSeqID(in.readLong());
     setSource(in.readUTF());
-    tags =  in.readUTF();    //no public set method here
+    tags = in.readUTF(); // no public set method here
     setApplication(in.readUTF());
     setDataType(in.readUTF());
     setDebugInfo(in.readUTF());
-    
+
     int numRecords = in.readInt();
     recordEndOffsets = new int[numRecords];
-    for(int i=0; i < numRecords; ++i)
+    for (int i = 0; i < numRecords; ++i)
       recordEndOffsets[i] = in.readInt();
-    data = new byte[recordEndOffsets[recordEndOffsets.length -1]+1 ] ;
+    data = new byte[recordEndOffsets[recordEndOffsets.length - 1] + 1];
     in.readFully(data);
-    
+
   }
 
   /**
    * @see org.apache.hadoop.io.Writable#write(java.io.DataOutput)
    */
   public void write(DataOutput out) throws IOException {
-	out.writeInt(PROTOCOL_VERSION);
+    out.writeInt(PROTOCOL_VERSION);
     out.writeLong(seqID);
     out.writeUTF(source);
     out.writeUTF(tags);
     out.writeUTF(application);
     out.writeUTF(dataType);
     out.writeUTF(debuggingInfo);
-    
-    if(recordEndOffsets == null)
-      recordEndOffsets = new int[] {data.length -1};
-      
+
+    if (recordEndOffsets == null)
+      recordEndOffsets = new int[] { data.length - 1 };
+
     out.writeInt(recordEndOffsets.length);
-    for(int i =0; i < recordEndOffsets.length; ++i)
+    for (int i = 0; i < recordEndOffsets.length; ++i)
       out.writeInt(recordEndOffsets[i]);
-    
-    out.write(data, 0, recordEndOffsets[recordEndOffsets.length -1] + 1); //byte at last offset is valid
+
+    out.write(data, 0, recordEndOffsets[recordEndOffsets.length - 1] + 1); // byte
+                                                                           // at
+                                                                           // last
+                                                                           // offset
+                                                                           // is
+                                                                           // valid
   }
-  
+
   public static ChunkImpl read(DataInput in) throws IOException {
     ChunkImpl w = new ChunkImpl();
     w.readFields(in);
     return w;
   }
-  
-    //FIXME: should do something better here, but this is OK for debugging
+
+  // FIXME: should do something better here, but this is OK for debugging
   public String toString() {
-    return source+":" + application +":"+ new String(data)+ "/"+seqID;
+    return source + ":" + application + ":" + new String(data) + "/" + seqID;
   }
-  
+
   private static String localHostAddr;
-  static
-  {
+  static {
     try {
       localHostAddr = InetAddress.getLocalHost().getHostName();
     } catch (UnknownHostException e) {
       localHostAddr = "localhost";
     }
   }
-  
+
   /**
    * @see org.apache.hadoop.chukwa.Chunk#getSerializedSizeEstimate()
    */
   public int getSerializedSizeEstimate() {
-    int size= 2 * (source.length() + application.length() + 
-        dataType.length() + debuggingInfo.length()); //length of strings (pessimistic)
+    int size = 2 * (source.length() + application.length() + dataType.length() + debuggingInfo
+        .length()); // length of strings (pessimistic)
     size += data.length + 4;
-    if(recordEndOffsets == null)
-      size+=8;
+    if (recordEndOffsets == null)
+      size += 8;
     else
-      size += 4 * (recordEndOffsets.length + 1); //+1 for length of array
-    size += 8; //uuid
+      size += 4 * (recordEndOffsets.length + 1); // +1 for length of array
+    size += 8; // uuid
     return size;
   }
 
-  public void setRecordOffsets(java.util.Collection<Integer> carriageReturns)
-  {
-    recordEndOffsets = new int [carriageReturns.size()];
+  public void setRecordOffsets(java.util.Collection<Integer> carriageReturns) {
+    recordEndOffsets = new int[carriageReturns.size()];
     int i = 0;
-    for(Integer offset:carriageReturns )
+    for (Integer offset : carriageReturns)
       recordEndOffsets[i++] = offset;
   }
-	
+
 }

Modified: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/conf/ChukwaConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/conf/ChukwaConfiguration.java?rev=752666&r1=752665&r2=752666&view=diff
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/conf/ChukwaConfiguration.java (original)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/conf/ChukwaConfiguration.java Wed Mar 11 22:39:26 2009
@@ -18,47 +18,48 @@
 
 package org.apache.hadoop.chukwa.conf;
 
-import java.io.File;
 
+import java.io.File;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Logger;
 
 public class ChukwaConfiguration extends Configuration {
-	static Logger log = Logger.getLogger(ChukwaConfiguration.class);
-
-	public ChukwaConfiguration() {
-		this(true);
-	}
-
-	public ChukwaConfiguration(boolean loadDefaults) {
-		super();
-		if (loadDefaults) {
-
-			String chukwaHome = System.getenv("CHUKWA_HOME");
-			if (chukwaHome == null){
-				chukwaHome = ".";
-			}
-
-			if(!chukwaHome.endsWith("/"))
-			{  chukwaHome = chukwaHome + File.separator; }	
-			String chukwaConf = System.getenv("CHUKWA_CONF_DIR");
-			if (chukwaConf == null)
-			{  chukwaConf = chukwaHome + "conf" + File.separator; }
-
-			log.info("chukwaConf is " + chukwaConf);
+  static Logger log = Logger.getLogger(ChukwaConfiguration.class);
 
-		  super.addResource(new Path(chukwaConf + "/chukwa-collector-conf.xml"));
-		  log.debug("added chukwa-collector-conf.xml to ChukwaConfiguration");
+  public ChukwaConfiguration() {
+    this(true);
+  }
+
+  public ChukwaConfiguration(boolean loadDefaults) {
+    super();
+    if (loadDefaults) {
+
+      String chukwaHome = System.getenv("CHUKWA_HOME");
+      if (chukwaHome == null) {
+        chukwaHome = ".";
+      }
+
+      if (!chukwaHome.endsWith("/")) {
+        chukwaHome = chukwaHome + File.separator;
+      }
+      String chukwaConf = System.getenv("CHUKWA_CONF_DIR");
+      if (chukwaConf == null) {
+        chukwaConf = chukwaHome + "conf" + File.separator;
+      }
+
+      log.info("chukwaConf is " + chukwaConf);
+
+      super.addResource(new Path(chukwaConf + "/chukwa-collector-conf.xml"));
+      log.debug("added chukwa-collector-conf.xml to ChukwaConfiguration");
 
-		  super.addResource(new Path(chukwaConf + "/chukwa-agent-conf.xml"));
-		  log.debug("added chukwa-agent-conf.xml to ChukwaConfiguration");
+      super.addResource(new Path(chukwaConf + "/chukwa-agent-conf.xml"));
+      log.debug("added chukwa-agent-conf.xml to ChukwaConfiguration");
 
-		  super.addResource(new Path(chukwaConf + "/hadoop-site.xml"));
-		  log.debug("added hadoop-site.xml to ChukwaConfiguration");
+      super.addResource(new Path(chukwaConf + "/hadoop-site.xml"));
+      log.debug("added hadoop-site.xml to ChukwaConfiguration");
 
-			
-		}
-	}
+    }
+  }
 
 }
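
As the reformatted constructor shows, ChukwaConfiguration resolves its conf directory from CHUKWA_CONF_DIR, falling back to CHUKWA_HOME/conf (or ./conf), and then layers chukwa-collector-conf.xml, chukwa-agent-conf.xml and hadoop-site.xml on top of the stock Hadoop Configuration. A minimal usage sketch follows; the property key queried here is purely illustrative and is not a key defined by this commit.

    import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;

    public class ConfExample {
      public static void main(String[] args) {
        // Loads the three resource files from the resolved conf directory.
        ChukwaConfiguration conf = new ChukwaConfiguration();

        // Hypothetical key, shown only to illustrate the inherited
        // Configuration#get(String, String) accessor with a default value.
        String writer = conf.get("chukwaCollector.writerClass", "unset");
        System.out.println("writerClass = " + writer);
      }
    }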

Modified: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/database/Aggregator.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/database/Aggregator.java?rev=752666&r1=752665&r2=752666&view=diff
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/database/Aggregator.java (original)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/database/Aggregator.java Wed Mar 11 22:39:26 2009
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.chukwa.database;
 
+
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
@@ -41,245 +42,249 @@
 import org.apache.hadoop.chukwa.util.PidFile;
 
 public class Aggregator {
-	private static DatabaseConfig dbc = null;
+  private static DatabaseConfig dbc = null;
 
-	private static Log log = LogFactory.getLog(Aggregator.class);
-	private String table = null;
-	private String jdbc = null;
-	private int[] intervals;
-	private long current = 0;
-    private static DatabaseWriter db = null;
-    public Aggregator() {
-		dbc = new DatabaseConfig();
-		Calendar now = Calendar.getInstance();
-		current = now.getTimeInMillis();
-	}
-
-	public HashMap<String,String> findMacros(String query) throws SQLException {
-		boolean add=false;
-		HashMap<String,String> macroList = new HashMap<String,String>();
-		String macro="";
-	    for(int i=0;i<query.length();i++) {
-	    	if(query.charAt(i)==']') {
-	    		add=false;
-	    		if(!macroList.containsKey(macro)) {
-		    		String subString = computeMacro(macro);
-		    		macroList.put(macro,subString);	    			
-	    		}
-	    		macro="";
-	    	}
-	    	if(add) {
-	    		macro=macro+query.charAt(i);
-	    	}
-	    	if(query.charAt(i)=='[') {
-	    		add=true;
-	    	}
-	    }
-	    return macroList;
-	}
-
-	public String computeMacro(String macro) throws SQLException {
-		Pattern p = Pattern.compile("past_(.*)_minutes");
-		Matcher matcher = p.matcher(macro);
-		if(macro.indexOf("avg(")==0 || macro.indexOf("group_avg(")==0 || macro.indexOf("sum(")==0) {
-			String meta="";
-			String[] table = dbc.findTableName(macro.substring(macro.indexOf("(")+1,macro.indexOf(")")), current, current);
-			try {
-				String cluster = System.getProperty("CLUSTER");
-				if(cluster==null) {
-					cluster="unknown";
-				}
-                DatabaseMetaData dbMetaData = db.getConnection().getMetaData();
-	            ResultSet rs = dbMetaData.getColumns ( null,null,table[0], null);
-	            boolean first=true;
-	            while(rs.next()) {
-	            	if(!first) {
-	            		meta = meta+",";
-	            	}
-	            	String name = rs.getString(4);
-	            	int type = rs.getInt(5);
-	            	if(type==java.sql.Types.VARCHAR) {
-	            		if(macro.indexOf("group_avg(")<0) {
-	            			meta=meta+"count("+name+") as "+name;
-	            		} else {
-	            			meta=meta+name;
-	            		}
-		            	first=false;
-	            	} else if(type==java.sql.Types.DOUBLE ||
-	            			  type==java.sql.Types.FLOAT ||
-	            			  type==java.sql.Types.INTEGER) {
-	            		if(macro.indexOf("sum(")==0) {
-	            		    meta=meta+"sum("+name+")";	            			
-	            		} else {
-	            		    meta=meta+"avg("+name+")";
-	            		}
-		            	first=false;
-	            	} else if(type==java.sql.Types.TIMESTAMP) {
-	            		// Skip the column
-	            	} else {
-	            		if(macro.indexOf("sum(")==0) {
-	            		    meta=meta+"SUM("+name+")";
-	            		} else {
-		            		meta=meta+"AVG("+name+")";	            			
-	            		}
-		            	first=false;
-	            	}
-	            }
-	            if(first) {
-	          	    throw new SQLException("Table is undefined.");
-	            }
-			} catch(SQLException ex) {
-				throw new SQLException("Table does not exist:"+ table[0]);
-			}
-			return meta;
-		} else if(macro.indexOf("now")==0) {
-			SimpleDateFormat sdf = new SimpleDateFormat();
-			return DatabaseWriter.formatTimeStamp(current);
-		} else if(matcher.find()) {
-			int period = Integer.parseInt(matcher.group(1));
-			long timestamp = current - (current % (period*60*1000L)) - (period*60*1000L);
-			return DatabaseWriter.formatTimeStamp(timestamp);
-		} else if(macro.indexOf("past_hour")==0) {
-			return DatabaseWriter.formatTimeStamp(current-3600*1000L);
-		} else if(macro.endsWith("_week")) {
-			long partition = current / DatabaseConfig.WEEK;
-			if(partition<=0) {
-				partition=1;
-			}
-			String[] buffers = macro.split("_");
-			StringBuffer tableName = new StringBuffer();
-			for(int i=0;i<buffers.length-1;i++) {
-				tableName.append(buffers[i]);
-				tableName.append("_");
-			}
-			tableName.append(partition);
-			tableName.append("_week");
-			return tableName.toString();
-		} else if(macro.endsWith("_month")) {
-			long partition = current / DatabaseConfig.MONTH;
-			if(partition<=0) {
-				partition=1;
-			}
-			String[] buffers = macro.split("_");
-			StringBuffer tableName = new StringBuffer();
-			for(int i=0;i<buffers.length-1;i++) {
-				tableName.append(buffers[i]);
-				tableName.append("_");
-			}
-			tableName.append(partition);
-			tableName.append("_month");
-			return tableName.toString();
-		} else if(macro.endsWith("_quarter")) {
-			long partition = current / DatabaseConfig.QUARTER;
-			if(partition<=0) {
-				partition=1;
-			}
-			String[] buffers = macro.split("_");
-			StringBuffer tableName = new StringBuffer();
-			for(int i=0;i<buffers.length-1;i++) {
-				tableName.append(buffers[i]);
-				tableName.append("_");
-			}
-			tableName.append(partition);
-			tableName.append("_quarter");
-			return tableName.toString();
-		} else if(macro.endsWith("_year")) {
-			long partition = current / DatabaseConfig.YEAR;
-			if(partition<=0) {
-				partition=1;
-			}
-			String[] buffers = macro.split("_");
-			StringBuffer tableName = new StringBuffer();
-			for(int i=0;i<buffers.length-1;i++) {
-				tableName.append(buffers[i]);
-				tableName.append("_");
-			}
-			tableName.append(partition);
-			tableName.append("_year");
-			return tableName.toString();
-		} else if(macro.endsWith("_decade")) {
-			long partition = current / DatabaseConfig.DECADE;
-			if(partition<=0) {
-				partition=1;
-			}
-			String[] buffers = macro.split("_");
-			StringBuffer tableName = new StringBuffer();
-			for(int i=0;i<buffers.length-1;i++) {
-				tableName.append(buffers[i]);
-				tableName.append("_");
-			}
-			tableName.append(partition);
-			tableName.append("_decade");
-			return tableName.toString();
-		}
-		String[] tableList = dbc.findTableName(macro,current,current);
-		return tableList[0];
-	}
-
-	public static String getContents(File aFile) {
-        StringBuffer contents = new StringBuffer();    
-        try {
-        	BufferedReader input =  new BufferedReader(new FileReader(aFile));
-        	try {
-        		String line = null; //not declared within while loop
-        		while (( line = input.readLine()) != null){
-        			contents.append(line);
-        			contents.append(System.getProperty("line.separator"));
-        		}
-        	} finally {
-        		input.close();
-        	}
-        } catch (IOException ex){
-        	ex.printStackTrace();
-        }    
-        return contents.toString();
+  private static Log log = LogFactory.getLog(Aggregator.class);
+  private String table = null;
+  private String jdbc = null;
+  private int[] intervals;
+  private long current = 0;
+  private static DatabaseWriter db = null;
+
+  public Aggregator() {
+    dbc = new DatabaseConfig();
+    Calendar now = Calendar.getInstance();
+    current = now.getTimeInMillis();
+  }
+
+  public HashMap<String, String> findMacros(String query) throws SQLException {
+    boolean add = false;
+    HashMap<String, String> macroList = new HashMap<String, String>();
+    String macro = "";
+    for (int i = 0; i < query.length(); i++) {
+      if (query.charAt(i) == ']') {
+        add = false;
+        if (!macroList.containsKey(macro)) {
+          String subString = computeMacro(macro);
+          macroList.put(macro, subString);
+        }
+        macro = "";
+      }
+      if (add) {
+        macro = macro + query.charAt(i);
+      }
+      if (query.charAt(i) == '[') {
+        add = true;
+      }
     }
+    return macroList;
+  }
 
-	public void process(String query) {
-		ResultSet rs = null;
-		String[] columns;
-		int[] columnsType;
-        String groupBy = "";
-	    long start = current;
-	    long end = current;
-        
-
-		try {
-            HashMap<String, String> macroList = findMacros(query);
-            Iterator<String> macroKeys = macroList.keySet().iterator();
-            while(macroKeys.hasNext()) {
-        	    String mkey = macroKeys.next();
-        	    log.debug("replacing:"+mkey+" with "+macroList.get(mkey));
-	    	    query = query.replace("["+mkey+"]", macroList.get(mkey));
+  public String computeMacro(String macro) throws SQLException {
+    Pattern p = Pattern.compile("past_(.*)_minutes");
+    Matcher matcher = p.matcher(macro);
+    if (macro.indexOf("avg(") == 0 || macro.indexOf("group_avg(") == 0
+        || macro.indexOf("sum(") == 0) {
+      String meta = "";
+      String[] table = dbc.findTableName(macro.substring(
+          macro.indexOf("(") + 1, macro.indexOf(")")), current, current);
+      try {
+        String cluster = System.getProperty("CLUSTER");
+        if (cluster == null) {
+          cluster = "unknown";
+        }
+        DatabaseMetaData dbMetaData = db.getConnection().getMetaData();
+        ResultSet rs = dbMetaData.getColumns(null, null, table[0], null);
+        boolean first = true;
+        while (rs.next()) {
+          if (!first) {
+            meta = meta + ",";
+          }
+          String name = rs.getString(4);
+          int type = rs.getInt(5);
+          if (type == java.sql.Types.VARCHAR) {
+            if (macro.indexOf("group_avg(") < 0) {
+              meta = meta + "count(" + name + ") as " + name;
+            } else {
+              meta = meta + name;
+            }
+            first = false;
+          } else if (type == java.sql.Types.DOUBLE
+              || type == java.sql.Types.FLOAT || type == java.sql.Types.INTEGER) {
+            if (macro.indexOf("sum(") == 0) {
+              meta = meta + "sum(" + name + ")";
+            } else {
+              meta = meta + "avg(" + name + ")";
             }
-            db.execute(query);
-		} catch(SQLException e) {
-		    log.error(query);
-			log.error(e.getMessage());
-		}
-	}
-
-    public static void main(String[] args) {
-        log.info("Aggregator started.");
-    	dbc = new DatabaseConfig();
-		String cluster = System.getProperty("CLUSTER");
-		if(cluster==null) {
-			cluster="unknown";
-		}
-    	db = new DatabaseWriter(cluster);
-    	String queries = Aggregator.getContents(new File(System.getenv("CHUKWA_CONF_DIR")+File.separator+"aggregator.sql"));
-    	String[] query = queries.split("\n");
-    	for(int i=0;i<query.length;i++) {
-    		    if(query[i].equals("")) {
-    		    } else if(query[i].indexOf("#")==0) {
-    		    	log.debug("skipping: "+query[i]);
-    		    } else {
-    		    	Aggregator dba = new Aggregator();
-    		    	dba.process(query[i]);
-    		    }
+            first = false;
+          } else if (type == java.sql.Types.TIMESTAMP) {
+            // Skip the column
+          } else {
+            if (macro.indexOf("sum(") == 0) {
+              meta = meta + "SUM(" + name + ")";
+            } else {
+              meta = meta + "AVG(" + name + ")";
+            }
+            first = false;
+          }
+        }
+        if (first) {
+          throw new SQLException("Table is undefined.");
         }
-        db.close();
-    	log.info("Aggregator finished.");
+      } catch (SQLException ex) {
+        throw new SQLException("Table does not exist:" + table[0]);
+      }
+      return meta;
+    } else if (macro.indexOf("now") == 0) {
+      SimpleDateFormat sdf = new SimpleDateFormat();
+      return DatabaseWriter.formatTimeStamp(current);
+    } else if (matcher.find()) {
+      int period = Integer.parseInt(matcher.group(1));
+      long timestamp = current - (current % (period * 60 * 1000L))
+          - (period * 60 * 1000L);
+      return DatabaseWriter.formatTimeStamp(timestamp);
+    } else if (macro.indexOf("past_hour") == 0) {
+      return DatabaseWriter.formatTimeStamp(current - 3600 * 1000L);
+    } else if (macro.endsWith("_week")) {
+      long partition = current / DatabaseConfig.WEEK;
+      if (partition <= 0) {
+        partition = 1;
+      }
+      String[] buffers = macro.split("_");
+      StringBuffer tableName = new StringBuffer();
+      for (int i = 0; i < buffers.length - 1; i++) {
+        tableName.append(buffers[i]);
+        tableName.append("_");
+      }
+      tableName.append(partition);
+      tableName.append("_week");
+      return tableName.toString();
+    } else if (macro.endsWith("_month")) {
+      long partition = current / DatabaseConfig.MONTH;
+      if (partition <= 0) {
+        partition = 1;
+      }
+      String[] buffers = macro.split("_");
+      StringBuffer tableName = new StringBuffer();
+      for (int i = 0; i < buffers.length - 1; i++) {
+        tableName.append(buffers[i]);
+        tableName.append("_");
+      }
+      tableName.append(partition);
+      tableName.append("_month");
+      return tableName.toString();
+    } else if (macro.endsWith("_quarter")) {
+      long partition = current / DatabaseConfig.QUARTER;
+      if (partition <= 0) {
+        partition = 1;
+      }
+      String[] buffers = macro.split("_");
+      StringBuffer tableName = new StringBuffer();
+      for (int i = 0; i < buffers.length - 1; i++) {
+        tableName.append(buffers[i]);
+        tableName.append("_");
+      }
+      tableName.append(partition);
+      tableName.append("_quarter");
+      return tableName.toString();
+    } else if (macro.endsWith("_year")) {
+      long partition = current / DatabaseConfig.YEAR;
+      if (partition <= 0) {
+        partition = 1;
+      }
+      String[] buffers = macro.split("_");
+      StringBuffer tableName = new StringBuffer();
+      for (int i = 0; i < buffers.length - 1; i++) {
+        tableName.append(buffers[i]);
+        tableName.append("_");
+      }
+      tableName.append(partition);
+      tableName.append("_year");
+      return tableName.toString();
+    } else if (macro.endsWith("_decade")) {
+      long partition = current / DatabaseConfig.DECADE;
+      if (partition <= 0) {
+        partition = 1;
+      }
+      String[] buffers = macro.split("_");
+      StringBuffer tableName = new StringBuffer();
+      for (int i = 0; i < buffers.length - 1; i++) {
+        tableName.append(buffers[i]);
+        tableName.append("_");
+      }
+      tableName.append(partition);
+      tableName.append("_decade");
+      return tableName.toString();
+    }
+    String[] tableList = dbc.findTableName(macro, current, current);
+    return tableList[0];
+  }
+
+  public static String getContents(File aFile) {
+    StringBuffer contents = new StringBuffer();
+    try {
+      BufferedReader input = new BufferedReader(new FileReader(aFile));
+      try {
+        String line = null; // not declared within while loop
+        while ((line = input.readLine()) != null) {
+          contents.append(line);
+          contents.append(System.getProperty("line.separator"));
+        }
+      } finally {
+        input.close();
+      }
+    } catch (IOException ex) {
+      ex.printStackTrace();
+    }
+    return contents.toString();
+  }
+
+  public void process(String query) {
+    ResultSet rs = null;
+    String[] columns;
+    int[] columnsType;
+    String groupBy = "";
+    long start = current;
+    long end = current;
+
+    try {
+      HashMap<String, String> macroList = findMacros(query);
+      Iterator<String> macroKeys = macroList.keySet().iterator();
+      while (macroKeys.hasNext()) {
+        String mkey = macroKeys.next();
+        log.debug("replacing:" + mkey + " with " + macroList.get(mkey));
+        query = query.replace("[" + mkey + "]", macroList.get(mkey));
+      }
+      db.execute(query);
+    } catch (SQLException e) {
+      log.error(query);
+      log.error(e.getMessage());
+    }
+  }
+
+  public static void main(String[] args) {
+    log.info("Aggregator started.");
+    dbc = new DatabaseConfig();
+    String cluster = System.getProperty("CLUSTER");
+    if (cluster == null) {
+      cluster = "unknown";
+    }
+    db = new DatabaseWriter(cluster);
+    String queries = Aggregator.getContents(new File(System
+        .getenv("CHUKWA_CONF_DIR")
+        + File.separator + "aggregator.sql"));
+    String[] query = queries.split("\n");
+    for (int i = 0; i < query.length; i++) {
+      if (query[i].equals("")) {
+      } else if (query[i].indexOf("#") == 0) {
+        log.debug("skipping: " + query[i]);
+      } else {
+        Aggregator dba = new Aggregator();
+        dba.process(query[i]);
+      }
     }
+    db.close();
+    log.info("Aggregator finished.");
+  }
 
 }
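
The reformatted Aggregator drives everything from bracketed macros embedded in the queries read out of aggregator.sql: findMacros() walks each query character by character collecting the distinct [name] tokens, computeMacro() resolves each name to a column expression, a formatted timestamp or a partitioned table name (the _week/_month/_quarter/_year/_decade branches), and process() substitutes the results back in before db.execute() runs the rewritten SQL. Below is a standalone sketch of just the bracket-scanning step, kept independent of the database; the sample query text is invented for illustration.

    import java.util.LinkedHashSet;
    import java.util.Set;

    public class MacroScan {
      // Collects the distinct macro names found between '[' and ']',
      // mirroring the character walk in Aggregator.findMacros().
      static Set<String> findMacroNames(String query) {
        Set<String> macros = new LinkedHashSet<String>();
        StringBuilder macro = new StringBuilder();
        boolean add = false;
        for (int i = 0; i < query.length(); i++) {
          char c = query.charAt(i);
          if (c == ']') {
            add = false;
            macros.add(macro.toString());
            macro.setLength(0);
          }
          if (add) {
            macro.append(c);
          }
          if (c == '[') {
            add = true;
          }
        }
        return macros;
      }

      public static void main(String[] args) {
        // Invented example line in the spirit of aggregator.sql.
        String q = "replace into [system_metrics_week] (select * from "
            + "[system_metrics] where timestamp between '[past_5_minutes]' "
            + "and '[now]')";
        System.out.println(findMacroNames(q));
        // Prints: [system_metrics_week, system_metrics, past_5_minutes, now]
      }
    }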


