hadoop-common-commits mailing list archives

From: cutt...@apache.org
Subject: svn commit: r529410 [1/27] - in /lucene/hadoop/trunk: ./ src/contrib/abacus/src/examples/org/apache/hadoop/abacus/examples/ src/contrib/abacus/src/java/org/apache/hadoop/abacus/ src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ src/c...
Date: Mon, 16 Apr 2007 21:44:46 GMT
Author: cutting
Date: Mon Apr 16 14:44:35 2007
New Revision: 529410

URL: http://svn.apache.org/viewvc?view=rev&rev=529410
Log:
HADOOP-1148.  Re-indent all source code to consistently use two spaces per indent level.
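
For illustration, a minimal sketch of the convention this commit applies, using a hypothetical class that is not part of the change itself: each nesting level steps in by two spaces, and wrapped method arguments align under the opening parenthesis, as the hunks below show.

    public class IndentExample {          // hypothetical; illustrates the HADOOP-1148 style
      public static int sum(int[] values) {
        int total = 0;                    // first indent level: two spaces
        for (int v : values) {
          total += v;                     // second indent level: four spaces
        }
        return total;
      }
    }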

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/contrib/abacus/src/examples/org/apache/hadoop/abacus/examples/WordHistogramCountDescriptor.java
    lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/JobBase.java
    lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/LongValueSum.java
    lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/UserDefinedValueAggregatorDescriptor.java
    lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorCombiner.java
    lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJobBase.java
    lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorMapper.java
    lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorReducer.java
    lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueHistogram.java
    lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java
    lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java
    lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java
    lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/MuxOutputFormat.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamLineRecordReader.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamSequenceRecordReader.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java
    lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java
    lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java
    lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java
    lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java
    lucene/hadoop/trunk/src/examples/org/apache/hadoop/examples/ExampleDriver.java
    lucene/hadoop/trunk/src/examples/org/apache/hadoop/examples/PiEstimator.java
    lucene/hadoop/trunk/src/examples/org/apache/hadoop/examples/RandomWriter.java
    lucene/hadoop/trunk/src/examples/org/apache/hadoop/examples/Sort.java
    lucene/hadoop/trunk/src/examples/org/apache/hadoop/examples/WordCount.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Block.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/BlockCommand.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSAdmin.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSFileInfo.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeProtocol.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DfsPath.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSConstants.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSEditLog.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/IncorrectVersionException.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/LocatedBlock.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/PendingReplicationBlocks.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/StreamFile.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/TransferFsImage.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/filecache/DistributedCache.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/DF.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FSInputStream.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileUtil.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FilterFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FsShell.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/InMemoryFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/LocalFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/PositionedReadable.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/RawLocalFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/ArrayWritable.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/BooleanWritable.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/BytesWritable.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/SequenceFile.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/Text.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/TwoDArrayWritable.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/UTF8.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/WritableUtils.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/CompressionCodec.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/DecompressorStream.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/DefaultCodec.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/GzipCodec.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/retry/RetryProxy.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/SocketChannelOutputStream.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/Counters.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobClient.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobProfile.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobStatus.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobSubmissionProtocol.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/MRConstants.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/MapTask.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/MergeSorter.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/ReduceTask.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/ReduceTaskRunner.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/Reporter.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/RunningJob.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/SequenceFileInputFilter.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/StatusHttpServer.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/Task.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskLog.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskRunner.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskStatus.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/jobcontrol/Job.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/jobcontrol/JobControl.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/lib/NullOutputFormat.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/ContextFactory.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/MetricsContext.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/MetricsException.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/MetricsRecord.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/Updater.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/file/FileContext.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/spi/MetricValue.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/spi/MetricsRecordImpl.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/spi/NullContext.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/spi/OutputRecord.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/spi/Util.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NetworkTopology.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NodeBase.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/BinaryRecordInput.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/BinaryRecordOutput.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/Buffer.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/CsvRecordInput.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/CsvRecordOutput.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/Record.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/RecordOutput.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/Utils.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/XmlRecordInput.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/XmlRecordOutput.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/CGenerator.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/CppGenerator.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JBoolean.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JBuffer.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JByte.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JDouble.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JFile.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JFloat.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JMap.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JRecord.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JString.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JType.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JVector.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JavaGenerator.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/generated/ParseException.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/generated/Rcc.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/generated/RccTokenManager.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/generated/SimpleCharStream.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/generated/Token.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/generated/TokenMgrError.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/tools/Logalyzer.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/CopyFiles.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/NativeCodeLoader.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/PlatformName.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/PriorityQueue.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/ProgramDriver.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/ReflectionUtils.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/StringUtils.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/Tool.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/ToolBase.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/XMLUtils.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/DFSTestUtil.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/NNBench.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestCheckpoint.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSMkdirs.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShell.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestLocalDFS.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPendingReplication.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestSeekBug.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestSmallBlock.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DFSCIOTest.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DistributedFSCheck.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestCopyFiles.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestFileSystem.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestGlobPaths.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestArrayFile.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestBytesWritable.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSequenceFile.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSetFile.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestText.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestTextNonUTF8.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestVersionedWritable.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestWritable.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/compress/TestCodec.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestIPC.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestRPC.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRBench.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRCaching.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/PiEstimator.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/SortValidator.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestEmptyJobWithDFS.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMapOutputType.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMapRed.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRBringup.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRClasspath.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextOutputFormat.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/record/FromCpp.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/record/RecordBench.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordIO.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordMR.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/record/ToCpp.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/test/AllTestDriver.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Mon Apr 16 14:44:35 2007
@@ -210,6 +210,9 @@
 63. HADOOP-1258.  Fix TestCheckpoint test case to wait for 
     MiniDFSCluster to be active.  (Nigel Daley via tomwhite)
 
+64. HADOOP-1148.  Re-indent all Java source code to consistently use
+    two spaces per indent level.  (cutting)
+
 
 Release 0.12.3 - 2007-04-06
 

Modified: lucene/hadoop/trunk/src/contrib/abacus/src/examples/org/apache/hadoop/abacus/examples/WordHistogramCountDescriptor.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/abacus/src/examples/org/apache/hadoop/abacus/examples/WordHistogramCountDescriptor.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/abacus/src/examples/org/apache/hadoop/abacus/examples/WordHistogramCountDescriptor.java (original)
+++ lucene/hadoop/trunk/src/contrib/abacus/src/examples/org/apache/hadoop/abacus/examples/WordHistogramCountDescriptor.java Mon Apr 16 14:44:35 2007
@@ -39,21 +39,21 @@
    * 
    * @return a list of the generated pairs.
    */
-    public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
-        String words[] = val.toString().split(" |\t");
-        ArrayList<Entry> retv = new ArrayList<Entry>();
-        for (int i = 0; i < words.length; i++) {
-            Text valCount = new Text(words[i] + "\t" + "1");
-            Entry en = generateEntry(VALUE_HISTOGRAM, "WORD_HISTOGRAM",
-                    valCount);
-            retv.add(en);
-        }
-        return retv;
+  public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
+    String words[] = val.toString().split(" |\t");
+    ArrayList<Entry> retv = new ArrayList<Entry>();
+    for (int i = 0; i < words.length; i++) {
+      Text valCount = new Text(words[i] + "\t" + "1");
+      Entry en = generateEntry(VALUE_HISTOGRAM, "WORD_HISTOGRAM",
+                               valCount);
+      retv.add(en);
     }
+    return retv;
+  }
 
-    public void configure(JobConf job) {
+  public void configure(JobConf job) {
 
-    }
+  }
 
 
 }

Modified: lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/JobBase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/JobBase.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/JobBase.java (original)
+++ lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/JobBase.java Mon Apr 16 14:44:35 2007
@@ -137,13 +137,13 @@
     while (iter.hasNext()) {
       Entry e = (Entry) iter.next();
       sb.append(e.getKey().toString()).append("\t").append(e.getValue())
-          .append("\n");
+        .append("\n");
     }
     iter = this.doubleCounters.entrySet().iterator();
     while (iter.hasNext()) {
       Entry e = (Entry) iter.next();
       sb.append(e.getKey().toString()).append("\t").append(e.getValue())
-          .append("\n");
+        .append("\n");
     }
     return sb.toString();
   }

Modified: lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/LongValueSum.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/LongValueSum.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/LongValueSum.java (original)
+++ lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/LongValueSum.java Mon Apr 16 14:44:35 2007
@@ -27,69 +27,69 @@
  */
 public class LongValueSum implements ValueAggregator {
 
-    long sum = 0;
+  long sum = 0;
     
-    /**
-     *  the default constructor
-     *
-     */
-    public LongValueSum() {
-        reset();
-    }
+  /**
+   *  the default constructor
+   *
+   */
+  public LongValueSum() {
+    reset();
+  }
 
-    /**
-     * add a value to the aggregator
-     * 
-     * @param val
-     *          an object whose string representation represents a long value.
-     * 
-     */
-    public void addNextValue(Object val) {
-        this.sum += Long.parseLong(val.toString());
-    }
+  /**
+   * add a value to the aggregator
+   * 
+   * @param val
+   *          an object whose string representation represents a long value.
+   * 
+   */
+  public void addNextValue(Object val) {
+    this.sum += Long.parseLong(val.toString());
+  }
     
-    /**
-     * add a value to the aggregator
-     * 
-     * @param val
-     *          a long value.
-     * 
-     */
-    public void addNextValue(long val) {
-        this.sum += val;
-    }
+  /**
+   * add a value to the aggregator
+   * 
+   * @param val
+   *          a long value.
+   * 
+   */
+  public void addNextValue(long val) {
+    this.sum += val;
+  }
     
-    /**
-     * @return the aggregated value
-     */
-    public long getSum() {
-        return this.sum;
-    }
+  /**
+   * @return the aggregated value
+   */
+  public long getSum() {
+    return this.sum;
+  }
     
-    /**
-     * @return the string representation of the aggregated value
-     */
-    public String getReport() {
-        return ""+sum;
-    }
+  /**
+   * @return the string representation of the aggregated value
+   */
+  public String getReport() {
+    return ""+sum;
+  }
 
-    /**
-     * reset the aggregator
-     */
-    public void reset() {
-        sum = 0;
-    }
+  /**
+   * reset the aggregator
+   */
+  public void reset() {
+    sum = 0;
+  }
 
-    /**
-     * @return return an array of one element. The element is a string
-     *         representation of the aggregated value. The return value is
-     *         expected to be used by the a combiner.
-     */
-    public ArrayList getCombinerOutput() {
-        ArrayList retv = new ArrayList(1);
-        retv.add(getReport());
-        return retv;
-    }
+  /**
+   * @return return an array of one element. The element is a string
+   *         representation of the aggregated value. The return value is
+   *         expected to be used by the a combiner.
+   */
+  public ArrayList getCombinerOutput() {
+    ArrayList retv = new ArrayList(1);
+    retv.add(getReport());
+    return retv;
+  }
 }
 
 

Modified: lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/UserDefinedValueAggregatorDescriptor.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/UserDefinedValueAggregatorDescriptor.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/UserDefinedValueAggregatorDescriptor.java (original)
+++ lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/UserDefinedValueAggregatorDescriptor.java Mon Apr 16 14:44:35 2007
@@ -100,7 +100,7 @@
    */
   public String toString() {
     return "UserDefinedValueAggregatorDescriptor with class name:" + "\t"
-        + this.className;
+      + this.className;
   }
 
   /**

Modified: lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorCombiner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorCombiner.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorCombiner.java (original)
+++ lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorCombiner.java Mon Apr 16 14:44:35 2007
@@ -47,12 +47,12 @@
    * @param output to collect combined values
    */
   public void reduce(WritableComparable key, Iterator values,
-      OutputCollector output, Reporter reporter) throws IOException {
+                     OutputCollector output, Reporter reporter) throws IOException {
     String keyStr = key.toString();
     int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
     String type = keyStr.substring(0, pos);
     ValueAggregator aggregator = ValueAggregatorBaseDescriptor
-        .generateValueAggregator(type);
+      .generateValueAggregator(type);
     if (aggregator == null) {
       LOG.info(key.toString());
     }
@@ -84,7 +84,7 @@
    *
    */
   public void map(WritableComparable arg0, Writable arg1, OutputCollector arg2,
-      Reporter arg3) throws IOException {
+                  Reporter arg3) throws IOException {
     throw new IOException ("should not be called\n");
   }
 }

Modified: lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJobBase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJobBase.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJobBase.java (original)
+++ lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJobBase.java Mon Apr 16 14:44:35 2007
@@ -31,70 +31,70 @@
  */
 public abstract class ValueAggregatorJobBase extends JobBase {
  
-    protected ArrayList aggregatorDescriptorList = null;
+  protected ArrayList aggregatorDescriptorList = null;
         
-    public void configure(JobConf job) {
-        super.configure(job);
+  public void configure(JobConf job) {
+    super.configure(job);
         
-        setLongValue("totalCount", 0);
-        setLongValue("errorCount", 0);
-        setLongValue("collectedCount", 0);
-        setLongValue("groupCount", 0);
+    setLongValue("totalCount", 0);
+    setLongValue("errorCount", 0);
+    setLongValue("collectedCount", 0);
+    setLongValue("groupCount", 0);
         
-        this.initializeMySpec(job);
-        this.logSpec();
-    }
+    this.initializeMySpec(job);
+    this.logSpec();
+  }
 
-    private static ValueAggregatorDescriptor getValueAggregatorDescriptor(
-            String spec, JobConf job) {
-        if (spec == null)
-            return null;
-        String[] segments = spec.split(",", -1);
-        String type = segments[0];
-        if (type.compareToIgnoreCase("UserDefined") == 0) {
-            String className = segments[1];
-            return new UserDefinedValueAggregatorDescriptor(className, job);
-        } 
-        return null;
-    }
+  private static ValueAggregatorDescriptor getValueAggregatorDescriptor(
+                                                                        String spec, JobConf job) {
+    if (spec == null)
+      return null;
+    String[] segments = spec.split(",", -1);
+    String type = segments[0];
+    if (type.compareToIgnoreCase("UserDefined") == 0) {
+      String className = segments[1];
+      return new UserDefinedValueAggregatorDescriptor(className, job);
+    } 
+    return null;
+  }
 
-    private static ArrayList getAggregatorDescriptors(JobConf job) {
-        String advn = "aggregator.descriptor";
-        int num = job.getInt(advn + ".num", 0);
-        ArrayList retv = new ArrayList(num);
-        for (int i = 0; i < num; i++) {
-            String spec = job.get(advn + "." + i);
-            ValueAggregatorDescriptor ad = getValueAggregatorDescriptor(spec, job);
-            if (ad != null) {
-                retv.add(ad);
-            }
-        }
-        return retv;
+  private static ArrayList getAggregatorDescriptors(JobConf job) {
+    String advn = "aggregator.descriptor";
+    int num = job.getInt(advn + ".num", 0);
+    ArrayList retv = new ArrayList(num);
+    for (int i = 0; i < num; i++) {
+      String spec = job.get(advn + "." + i);
+      ValueAggregatorDescriptor ad = getValueAggregatorDescriptor(spec, job);
+      if (ad != null) {
+        retv.add(ad);
+      }
     }
+    return retv;
+  }
     
-    private void initializeMySpec(JobConf job) {
-        this.aggregatorDescriptorList = getAggregatorDescriptors(job);
-        if (this.aggregatorDescriptorList.size() == 0) {
-            this.aggregatorDescriptorList.add(new UserDefinedValueAggregatorDescriptor(
-                    ValueAggregatorBaseDescriptor.class.getCanonicalName(), job));
-        }
+  private void initializeMySpec(JobConf job) {
+    this.aggregatorDescriptorList = getAggregatorDescriptors(job);
+    if (this.aggregatorDescriptorList.size() == 0) {
+      this.aggregatorDescriptorList.add(new UserDefinedValueAggregatorDescriptor(
+                                                                                 ValueAggregatorBaseDescriptor.class.getCanonicalName(), job));
     }
+  }
     
-    protected void logSpec() {
-        StringBuffer sb = new StringBuffer();
-        sb.append("\n");
-        if (aggregatorDescriptorList == null) {
-            sb.append(" aggregatorDescriptorList: null");
-        } else {
-            sb.append(" aggregatorDescriptorList: ");
-            for (int i = 0; i < aggregatorDescriptorList.size(); i++) {
-                sb.append(" ").append(aggregatorDescriptorList.get(i).toString());
-            }
-        }      
-        LOG.info(sb.toString());
-    }
+  protected void logSpec() {
+    StringBuffer sb = new StringBuffer();
+    sb.append("\n");
+    if (aggregatorDescriptorList == null) {
+      sb.append(" aggregatorDescriptorList: null");
+    } else {
+      sb.append(" aggregatorDescriptorList: ");
+      for (int i = 0; i < aggregatorDescriptorList.size(); i++) {
+        sb.append(" ").append(aggregatorDescriptorList.get(i).toString());
+      }
+    }      
+    LOG.info(sb.toString());
+  }
 
-    public void close() throws IOException {
-        report();
-    }
+  public void close() throws IOException {
+    report();
+  }
 }

Modified: lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorMapper.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorMapper.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorMapper.java (original)
+++ lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorMapper.java Mon Apr 16 14:44:35 2007
@@ -37,7 +37,7 @@
    *  list to generate aggregation id/value pairs and emit them.
    */
   public void map(WritableComparable key, Writable value,
-      OutputCollector output, Reporter reporter) throws IOException {
+                  OutputCollector output, Reporter reporter) throws IOException {
 
     addLongValue("groupCount", 1);
     Iterator iter = this.aggregatorDescriptorList.iterator();
@@ -47,7 +47,7 @@
       while (ens.hasNext()) {
         Entry en = ens.next();
         output.collect((WritableComparable) en.getKey(), (Writable) en
-            .getValue());
+                       .getValue());
         addLongValue("collectedCount", 1);
       }
     }
@@ -61,7 +61,7 @@
    * Do nothing. Should not be called.
    */
   public void reduce(WritableComparable arg0, Iterator arg1,
-      OutputCollector arg2, Reporter arg3) throws IOException {
+                     OutputCollector arg2, Reporter arg3) throws IOException {
     throw new IOException ("should not be called\n");
   }
 }

Modified: lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorReducer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorReducer.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorReducer.java (original)
+++ lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorReducer.java Mon Apr 16 14:44:35 2007
@@ -45,16 +45,16 @@
    * @value the values to be aggregated
    */
   public void reduce(WritableComparable key, Iterator values,
-      OutputCollector output, Reporter reporter) throws IOException {
+                     OutputCollector output, Reporter reporter) throws IOException {
     addLongValue("groupCount", 1);
     String keyStr = key.toString();
     int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
     String type = keyStr.substring(0, pos);
     keyStr = keyStr.substring(pos
-        + ValueAggregatorDescriptor.TYPE_SEPARATOR.length());
+                              + ValueAggregatorDescriptor.TYPE_SEPARATOR.length());
 
     ValueAggregator aggregator = ValueAggregatorBaseDescriptor
-        .generateValueAggregator(type);
+      .generateValueAggregator(type);
     while (values.hasNext()) {
       addLongValue("totalCount", 1);
       aggregator.addNextValue(values.next());
@@ -73,7 +73,7 @@
    * Do nothing. Should not be called
    */
   public void map(WritableComparable arg0, Writable arg1, OutputCollector arg2,
-      Reporter arg3) throws IOException {
+                  Reporter arg3) throws IOException {
     throw new IOException ("should not be called\n");
   }
 }

Modified: lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueHistogram.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueHistogram.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueHistogram.java (original)
+++ lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueHistogram.java Mon Apr 16 14:44:35 2007
@@ -96,7 +96,7 @@
       }
       acc += nextVal * (j - i);
       //sbVal.append("\t").append(nextVal).append("\t").append(j - i)
-          //.append("\n");
+      //.append("\n");
       i = j;
     }
     double average = 0.0;
@@ -137,7 +137,7 @@
       Object val = en.getKey();
       Long count = (Long) en.getValue();
       sb.append("\t").append(val.toString()).append("\t").append(
-          count.longValue()).append("\n");
+                                                                 count.longValue()).append("\n");
     }
     return sb.toString();
   }

Modified: lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java (original)
+++ lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java Mon Apr 16 14:44:35 2007
@@ -97,7 +97,7 @@
     job.setOutputPath(new Path(outputDir));
     job.setOutputFormat(outputFormat);
     SequenceFile.setCompressionType(job,
-        SequenceFile.CompressionType.BLOCK);
+                                    SequenceFile.CompressionType.BLOCK);
     job.setMapOutputKeyClass(Text.class);
     job.setMapOutputValueClass(mapoutputValueClass);
     job.setOutputKeyClass(Text.class);
@@ -107,7 +107,7 @@
     job.setNumMapTasks(1);
     job.setNumReduceTasks(numOfReducers);
     job.setLong("ultjoin.maxNumOfValuesPerGroup",
-        maxNumOfValuesPerGroup);
+                maxNumOfValuesPerGroup);
     job.set("mapred.child.java.opts", "-Xmx1024m");
     job.setKeepFailedTaskFiles(true);
     return job;
@@ -153,9 +153,9 @@
     boolean success;
     if (args.length < 7 || args.length > 9) {
       System.out.println("usage: DataJoinJob " + "inputdirs outputdir "
-          + "numofParts " + "mapper_class " + "reducer_class "
-          + "map_output_value_class "
-          + "output_value_class [maxNumOfValuesPerGroup [descriptionOfJob]]]");
+                         + "numofParts " + "mapper_class " + "reducer_class "
+                         + "map_output_value_class "
+                         + "output_value_class [maxNumOfValuesPerGroup [descriptionOfJob]]]");
       System.exit(-1);
     }
 

Modified: lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java (original)
+++ lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java Mon Apr 16 14:44:35 2007
@@ -90,7 +90,7 @@
   protected abstract Text generateGroupKey(TaggedMapOutput aRecord);
 
   public void map(WritableComparable key, Writable value,
-      OutputCollector output, Reporter reporter) throws IOException {
+                  OutputCollector output, Reporter reporter) throws IOException {
     if (this.reporter == null) {
       this.reporter = reporter;
     }
@@ -116,7 +116,7 @@
   }
 
   public void reduce(WritableComparable arg0, Iterator arg1,
-      OutputCollector arg2, Reporter arg3) throws IOException {
+                     OutputCollector arg2, Reporter arg3) throws IOException {
     // TODO Auto-generated method stub
 
   }

Modified: lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java (original)
+++ lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java Mon Apr 16 14:44:35 2007
@@ -69,7 +69,7 @@
     super.configure(job);
     this.job = job;
     this.maxNumOfValuesPerGroup = job.getLong("ultjoin.maxNumOfValuesPerGroup",
-        100);
+                                              100);
   }
 
   /**
@@ -92,7 +92,7 @@
    * @return
    */
   private SortedMap<Object, ResetableIterator> regroup(Writable key,
-      Iterator arg1, Reporter reporter) throws IOException {
+                                                       Iterator arg1, Reporter reporter) throws IOException {
     this.numOfValues = 0;
     SortedMap<Object, ResetableIterator> retv = new TreeMap<Object, ResetableIterator>();
     TaggedMapOutput aRecord = null;
@@ -101,7 +101,7 @@
       this.numOfValues += 1;
       if (this.numOfValues % 100 == 0) {
         reporter.setStatus("key: " + key.toString() + " numOfValues: "
-            + this.numOfValues);
+                           + this.numOfValues);
       }
       if (this.numOfValues > this.maxNumOfValuesPerGroup) {
         continue;
@@ -117,13 +117,13 @@
     if (this.numOfValues > this.largestNumOfValues) {
       this.largestNumOfValues = numOfValues;
       LOG.info("key: " + key.toString() + " this.largestNumOfValues: "
-          + this.largestNumOfValues);
+               + this.largestNumOfValues);
     }
     return retv;
   }
 
   public void reduce(WritableComparable key, Iterator values,
-      OutputCollector output, Reporter reporter) throws IOException {
+                     OutputCollector output, Reporter reporter) throws IOException {
     if (this.reporter == null) {
       this.reporter = reporter;
     }
@@ -152,7 +152,7 @@
    * @throws IOException
    */
   protected void collect(WritableComparable key, TaggedMapOutput aRecord,
-      OutputCollector output, Reporter reporter) throws IOException {
+                         OutputCollector output, Reporter reporter) throws IOException {
     this.collected += 1;
     addLongValue("collectedCount", 1);
     if (aRecord != null && this.collected % 1 == 0) {
@@ -174,8 +174,8 @@
    * @throws IOException
    */
   private void joinAndCollect(Object[] tags, ResetableIterator[] values,
-      WritableComparable key, OutputCollector output, Reporter reporter)
-      throws IOException {
+                              WritableComparable key, OutputCollector output, Reporter reporter)
+    throws IOException {
     if (values.length < 1) {
       return;
     }
@@ -199,8 +199,8 @@
    * @throws IOException
    */
   private void joinAndCollect(Object[] tags, ResetableIterator[] values,
-      int pos, Object[] partialList, WritableComparable key,
-      OutputCollector output, Reporter reporter) throws IOException {
+                              int pos, Object[] partialList, WritableComparable key,
+                              OutputCollector output, Reporter reporter) throws IOException {
 
     if (values.length == pos) {
       // get a value from each source. Combine them
@@ -232,7 +232,7 @@
   protected abstract TaggedMapOutput combine(Object[] tags, Object[] values);
 
   public void map(WritableComparable arg0, Writable arg1, OutputCollector arg2,
-      Reporter arg3) throws IOException {
+                  Reporter arg3) throws IOException {
     // TODO Auto-generated method stub
 
   }

Modified: lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java (original)
+++ lucene/hadoop/trunk/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java Mon Apr 16 14:44:35 2007
@@ -149,13 +149,13 @@
     while (iter.hasNext()) {
       Entry e = (Entry) iter.next();
       sb.append(e.getKey().toString()).append("\t").append(e.getValue())
-          .append("\n");
+        .append("\n");
     }
     iter = this.doubleCounters.entrySet().iterator();
     while (iter.hasNext()) {
       Entry e = (Entry) iter.next();
       sb.append(e.getKey().toString()).append("\t").append(e.getValue())
-          .append("\n");
+        .append("\n");
     }
     return sb.toString();
   }

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java Mon Apr 16 14:44:35 2007
@@ -77,7 +77,7 @@
     if (false == isInputSpec_) {
       if (msup > 1) {
         throw new IllegalStateException("A -output spec cannot use merged streams ('" + MERGE_SEP
-            + "' delimiter)");
+                                        + "' delimiter)");
       }
     }
     for (int m = 0; m < msup; m++) {
@@ -121,7 +121,7 @@
 
   void throwBadNumPrimaryInputSpecs() throws IllegalStateException {
     String msg = "A compound -input spec needs exactly one primary path prefixed with "
-        + PRIMARY_PREFIX;
+      + PRIMARY_PREFIX;
     msg += ":\n";
     msg += toTableString();
     throw new IllegalStateException(msg);
@@ -151,13 +151,13 @@
   }
   
   /*
-   Example input spec in table form:
-   <1 +[/input/part-00] 
-   <2  [/input/part-01] 
-   <3  [/input/part-02] 
-   Example output spec in table form:
-   +[/my.output] 
-   */
+    Example input spec in table form:
+    <1 +[/input/part-00] 
+    <2  [/input/part-01] 
+    <3  [/input/part-02] 
+    Example output spec in table form:
+    +[/my.output] 
+  */
   public String toTableString() {
     StringBuffer buf = new StringBuffer();
     int maxWid = 0;
@@ -204,9 +204,9 @@
   }
 
   /** 
-   @see #primaryRow 
-   @see #primaryCol
-   */
+      @see #primaryRow 
+      @see #primaryCol
+  */
   public String[][] getPaths() {
     return paths_;
   }

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java Mon Apr 16 14:44:35 2007
@@ -41,8 +41,8 @@
     if (OS.indexOf("Windows") > -1) {
       command = "cmd /C set";
     } else if (lowerOs.indexOf("ix") > -1 || lowerOs.indexOf("linux") > -1
-        || lowerOs.indexOf("freebsd") > -1 || lowerOs.indexOf("sunos") > -1
-        || lowerOs.indexOf("solaris") > -1 || lowerOs.indexOf("hp-ux") > -1) {
+               || lowerOs.indexOf("freebsd") > -1 || lowerOs.indexOf("sunos") > -1
+               || lowerOs.indexOf("solaris") > -1 || lowerOs.indexOf("hp-ux") > -1) {
       command = "env";
     } else if (lowerOs.startsWith("mac os x")) {
       command = "env";

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java Mon Apr 16 14:44:35 2007
@@ -80,10 +80,10 @@
   }
 
   /** Delegate to the primary InputFormat. 
-   Force full-file splits since there's no index to sync secondaries.
-   (and if there was, this index may need to be created for the first time
-   full file at a time...    )
-   */
+      Force full-file splits since there's no index to sync secondaries.
+      (and if there was, this index may need to be created for the first time
+      full file at a time...    )
+  */
   public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     return ((StreamInputFormat) primary_).getSplits(job, numSplits);
   }
@@ -119,20 +119,20 @@
   }
 
   /*
-   private FileSplit relatedSplit(FileSplit primarySplit, int i, CompoundDirSpec spec) throws IOException
-   {
-   if(i == 0) {
-   return primarySplit;
-   }
-
-   // TODO based on custom JobConf (or indirectly: InputFormat-s?)
-   String path = primarySplit.getFile().getAbsolutePath();
-   Path rpath = new Path(path + "." + i);
-
-   long rlength = fs_.getLength(rpath);
-   FileSplit related = new FileSplit(rpath, 0, rlength);
-   return related;    
-   }*/
+    private FileSplit relatedSplit(FileSplit primarySplit, int i, CompoundDirSpec spec) throws IOException
+    {
+    if(i == 0) {
+    return primarySplit;
+    }
+
+    // TODO based on custom JobConf (or indirectly: InputFormat-s?)
+    String path = primarySplit.getFile().getAbsolutePath();
+    Path rpath = new Path(path + "." + i);
+
+    long rlength = fs_.getLength(rpath);
+    FileSplit related = new FileSplit(rpath, 0, rlength);
+    return related;    
+    }*/
 
   class MergedRecordReader implements RecordReader {
 
@@ -237,7 +237,7 @@
             src = new Text(">" + tag + "\t" + src.toString()); // breaks anything?
           } else {
             throw new UnsupportedOperationException("Cannot use with tags with key class "
-                + src.getClass());
+                                                    + src.getClass());
           }
         }
         src.write(outBuf);

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/MuxOutputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/MuxOutputFormat.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/MuxOutputFormat.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/MuxOutputFormat.java Mon Apr 16 14:44:35 2007
@@ -88,7 +88,7 @@
         int c = Integer.parseInt(s1);
         if (c < 1 || c > max) {
           String msg = "Output channel '" + s + "': must be an integer between 1 and " + max
-              + " followed by '" + CHANOUT + "' and TAB";
+            + " followed by '" + CHANOUT + "' and TAB";
           throw new IndexOutOfBoundsException(msg);
         }
         return c;

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java Mon Apr 16 14:44:35 2007
@@ -28,121 +28,121 @@
  */
 public class PathFinder
 {
-    String pathenv;        // a string of pathnames
-    String pathSep;        // the path seperator
-    String fileSep;        // the file seperator in a directory
-
-    /**
-     * Construct a PathFinder object using the path from
-     * java.class.path
-     */
-    public PathFinder()
-    {
-        pathenv = System.getProperty("java.class.path");
-        pathSep = System.getProperty("path.separator");
-        fileSep = System.getProperty("file.separator");
-    }
-
-    /**
-     * Construct a PathFinder object using the path from
-     * the specified system environment variable.
-     */
-    public PathFinder(String envpath)
-    {
-        pathenv = System.getenv(envpath);
-        pathSep = System.getProperty("path.separator");
-        fileSep = System.getProperty("file.separator");
-    }
-
-    /**
-     * Appends the specified component to the path list
-     */
-    public void prependPathComponent(String str)
-    {
-        pathenv = str + pathSep + pathenv;
-    }
-
-    /**
-     * Returns the full path name of this file if it is listed in the
-     * path
-     */
-    public File getAbsolutePath(String filename)
-    {
-        if (pathenv == null || pathSep == null  || fileSep == null)
-        {
-            return null;
-        }
-        int     val = -1;
-        String    classvalue = pathenv + pathSep;
-
-        while (((val = classvalue.indexOf(pathSep)) >= 0) &&
-               classvalue.length() > 0) {
-            //
-            // Extract each entry from the pathenv
-            //
-            String entry = classvalue.substring(0, val).trim();
-            File f = new File(entry);
-
-            try {
-                if (f.isDirectory()) {
-                    //
-                    // this entry in the pathenv is a directory.
-                    // see if the required file is in this directory
-                    //
-                    f = new File(entry + fileSep + filename);
-                }
-                //
-                // see if the filename matches and  we can read it
-                //
-                if (f.isFile() && f.canRead()) {
-                    return f;
-                }
-            } catch (Exception exp){ }
-            classvalue = classvalue.substring(val+1).trim();
-        }
+  String pathenv;        // a string of pathnames
+  String pathSep;        // the path seperator
+  String fileSep;        // the file seperator in a directory
+
+  /**
+   * Construct a PathFinder object using the path from
+   * java.class.path
+   */
+  public PathFinder()
+  {
+    pathenv = System.getProperty("java.class.path");
+    pathSep = System.getProperty("path.separator");
+    fileSep = System.getProperty("file.separator");
+  }
+
+  /**
+   * Construct a PathFinder object using the path from
+   * the specified system environment variable.
+   */
+  public PathFinder(String envpath)
+  {
+    pathenv = System.getenv(envpath);
+    pathSep = System.getProperty("path.separator");
+    fileSep = System.getProperty("file.separator");
+  }
+
+  /**
+   * Appends the specified component to the path list
+   */
+  public void prependPathComponent(String str)
+  {
+    pathenv = str + pathSep + pathenv;
+  }
+
+  /**
+   * Returns the full path name of this file if it is listed in the
+   * path
+   */
+  public File getAbsolutePath(String filename)
+  {
+    if (pathenv == null || pathSep == null  || fileSep == null)
+      {
         return null;
+      }
+    int     val = -1;
+    String    classvalue = pathenv + pathSep;
+
+    while (((val = classvalue.indexOf(pathSep)) >= 0) &&
+           classvalue.length() > 0) {
+      //
+      // Extract each entry from the pathenv
+      //
+      String entry = classvalue.substring(0, val).trim();
+      File f = new File(entry);
+
+      try {
+        if (f.isDirectory()) {
+          //
+          // this entry in the pathenv is a directory.
+          // see if the required file is in this directory
+          //
+          f = new File(entry + fileSep + filename);
+        }
+        //
+        // see if the filename matches and  we can read it
+        //
+        if (f.isFile() && f.canRead()) {
+          return f;
+        }
+      } catch (Exception exp){ }
+      classvalue = classvalue.substring(val+1).trim();
+    }
+    return null;
+  }
+
+  /**
+   * prints all environment variables for this process
+   */
+  private static void printEnvVariables() {
+    System.out.println("Environment Variables: ");
+    Map<String,String> map = System.getenv();
+    Set<String> keys = map.keySet();
+    Iterator iter = keys.iterator();
+    while(iter.hasNext()) {
+      String thiskey = (String)(iter.next()); 
+      String value = map.get(thiskey);
+      System.out.println(thiskey + " = " + value);
+    }
+  }
+
+  /**
+   * prints all system properties for this process
+   */
+  private static void printSystemProperties() {
+    System.out.println("System properties: ");
+    java.util.Properties p = System.getProperties();
+    java.util.Enumeration keys = p.keys();
+    while(keys.hasMoreElements()) {
+      String thiskey = (String)keys.nextElement();
+      String value = p.getProperty(thiskey);
+      System.out.println(thiskey + " = " + value);
+    }
+  }
+
+  public static void main(String args[]) throws IOException {
+
+    if (args.length < 1) {
+      System.out.println("Usage: java PathFinder <filename>");
+      System.exit(1);
+    }
+
+    PathFinder finder = new PathFinder("PATH");
+    File file = finder.getAbsolutePath(args[0]);
+    if (file != null) {
+      System.out.println("Full path name = " + file.getCanonicalPath());
     }
-
-    /**
-     * prints all environment variables for this process
-     */
-    private static void printEnvVariables() {
-        System.out.println("Environment Variables: ");
-        Map<String,String> map = System.getenv();
-        Set<String> keys = map.keySet();
-        Iterator iter = keys.iterator();
-        while(iter.hasNext()) {
-            String thiskey = (String)(iter.next()); 
-            String value = map.get(thiskey);
-            System.out.println(thiskey + " = " + value);
-        }
-    }
-
-    /**
-     * prints all system properties for this process
-     */
-    private static void printSystemProperties() {
-        System.out.println("System properties: ");
-        java.util.Properties p = System.getProperties();
-        java.util.Enumeration keys = p.keys();
-        while(keys.hasMoreElements()) {
-            String thiskey = (String)keys.nextElement();
-            String value = p.getProperty(thiskey);
-            System.out.println(thiskey + " = " + value);
-        }
-    }
-
-    public static void main(String args[]) throws IOException {
-
-        if (args.length < 1) {
-            System.out.println("Usage: java PathFinder <filename>");
-            System.exit(1);
-        }
-
-        PathFinder finder = new PathFinder("PATH");
-        File file = finder.getAbsolutePath(args[0]);
-        if (file != null) {
-            System.out.println("Full path name = " + file.getCanonicalPath());
-        }
-    }
+  }
 }
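
PathFinder is consumed by PipeMapRed later in this commit to resolve a relative command name against PATH. A minimal usage sketch, assuming PathFinder is on the classpath; the prepended directory is hypothetical:

    import java.io.File;
    import java.io.IOException;

    public class PathFinderDemo {
      public static void main(String[] args) throws IOException {
        PathFinder finder = new PathFinder("PATH");
        finder.prependPathComponent("/tmp/hadoop/jobcache"); // assumed job-local dir
        File exe = finder.getAbsolutePath("sort");
        System.out.println(exe != null ? exe.getCanonicalPath() : "not found");
      }
    }

Note that despite its Javadoc ("Appends the specified component"), prependPathComponent puts the new entry at the front of the search order, so job-local directories take precedence over system PATH entries.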

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java Mon Apr 16 14:44:35 2007
@@ -55,8 +55,8 @@
     try {
       return URLDecoder.decode(str, "UTF-8");
     } catch (UnsupportedEncodingException e) {
-        System.err.println("stream.combine.streamprocessor in jobconf not found");
-        return null;
+      System.err.println("stream.combine.streamprocessor in jobconf not found");
+      return null;
     }
   }
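
The decode in this hunk is the read side of how streaming carries the user's combiner command through the job configuration as a URL-encoded string. A round-trip sketch; the encode side is assumed, as it is not shown in this diff:

    import java.io.UnsupportedEncodingException;
    import java.net.URLDecoder;
    import java.net.URLEncoder;

    class StreamProcessorCodec {
      // Encode a command so it survives storage in the jobconf.
      static String encode(String cmd) throws UnsupportedEncodingException {
        return URLEncoder.encode(cmd, "UTF-8");
      }
      // Mirrors the URLDecoder.decode(str, "UTF-8") call above.
      static String decode(String str) {
        try {
          return URLDecoder.decode(str, "UTF-8");
        } catch (UnsupportedEncodingException e) {
          return null; // unreachable in practice: UTF-8 is a mandatory JDK charset
        }
      }
    }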
 

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java Mon Apr 16 14:44:35 2007
@@ -65,7 +65,7 @@
   abstract String getKeyColPropName();
 
   /** Write output as side-effect files rather than as map outputs.
-   This is useful to do "Map" tasks rather than "MapReduce" tasks. */
+      This is useful to do "Map" tasks rather than "MapReduce" tasks. */
   boolean getUseSideEffect() {
     return false;
   }
@@ -168,11 +168,11 @@
       }
       final Socket sock = new Socket(uri.getHost(), uri.getPort());
       OutputStream out = new FilterOutputStream(sock.getOutputStream()) {
-        public void close() throws IOException {
-          sock.close();
-          super.close();
-        }
-      };
+          public void close() throws IOException {
+            sock.close();
+            super.close();
+          }
+        };
       return out;
     } else {
       // a FSDataOutputStreamm, localFS or HDFS.
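
The anonymous class re-indented in this hunk is a small idiom worth noting: FilterOutputStream.close() closes only the wrapped stream, so the override also closes the owning socket. The same pattern as a standalone helper, mirroring the hunk above:

    import java.io.FilterOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.net.Socket;

    public class SocketStreams {
      // Wrap a socket's output stream so that closing the stream
      // also closes the socket, in the same order as the code above.
      static OutputStream wrap(final Socket sock) throws IOException {
        return new FilterOutputStream(sock.getOutputStream()) {
            public void close() throws IOException {
              sock.close();
              super.close();
            }
          };
      }
    }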
@@ -184,7 +184,7 @@
   String getSideEffectFileName() {
     FileSplit split = StreamUtil.getCurrentSplit(job_);
     return new String(split.getPath().getName() + "-" + split.getStart() + 
-            "-" + split.getLength());
+                      "-" + split.getLength());
   }
 
   public void configure(JobConf job) {
@@ -276,13 +276,13 @@
       // tasktracker's local working directory
       //
       if (!new File(argvSplit[0]).isAbsolute()) {
-          PathFinder finder = new PathFinder("PATH");
-          finder.prependPathComponent(jobCacheDir.toString());
-          File f = finder.getAbsolutePath(argvSplit[0]);
-          if (f != null) {
-              argvSplit[0] = f.getAbsolutePath();
-          }
-          f = null;
+        PathFinder finder = new PathFinder("PATH");
+        finder.prependPathComponent(jobCacheDir.toString());
+        File f = finder.getAbsolutePath(argvSplit[0]);
+        if (f != null) {
+          argvSplit[0] = f.getAbsolutePath();
+        }
+        f = null;
       }
       logprintln("PipeMapRed exec " + Arrays.asList(argvSplit));
       logprintln("sideEffectURI_=" + finalOutputURI);
@@ -293,11 +293,11 @@
       sim = Runtime.getRuntime().exec(argvSplit, childEnv.toArray());
 
       /* // This way required jdk1.5
-       Builder processBuilder = new ProcessBuilder(argvSplit);
-       Map<String, String> env = processBuilder.environment();
-       addEnvironment(env, job_.get("stream.addenvironment"));
-       sim = processBuilder.start();
-       */
+         Builder processBuilder = new ProcessBuilder(argvSplit);
+         Map<String, String> env = processBuilder.environment();
+         addEnvironment(env, job_.get("stream.addenvironment"));
+         sim = processBuilder.start();
+      */
 
       clientOut_ = new DataOutputStream(new BufferedOutputStream(sim.getOutputStream()));
       clientIn_ = new DataInputStream(new BufferedInputStream(sim.getInputStream()));
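
The commented-out block kept in this hunk documents the JDK 1.5 alternative to Runtime.exec: ProcessBuilder, whose environment() map is edited in place before launch. A minimal sketch of that path; the stream.addenvironment parsing is reduced to one hypothetical variable:

    import java.io.IOException;
    import java.util.Map;

    public class ProcessBuilderDemo {
      public static void main(String[] args) throws IOException {
        ProcessBuilder processBuilder = new ProcessBuilder("/bin/cat"); // assumes a Unix host
        Map<String, String> env = processBuilder.environment();
        env.put("STREAM_DEMO", "1"); // stand-in for stream.addenvironment entries
        Process sim = processBuilder.start();
        sim.getOutputStream().close(); // no stdin; let the child exit
      }
    }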
@@ -626,8 +626,8 @@
   String numRecInfo() {
     long elapsed = (System.currentTimeMillis() - startTime_) / 1000;
     return "R/W/S=" + numRecRead_ + "/" + numRecWritten_ + "/" + numRecSkipped_ + " in:"
-        + safeDiv(numRecRead_, elapsed) + " [rec/s]" + " out:" + safeDiv(numRecWritten_, elapsed)
-        + " [rec/s]";
+      + safeDiv(numRecRead_, elapsed) + " [rec/s]" + " out:" + safeDiv(numRecWritten_, elapsed)
+      + " [rec/s]";
   }
 
   String safeDiv(long n, long d) {
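
The hunk's trailing context stops at safeDiv's signature; its body is outside this diff, but the call sites in numRecInfo() imply a divide-by-zero guard, since elapsed is 0 during the first second of a task. A plausible reconstruction, not the committed implementation:

    // Hypothetical body: format n/d while avoiding ArithmeticException
    // when d == 0, as needed by the rec/s rates in numRecInfo().
    String safeDiv(long n, long d) {
      return (d == 0) ? "NA" : String.valueOf(n / d);
    }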

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java Mon Apr 16 14:44:35 2007
@@ -45,8 +45,8 @@
     try {
       return URLDecoder.decode(str, "UTF-8");
     } catch (UnsupportedEncodingException e) {
-        System.err.println("stream.reduce.streamprocessor in jobconf not found");
-        return null;
+      System.err.println("stream.reduce.streamprocessor in jobconf not found");
+      return null;
     }
   }
 
@@ -61,7 +61,7 @@
   }
 
   public void reduce(WritableComparable key, Iterator values, OutputCollector output,
-      Reporter reporter) throws IOException {
+                     Reporter reporter) throws IOException {
 
     // init
     if (doPipe_ && outThread_ == null) {

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java Mon Apr 16 14:44:35 2007
@@ -48,7 +48,7 @@
   final static String CONF_NS = "stream.recordreader.";
 
   public StreamBaseRecordReader(FSDataInputStream in, FileSplit split, Reporter reporter,
-      JobConf job, FileSystem fs) throws IOException {
+                                JobConf job, FileSystem fs) throws IOException {
     in_ = in;
     split_ = split;
     start_ = split_.getStart();
@@ -131,9 +131,9 @@
       recStr = record.toString();
     }
     String unqualSplit = split_.getFile().getName() + ":" + split_.getStart() + "+"
-        + split_.getLength();
+      + split_.getLength();
     String status = "HSTR " + StreamUtil.HOST + " " + numRec_ + ". pos=" + pos + " " + unqualSplit
-        + " Processing record=" + recStr;
+      + " Processing record=" + recStr;
     status += " " + splitName_;
     return status;
   }

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java Mon Apr 16 14:44:35 2007
@@ -34,7 +34,7 @@
 public class StreamInputFormat extends KeyValueTextInputFormat {
 
   public RecordReader getRecordReader(final InputSplit genericSplit,
-      JobConf job, Reporter reporter) throws IOException {
+                                      JobConf job, Reporter reporter) throws IOException {
     String c = job.get("stream.recordreader.class");
     if (c == null || c.indexOf("LineRecordReader") >= 0) {
       return super.getRecordReader(genericSplit, job, reporter);
@@ -62,7 +62,7 @@
     Constructor ctor;
     try {
       ctor = readerClass.getConstructor(new Class[] { FSDataInputStream.class,
-          FileSplit.class, Reporter.class, JobConf.class, FileSystem.class });
+                                                      FileSplit.class, Reporter.class, JobConf.class, FileSystem.class });
     } catch (NoSuchMethodException nsm) {
       throw new RuntimeException(nsm);
     }
@@ -70,7 +70,7 @@
     RecordReader reader;
     try {
       reader = (RecordReader) ctor.newInstance(new Object[] { in, split,
-          reporter, job, fs });
+                                                              reporter, job, fs });
     } catch (Exception nsm) {
       throw new RuntimeException(nsm);
     }
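
Taken together, the two hunks above show the standard reflective construction pattern: resolve a constructor by its exact parameter types, then instantiate it with positionally matching arguments. The same pattern reduced to a self-contained sketch, with a hypothetical target class and signature:

    import java.lang.reflect.Constructor;

    public class ReflectiveFactory {
      // Mirrors the getConstructor/newInstance pair in StreamInputFormat:
      // instantiate cls via its (String, int) constructor.
      static Object create(Class cls, String name, int size) {
        try {
          Constructor ctor = cls.getConstructor(new Class[] { String.class, int.class });
          return ctor.newInstance(new Object[] { name, new Integer(size) });
        } catch (Exception e) {
          throw new RuntimeException(e); // NoSuchMethodException, InvocationTargetException, ...
        }
      }
    }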


