hadoop-common-commits mailing list archives

From: cutt...@apache.org
Subject: svn commit: r530556 [1/12] - in /lucene/hadoop/trunk: ./ src/contrib/abacus/src/java/org/apache/hadoop/abacus/ src/contrib/hbase/src/java/org/apache/hadoop/hbase/ src/contrib/hbase/src/test/org/apache/hadoop/hbase/ src/contrib/streaming/src/java/org/ap...
Date: Thu, 19 Apr 2007 21:34:53 GMT
Author: cutting
Date: Thu Apr 19 14:34:41 2007
New Revision: 530556

URL: http://svn.apache.org/viewvc?view=rev&rev=530556
Log:
HADOOP-1148.  More indentation and spacing fixes.
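
As context for the diffs that follow, the conventions being applied are: a space between "if" / "else if" and the opening parenthesis, no space after the "!" operator, no space before the closing parenthesis of iterator-style for loops, and a smaller continuation indent for throws clauses. The snippet below is an illustrative sketch only; StyleExample and its members are hypothetical and not part of this patch.

  // Illustrative sketch only: a hypothetical class collecting the spacing
  // conventions visible in the diffs below.
  import java.io.IOException;
  import java.util.Iterator;
  import java.util.Vector;

  public class StyleExample {

    // The throws clause keeps a four-space continuation indent
    // ("    throws ..." rather than "      throws ...").
    public static int countNonEmpty(Vector<String> values)
      throws IOException {
      // "if(" becomes "if (" and "! x" becomes "!x".
      if (values == null || !(values.size() > 0)) {
        throw new IOException("no values given");
      }
      int count = 0;
      // Iterator-style for loops drop the space before the closing
      // parenthesis: "it.hasNext(); )" becomes "it.hasNext();)".
      for(Iterator<String> it = values.iterator(); it.hasNext();) {
        if (it.next().length() != 0) {
          count++;
        }
      }
      return count;
    }
  }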

Modified:
    lucene/hadoop/trunk/build.xml
    lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJob.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/Environment.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java
    lucene/hadoop/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java
    lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java
    lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java
    lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java
    lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/conf/Configuration.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Block.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/BlockCommand.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSFileInfo.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataStorage.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeProtocol.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeRegistration.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DisallowedDatanodeException.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSEditLog.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/IncorrectVersionException.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamespaceInfo.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SafeModeException.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/filecache/DistributedCache.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/DF.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FSDataInputStream.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FSDataOutputStream.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileUtil.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FilterFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FsShell.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/InMemoryFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/LocalFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/Path.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/RawLocalFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/s3/S3FileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/s3/S3InputStream.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/s3/S3OutputStream.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/BytesWritable.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/GenericWritable.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/MapFile.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/ObjectWritable.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/SequenceFile.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/Text.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/VersionedWritable.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/WritableComparator.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/WritableUtils.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/LzoCodec.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/lzo/LzoCompressor.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/lzo/LzoDecompressor.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/io/retry/RetryPolicies.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Client.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/RPC.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Server.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/SocketChannelOutputStream.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/Counters.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/FileInputFormat.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/FileSplit.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/IsolationRunner.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobClient.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobConf.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobEndNotifier.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobHistory.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/KeyValueTextInputFormat.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/MapFileOutputFormat.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/MapOutputLocation.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/MapRunner.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/OutputFormat.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/OutputFormatBase.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/PhasedFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/ReduceTask.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/SequenceFileInputFilter.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/SequenceFileOutputFormat.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/SequenceFileRecordReader.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/StatusHttpServer.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/Task.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskLogAppender.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskReport.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskRunner.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskStatus.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTrackerAction.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TextInputFormat.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TextOutputFormat.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/jobcontrol/Job.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/MetricsUtil.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NetworkTopology.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NodeBase.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/Index.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/XmlRecordInput.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JMap.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JRecord.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JVector.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JavaGenerator.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/generated/Rcc.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/tools/Logalyzer.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/CopyFiles.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/DiskChecker.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/MergeSort.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/PriorityQueue.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/RunJar.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/StringUtils.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/Tool.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/ToolBase.java
    lucene/hadoop/trunk/src/test/checkstyle.xml
    lucene/hadoop/trunk/src/test/org/apache/hadoop/conf/TestConfiguration.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/DFSTestUtil.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHost2NodesMap.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestLocalDFS.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/AccumulatingReducer.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DFSCIOTest.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DistributedFSCheck.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/IOMapperBase.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestCopyFiles.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestGlobPaths.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemTest.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystem.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestBytesWritable.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestMD5Hash.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSequenceFile.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestText.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestVersionedWritable.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/retry/TestRetryProxy.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/HadoopTestCase.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRBench.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRCaching.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/NotificationTestCase.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMapRed.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/net/TestNetworkTopology.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordMR.java

Modified: lucene/hadoop/trunk/build.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/build.xml?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/build.xml (original)
+++ lucene/hadoop/trunk/build.xml Thu Apr 19 14:34:41 2007
@@ -482,7 +482,7 @@
   	
   	<checkstyle config="${test.src.dir}/checkstyle.xml"
   		failOnViolation="false">
-      <fileset dir="${src.dir}" includes="**/*.java"/>
+      <fileset dir="${src.dir}" includes="**/*.java" excludes="**/generated/**"/>
       <formatter type="xml" toFile="${test.build.dir}/checkstyle-errors.xml"/>
   	</checkstyle>
   	

Modified: lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJob.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJob.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJob.java (original)
+++ lucene/hadoop/trunk/src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJob.java Thu Apr 19 14:34:41 2007
@@ -80,7 +80,7 @@
 public class ValueAggregatorJob {
 
   public static JobControl createValueAggregatorJobs(String args[])
-      throws IOException {
+    throws IOException {
     JobControl theControl = new JobControl("ValueAggregatorJobs");
     ArrayList dependingJobs = new ArrayList();
     JobConf aJobConf = createValueAggregatorJob(args);
@@ -98,7 +98,7 @@
    * @throws IOException
    */
   public static JobConf createValueAggregatorJob(String args[])
-      throws IOException {
+    throws IOException {
 
     if (args.length < 2) {
       System.out.println("usage: inputDirs outDir [numOfReducer [textinputformat|seq [specfile [jobName]]]]");

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java Thu Apr 19 14:34:41 2007
@@ -60,17 +60,17 @@
       String column = col.toString();
       try {
         int colpos = column.indexOf(":") + 1;
-        if(colpos == 0) {
+        if (colpos == 0) {
           throw new IllegalArgumentException("Column name has no family indicator.");
         }
 
         String columnkey = column.substring(colpos);
 
-        if(columnkey == null || columnkey.length() == 0) {
+        if (columnkey == null || columnkey.length() == 0) {
           this.matchType = MATCH_TYPE.FAMILY_ONLY;
           this.family = column.substring(0, colpos);
 
-        } else if(isRegexPattern.matcher(columnkey).matches()) {
+        } else if (isRegexPattern.matcher(columnkey).matches()) {
           this.matchType = MATCH_TYPE.REGEX;
           this.columnMatcher = Pattern.compile(column);
 
@@ -86,13 +86,13 @@
     // Matching method
     
     boolean matches(Text col) throws IOException {
-      if(this.matchType == MATCH_TYPE.SIMPLE) {
+      if (this.matchType == MATCH_TYPE.SIMPLE) {
         return col.equals(this.col);
         
-      } else if(this.matchType == MATCH_TYPE.FAMILY_ONLY) {
+      } else if (this.matchType == MATCH_TYPE.FAMILY_ONLY) {
         return col.toString().startsWith(this.family);
         
-      } else if(this.matchType == MATCH_TYPE.REGEX) {
+      } else if (this.matchType == MATCH_TYPE.REGEX) {
         return this.columnMatcher.matcher(col.toString()).matches();
         
       } else {
@@ -121,7 +121,7 @@
     for(int i = 0; i < targetCols.length; i++) {
       Text family = HStoreKey.extractFamily(targetCols[i]);
       Vector<ColumnMatcher> matchers = okCols.get(family);
-      if(matchers == null) {
+      if (matchers == null) {
         matchers = new Vector<ColumnMatcher>();
       }
       matchers.add(new ColumnMatcher(targetCols[i]));
@@ -144,11 +144,11 @@
     Text column = keys[i].getColumn();
     Text family = HStoreKey.extractFamily(column);
     Vector<ColumnMatcher> matchers = okCols.get(family);
-    if(matchers == null) {
+    if (matchers == null) {
       return false;
     }
     for(int m = 0; m < matchers.size(); m++) {
-      if(matchers.get(m).matches(column)) {
+      if (matchers.get(m).matches(column)) {
         return true;
       }
     }
@@ -203,7 +203,7 @@
     // Grab all the values that match this row/timestamp
 
     boolean insertedItem = false;
-    if(chosenRow != null) {
+    if (chosenRow != null) {
       key.setRow(chosenRow);
       key.setVersion(chosenTimestamp);
       key.setColumn(new Text(""));
@@ -215,7 +215,7 @@
               && (keys[i].getRow().compareTo(chosenRow) == 0)
               && (keys[i].getTimestamp() == chosenTimestamp)) {
 
-          if(columnMatch(i)) {
+          if (columnMatch(i)) {
             outbuf.reset();
             vals[i].write(outbuf);
             byte byteresults[] = outbuf.getData();
@@ -226,7 +226,7 @@
             insertedItem = true;
           }
 
-          if (! getNext(i)) {
+          if (!getNext(i)) {
             closeSubScanner(i);
           }
         }
@@ -237,7 +237,7 @@
         while((keys[i] != null)
               && ((keys[i].getRow().compareTo(chosenRow) <= 0)
                   || (keys[i].getTimestamp() > this.timestamp)
-                  || (! columnMatch(i)))) {
+                  || (!columnMatch(i)))) {
 
           getNext(i);
         }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java Thu Apr 19 14:34:41 2007
@@ -95,12 +95,12 @@
   }
 
   public synchronized void openTable(Text tableName) throws IOException {
-    if(closed) {
+    if (closed) {
       throw new IllegalStateException("client is not open");
     }
 
     tableServers = tablesToServers.get(tableName);
-    if(tableServers == null ) {                 // We don't know where the table is
+    if (tableServers == null) {                 // We don't know where the table is
       findTableInMeta(tableName);               // Load the information from meta
     }
   }
@@ -108,9 +108,9 @@
   private void findTableInMeta(Text tableName) throws IOException {
     TreeMap<Text, TableInfo> metaServers = tablesToServers.get(META_TABLE_NAME);
     
-    if(metaServers == null) {                   // Don't know where the meta is
+    if (metaServers == null) {                   // Don't know where the meta is
       loadMetaFromRoot(tableName);
-      if(tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
+      if (tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
         // All we really wanted was the meta or root table
         return;
       }
@@ -119,7 +119,7 @@
 
     tableServers = new TreeMap<Text, TableInfo>();
     for(Iterator<TableInfo> i = metaServers.tailMap(tableName).values().iterator();
-        i.hasNext(); ) {
+        i.hasNext();) {
       
       TableInfo t = i.next();
       
@@ -133,7 +133,7 @@
    */
   private void loadMetaFromRoot(Text tableName) throws IOException {
     locateRootRegion();
-    if(tableName.equals(ROOT_TABLE_NAME)) {   // All we really wanted was the root
+    if (tableName.equals(ROOT_TABLE_NAME)) {   // All we really wanted was the root
       return;
     }
     scanRoot();
@@ -144,7 +144,7 @@
    * could be.
    */
   private void locateRootRegion() throws IOException {
-    if(master == null) {
+    if (master == null) {
       master = (HMasterInterface)RPC.getProxy(HMasterInterface.class, 
                                               HMasterInterface.versionID,
                                               masterLocation.getInetSocketAddress(), conf);
@@ -157,7 +157,7 @@
       while(rootRegionLocation == null && localTimeouts < numTimeouts) {
         rootRegionLocation = master.findRootRegion();
 
-        if(rootRegionLocation == null) {
+        if (rootRegionLocation == null) {
           try {
             Thread.sleep(clientTimeout);
 
@@ -166,7 +166,7 @@
           localTimeouts++;
         }
       }
-      if(rootRegionLocation == null) {
+      if (rootRegionLocation == null) {
         throw new IOException("Timed out trying to locate root region");
       }
       
@@ -174,7 +174,7 @@
       
       HRegionInterface rootRegion = getHRegionConnection(rootRegionLocation);
 
-      if(rootRegion.getRegionInfo(rootRegionInfo.regionName) != null) {
+      if (rootRegion.getRegionInfo(rootRegionInfo.regionName) != null) {
         tableServers = new TreeMap<Text, TableInfo>();
         tableServers.put(startRow, new TableInfo(rootRegionInfo, rootRegionLocation));
         tablesToServers.put(ROOT_TABLE_NAME, tableServers);
@@ -184,7 +184,7 @@
       
     } while(rootRegionLocation == null && tries++ < numRetries);
     
-    if(rootRegionLocation == null) {
+    if (rootRegionLocation == null) {
       closed = true;
       throw new IOException("unable to locate root region server");
     }
@@ -220,7 +220,7 @@
         HRegionInfo regionInfo = new HRegionInfo();
         regionInfo.readFields(inbuf);
         
-        if(! regionInfo.tableDesc.getName().equals(tableName)) {
+        if (!regionInfo.tableDesc.getName().equals(tableName)) {
           // We're done
           break;
         }
@@ -245,7 +245,7 @@
 
     HRegionInterface server = servers.get(regionServer.toString());
     
-    if(server == null) {                                // Get a connection
+    if (server == null) {                                // Get a connection
       
       server = (HRegionInterface)RPC.waitForProxy(HRegionInterface.class, 
                                                   HRegionInterface.versionID, regionServer.getInetSocketAddress(), conf);
@@ -257,7 +257,7 @@
 
   /** Close the connection to the HRegionServer */
   public synchronized void close() throws IOException {
-    if(! closed) {
+    if (!closed) {
       RPC.stopClient();
       closed = true;
     }
@@ -274,13 +274,13 @@
     TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();
     
     TreeMap<Text, TableInfo> metaTables = tablesToServers.get(META_TABLE_NAME);
-    if(metaTables == null) {
+    if (metaTables == null) {
       // Meta is not loaded yet so go do that
       loadMetaFromRoot(META_TABLE_NAME);
       metaTables = tablesToServers.get(META_TABLE_NAME);
     }
 
-    for(Iterator<TableInfo>i = metaTables.values().iterator(); i.hasNext(); ) {
+    for(Iterator<TableInfo>i = metaTables.values().iterator(); i.hasNext();) {
       TableInfo t = i.next();
       HRegionInterface server = getHRegionConnection(t.serverAddress);
       HScannerInterface scanner = null;
@@ -297,7 +297,7 @@
 
           // Only examine the rows where the startKey is zero length
           
-          if(info.startKey.getLength() == 0) {
+          if (info.startKey.getLength() == 0) {
             uniqueTables.add(info.tableDesc);
           }
           results.clear();
@@ -311,7 +311,7 @@
   }
 
   private TableInfo getTableInfo(Text row) {
-    if(tableServers == null) {
+    if (tableServers == null) {
       throw new IllegalStateException("Must open table first");
     }
     
@@ -335,7 +335,7 @@
                                                                           info.regionInfo.regionName, row, column, numVersions);
     
     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0 ; i < values.length; i++) {
+    for(int i = 0; i < values.length; i++) {
       bytes.add(values[i].get());
     }
     return bytes.toArray(new byte[values.length][]);
@@ -351,7 +351,7 @@
                                                                           info.regionInfo.regionName, row, column, timestamp, numVersions);
     
     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0 ; i < values.length; i++) {
+    for(int i = 0; i < values.length; i++) {
       bytes.add(values[i].get());
     }
     return bytes.toArray(new byte[values.length][]);
@@ -369,7 +369,7 @@
    * Return the specified columns.
    */
   public HScannerInterface obtainScanner(Text[] columns, Text startRow) throws IOException {
-    if(tableServers == null) {
+    if (tableServers == null) {
       throw new IllegalStateException("Must open table first");
     }
     return new ClientScanner(columns, startRow);
@@ -481,11 +481,11 @@
      * Returns false if there are no more scanners.
      */
     private boolean nextScanner() throws IOException {
-      if(scanner != null) {
+      if (scanner != null) {
         scanner.close();
       }
       currentRegion += 1;
-      if(currentRegion == regions.length) {
+      if (currentRegion == regions.length) {
         close();
         return false;
       }
@@ -505,13 +505,13 @@
      * @see org.apache.hadoop.hbase.HScannerInterface#next(org.apache.hadoop.hbase.HStoreKey, java.util.TreeMap)
      */
     public boolean next(HStoreKey key, TreeMap<Text, byte[]> results) throws IOException {
-      if(closed) {
+      if (closed) {
         return false;
       }
       boolean status = scanner.next(key, results);
-      if(! status) {
+      if (!status) {
         status = nextScanner();
-        if(status) {
+        if (status) {
           status = scanner.next(key, results);
         }
       }
@@ -522,7 +522,7 @@
      * @see org.apache.hadoop.hbase.HScannerInterface#close()
      */
     public void close() throws IOException {
-      if(scanner != null) {
+      if (scanner != null) {
         scanner.close();
       }
       server = null;

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java Thu Apr 19 14:34:41 2007
@@ -101,12 +101,12 @@
       newlog.close();
     }
     
-    if(fs.exists(srcDir)) {
+    if (fs.exists(srcDir)) {
       
-      if(! fs.delete(srcDir)) {
+      if (!fs.delete(srcDir)) {
         LOG.error("Cannot delete: " + srcDir);
         
-        if(! FileUtil.fullyDelete(new File(srcDir.toString()))) {
+        if (!FileUtil.fullyDelete(new File(srcDir.toString()))) {
           throw new IOException("Cannot delete: " + srcDir);
         }
       }
@@ -127,7 +127,7 @@
     this.conf = conf;
     this.logSeqNum = 0;
 
-    if(fs.exists(dir)) {
+    if (fs.exists(dir)) {
       throw new IOException("Target HLog directory already exists: " + dir);
     }
     fs.mkdirs(dir);
@@ -154,7 +154,7 @@
 
       Vector<Path> toDeleteList = new Vector<Path>();
       synchronized(this) {
-        if(closed) {
+        if (closed) {
           throw new IOException("Cannot roll log; log is closed");
         }
 
@@ -174,10 +174,10 @@
 
         // Close the current writer (if any), and grab a new one.
         
-        if(writer != null) {
+        if (writer != null) {
           writer.close();
           
-          if(filenum > 0) {
+          if (filenum > 0) {
             outputfiles.put(logSeqNum-1, computeFilename(filenum-1));
           }
         }
@@ -192,10 +192,10 @@
         // over all the regions.
 
         long oldestOutstandingSeqNum = Long.MAX_VALUE;
-        for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext(); ) {
+        for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext();) {
           long curSeqNum = it.next().longValue();
           
-          if(curSeqNum < oldestOutstandingSeqNum) {
+          if (curSeqNum < oldestOutstandingSeqNum) {
             oldestOutstandingSeqNum = curSeqNum;
           }
         }
@@ -205,10 +205,10 @@
 
         LOG.debug("removing old log files");
         
-        for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext();) {
           long maxSeqNum = it.next().longValue();
           
-          if(maxSeqNum < oldestOutstandingSeqNum) {
+          if (maxSeqNum < oldestOutstandingSeqNum) {
             Path p = outputfiles.get(maxSeqNum);
             it.remove();
             toDeleteList.add(p);
@@ -221,7 +221,7 @@
 
       // Actually delete them, if any!
 
-      for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext(); ) {
+      for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext();) {
         Path p = it.next();
         fs.delete(p);
       }
@@ -262,7 +262,7 @@
    * We need to seize a lock on the writer so that writes are atomic.
    */
   public synchronized void append(Text regionName, Text tableName, Text row, TreeMap<Text, byte[]> columns, long timestamp) throws IOException {
-    if(closed) {
+    if (closed) {
       throw new IOException("Cannot append; log is closed");
     }
     
@@ -273,12 +273,12 @@
     // that don't have any flush yet, the relevant operation is the
     // first one that's been added.
     
-    if(regionToLastFlush.get(regionName) == null) {
+    if (regionToLastFlush.get(regionName) == null) {
       regionToLastFlush.put(regionName, seqNum[0]);
     }
 
     int counter = 0;
-    for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext();) {
       Text column = it.next();
       byte[] val = columns.get(column);
       HLogKey logKey = new HLogKey(regionName, tableName, row, seqNum[counter++]);
@@ -333,11 +333,11 @@
 
   /** Complete the cache flush */
   public synchronized void completeCacheFlush(Text regionName, Text tableName, long logSeqId) throws IOException {
-    if(closed) {
+    if (closed) {
       return;
     }
     
-    if(! insideCacheFlush) {
+    if (!insideCacheFlush) {
       throw new IOException("Impossible situation: inside completeCacheFlush(), but 'insideCacheFlush' flag is false");
     }
     

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java Thu Apr 19 14:34:41 2007
@@ -80,10 +80,10 @@
     HLogKey other = (HLogKey) o;
     int result = this.regionName.compareTo(other.regionName);
     
-    if(result == 0) {
+    if (result == 0) {
       result = this.row.compareTo(other.row);
       
-      if(result == 0) {
+      if (result == 0) {
         
         if (this.logSeqNum < other.logSeqNum) {
           result = -1;

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java Thu Apr 19 14:34:41 2007
@@ -108,7 +108,7 @@
       };
       Text firstRow = new Text();
   
-      while((! closed)) {
+      while((!closed)) {
         int metaRegions = 0;
         while(rootRegionLocation == null) {
           try {
@@ -155,8 +155,8 @@
             HServerInfo storedInfo = null;
             synchronized(serversToServerInfo) {
               storedInfo = serversToServerInfo.get(serverName);
-              if(storedInfo == null
-                 || storedInfo.getStartCode() != startCode) {
+              if (storedInfo == null
+                  || storedInfo.getStartCode() != startCode) {
               
                 // The current assignment is no good; load the region.
   
@@ -261,8 +261,8 @@
           HServerInfo storedInfo = null;
           synchronized(serversToServerInfo) {
             storedInfo = serversToServerInfo.get(serverName);
-            if(storedInfo == null
-               || storedInfo.getStartCode() != startCode) {
+            if (storedInfo == null
+                || storedInfo.getStartCode() != startCode) {
             
               // The current assignment is no good; load the region.
 
@@ -285,16 +285,16 @@
     }
 
     public void run() {
-      while((! closed)) {
+      while((!closed)) {
         MetaRegion region = null;
         
         while(region == null) {
           synchronized(metaRegionsToScan) {
-            if(metaRegionsToScan.size() != 0) {
+            if (metaRegionsToScan.size() != 0) {
               region = metaRegionsToScan.remove(0);
             }
           }
-          if(region == null) {
+          if (region == null) {
             try {
               metaRegionsToScan.wait();
               
@@ -307,7 +307,7 @@
         
         synchronized(knownMetaRegions) {
           knownMetaRegions.put(region.startKey, region);
-          if(rootScanned && knownMetaRegions.size() == numMetaRegions) {
+          if (rootScanned && knownMetaRegions.size() == numMetaRegions) {
             allMetaRegionsScanned = true;
             allMetaRegionsScanned.notifyAll();
           }
@@ -319,7 +319,7 @@
           
           } catch(InterruptedException ex) {
           }
-          if(! allMetaRegionsScanned) {
+          if (!allMetaRegionsScanned) {
             break;                              // A region must have split
           }
           
@@ -328,7 +328,7 @@
           Vector<MetaRegion> v = new Vector<MetaRegion>();
           v.addAll(knownMetaRegions.values());
           
-          for(Iterator<MetaRegion> i = v.iterator(); i.hasNext(); ) {
+          for(Iterator<MetaRegion> i = v.iterator(); i.hasNext();) {
             scanRegion(i.next());
           }
         } while(true);
@@ -391,12 +391,12 @@
 
     // Make sure the root directory exists!
     
-    if(! fs.exists(dir)) {
+    if (!fs.exists(dir)) {
       fs.mkdirs(dir);
     }
 
     Path rootRegionDir = HStoreFile.getHRegionDir(dir, rootRegionInfo.regionName);
-    if(! fs.exists(rootRegionDir)) {
+    if (!fs.exists(rootRegionDir)) {
       
       // Bootstrap! Need to create the root region and the first meta region.
       //TODO is the root region self referential?
@@ -521,7 +521,7 @@
     synchronized(serversToServerInfo) {
       storedInfo = serversToServerInfo.get(server);
         
-      if(storedInfo != null) {
+      if (storedInfo != null) {
         serversToServerInfo.remove(server);
 
         synchronized(msgQueue) {
@@ -548,7 +548,7 @@
     synchronized(serversToServerInfo) {
       HServerInfo storedInfo = serversToServerInfo.get(server);
       
-      if(storedInfo == null) {
+      if (storedInfo == null) {
         
         // The HBaseMaster may have been restarted.
         // Tell the RegionServer to start over and call regionServerStartup()
@@ -557,7 +557,7 @@
         returnMsgs[0] = new HMsg(HMsg.MSG_CALL_SERVER_STARTUP);
         return returnMsgs;
         
-      } else if(storedInfo.getStartCode() != serverInfo.getStartCode()) {
+      } else if (storedInfo.getStartCode() != serverInfo.getStartCode()) {
         
         // This state is reachable if:
         //
@@ -597,9 +597,9 @@
     // Process the kill list
     
     TreeMap<Text, HRegionInfo> regionsToKill = killList.get(info.toString());
-    if(regionsToKill != null) {
+    if (regionsToKill != null) {
       for(Iterator<HRegionInfo> i = regionsToKill.values().iterator();
-          i.hasNext(); ) {
+          i.hasNext();) {
         
         returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE_AND_DELETE, i.next()));
       }
@@ -616,7 +616,7 @@
         case HMsg.MSG_REPORT_OPEN:
           HRegionInfo regionInfo = unassignedRegions.get(region.regionName);
 
-          if(regionInfo == null) {
+          if (regionInfo == null) {
 
             // This Region should not have been opened.
             // Ask the server to shut it down, but don't report it as closed.  
@@ -632,7 +632,7 @@
             unassignedRegions.remove(region.regionName);
             assignAttempts.remove(region.regionName);
 
-            if(region.regionName.compareTo(rootRegionInfo.regionName) == 0) {
+            if (region.regionName.compareTo(rootRegionInfo.regionName) == 0) {
 
               // Store the Root Region location (in memory)
 
@@ -643,7 +643,7 @@
               rootRegionLocation.notifyAll();
               break;
               
-            } else if(region.regionName.find(META_TABLE_NAME.toString()) == 0) {
+            } else if (region.regionName.find(META_TABLE_NAME.toString()) == 0) {
 
               // It's a meta region. Put it on the queue to be scanned.
               
@@ -668,7 +668,7 @@
           break;
 
         case HMsg.MSG_REPORT_CLOSE:
-          if(region.regionName.compareTo(rootRegionInfo.regionName) == 0) { // Root region
+          if (region.regionName.compareTo(rootRegionInfo.regionName) == 0) { // Root region
             rootRegionLocation = null;
             unassignedRegions.put(region.regionName, region);
             assignAttempts.put(region.regionName, 0L);
@@ -676,10 +676,10 @@
           } else {
             boolean reassignRegion = true;
             
-            if(regionsToKill.containsKey(region.regionName)) {
+            if (regionsToKill.containsKey(region.regionName)) {
               regionsToKill.remove(region.regionName);
               
-              if(regionsToKill.size() > 0) {
+              if (regionsToKill.size() > 0) {
                 killList.put(info.toString(), regionsToKill);
                 
               } else {
@@ -701,7 +701,7 @@
           break;
 
         case HMsg.MSG_NEW_REGION:
-          if(region.regionName.find(META_TABLE_NAME.toString()) == 0) {
+          if (region.regionName.find(META_TABLE_NAME.toString()) == 0) {
             // A meta region has split.
             
             allMetaRegionsScanned = false;
@@ -720,7 +720,7 @@
 
       // Figure out what the RegionServer ought to do, and write back.
 
-      if(unassignedRegions.size() > 0) {
+      if (unassignedRegions.size() > 0) {
 
         // Open new regions as necessary
 
@@ -731,20 +731,20 @@
         long now = System.currentTimeMillis();
 
         for(Iterator<Text> it = unassignedRegions.keySet().iterator();
-            it.hasNext(); ) {
+            it.hasNext();) {
 
           Text curRegionName = it.next();
           HRegionInfo regionInfo = unassignedRegions.get(curRegionName);
           long assignedTime = assignAttempts.get(curRegionName);
 
-          if(now - assignedTime > maxRegionOpenTime) {
+          if (now - assignedTime > maxRegionOpenTime) {
             returnMsgs.add(new HMsg(HMsg.MSG_REGION_OPEN, regionInfo));
 
             assignAttempts.put(curRegionName, now);
             counter++;
           }
 
-          if(counter >= targetForServer) {
+          if (counter >= targetForServer) {
             break;
           }
         }
@@ -762,7 +762,7 @@
     }
     
     public void run() {
-      while(! closed) {
+      while(!closed) {
         PendingOperation op = null;
         
         synchronized(msgQueue) {
@@ -827,7 +827,7 @@
           byte serverBytes[] = results.get(META_COL_SERVER);
           String serverName = new String(serverBytes, UTF8_ENCODING);
 
-          if(deadServer.compareTo(serverName) != 0) {
+          if (deadServer.compareTo(serverName) != 0) {
             // This isn't the server you're looking for - move along
             continue;
           }
@@ -835,7 +835,7 @@
           byte startCodeBytes[] = results.get(META_COL_STARTCODE);
           long startCode = Long.decode(new String(startCodeBytes, UTF8_ENCODING));
 
-          if(oldStartCode != startCode) {
+          if (oldStartCode != startCode) {
             // Close but no cigar
             continue;
           }
@@ -869,7 +869,7 @@
       // Put all the regions we found on the unassigned region list
 
       for(Iterator<Map.Entry<Text, HRegionInfo>> i = regions.entrySet().iterator();
-          i.hasNext(); ) {
+          i.hasNext();) {
 
         Map.Entry<Text, HRegionInfo> e = i.next();
         Text region = e.getKey();
@@ -903,7 +903,7 @@
       
       scanMetaRegion(server, scanner, rootRegionInfo.regionName);
       for(Iterator<MetaRegion> i = knownMetaRegions.values().iterator();
-          i.hasNext(); ) {
+          i.hasNext();) {
         
         MetaRegion r = i.next();
 
@@ -929,7 +929,7 @@
       // If the region closing down is a meta region then we need to update
       // the ROOT table
       
-      if(this.regionInfo.regionName.find(metaTableDesc.getName().toString()) == 0) {
+      if (this.regionInfo.regionName.find(metaTableDesc.getName().toString()) == 0) {
         this.rootRegion = true;
         
       } else {
@@ -954,7 +954,7 @@
 
       Text metaRegionName;
       HRegionInterface server;
-      if(rootRegion) {
+      if (rootRegion) {
         metaRegionName = rootRegionInfo.regionName;
         server = client.getHRegionConnection(rootRegionLocation);
         
@@ -969,7 +969,7 @@
       server.delete(metaRegionName, clientId, lockid, META_COL_STARTCODE);
       server.commit(metaRegionName, clientId, lockid);
       
-      if(reassignRegion) {
+      if (reassignRegion) {
         synchronized(unassignedRegions) {
           unassignedRegions.put(regionInfo.regionName, regionInfo);
           assignAttempts.put(regionInfo.regionName, 0L);
@@ -986,7 +986,7 @@
     BytesWritable startCode;
     
     public PendingOpenReport(HServerInfo info, Text regionName) {
-      if(regionName.find(metaTableDesc.getName().toString()) == 0) {
+      if (regionName.find(metaTableDesc.getName().toString()) == 0) {
         
         // The region which just came on-line is a META region.
         // We need to look in the ROOT region for its information.
@@ -1030,7 +1030,7 @@
 
       Text metaRegionName;
       HRegionInterface server;
-      if(rootRegion) {
+      if (rootRegion) {
         metaRegionName = rootRegionInfo.regionName;
         server = client.getHRegionConnection(rootRegionLocation);
         
@@ -1074,13 +1074,13 @@
 
 
     BytesWritable bytes = server.get(metaRegionName, desc.getName(), META_COL_REGIONINFO);
-    if(bytes != null && bytes.getSize() != 0) {
+    if (bytes != null && bytes.getSize() != 0) {
       byte[] infoBytes = bytes.get();
       DataInputBuffer inbuf = new DataInputBuffer();
       inbuf.reset(infoBytes, infoBytes.length);
       HRegionInfo info = new HRegionInfo();
       info.readFields(inbuf);
-      if(info.tableDesc.getName().compareTo(desc.getName()) == 0) {
+      if (info.tableDesc.getName().compareTo(desc.getName()) == 0) {
         throw new IOException("table already exists");
       }
     }
@@ -1183,7 +1183,7 @@
     }
 
     for(Iterator<MetaRegion> i = knownMetaRegions.tailMap(tableName).values().iterator();
-        i.hasNext(); ) {
+        i.hasNext();) {
 
       // Find all the regions that make up this table
       
@@ -1206,7 +1206,7 @@
           HRegionInfo info = new HRegionInfo();
           info.readFields(inbuf);
 
-          if(info.tableDesc.getName().compareTo(tableName) > 0) {
+          if (info.tableDesc.getName().compareTo(tableName) > 0) {
             break;                      // Beyond any more entries for this table
           }
 
@@ -1220,12 +1220,12 @@
 
           synchronized(serversToServerInfo) {
             HServerInfo s = serversToServerInfo.get(serverName);
-            if(s != null && s.getStartCode() == startCode) {
+            if (s != null && s.getStartCode() == startCode) {
               
               // It is being served. Tell the server to stop it and not report back
               
               TreeMap<Text, HRegionInfo> regionsToKill = killList.get(serverName);
-              if(regionsToKill == null) {
+              if (regionsToKill == null) {
                 regionsToKill = new TreeMap<Text, HRegionInfo>();
               }
               regionsToKill.put(info.regionName, info);
@@ -1233,7 +1233,7 @@
             }
           }
         }
-        for(Iterator<Text> row = rowsToDelete.iterator(); row.hasNext(); ) {
+        for(Iterator<Text> row = rowsToDelete.iterator(); row.hasNext();) {
           long lockid = server.startUpdate(m.regionName, clientId, row.next());
           server.delete(m.regionName, clientId, lockid, columns[0]);
           server.commit(m.regionName, clientId, lockid);

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java Thu Apr 19 14:34:41 2007
@@ -65,10 +65,10 @@
 
     locking.obtainWriteLock();
     try {
-      if(snapshot != null) {
+      if (snapshot != null) {
         throw new IOException("Snapshot in progress!");
       }
-      if(memcache.size() == 0) {
+      if (memcache.size() == 0) {
         LOG.debug("memcache empty. Skipping snapshot");
         return retval;
       }
@@ -99,16 +99,16 @@
     locking.obtainWriteLock();
 
     try {
-      if(snapshot == null) {
+      if (snapshot == null) {
         throw new IOException("Snapshot not present!");
       }
       LOG.debug("deleting snapshot");
       
       for(Iterator<TreeMap<HStoreKey, BytesWritable>> it = history.iterator(); 
-          it.hasNext(); ) {
+          it.hasNext();) {
         
         TreeMap<HStoreKey, BytesWritable> cur = it.next();
-        if(snapshot == cur) {
+        if (snapshot == cur) {
           it.remove();
           break;
         }
@@ -130,7 +130,7 @@
   public void add(Text row, TreeMap<Text, byte[]> columns, long timestamp) {
     locking.obtainWriteLock();
     try {
-      for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext(); ) {
+      for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext();) {
         Text column = it.next();
         byte[] val = columns.get(column);
 
@@ -156,7 +156,7 @@
       results.addAll(0, result);
 
       for(int i = history.size()-1; i >= 0; i--) {
-        if(numVersions > 0 && results.size() >= numVersions) {
+        if (numVersions > 0 && results.size() >= numVersions) {
           break;
         }
         
@@ -164,7 +164,7 @@
         results.addAll(results.size(), result);
       }
       
-      if(results.size() == 0) {
+      if (results.size() == 0) {
         return null;
         
       } else {
@@ -203,16 +203,16 @@
     
     SortedMap<HStoreKey, BytesWritable> tailMap = map.tailMap(key);
     
-    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext();) {
       HStoreKey itKey = it.next();
       Text itCol = itKey.getColumn();
 
-      if(results.get(itCol) == null
-         && key.matchesWithoutColumn(itKey)) {
+      if (results.get(itCol) == null
+          && key.matchesWithoutColumn(itKey)) {
         BytesWritable val = tailMap.get(itKey);
         results.put(itCol, val.get());
         
-      } else if(key.getRow().compareTo(itKey.getRow()) > 0) {
+      } else if (key.getRow().compareTo(itKey.getRow()) > 0) {
         break;
       }
     }
@@ -232,15 +232,15 @@
     HStoreKey curKey = new HStoreKey(key.getRow(), key.getColumn(), key.getTimestamp());
     SortedMap<HStoreKey, BytesWritable> tailMap = map.tailMap(curKey);
 
-    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext();) {
       HStoreKey itKey = it.next();
       
-      if(itKey.matchesRowCol(curKey)) {
+      if (itKey.matchesRowCol(curKey)) {
         result.add(tailMap.get(itKey).get());
         curKey.setVersion(itKey.getTimestamp() - 1);
       }
       
-      if(numVersions > 0 && result.size() >= numVersions) {
+      if (numVersions > 0 && result.size() >= numVersions) {
         break;
       }
     }
@@ -266,7 +266,7 @@
     Iterator<HStoreKey> keyIterators[];
 
     @SuppressWarnings("unchecked")
-      public HMemcacheScanner(long timestamp, Text targetCols[], Text firstRow)
+    public HMemcacheScanner(long timestamp, Text targetCols[], Text firstRow)
       throws IOException {
       
       super(timestamp, targetCols);
@@ -276,7 +276,7 @@
         this.backingMaps = new TreeMap[history.size() + 1];
         int i = 0;
         for(Iterator<TreeMap<HStoreKey, BytesWritable>> it = history.iterator();
-            it.hasNext(); ) {
+            it.hasNext();) {
           
           backingMaps[i++] = it.next();
         }
@@ -290,7 +290,7 @@
 
         HStoreKey firstKey = new HStoreKey(firstRow);
         for(i = 0; i < backingMaps.length; i++) {
-          if(firstRow.getLength() != 0) {
+          if (firstRow.getLength() != 0) {
             keyIterators[i] = backingMaps[i].tailMap(firstKey).keySet().iterator();
             
           } else {
@@ -298,10 +298,10 @@
           }
           
           while(getNext(i)) {
-            if(! findFirstRow(i, firstRow)) {
+            if (!findFirstRow(i, firstRow)) {
               continue;
             }
-            if(columnMatch(i)) {
+            if (columnMatch(i)) {
               break;
             }
           }
@@ -331,7 +331,7 @@
      * @return - true if there is more data available
      */
     boolean getNext(int i) {
-      if(! keyIterators[i].hasNext()) {
+      if (!keyIterators[i].hasNext()) {
         closeSubScanner(i);
         return false;
       }
@@ -350,10 +350,10 @@
 
     /** Shut down map iterators, and release the lock */
     public void close() throws IOException {
-      if(! scannerClosed) {
+      if (!scannerClosed) {
         try {
           for(int i = 0; i < keys.length; i++) {
-            if(keyIterators[i] != null) {
+            if (keyIterators[i] != null) {
               closeSubScanner(i);
             }
           }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java Thu Apr 19 14:34:41 2007
@@ -61,21 +61,21 @@
     // Make sure that srcA comes first; important for key-ordering during
     // write of the merged file.
     
-    if(srcA.getStartKey() == null) {
-      if(srcB.getStartKey() == null) {
+    if (srcA.getStartKey() == null) {
+      if (srcB.getStartKey() == null) {
         throw new IOException("Cannot merge two regions with null start key");
       }
       // A's start key is null but B's isn't. Assume A comes before B
       
-    } else if((srcB.getStartKey() == null)         // A is not null but B is
-        || (srcA.getStartKey().compareTo(srcB.getStartKey()) > 0)) { // A > B
+    } else if ((srcB.getStartKey() == null)         // A is not null but B is
+               || (srcA.getStartKey().compareTo(srcB.getStartKey()) > 0)) { // A > B
       
       HRegion tmp = srcA;
       srcA = srcB;
       srcB = tmp;
     }
     
-    if (! srcA.getEndKey().equals(srcB.getStartKey())) {
+    if (!srcA.getEndKey().equals(srcB.getStartKey())) {
       throw new IOException("Cannot merge non-adjacent regions");
     }
 
@@ -89,7 +89,7 @@
     Text endKey = srcB.getEndKey();
 
     Path merges = new Path(srcA.getRegionDir(), MERGEDIR);
-    if(! fs.exists(merges)) {
+    if (!fs.exists(merges)) {
       fs.mkdirs(merges);
     }
     
@@ -98,14 +98,14 @@
     
     Path newRegionDir = HStoreFile.getHRegionDir(merges, newRegionInfo.regionName);
 
-    if(fs.exists(newRegionDir)) {
+    if (fs.exists(newRegionDir)) {
       throw new IOException("Cannot merge; target file collision at " + newRegionDir);
     }
 
     LOG.info("starting merge of regions: " + srcA.getRegionName() + " and " 
-        + srcB.getRegionName() + " new region start key is '" 
-        + (startKey == null ? "" : startKey) + "', end key is '" 
-        + (endKey == null ? "" : endKey) + "'");
+             + srcB.getRegionName() + " new region start key is '" 
+             + (startKey == null ? "" : startKey) + "', end key is '" 
+             + (endKey == null ? "" : endKey) + "'");
     
     // Flush each of the sources, and merge their files into a single 
     // target for each column family.
@@ -114,10 +114,10 @@
     
     TreeSet<HStoreFile> alreadyMerged = new TreeSet<HStoreFile>();
     TreeMap<Text, Vector<HStoreFile>> filesToMerge = new TreeMap<Text, Vector<HStoreFile>>();
-    for(Iterator<HStoreFile> it = srcA.flushcache(true).iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcA.flushcache(true).iterator(); it.hasNext();) {
       HStoreFile src = it.next();
       Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-      if(v == null) {
+      if (v == null) {
         v = new Vector<HStoreFile>();
         filesToMerge.put(src.getColFamily(), v);
       }
@@ -126,10 +126,10 @@
     
     LOG.debug("flushing and getting file names for region " + srcB.getRegionName());
     
-    for(Iterator<HStoreFile> it = srcB.flushcache(true).iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcB.flushcache(true).iterator(); it.hasNext();) {
       HStoreFile src = it.next();
       Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-      if(v == null) {
+      if (v == null) {
         v = new Vector<HStoreFile>();
         filesToMerge.put(src.getColFamily(), v);
       }
@@ -138,11 +138,11 @@
     
     LOG.debug("merging stores");
     
-    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext();) {
       Text colFamily = it.next();
       Vector<HStoreFile> srcFiles = filesToMerge.get(colFamily);
       HStoreFile dst = new HStoreFile(conf, merges, newRegionInfo.regionName, 
-          colFamily, Math.abs(rand.nextLong()));
+                                      colFamily, Math.abs(rand.nextLong()));
       
       dst.mergeStoreFiles(srcFiles, fs, conf);
       alreadyMerged.addAll(srcFiles);
@@ -153,15 +153,15 @@
     // of any last-minute inserts
 
     LOG.debug("flushing changes since start of merge for region " 
-        + srcA.getRegionName());
+              + srcA.getRegionName());
 
     filesToMerge.clear();
-    for(Iterator<HStoreFile> it = srcA.close().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcA.close().iterator(); it.hasNext();) {
       HStoreFile src = it.next();
       
-      if(! alreadyMerged.contains(src)) {
+      if (!alreadyMerged.contains(src)) {
         Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-        if(v == null) {
+        if (v == null) {
           v = new Vector<HStoreFile>();
           filesToMerge.put(src.getColFamily(), v);
         }
@@ -170,14 +170,14 @@
     }
     
     LOG.debug("flushing changes since start of merge for region " 
-        + srcB.getRegionName());
+              + srcB.getRegionName());
     
-    for(Iterator<HStoreFile> it = srcB.close().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcB.close().iterator(); it.hasNext();) {
       HStoreFile src = it.next();
       
-      if(! alreadyMerged.contains(src)) {
+      if (!alreadyMerged.contains(src)) {
         Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-        if(v == null) {
+        if (v == null) {
           v = new Vector<HStoreFile>();
           filesToMerge.put(src.getColFamily(), v);
         }
@@ -187,11 +187,11 @@
     
     LOG.debug("merging changes since start of merge");
     
-    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext();) {
       Text colFamily = it.next();
       Vector<HStoreFile> srcFiles = filesToMerge.get(colFamily);
       HStoreFile dst = new HStoreFile(conf, merges, newRegionInfo.regionName,
-          colFamily, Math.abs(rand.nextLong()));
+                                      colFamily, Math.abs(rand.nextLong()));
       
       dst.mergeStoreFiles(srcFiles, fs, conf);
     }
@@ -199,7 +199,7 @@
     // Done
     
     HRegion dstRegion = new HRegion(dir, log, fs, conf, newRegionInfo,
-        newRegionDir, null);
+                                    newRegionDir, null);
 
     // Get rid of merges directory
     
@@ -284,7 +284,7 @@
    * written-to before), then read it from the supplied path.
    */
   public HRegion(Path dir, HLog log, FileSystem fs, Configuration conf, 
-      HRegionInfo regionInfo, Path initialFiles, Path oldLogFile) throws IOException {
+                 HRegionInfo regionInfo, Path initialFiles, Path oldLogFile) throws IOException {
     
     this.dir = dir;
     this.log = log;
@@ -303,29 +303,29 @@
 
     // Move prefab HStore files into place (if any)
     
-    if(initialFiles != null && fs.exists(initialFiles)) {
+    if (initialFiles != null && fs.exists(initialFiles)) {
       fs.rename(initialFiles, regiondir);
     }
 
     // Load in all the HStores.
     
     for(Iterator<Text> it = this.regionInfo.tableDesc.families().iterator();
-        it.hasNext(); ) {
+        it.hasNext();) {
       
       Text colFamily = it.next();
       stores.put(colFamily, new HStore(dir, this.regionInfo.regionName, colFamily, 
-          this.regionInfo.tableDesc.getMaxVersions(), fs, oldLogFile, conf));
+                                       this.regionInfo.tableDesc.getMaxVersions(), fs, oldLogFile, conf));
     }
 
     // Get rid of any splits or merges that were lost in-progress
     
     Path splits = new Path(regiondir, SPLITDIR);
-    if(fs.exists(splits)) {
+    if (fs.exists(splits)) {
       fs.delete(splits);
     }
     
     Path merges = new Path(regiondir, MERGEDIR);
-    if(fs.exists(merges)) {
+    if (fs.exists(merges)) {
       fs.delete(merges);
     }
 
@@ -362,7 +362,7 @@
   public Vector<HStoreFile> close() throws IOException {
     boolean shouldClose = false;
     synchronized(writestate) {
-      if(writestate.closed) {
+      if (writestate.closed) {
         LOG.info("region " + this.regionInfo.regionName + " closed");
         return new Vector<HStoreFile>();
       }
@@ -376,13 +376,13 @@
       shouldClose = true;
     }
 
-    if(! shouldClose) {
+    if (!shouldClose) {
       return null;
       
     } else {
       LOG.info("closing region " + this.regionInfo.regionName);
       Vector<HStoreFile> allHStoreFiles = internalFlushcache();
-      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
         HStore store = it.next();
         store.close();
       }
@@ -406,8 +406,8 @@
    * Returns two brand-new (and open) HRegions
    */
   public HRegion[] closeAndSplit(Text midKey) throws IOException {
-    if(((regionInfo.startKey.getLength() != 0)
-        && (regionInfo.startKey.compareTo(midKey) > 0))
+    if (((regionInfo.startKey.getLength() != 0)
+         && (regionInfo.startKey.compareTo(midKey) > 0))
         || ((regionInfo.endKey.getLength() != 0)
             && (regionInfo.endKey.compareTo(midKey) < 0))) {
       throw new IOException("Region splitkey must lie within region boundaries.");
@@ -419,13 +419,13 @@
     // or compactions until close() is called.
     
     Path splits = new Path(regiondir, SPLITDIR);
-    if(! fs.exists(splits)) {
+    if (!fs.exists(splits)) {
       fs.mkdirs(splits);
     }
     
     long regionAId = Math.abs(rand.nextLong());
     HRegionInfo regionAInfo = new HRegionInfo(regionAId, regionInfo.tableDesc, 
-        regionInfo.startKey, midKey);
+                                              regionInfo.startKey, midKey);
         
     long regionBId = Math.abs(rand.nextLong());
     HRegionInfo regionBInfo
@@ -434,24 +434,24 @@
     Path dirA = HStoreFile.getHRegionDir(splits, regionAInfo.regionName);
     Path dirB = HStoreFile.getHRegionDir(splits, regionBInfo.regionName);
 
-    if(fs.exists(dirA) || fs.exists(dirB)) {
+    if (fs.exists(dirA) || fs.exists(dirB)) {
       throw new IOException("Cannot split; target file collision at " + dirA 
-          + " or " + dirB);
+                            + " or " + dirB);
     }
     
     TreeSet<HStoreFile> alreadySplit = new TreeSet<HStoreFile>();
     Vector<HStoreFile> hstoreFilesToSplit = flushcache(true);
-    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext();) {
       HStoreFile hsf = it.next();
       
       LOG.debug("splitting HStore " + hsf.getRegionName() + "/" + hsf.getColFamily()
-          + "/" + hsf.fileId());
+                + "/" + hsf.fileId());
 
       HStoreFile dstA = new HStoreFile(conf, splits, regionAInfo.regionName, 
-          hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                       hsf.getColFamily(), Math.abs(rand.nextLong()));
       
       HStoreFile dstB = new HStoreFile(conf, splits, regionBInfo.regionName, 
-          hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                       hsf.getColFamily(), Math.abs(rand.nextLong()));
       
       hsf.splitStoreFile(midKey, dstA, dstB, fs, conf);
       alreadySplit.add(hsf);
@@ -461,18 +461,18 @@
     // and copy the small remainder
     
     hstoreFilesToSplit = close();
-    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext();) {
       HStoreFile hsf = it.next();
       
-      if(! alreadySplit.contains(hsf)) {
+      if (!alreadySplit.contains(hsf)) {
         LOG.debug("splitting HStore " + hsf.getRegionName() + "/" + hsf.getColFamily()
-            + "/" + hsf.fileId());
+                  + "/" + hsf.fileId());
 
         HStoreFile dstA = new HStoreFile(conf, splits, regionAInfo.regionName, 
-            hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                         hsf.getColFamily(), Math.abs(rand.nextLong()));
         
         HStoreFile dstB = new HStoreFile(conf, splits, regionBInfo.regionName, 
-            hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                         hsf.getColFamily(), Math.abs(rand.nextLong()));
         
         hsf.splitStoreFile(midKey, dstA, dstB, fs, conf);
       }
@@ -494,7 +494,7 @@
     regions[1] = regionB;
     
     LOG.info("region split complete. new regions are: " + regions[0].getRegionName()
-        + ", " + regions[1].getRegionName());
+             + ", " + regions[1].getRegionName());
     
     return regions;
   }
@@ -565,10 +565,10 @@
     Text key = new Text();
     long maxSize = 0;
 
-    for(Iterator<HStore> i = stores.values().iterator(); i.hasNext(); ) {
+    for(Iterator<HStore> i = stores.values().iterator(); i.hasNext();) {
       long size = i.next().getLargestFileSize(key);
       
-      if(size > maxSize) {                      // Largest so far
+      if (size > maxSize) {                      // Largest so far
         maxSize = size;
         midKey.set(key);
       }
@@ -593,9 +593,9 @@
   public boolean compactStores() throws IOException {
     boolean shouldCompact = false;
     synchronized(writestate) {
-      if((! writestate.writesOngoing)
+      if ((!writestate.writesOngoing)
           && writestate.writesEnabled
-          && (! writestate.closed)
+          && (!writestate.closed)
           && recentCommits > MIN_COMMITS_FOR_COMPACTION) {
         
         writestate.writesOngoing = true;
@@ -603,14 +603,14 @@
       }
     }
 
-    if(! shouldCompact) {
+    if (!shouldCompact) {
       LOG.info("not compacting region " + this.regionInfo.regionName);
       return false;
       
     } else {
       try {
         LOG.info("starting compaction on region " + this.regionInfo.regionName);
-        for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+        for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
           HStore store = it.next();
           store.compact();
         }
@@ -632,7 +632,7 @@
    * only take if there have been a lot of uncommitted writes.
    */
   public void optionallyFlush() throws IOException {
-    if(commitsSinceFlush > maxUnflushedEntries) {
+    if (commitsSinceFlush > maxUnflushedEntries) {
       flushcache(false);
     }
   }
@@ -657,20 +657,20 @@
   public Vector<HStoreFile> flushcache(boolean disableFutureWrites) throws IOException {
     boolean shouldFlush = false;
     synchronized(writestate) {
-      if((! writestate.writesOngoing)
+      if ((!writestate.writesOngoing)
           && writestate.writesEnabled
-          && (! writestate.closed)) {
+          && (!writestate.closed)) {
         
         writestate.writesOngoing = true;
         shouldFlush = true;
         
-        if(disableFutureWrites) {
+        if (disableFutureWrites) {
           writestate.writesEnabled = false;
         }
       }
     }
     
-    if(! shouldFlush) {
+    if (!shouldFlush) {
       LOG.debug("not flushing cache for region " + this.regionInfo.regionName);
       return null;
       
@@ -731,8 +731,8 @@
     
     HMemcache.Snapshot retval = memcache.snapshotMemcacheForLog(log);
     TreeMap<HStoreKey, BytesWritable> memcacheSnapshot = retval.memcacheSnapshot;
-    if(memcacheSnapshot == null) {
-      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+    if (memcacheSnapshot == null) {
+      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
         HStore hstore = it.next();
         Vector<HStoreFile> hstoreFiles = hstore.getAllMapFiles();
         allHStoreFiles.addAll(0, hstoreFiles);
@@ -746,7 +746,7 @@
     
     LOG.debug("flushing memcache to HStores");
     
-    for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+    for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
       HStore hstore = it.next();
       Vector<HStoreFile> hstoreFiles 
         = hstore.flushCache(memcacheSnapshot, logCacheFlushId);
@@ -762,7 +762,7 @@
     LOG.debug("writing flush cache complete to log");
     
     log.completeCacheFlush(this.regionInfo.regionName,
-        regionInfo.tableDesc.getName(), logCacheFlushId);
+                           regionInfo.tableDesc.getName(), logCacheFlushId);
 
     // C. Delete the now-irrelevant memcache snapshot; its contents have been 
     //    dumped to disk-based HStores.
@@ -784,7 +784,7 @@
   /** Fetch a single data item. */
   public byte[] get(Text row, Text column) throws IOException {
     byte results[][] = get(row, column, Long.MAX_VALUE, 1);
-    if(results == null) {
+    if (results == null) {
       return null;
       
     } else {
@@ -799,9 +799,9 @@
 
   /** Fetch multiple versions of a single data item, with timestamp. */
   public byte[][] get(Text row, Text column, long timestamp, int numVersions) 
-      throws IOException {
+    throws IOException {
     
-    if(writestate.closed) {
+    if (writestate.closed) {
       throw new IOException("HRegion is closed.");
     }
 
@@ -830,7 +830,7 @@
     // Check the memcache
 
     byte[][] result = memcache.get(key, numVersions);
-    if(result != null) {
+    if (result != null) {
       return result;
     }
 
@@ -838,7 +838,7 @@
 
     Text colFamily = HStoreKey.extractFamily(key.getColumn());
     HStore targetStore = stores.get(colFamily);
-    if(targetStore == null) {
+    if (targetStore == null) {
       return null;
     }
     
@@ -859,7 +859,7 @@
     HStoreKey key = new HStoreKey(row, System.currentTimeMillis());
 
     TreeMap<Text, byte[]> memResult = memcache.getFull(key);
-    for(Iterator<Text> it = stores.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = stores.keySet().iterator(); it.hasNext();) {
       Text colFamily = it.next();
       HStore targetStore = stores.get(colFamily);
       targetStore.getFull(key, memResult);
@@ -879,7 +879,7 @@
 
     HStore storelist[] = new HStore[families.size()];
     int i = 0;
-    for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = families.iterator(); it.hasNext();) {
       Text family = it.next();
       storelist[i++] = stores.get(family);
     }
@@ -918,16 +918,16 @@
    * method.
    */
   public void put(long lockid, Text targetCol, byte[] val) throws IOException {
-    if(val.length == HStoreKey.DELETE_BYTES.length) {
+    if (val.length == HStoreKey.DELETE_BYTES.length) {
       boolean matches = true;
       for(int i = 0; i < val.length; i++) {
-        if(val[i] != HStoreKey.DELETE_BYTES[i]) {
+        if (val[i] != HStoreKey.DELETE_BYTES[i]) {
           matches = false;
           break;
         }
       }
       
-      if(matches) {
+      if (matches) {
         throw new IOException("Cannot insert value: " + val);
       }
     }
@@ -951,7 +951,7 @@
    */
   void localput(long lockid, Text targetCol, byte[] val) throws IOException {
     Text row = getRowFromLock(lockid);
-    if(row == null) {
+    if (row == null) {
       throw new IOException("No write lock for lockid " + lockid);
     }
 
@@ -964,13 +964,13 @@
       // This check makes sure that another thread from the client
       // hasn't aborted/committed the write-operation.
 
-      if(row != getRowFromLock(lockid)) {
+      if (row != getRowFromLock(lockid)) {
         throw new IOException("Locking error: put operation on lock " + lockid 
-            + " unexpected aborted by another thread");
+                              + " unexpected aborted by another thread");
       }
       
       TreeMap<Text, byte[]> targets = targetColumns.get(lockid);
-      if(targets == null) {
+      if (targets == null) {
         targets = new TreeMap<Text, byte[]>();
         targetColumns.put(lockid, targets);
       }
@@ -985,7 +985,7 @@
    */
   public void abort(long lockid) throws IOException {
     Text row = getRowFromLock(lockid);
-    if(row == null) {
+    if (row == null) {
       throw new IOException("No write lock for lockid " + lockid);
     }
     
@@ -998,9 +998,9 @@
       // This check makes sure another thread from the client
       // hasn't aborted/committed the write-operation.
       
-      if(row != getRowFromLock(lockid)) {
+      if (row != getRowFromLock(lockid)) {
         throw new IOException("Locking error: abort() operation on lock " 
-            + lockid + " unexpected aborted by another thread");
+                              + lockid + " unexpected aborted by another thread");
       }
       
       targetColumns.remove(lockid);
@@ -1021,7 +1021,7 @@
     // that repeated executions won't screw this up.
     
     Text row = getRowFromLock(lockid);
-    if(row == null) {
+    if (row == null) {
       throw new IOException("No write lock for lockid " + lockid);
     }
     
@@ -1035,7 +1035,7 @@
 
       long commitTimestamp = System.currentTimeMillis();
       log.append(regionInfo.regionName, regionInfo.tableDesc.getName(), row, 
-          targetColumns.get(lockid), commitTimestamp);
+                 targetColumns.get(lockid), commitTimestamp);
       
       memcache.add(row, targetColumns.get(lockid), commitTimestamp);
 
@@ -1054,25 +1054,25 @@
 
   /** Make sure this is a valid row for the HRegion */
   void checkRow(Text row) throws IOException {
-    if(((regionInfo.startKey.getLength() == 0)
-        || (regionInfo.startKey.compareTo(row) <= 0))
+    if (((regionInfo.startKey.getLength() == 0)
+         || (regionInfo.startKey.compareTo(row) <= 0))
         && ((regionInfo.endKey.getLength() == 0)
             || (regionInfo.endKey.compareTo(row) > 0))) {
       // all's well
       
     } else {
       throw new IOException("Requested row out of range for HRegion "
-          + regionInfo.regionName + ", startKey='" + regionInfo.startKey
-          + "', endKey='" + regionInfo.endKey + "', row='" + row + "'");
+                            + regionInfo.regionName + ", startKey='" + regionInfo.startKey
+                            + "', endKey='" + regionInfo.endKey + "', row='" + row + "'");
     }
   }
 
   /** Make sure this is a valid column for the current table */
   void checkFamily(Text family) throws IOException {
-    if(! regionInfo.tableDesc.hasFamily(family)) {
+    if (!regionInfo.tableDesc.hasFamily(family)) {
       throw new IOException("Requested column family " + family 
-          + " does not exist in HRegion " + regionInfo.regionName
-          + " for table " + regionInfo.tableDesc.getName());
+                            + " does not exist in HRegion " + regionInfo.regionName
+                            + " for table " + regionInfo.tableDesc.getName());
     }
   }
 
@@ -1150,7 +1150,7 @@
         keys[i] = new HStoreKey();
         resultSets[i] = new TreeMap<Text, byte[]>();
 
-        if(! scanners[i].next(keys[i], resultSets[i])) {
+        if (!scanners[i].next(keys[i], resultSets[i])) {
           closeScanner(i);
         }
       }
@@ -1167,7 +1167,7 @@
       Text chosenRow = null;
       long chosenTimestamp = -1;
       for(int i = 0; i < keys.length; i++) {
-        if(scanners[i] != null
+        if (scanners[i] != null
             && (chosenRow == null
                 || (keys[i].getRow().compareTo(chosenRow) < 0)
                 || ((keys[i].getRow().compareTo(chosenRow) == 0)
@@ -1181,21 +1181,21 @@
       // Store the key and results for each sub-scanner. Merge them as appropriate.
       
       boolean insertedItem = false;
-      if(chosenTimestamp > 0) {
+      if (chosenTimestamp > 0) {
         key.setRow(chosenRow);
         key.setVersion(chosenTimestamp);
         key.setColumn(new Text(""));
 
         for(int i = 0; i < scanners.length; i++) {        
           while((scanners[i] != null)
-              && (keys[i].getRow().compareTo(chosenRow) == 0)
-              && (keys[i].getTimestamp() == chosenTimestamp)) {
+                && (keys[i].getRow().compareTo(chosenRow) == 0)
+                && (keys[i].getTimestamp() == chosenTimestamp)) {
             
             results.putAll(resultSets[i]);
             insertedItem = true;
 
             resultSets[i].clear();
-            if(! scanners[i].next(keys[i], resultSets[i])) {
+            if (!scanners[i].next(keys[i], resultSets[i])) {
               closeScanner(i);
             }
           }
@@ -1204,10 +1204,10 @@
           // row label, then its timestamp is bad.  We need to advance it.
 
           while((scanners[i] != null)
-              && (keys[i].getRow().compareTo(chosenRow) <= 0)) {
+                && (keys[i].getRow().compareTo(chosenRow) <= 0)) {
             
             resultSets[i].clear();
-            if(! scanners[i].next(keys[i], resultSets[i])) {
+            if (!scanners[i].next(keys[i], resultSets[i])) {
               closeScanner(i);
             }
           }
@@ -1231,7 +1231,7 @@
     /** All done with the scanner. */
     public void close() throws IOException {
       for(int i = 0; i < scanners.length; i++) {
-        if(scanners[i] != null) {
+        if (scanners[i] != null) {
           closeScanner(i);
         }
       }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java Thu Apr 19 14:34:41 2007
@@ -42,19 +42,19 @@
     
     this.regionId = regionId;
     
-    if(tableDesc == null) {
+    if (tableDesc == null) {
       throw new IllegalArgumentException("tableDesc cannot be null");
     }
     
     this.tableDesc = tableDesc;
     
     this.startKey = new Text();
-    if(startKey != null) {
+    if (startKey != null) {
       this.startKey.set(startKey);
     }
     
     this.endKey = new Text();
-    if(endKey != null) {
+    if (endKey != null) {
       this.endKey.set(endKey);
     }
     


