hadoop-common-commits mailing list archives

From: cutt...@apache.org
Subject: svn commit: r532083 [4/4] - in /lucene/hadoop/trunk: ./ src/contrib/hbase/conf/ src/contrib/hbase/src/java/org/apache/hadoop/hbase/ src/contrib/hbase/src/test/org/apache/hadoop/hbase/
Date: Tue, 24 Apr 2007 21:13:10 GMT
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java Tue Apr 24 14:13:08 2007
@@ -61,7 +61,7 @@
   }
   
   public HStoreFile(Configuration conf, Path dir, Text regionName, 
-                    Text colFamily, long fileId) {
+      Text colFamily, long fileId) {
     
     this.conf = conf;
     this.dir = dir;
@@ -92,12 +92,12 @@
   
   public Path getMapFilePath() {
     return new Path(HStoreFile.getMapDir(dir, regionName, colFamily), 
-                    HSTORE_DATFILE_PREFIX + fileId);
+        HSTORE_DATFILE_PREFIX + fileId);
   }
   
   public Path getInfoFilePath() {
     return new Path(HStoreFile.getInfoDir(dir, regionName, colFamily), 
-                    HSTORE_INFOFILE_PREFIX + fileId);
+        HSTORE_INFOFILE_PREFIX + fileId);
   }
 
   // Static methods to build partial paths to internal directories.  Useful for 
@@ -105,17 +105,17 @@
   
   public static Path getMapDir(Path dir, Text regionName, Text colFamily) {
     return new Path(dir, new Path(HREGIONDIR_PREFIX + regionName, 
-                                  new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
+        new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
   }
 
   public static Path getInfoDir(Path dir, Text regionName, Text colFamily) {
     return new Path(dir, new Path(HREGIONDIR_PREFIX + regionName, 
-                                  new Path(colFamily.toString(), HSTORE_INFO_DIR)));
+        new Path(colFamily.toString(), HSTORE_INFO_DIR)));
   }
 
   public static Path getHStoreDir(Path dir, Text regionName, Text colFamily) {
     return new Path(dir, new Path(HREGIONDIR_PREFIX + regionName, 
-                                  colFamily.toString()));
+        colFamily.toString()));
   }
 
   public static Path getHRegionDir(Path dir, Text regionName) {
@@ -127,7 +127,7 @@
    * filesystem if the file already exists.
    */
   static HStoreFile obtainNewHStoreFile(Configuration conf, Path dir, 
-                                        Text regionName, Text colFamily, FileSystem fs) throws IOException {
+      Text regionName, Text colFamily, FileSystem fs) throws IOException {
     
     Path mapdir = HStoreFile.getMapDir(dir, regionName, colFamily);
     long fileId = Math.abs(rand.nextLong());
@@ -149,7 +149,7 @@
    * If only one exists, we'll delete it.
    */
   static Vector<HStoreFile> loadHStoreFiles(Configuration conf, Path dir, 
-                                            Text regionName, Text colFamily, FileSystem fs) throws IOException {
+      Text regionName, Text colFamily, FileSystem fs) throws IOException {
     
     Vector<HStoreFile> results = new Vector<HStoreFile>();
     Path mapdir = HStoreFile.getMapDir(dir, regionName, colFamily);
@@ -158,13 +158,13 @@
     for(int i = 0; i < datfiles.length; i++) {
       String name = datfiles[i].getName();
       
-      if (name.startsWith(HSTORE_DATFILE_PREFIX)) {
+      if(name.startsWith(HSTORE_DATFILE_PREFIX)) {
         Long fileId = Long.parseLong(name.substring(HSTORE_DATFILE_PREFIX.length()));
         HStoreFile curfile = new HStoreFile(conf, dir, regionName, colFamily, fileId);
         Path mapfile = curfile.getMapFilePath();
         Path infofile = curfile.getInfoFilePath();
         
-        if (fs.exists(infofile)) {
+        if(fs.exists(infofile)) {
           results.add(curfile);
           
         } else {
@@ -178,12 +178,12 @@
     for(int i = 0; i < infofiles.length; i++) {
       String name = infofiles[i].getName();
       
-      if (name.startsWith(HSTORE_INFOFILE_PREFIX)) {
+      if(name.startsWith(HSTORE_INFOFILE_PREFIX)) {
         long fileId = Long.parseLong(name.substring(HSTORE_INFOFILE_PREFIX.length()));
         HStoreFile curfile = new HStoreFile(conf, dir, regionName, colFamily, fileId);
         Path mapfile = curfile.getMapFilePath();
         
-        if (!fs.exists(mapfile)) {
+        if(! fs.exists(mapfile)) {
           fs.delete(curfile.getInfoFilePath());
         }
       }
@@ -200,18 +200,18 @@
    * brand-new HRegions.
    */
   public void splitStoreFile(Text midKey, HStoreFile dstA, HStoreFile dstB,
-                             FileSystem fs, Configuration conf) throws IOException {
+      FileSystem fs, Configuration conf) throws IOException {
 
     // Copy the appropriate tuples to one MapFile or the other.
 
     MapFile.Reader in = new MapFile.Reader(fs, getMapFilePath().toString(), conf);
     try {
       MapFile.Writer outA = new MapFile.Writer(conf, fs, 
-                                               dstA.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);
+          dstA.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);
       
       try {
         MapFile.Writer outB = new MapFile.Writer(conf, fs, 
-                                                 dstB.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);
+            dstB.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);
         
         try {
           HStoreKey readkey = new HStoreKey();
@@ -220,7 +220,7 @@
           while(in.next(readkey, readval)) {
             Text key = readkey.getRow();
             
-            if (key.compareTo(midKey) < 0) {
+            if(key.compareTo(midKey) < 0) {
               outA.append(readkey, readval);
               
             } else {
@@ -252,15 +252,15 @@
    * We are merging multiple regions into a single new one.
    */
   public void mergeStoreFiles(Vector<HStoreFile> srcFiles, FileSystem fs, 
-                              Configuration conf) throws IOException {
+      Configuration conf) throws IOException {
 
     // Copy all the source MapFile tuples into this HSF's MapFile
 
     MapFile.Writer out = new MapFile.Writer(conf, fs, getMapFilePath().toString(),
-                                            HStoreKey.class, BytesWritable.class);
+        HStoreKey.class, BytesWritable.class);
     
     try {
-      for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext();) {
+      for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext(); ) {
         HStoreFile src = it.next();
         MapFile.Reader in = new MapFile.Reader(fs, src.getMapFilePath().toString(), conf);
         
@@ -283,11 +283,11 @@
     // Build a unified InfoFile from the source InfoFiles.
 
     long unifiedSeqId = -1;
-    for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext(); ) {
       HStoreFile hsf = it.next();
       long curSeqId = hsf.loadInfo(fs);
       
-      if (curSeqId > unifiedSeqId) {
+      if(curSeqId > unifiedSeqId) {
         unifiedSeqId = curSeqId;
       }
     }
@@ -301,7 +301,7 @@
     
     try {
       byte flag = in.readByte();
-      if (flag == INFO_SEQ_NUM) {
+      if(flag == INFO_SEQ_NUM) {
         return in.readLong();
         
       } else {
@@ -352,17 +352,17 @@
   public int compareTo(Object o) {
     HStoreFile other = (HStoreFile) o;
     int result = this.dir.compareTo(other.dir);    
-    if (result == 0) {
+    if(result == 0) {
       this.regionName.compareTo(other.regionName);
     }
-    if (result == 0) {
+    if(result == 0) {
       result = this.colFamily.compareTo(other.colFamily);
     }    
-    if (result == 0) {
-      if (this.fileId < other.fileId) {
+    if(result == 0) {
+      if(this.fileId < other.fileId) {
         result = -1;
         
-      } else if (this.fileId > other.fileId) {
+      } else if(this.fileId > other.fileId) {
         result = 1;
       }
     }
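
The static helpers touched above (getMapDir, getInfoDir, getMapFilePath, getInfoFilePath) define where a store file's MapFile and its companion info file live under the region directory, and obtainNewHStoreFile draws a random fileId that must not already exist under that map directory. A minimal sketch of the resulting layout, assuming illustrative values for the prefix and directory constants (the real values are defined in the HBase constants and are not shown in this diff):

import org.apache.hadoop.fs.Path;

// Minimal sketch of the on-disk layout implied by HStoreFile's static path helpers.
// The prefix and directory constants below are illustrative stand-ins; the real
// values live in the HBase constants and are not part of this diff.
public class StoreFileLayoutSketch {
  static final String HREGIONDIR_PREFIX = "hregion_";    // assumed value
  static final String HSTORE_DATFILE_DIR = "mapfiles";   // assumed value
  static final String HSTORE_INFO_DIR = "info";          // assumed value
  static final String HSTORE_DATFILE_PREFIX = "map.";    // assumed value
  static final String HSTORE_INFOFILE_PREFIX = "info.";  // assumed value

  public static void main(String[] args) {
    Path dir = new Path("/hbase");
    String regionName = "test_region";
    String colFamily = "contents";
    long fileId = 1234567890L;  // obtainNewHStoreFile draws this from Math.abs(rand.nextLong())

    // Mirrors getMapDir(): <dir>/<HREGIONDIR_PREFIX + regionName>/<colFamily>/<HSTORE_DATFILE_DIR>
    Path mapDir = new Path(dir, new Path(HREGIONDIR_PREFIX + regionName,
        new Path(colFamily, HSTORE_DATFILE_DIR)));
    // Mirrors getInfoDir(): same parents, ending in HSTORE_INFO_DIR
    Path infoDir = new Path(dir, new Path(HREGIONDIR_PREFIX + regionName,
        new Path(colFamily, HSTORE_INFO_DIR)));

    // Mirrors getMapFilePath()/getInfoFilePath(): prefix + fileId under each directory
    System.out.println(new Path(mapDir, HSTORE_DATFILE_PREFIX + fileId));
    System.out.println(new Path(infoDir, HSTORE_INFOFILE_PREFIX + fileId));
  }
}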

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java Tue Apr 24 14:13:08 2007
@@ -29,7 +29,7 @@
   public static Text extractFamily(Text col) throws IOException {
     String column = col.toString();
     int colpos = column.indexOf(":");
-    if (colpos < 0) {
+    if(colpos < 0) {
       throw new IllegalArgumentException("Illegal column name has no family indicator: " + column);
     }
     return new Text(column.substring(0, colpos));
@@ -93,8 +93,13 @@
     return timestamp;
   }
   
+  /**
+   * @param other Key to compare against. Compares row and column.
+   * @return True if same row and column.
+   * @see {@link #matchesWithoutColumn(HStoreKey)}
+   */ 
   public boolean matchesRowCol(HStoreKey other) {
-    if (this.row.compareTo(other.row) == 0 &&
+    if(this.row.compareTo(other.row) == 0 &&
         this.column.compareTo(other.column) == 0) {
       return true;
       
@@ -103,8 +108,15 @@
     }
   }
   
+  /**
+   * @param other Key to compare against. Compares row and
+   * timestamp.
+   * @return True if same row and timestamp is greater than
+   * <code>other</code>
+   * @see {@link #matchesRowCol(HStoreKey)}
+   */
   public boolean matchesWithoutColumn(HStoreKey other) {
-    if ((this.row.compareTo(other.row) == 0) &&
+    if((this.row.compareTo(other.row) == 0) &&
         (this.timestamp >= other.getTimestamp())) {
       return true;
       
@@ -124,14 +136,14 @@
   public int compareTo(Object o) {
     HStoreKey other = (HStoreKey) o;
     int result = this.row.compareTo(other.row);
-    if (result == 0) {
+    if(result == 0) {
       result = this.column.compareTo(other.column);
       
-      if (result == 0) {
-        if (this.timestamp < other.timestamp) {
+      if(result == 0) {
+        if(this.timestamp < other.timestamp) {
           result = 1;
           
-        } else if (this.timestamp > other.timestamp) {
+        } else if(this.timestamp > other.timestamp) {
           result = -1;
         }
       }
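
The new javadoc spells out the HStoreKey comparison rules: matchesRowCol requires the same row and column, matchesWithoutColumn requires the same row and a timestamp at least as new as the other key's, and compareTo orders keys by row, then column, then timestamp descending so that newer entries sort first. A self-contained sketch of those rules, using plain Strings in place of Text (KeySketch is an illustrative stand-in, not the real HStoreKey):

// Self-contained sketch of the comparison rules; KeySketch is an illustrative
// stand-in for HStoreKey, using String rather than Text.
public class KeySketch implements Comparable<KeySketch> {
  final String row;
  final String column;
  final long timestamp;

  KeySketch(String row, String column, long timestamp) {
    this.row = row;
    this.column = column;
    this.timestamp = timestamp;
  }

  /** Same row and same column, regardless of timestamp. */
  boolean matchesRowCol(KeySketch other) {
    return row.equals(other.row) && column.equals(other.column);
  }

  /** Same row and a timestamp at least as new as the other key's, regardless of column. */
  boolean matchesWithoutColumn(KeySketch other) {
    return row.equals(other.row) && timestamp >= other.timestamp;
  }

  /** Row, then column, then timestamp descending: newer entries sort first. */
  public int compareTo(KeySketch other) {
    int result = row.compareTo(other.row);
    if (result == 0) {
      result = column.compareTo(other.column);
      if (result == 0) {
        if (this.timestamp < other.timestamp) {
          result = 1;
        } else if (this.timestamp > other.timestamp) {
          result = -1;
        }
      }
    }
    return result;
  }

  public static void main(String[] args) {
    KeySketch older = new KeySketch("row1", "contents:basic", 100L);
    KeySketch newer = new KeySketch("row1", "anchor:other", 200L);
    System.out.println(newer.matchesRowCol(older));        // false: different column
    System.out.println(newer.matchesWithoutColumn(older)); // true: same row, newer timestamp
    System.out.println(newer.compareTo(new KeySketch("row1", "anchor:other", 100L)) < 0); // true: newer first
  }
}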

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java Tue Apr 24 14:13:08 2007
@@ -54,7 +54,7 @@
 
   /** Do we contain a given column? */
   public boolean hasFamily(Text family) {
-    if (families.contains(family)) {
+    if(families.contains(family)) {
       return true;
       
     } else {
@@ -75,7 +75,7 @@
     name.write(out);
     out.writeInt(maxVersions);
     out.writeInt(families.size());
-    for(Iterator<Text> it = families.iterator(); it.hasNext();) {
+    for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
       it.next().write(out);
     }
   }
@@ -99,21 +99,21 @@
   public int compareTo(Object o) {
     HTableDescriptor htd = (HTableDescriptor) o;
     int result = name.compareTo(htd.name);
-    if (result == 0) {
+    if(result == 0) {
       result = maxVersions - htd.maxVersions;
     }
     
-    if (result == 0) {
+    if(result == 0) {
       result = families.size() - htd.families.size();
     }
     
-    if (result == 0) {
+    if(result == 0) {
       Iterator<Text> it2 = htd.families.iterator();
-      for(Iterator<Text> it = families.iterator(); it.hasNext();) {
+      for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
         Text family1 = it.next();
         Text family2 = it2.next();
         result = family1.compareTo(family2);
-        if (result != 0) {
+        if(result != 0) {
           return result;
         }
       }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LabelledData.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LabelledData.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LabelledData.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LabelledData.java Tue Apr 24 14:13:08 2007
@@ -32,7 +32,7 @@
   }
 
   public LabelledData(Text label, byte[] data) {
-    this.label.set(label);
+    this.label = new Text(label);
     this.data = new BytesWritable(data);
   }
 
@@ -40,7 +40,7 @@
     return label;
   }
 
-  public BytesWritable getDat() {
+  public BytesWritable getData() {
     return data;
   }
 

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java Tue Apr 24 14:13:08 2007
@@ -77,7 +77,7 @@
       synchronized(sortedLeases) {
         Lease lease = new Lease(holderId, resourceId, listener);
         Text leaseId = lease.getLeaseId();
-        if (leases.get(leaseId) != null) {
+        if(leases.get(leaseId) != null) {
           throw new IOException("Impossible state for createLease(): Lease for holderId " + holderId + " and resourceId " + resourceId + " is still held.");
         }
         leases.put(leaseId, lease);
@@ -92,7 +92,7 @@
       synchronized(sortedLeases) {
         Text leaseId = createLeaseId(holderId, resourceId);
         Lease lease = leases.get(leaseId);
-        if (lease == null) {
+        if(lease == null) {
           
           // It's possible that someone tries to renew the lease, but 
           // it just expired a moment ago.  So fail.
@@ -113,7 +113,7 @@
       synchronized(sortedLeases) {
         Text leaseId = createLeaseId(holderId, resourceId);
         Lease lease = leases.get(leaseId);
-        if (lease == null) {
+        if(lease == null) {
           
           // It's possible that someone tries to renew the lease, but 
           // it just expired a moment ago.  So fail.
@@ -137,9 +137,9 @@
           synchronized(sortedLeases) {
             Lease top;
             while((sortedLeases.size() > 0)
-                  && ((top = sortedLeases.first()) != null)) {
+                && ((top = sortedLeases.first()) != null)) {
               
-              if (top.shouldExpire()) {
+              if(top.shouldExpire()) {
                 leases.remove(top.getLeaseId());
                 sortedLeases.remove(top);
 
@@ -205,10 +205,10 @@
 
     public int compareTo(Object o) {
       Lease other = (Lease) o;
-      if (this.lastUpdate < other.lastUpdate) {
+      if(this.lastUpdate < other.lastUpdate) {
         return -1;
         
-      } else if (this.lastUpdate > other.lastUpdate) {
+      } else if(this.lastUpdate > other.lastUpdate) {
         return 1;
         
       } else {
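
The lease monitor loop above only ever inspects sortedLeases.first(), expiring entries while shouldExpire() holds; that works because Lease.compareTo orders leases by lastUpdate ascending, so the least recently renewed lease is always at the head of the set. A small sketch of that ordering, with an assumed lease period and an illustrative LeaseSketch class (the tie-break on leaseId is added here only so the TreeSet keeps distinct leases with equal timestamps):

import java.util.TreeSet;

// Sketch: leases sorted by lastUpdate so the stalest lease is always first().
// LeaseSketch and LEASE_PERIOD are illustrative stand-ins for the Leases internals.
public class LeaseExpirySketch {
  static final long LEASE_PERIOD = 30 * 1000; // assumed 30-second lease period

  static class LeaseSketch implements Comparable<LeaseSketch> {
    final String leaseId;
    final long lastUpdate;

    LeaseSketch(String leaseId, long lastUpdate) {
      this.leaseId = leaseId;
      this.lastUpdate = lastUpdate;
    }

    boolean shouldExpire() {
      return System.currentTimeMillis() - lastUpdate > LEASE_PERIOD;
    }

    public int compareTo(LeaseSketch other) {
      // Oldest lastUpdate first, mirroring Lease.compareTo in the diff above.
      if (this.lastUpdate < other.lastUpdate) {
        return -1;
      } else if (this.lastUpdate > other.lastUpdate) {
        return 1;
      } else {
        return this.leaseId.compareTo(other.leaseId); // tie-break added for the sketch only
      }
    }
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    TreeSet<LeaseSketch> sortedLeases = new TreeSet<LeaseSketch>();
    sortedLeases.add(new LeaseSketch("holderA/resource1", now - 60 * 1000)); // renewed a minute ago
    sortedLeases.add(new LeaseSketch("holderB/resource2", now));             // just renewed

    // The monitor loop only ever needs to look at first(): the stalest lease.
    LeaseSketch top = sortedLeases.first();
    System.out.println(top.leaseId + " is first in line, shouldExpire=" + top.shouldExpire());
  }
}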

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LockException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LockException.java?view=auto&rev=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LockException.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LockException.java Tue Apr 24 14:13:08 2007
@@ -0,0 +1,28 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class LockException extends IOException {
+  public LockException() {
+    super();
+  }
+
+  public LockException(String s) {
+    super(s);
+  }
+}

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/Environment.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/Environment.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/Environment.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/Environment.java Tue Apr 24 14:13:08 2007
@@ -29,27 +29,27 @@
     String value = null;
     
     value = System.getenv("DEBUGGING");
-    if (value != null && value.equalsIgnoreCase("TRUE")) {
+    if(value != null && value.equalsIgnoreCase("TRUE")) {
       debugging = true;
     }
     
     value = System.getenv("LOGGING_LEVEL");
-    if (value != null && value.length() != 0) {
-      if (value.equalsIgnoreCase("ALL")) {
+    if(value != null && value.length() != 0) {
+      if(value.equalsIgnoreCase("ALL")) {
         logLevel = Level.ALL;
-      } else if (value.equalsIgnoreCase("DEBUG")) {
+      } else if(value.equalsIgnoreCase("DEBUG")) {
         logLevel = Level.DEBUG;
-      } else if (value.equalsIgnoreCase("ERROR")) {
+      } else if(value.equalsIgnoreCase("ERROR")) {
         logLevel = Level.ERROR;
-      } else if (value.equalsIgnoreCase("FATAL")) {
+      } else if(value.equalsIgnoreCase("FATAL")) {
         logLevel = Level.FATAL;
-      } else if (value.equalsIgnoreCase("INFO")) {
+      } else if(value.equalsIgnoreCase("INFO")) {
         logLevel = Level.INFO;
-      } else if (value.equalsIgnoreCase("OFF")) {
+      } else if(value.equalsIgnoreCase("OFF")) {
         logLevel = Level.OFF;
-      } else if (value.equalsIgnoreCase("TRACE")) {
+      } else if(value.equalsIgnoreCase("TRACE")) {
         logLevel = Level.TRACE;
-      } else if (value.equalsIgnoreCase("WARN")) {
+      } else if(value.equalsIgnoreCase("WARN")) {
         logLevel = Level.WARN;
       }
     }

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?view=auto&rev=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Tue Apr 24 14:13:08 2007
@@ -0,0 +1,298 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * This class creates a single process HBase cluster for junit testing.
+ * One thread is created for each server.
+ */
+public class MiniHBaseCluster implements HConstants {
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private FileSystem fs;
+  private Path parentdir;
+  private HMasterRunner master;
+  private Thread masterThread;
+  private HRegionServerRunner[] regionServers;
+  private Thread[] regionThreads;
+  
+  public MiniHBaseCluster(Configuration conf, int nRegionNodes) {
+    this.conf = conf;
+
+    try {
+      try {
+        if(System.getProperty("test.build.data") == null) {
+          File testDir = new File(new File("").getAbsolutePath(),
+              "build/contrib/hbase/test");
+
+          String dir = testDir.getAbsolutePath();
+          System.out.println(dir);
+          System.setProperty("test.build.data", dir);
+        }
+
+        // To run using configured filesystem, comment out this
+        // line below that starts up the MiniDFSCluster.
+        this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
+        this.fs = FileSystem.get(conf);
+        this.parentdir =
+          new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR));
+        fs.mkdirs(parentdir);
+
+      } catch(Throwable e) {
+        System.err.println("Mini DFS cluster failed to start");
+        e.printStackTrace();
+        throw e;
+      }
+
+      if(this.conf.get(MASTER_ADDRESS) == null) {
+        this.conf.set(MASTER_ADDRESS, "localhost:0");
+      }
+      
+      // Create the master
+
+      this.master = new HMasterRunner();
+      this.masterThread = new Thread(master, "HMaster");
+
+      // Start up the master
+
+      masterThread.start();
+      while(! master.isCrashed() && ! master.isInitialized()) {
+        try {
+          System.err.println("Waiting for HMaster to initialize...");
+          Thread.sleep(1000);
+
+        } catch(InterruptedException e) {
+        }
+        if(master.isCrashed()) {
+          throw new RuntimeException("HMaster crashed");
+        }
+      }
+
+      // Set the master's port for the HRegionServers
+
+      this.conf.set(MASTER_ADDRESS, master.getHMasterAddress().toString());
+
+      // Start the HRegionServers
+
+      if(this.conf.get(REGIONSERVER_ADDRESS) == null) {
+        this.conf.set(REGIONSERVER_ADDRESS, "localhost:0");
+      }
+      
+      startRegionServers(this.conf, nRegionNodes);
+
+      // Wait for things to get started
+
+      while(! master.isCrashed() && ! master.isUp()) {
+        try {
+          System.err.println("Waiting for Mini HBase cluster to start...");
+          Thread.sleep(1000);
+
+        } catch(InterruptedException e) {
+        }
+        if(master.isCrashed()) {
+          throw new RuntimeException("HMaster crashed");
+        }
+      }
+      
+    } catch(Throwable e) {
+
+      // Delete all DFS files
+
+      deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
+
+      throw new RuntimeException("Mini HBase cluster did not start");
+    }
+  }
+  
+  private void startRegionServers(Configuration conf, int nRegionNodes) {
+    this.regionServers = new HRegionServerRunner[nRegionNodes];
+    this.regionThreads = new Thread[nRegionNodes];
+    
+    for(int i = 0; i < nRegionNodes; i++) {
+      regionServers[i] = new HRegionServerRunner(conf);
+      regionThreads[i] = new Thread(regionServers[i], "HRegionServer-" + i);
+      regionThreads[i].start();
+    }
+  }
+  
+  /** 
+   * Returns the rpc address actually used by the master server, because the 
+   * supplied port is not necessarily the actual port used.
+   */
+  public HServerAddress getHMasterAddress() {
+    return master.getHMasterAddress();
+  }
+  
+  /** Shut down the HBase cluster */
+  public void shutdown() {
+    System.out.println("Shutting down the HBase Cluster");
+    for(int i = 0; i < regionServers.length; i++) {
+      regionServers[i].shutdown();
+    }
+    master.shutdown();
+    
+    for(int i = 0; i < regionServers.length; i++) {
+      try {
+        regionThreads[i].join();
+        
+      } catch(InterruptedException e) {
+      }
+    }
+    try {
+      masterThread.join();
+      
+    } catch(InterruptedException e) {
+    }
+    
+    System.out.println("Shutting down Mini DFS cluster");
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+    
+    // Delete all DFS files
+
+    deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
+
+  }
+  
+  private void deleteFile(File f) {
+    if(f.isDirectory()) {
+      File[] children = f.listFiles();
+      for(int i = 0; i < children.length; i++) {
+        deleteFile(children[i]);
+      }
+    }
+    f.delete();
+  }
+  
+  private class HMasterRunner implements Runnable {
+    private HMaster master = null;
+    private volatile boolean isInitialized = false;
+    private boolean isCrashed = false;
+    private boolean isRunning = true;
+    
+    public HServerAddress getHMasterAddress() {
+      return master.getMasterAddress();
+    }
+    
+    public synchronized boolean isInitialized() {
+      return isInitialized;
+    }
+    
+    public synchronized boolean isCrashed() {
+      return isCrashed;
+    }
+    
+    public boolean isUp() {
+      if(master == null) {
+        return false;
+      }
+      synchronized(this) {
+        return isInitialized;
+      }
+    }
+    
+    /** Create the HMaster and run it */
+    public void run() {
+      try {
+        synchronized(this) {
+          if(isRunning) {
+            master = new HMaster(conf);
+          }
+          isInitialized = true;
+        }
+      } catch(Throwable e) {
+        shutdown();
+        System.err.println("HMaster crashed:");
+        e.printStackTrace();
+        synchronized(this) {
+          isCrashed = true;
+        }
+      }
+    }
+    
+    /** Shut down the HMaster and wait for it to finish */
+    public synchronized void shutdown() {
+      isRunning = false;
+      if(master != null) {
+        try {
+          master.stop();
+          
+        } catch(IOException e) {
+          System.err.println("Master crashed during stop");
+          e.printStackTrace();
+          
+        } finally {
+          master.join();
+          master = null;
+        }
+      }
+    }
+  }
+  
+  private class HRegionServerRunner implements Runnable {
+    private HRegionServer server = null;
+    private boolean isRunning = true;
+    private Configuration conf;
+    
+    public HRegionServerRunner(Configuration conf) {
+      this.conf = conf;
+    }
+    
+    /** Start up the HRegionServer */
+    public void run() {
+      try {
+        synchronized(this) {
+          if(isRunning) {
+            server = new HRegionServer(conf);
+          }
+        }
+        server.run();
+        
+      } catch(Throwable e) {
+        shutdown();
+        System.err.println("HRegionServer crashed:");
+        e.printStackTrace();
+      }
+    }
+    
+    /** Shut down the HRegionServer */
+    public synchronized void shutdown() {
+      isRunning = false;
+      if(server != null) {
+        try {
+          server.stop();
+          
+        } catch(IOException e) {
+          System.err.println("HRegionServer crashed during stop");
+          e.printStackTrace();
+          
+        } finally {
+          server.join();
+          server = null;
+        }
+      }
+    }
+  }
+}
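
Per its javadoc, MiniHBaseCluster starts a single-process cluster with one thread per server, blocks in the constructor until the HMaster and region servers report up, and exposes the master's actual RPC address through getHMasterAddress(), since configuring "localhost:0" means the real port is only known after startup. A hedged sketch of how a test might drive it; the region server count and the try/finally pattern are illustrative choices, not prescribed by this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.MiniHBaseCluster;

// Hedged sketch of driving MiniHBaseCluster; the two-region-server count and the
// try/finally pattern are illustrative, not prescribed by this commit.
public class MiniClusterUsageSketch {
  public static void main(String[] args) {
    Configuration conf = new HBaseConfiguration();
    MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 2); // two HRegionServer threads
    try {
      // The constructor blocks until the HMaster and region servers report up, so
      // the real port is known here even though "localhost:0" was configured.
      HServerAddress master = cluster.getHMasterAddress();
      System.out.println("HMaster listening at " + master);
      // ... exercise client or HRegion code against the running cluster ...
    } finally {
      cluster.shutdown(); // stops region servers, the master, and the mini DFS, then cleans up
    }
  }
}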

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java?view=auto&rev=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java Tue Apr 24 14:13:08 2007
@@ -0,0 +1,191 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.TreeMap;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HMemcache.Snapshot;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
+
+public class TestHMemcache extends TestCase {
+  private final Logger LOG =
+    Logger.getLogger(this.getClass().getName());
+  
+  private HMemcache hmemcache;
+
+  private Configuration conf;
+
+  private static final int ROW_COUNT = 3;
+
+  private static final int COLUMNS_COUNT = 3;
+  
+  private static final String COLUMN_FAMILY = "column";
+
+  protected void setUp() throws Exception {
+    super.setUp();
+
+    this.hmemcache = new HMemcache();
+
+    // Set up a configuration that has configuration for a file
+    // filesystem implementation.
+    this.conf = new HBaseConfiguration();
+    // The test hadoop-site.xml doesn't have a default file fs
+    // implementation. Remove below when it gets added.
+    this.conf.set("fs.file.impl",
+        "org.apache.hadoop.fs.LocalFileSystem");
+  }
+
+  protected void tearDown() throws Exception {
+    super.tearDown();
+  }
+
+  private Text getRowName(final int index) {
+    return new Text("row" + Integer.toString(index));
+  }
+
+  private Text getColumnName(final int rowIndex,
+      final int colIndex) {
+    return new Text(COLUMN_FAMILY + ":" +
+        Integer.toString(rowIndex) + ";" +
+        Integer.toString(colIndex));
+  }
+
+  /**
+   * Adds {@link #ROW_COUNT} rows and {@link #COLUMNS_COUNT} columns per row.
+   * @param hmc Instance to add rows to.
+   */
+  private void addRows(final HMemcache hmc) {
+    for (int i = 0; i < ROW_COUNT; i++) {
+      TreeMap<Text, byte[]> columns = new TreeMap<Text, byte[]>();
+      for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
+        Text k = getColumnName(i, ii);
+        columns.put(k, k.toString().getBytes());
+      }
+      hmc.add(getRowName(i), columns, System.currentTimeMillis());
+    }
+  }
+
+  private HLog getLogfile() throws IOException {
+    // Create a log file.
+    Path testDir = new Path(conf.get("hadoop.tmp.dir", System
+        .getProperty("java.tmp.dir")), "hbase");
+    Path logFile = new Path(testDir, this.getName());
+    FileSystem fs = testDir.getFileSystem(conf);
+    // Cleanup any old log file.
+    if (fs.exists(logFile)) {
+      fs.delete(logFile);
+    }
+    return new HLog(fs, logFile, this.conf);
+  }
+
+  private Snapshot runSnapshot(final HMemcache hmc, final HLog log)
+      throws IOException {
+    // Save off old state.
+    int oldHistorySize = hmc.history.size();
+    TreeMap<HStoreKey, BytesWritable> oldMemcache = hmc.memcache;
+    // Run snapshot.
+    Snapshot s = hmc.snapshotMemcacheForLog(log);
+    // Make some assertions about what just happened.
+    assertEquals("Snapshot equals old memcache", hmc.snapshot,
+        oldMemcache);
+    assertEquals("Returned snapshot holds old memcache",
+        s.memcacheSnapshot, oldMemcache);
+    assertEquals("History has been incremented",
+        oldHistorySize + 1, hmc.history.size());
+    assertEquals("History holds old snapshot",
+        hmc.history.get(oldHistorySize), oldMemcache);
+    return s;
+  }
+
+  public void testSnapshotting() throws IOException {
+    final int snapshotCount = 5;
+    final Text tableName = new Text(getName());
+    HLog log = getLogfile();
+    try {
+      // Add some rows, run a snapshot. Do it a few times.
+      for (int i = 0; i < snapshotCount; i++) {
+        addRows(this.hmemcache);
+        Snapshot s = runSnapshot(this.hmemcache, log);
+        log.completeCacheFlush(new Text(Integer.toString(i)),
+            tableName, s.sequenceId);
+        // Clean up snapshot now we are done with it.
+        this.hmemcache.deleteSnapshot();
+      }
+      log.close();
+    } finally {
+      log.dir.getFileSystem(this.conf).delete(log.dir);
+    }
+  }
+  
+  private void isExpectedRow(final int rowIndex,
+      TreeMap<Text, byte[]> row) {
+    int i = 0;
+    for (Text colname: row.keySet()) {
+      String expectedColname =
+        getColumnName(rowIndex, i++).toString();
+      String colnameStr = colname.toString();
+      assertEquals("Column name", colnameStr, expectedColname);
+      // Value is column name as bytes.  Usually result is
+      // 100 bytes in size at least. This is the default size
+      // for BytesWritable.  For comparison, convert bytes to
+      // String and trim to remove trailing null bytes.
+      String colvalueStr =
+        new String(row.get(colname)).trim();
+      assertEquals("Content", colnameStr, colvalueStr);
+    }
+  }
+
+  public void testGetFull() throws IOException {
+    addRows(this.hmemcache);
+    for (int i = 0; i < ROW_COUNT; i++) {
+      HStoreKey hsk = new HStoreKey(getRowName(i));
+      TreeMap<Text, byte[]> all = this.hmemcache.getFull(hsk);
+      isExpectedRow(i, all);
+    }
+  }
+  
+  public void testScanner() throws IOException {
+    addRows(this.hmemcache);
+    long timestamp = System.currentTimeMillis();
+    Text [] cols = new Text[COLUMNS_COUNT * ROW_COUNT];
+    for (int i = 0; i < ROW_COUNT; i++) {
+      for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
+        cols[(ii + (i * COLUMNS_COUNT))] = getColumnName(i, ii);
+      }
+    }
+    HScannerInterface scanner =
+      this.hmemcache.getScanner(timestamp, cols, new Text());
+    HStoreKey key = new HStoreKey();
+    TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+    for (int i = 0; scanner.next(key, results); i++) {
+      assertTrue("Row name",
+          key.toString().startsWith(getRowName(i).toString()));
+      assertEquals("Count of columns", COLUMNS_COUNT,
+          results.size());
+      isExpectedRow(i, results);
+      // Clear out set.  Otherwise row results accumulate.
+      results.clear();
+    }
+  }
+}

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java?view=diff&rev=532083&r1=532082&r2=532083
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java Tue Apr 24 14:13:08 2007
@@ -21,7 +21,10 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Enumeration;
 import java.util.Iterator;
+import java.util.List;
 import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
@@ -30,6 +33,9 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 
+import org.apache.log4j.Appender;
+import org.apache.log4j.ConsoleAppender;
+import org.apache.log4j.Layout;
 import org.apache.log4j.Logger;
 import org.apache.log4j.Level;
 import org.apache.log4j.PatternLayout;
@@ -41,6 +47,7 @@
  * HRegions or in the HBaseMaster, so only basic testing is possible.
  */
 public class TestHRegion extends TestCase {
+  private Logger LOG = Logger.getLogger(this.getClass().getName());
   
   /** Constructor */
   public TestHRegion(String name) {
@@ -51,6 +58,8 @@
   public static Test suite() {
     TestSuite suite = new TestSuite();
     suite.addTest(new TestHRegion("testSetup"));
+    suite.addTest(new TestHRegion("testLocks"));
+    suite.addTest(new TestHRegion("testBadPuts"));
     suite.addTest(new TestHRegion("testBasic"));
     suite.addTest(new TestHRegion("testScan"));
     suite.addTest(new TestHRegion("testBatchWrite"));
@@ -61,7 +70,7 @@
   }
   
   
-  private static final int FIRST_ROW = 0;
+  private static final int FIRST_ROW = 1;
   private static final int N_ROWS = 1000000;
   private static final int NUM_VALS = 1000;
   private static final Text CONTENTS_BASIC = new Text("contents:basic");
@@ -88,28 +97,42 @@
 
   // Set up environment, start mini cluster, etc.
   
+  @SuppressWarnings("unchecked")
   public void testSetup() throws IOException {
     try {
-      if (System.getProperty("test.build.data") == null) {
+      if(System.getProperty("test.build.data") == null) {
         String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
         System.out.println(dir);
         System.setProperty("test.build.data", dir);
       }
-      conf = new Configuration();
+      conf = new HBaseConfiguration();
       
       Environment.getenv();
-      if (Environment.debugging) {
+      if(Environment.debugging) {
         Logger rootLogger = Logger.getRootLogger();
         rootLogger.setLevel(Level.WARN);
+
+        ConsoleAppender consoleAppender = null;
+        for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
+            e.hasMoreElements();) {
         
-        PatternLayout consoleLayout
-          = (PatternLayout)rootLogger.getAppender("console").getLayout();
-        consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
-      
+          Appender a = e.nextElement();
+          if(a instanceof ConsoleAppender) {
+            consoleAppender = (ConsoleAppender)a;
+            break;
+          }
+        }
+        if(consoleAppender != null) {
+          Layout layout = consoleAppender.getLayout();
+          if(layout instanceof PatternLayout) {
+            PatternLayout consoleLayout = (PatternLayout)layout;
+            consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
+          }
+        }
         Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
       }
       
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
       fs = cluster.getFileSystem();
       parentdir = new Path("/hbase");
       fs.mkdirs(parentdir);
@@ -118,10 +141,10 @@
 
       log = new HLog(fs, newlogdir, conf);
       desc = new HTableDescriptor("test", 3);
-      desc.addFamily(new Text("contents"));
-      desc.addFamily(new Text("anchor"));
+      desc.addFamily(new Text("contents:"));
+      desc.addFamily(new Text("anchor:"));
       region = new HRegion(parentdir, log, fs, conf, 
-                           new HRegionInfo(1, desc, null, null), null, oldlogfile);
+          new HRegionInfo(1, desc, null, null), null, oldlogfile);
       
     } catch(IOException e) {
       failures = true;
@@ -133,26 +156,39 @@
   // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
 
   public void testBasic() throws IOException {
-    if (!initialized) {
+    if(!initialized) {
       throw new IllegalStateException();
     }
 
     try {
+      long startTime = System.currentTimeMillis();
       
       // Write out a bunch of values
       
-      for (int k = 0; k < NUM_VALS; k++) {
+      for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
         long writeid = region.startUpdate(new Text("row_" + k));
         region.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
         region.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
         region.commit(writeid);
       }
+      System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+      // Flush cache
+      
+      startTime = System.currentTimeMillis();
+      
       region.flushcache(false);
+      
+      System.out.println("Cache flush elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
 
       // Read them back in
 
+      startTime = System.currentTimeMillis();
+      
       Text collabel = null;
-      for (int k = 0; k < NUM_VALS; k++) {
+      for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
         Text rowlabel = new Text("row_" + k);
 
         byte bodydata[] = region.get(rowlabel, CONTENTS_BASIC);
@@ -160,44 +196,127 @@
         String bodystr = new String(bodydata).toString().trim();
         String teststr = CONTENTSTR + k;
         assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
-                     + "), expected: '" + teststr + "' got: '" + bodystr + "'",
-                     bodystr, teststr);
+            + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+            bodystr, teststr);
         collabel = new Text(ANCHORNUM + k);
         bodydata = region.get(rowlabel, collabel);
         bodystr = new String(bodydata).toString().trim();
         teststr = ANCHORSTR + k;
         assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
-                     + "), expected: '" + teststr + "' got: '" + bodystr + "'",
-                     bodystr, teststr);
-        /*
-        // Check to make sure that null values are actually null
-        for (int j = 0; j < Math.min(15, NUM_VALS); j++) {
-        if (k != j) {
-        collabel = new Text(ANCHORNUM + j);
-        byte results[] = region.get(rowlabel, collabel);
-        if (results != null) {
-        throw new IOException("Found incorrect value at [" + rowlabel + ", " + collabel + "] == " + new String(results).toString().trim());
-        }
-        }
-        }
-        */
+            + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+            bodystr, teststr);
       }
+      
+      System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+
     } catch(IOException e) {
       failures = true;
       throw e;
     }
   }
+  
+  public void testBadPuts() throws IOException {
+    if(!initialized) {
+      throw new IllegalStateException();
+    }  
+    
+    // Try put with bad lockid.
+    boolean exceptionThrown = false;
+    try {
+      region.put(-1, CONTENTS_BASIC, "bad input".getBytes());
+    } catch (LockException e) {
+      exceptionThrown = true;
+    }
+    assertTrue("Bad lock id", exceptionThrown);
+
+    // Try column name not registered in the table.
+    exceptionThrown = false;
+    long lockid = -1;
+    try {
+      lockid = region.startUpdate(new Text("Some old key"));
+      String unregisteredColName = "FamilyGroup:FamilyLabel";
+      region.put(lockid, new Text(unregisteredColName),
+          unregisteredColName.getBytes());
+    } catch (IOException e) {
+      exceptionThrown = true;
+    } finally {
+      if (lockid != -1) {
+        region.abort(lockid);
+      }
+    }
+    assertTrue("Bad family", exceptionThrown);
+  }
+  
+  /**
+   * Test getting and releasing locks.
+   */
+  public void testLocks() {
+    final int threadCount = 10;
+    final int lockCount = 10;
+    
+    List<Thread>threads = new ArrayList<Thread>(threadCount);
+    for (int i = 0; i < threadCount; i++) {
+      threads.add(new Thread(Integer.toString(i)) {
+        public void run() {
+          long [] lockids = new long[lockCount];
+          // Get locks.
+          for (int i = 0; i < lockCount; i++) {
+            try {
+              Text rowid = new Text(Integer.toString(i));
+              lockids[i] = region.obtainLock(rowid);
+              rowid.equals(region.getRowFromLock(lockids[i]));
+              LOG.debug(getName() + " locked " + rowid.toString());
+            } catch (IOException e) {
+              e.printStackTrace();
+            }
+          }
+          LOG.debug(getName() + " set " +
+              Integer.toString(lockCount) + " locks");
+          
+          // Abort outstanding locks.
+          for (int i = lockCount - 1; i >= 0; i--) {
+            try {
+              region.abort(lockids[i]);
+              LOG.debug(getName() + " unlocked " +
+                  Integer.toString(i));
+            } catch (IOException e) {
+              e.printStackTrace();
+            }
+          }
+          LOG.debug(getName() + " released " +
+              Integer.toString(lockCount) + " locks");
+        }
+      });
+    }
+    
+    // Startup all our threads.
+    for (Thread t : threads) {
+      t.start();
+    }
+    
+    // Now wait around till all are done.
+    for (Thread t: threads) {
+      while (t.isAlive()) {
+        try {
+          Thread.sleep(1);
+        } catch (InterruptedException e) {
+          // Go around again.
+        }
+      }
+    }
+  }
 
   // Test scanners. Writes contents:firstcol and anchor:secondcol
   
   public void testScan() throws IOException {
-    if (!initialized) {
+    if(!initialized) {
       throw new IllegalStateException();
     }
 
     Text cols[] = new Text[] {
-      CONTENTS_FIRSTCOL,
-      ANCHOR_SECONDCOL
+        CONTENTS_FIRSTCOL,
+        ANCHOR_SECONDCOL
     };
 
     // Test the Scanner!!!
@@ -207,6 +326,9 @@
     }
 
     // 1.  Insert a bunch of values
+    
+    long startTime = System.currentTimeMillis();
+
     for(int k = 0; k < vals1.length / 2; k++) {
       String kLabel = String.format("%1$03d", k);
 
@@ -217,7 +339,13 @@
       numInserted += 2;
     }
 
-    // 2.  Scan
+    System.out.println("Write " + (vals1.length / 2) + " elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+    // 2.  Scan from cache
+    
+    startTime = System.currentTimeMillis();
+
     HScannerInterface s = region.getScanner(cols, new Text());
     int numFetched = 0;
     try {
@@ -225,16 +353,16 @@
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
 
           for(int j = 0; j < cols.length; j++) {
-            if (col.compareTo(cols[j]) == 0) {
+            if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
-                           + ", Value for " + col + " should be: " + k
-                           + ", but was fetched as: " + curval, k, curval);
+                  + ", Value for " + col + " should be: " + k
+                  + ", but was fetched as: " + curval, k, curval);
               numFetched++;
             }
           }
@@ -247,10 +375,23 @@
     }
     assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
 
+    System.out.println("Scanned " + (vals1.length / 2)
+        + " rows from cache. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
     // 3.  Flush to disk
+    
+    startTime = System.currentTimeMillis();
+    
     region.flushcache(false);
 
-    // 4.  Scan
+    System.out.println("Cache flush elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+    // 4.  Scan from disk
+    
+    startTime = System.currentTimeMillis();
+    
     s = region.getScanner(cols, new Text());
     numFetched = 0;
     try {
@@ -258,16 +399,16 @@
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
 
           for(int j = 0; j < cols.length; j++) {
-            if (col.compareTo(cols[j]) == 0) {
+            if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
-                           + ", Value for " + col + " should be: " + k
-                           + ", but was fetched as: " + curval, k, curval);
+                  + ", Value for " + col + " should be: " + k
+                  + ", but was fetched as: " + curval, k, curval);
               numFetched++;
             }
           }
@@ -280,7 +421,14 @@
     }
     assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
 
+    System.out.println("Scanned " + (vals1.length / 2)
+        + " rows from disk. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
     // 5.  Insert more values
+    
+    startTime = System.currentTimeMillis();
+
     for(int k = vals1.length/2; k < vals1.length; k++) {
       String kLabel = String.format("%1$03d", k);
       
@@ -291,7 +439,13 @@
       numInserted += 2;
     }
 
-    // 6.  Scan
+    System.out.println("Write " + (vals1.length / 2) + " rows. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+    // 6.  Scan from cache and disk
+    
+    startTime = System.currentTimeMillis();
+
     s = region.getScanner(cols, new Text());
     numFetched = 0;
     try {
@@ -299,16 +453,16 @@
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
 
           for(int j = 0; j < cols.length; j++) {
-            if (col.compareTo(cols[j]) == 0) {
+            if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
-                           + ", Value for " + col + " should be: " + k
-                           + ", but was fetched as: " + curval, k, curval);
+                  + ", Value for " + col + " should be: " + k
+                  + ", but was fetched as: " + curval, k, curval);
               numFetched++;
             }
           }
@@ -321,10 +475,23 @@
     }
     assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
 
+    System.out.println("Scanned " + vals1.length
+        + " rows from cache and disk. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+    
     // 7.  Flush to disk
+    
+    startTime = System.currentTimeMillis();
+    
     region.flushcache(false);
 
-    // 8.  Scan
+    System.out.println("Cache flush elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+    
+    // 8.  Scan from disk
+    
+    startTime = System.currentTimeMillis();
+    
     s = region.getScanner(cols, new Text());
     numFetched = 0;
     try {
@@ -332,7 +499,7 @@
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
@@ -340,7 +507,7 @@
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
               assertEquals("Value for " + col + " should be: " + k
-                           + ", but was fetched as: " + curval, curval, k);
+                  + ", but was fetched as: " + curval, curval, k);
               numFetched++;
             }
           }
@@ -353,8 +520,14 @@
     }
     assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
     
+    System.out.println("Scanned " + vals1.length
+        + " rows from disk. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
     // 9. Scan with a starting point
 
+    startTime = System.currentTimeMillis();
+    
     s = region.getScanner(cols, new Text("row_vals1_500"));
     numFetched = 0;
     try {
@@ -362,7 +535,7 @@
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 500;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
@@ -370,7 +543,7 @@
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
               assertEquals("Value for " + col + " should be: " + k
-                           + ", but was fetched as: " + curval, curval, k);
+                  + ", but was fetched as: " + curval, curval, k);
               numFetched++;
             }
           }
@@ -383,6 +556,9 @@
     }
     assertEquals("Should have fetched " + (numInserted / 2) + " values, but fetched " + numFetched, (numInserted / 2), numFetched);
     
+    System.out.println("Scanned " + (numFetched / 2)
+        + " rows from disk with specified start point. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
   }
   
   // Do a large number of writes. Disabled if not debugging because it takes a 
@@ -390,10 +566,10 @@
   // Creates contents:body
   
   public void testBatchWrite() throws IOException {
-    if (!initialized || failures) {
+    if(!initialized || failures) {
       throw new IllegalStateException();
     }
-    if (!Environment.debugging) {
+    if(! Environment.debugging) {
       return;
     }
 
@@ -406,7 +582,7 @@
       // 1M writes
 
       int valsize = 1000;
-      for (int k = FIRST_ROW; k < N_ROWS; k++) {
+      for (int k = FIRST_ROW; k <= N_ROWS; k++) {
         // Come up with a random 1000-byte string
         String randstr1 = "" + System.currentTimeMillis();
         StringBuffer buf1 = new StringBuffer("val_" + k + "__");
@@ -437,7 +613,7 @@
         }
       }
       long startCompact = System.currentTimeMillis();
-      if (region.compactStores()) {
+      if(region.compactStores()) {
         totalCompact = System.currentTimeMillis() - startCompact;
         System.out.println("Region compacted - elapsedTime: " + (totalCompact / 1000.0));
         
@@ -457,7 +633,8 @@
       System.out.println("Total time, rows/second: " + (N_ROWS / (totalElapsed / 1000.0)));
       System.out.println("Adjusted time (not including flush, compact, or log): " + ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0));
       System.out.println("Adjusted time, rows/second: " + (N_ROWS / ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0)));
-
+      System.out.println();
+      
     } catch(IOException e) {
       failures = true;
       throw e;
@@ -467,14 +644,14 @@
   // NOTE: This test depends on testBatchWrite succeeding
   
   public void testSplitAndMerge() throws IOException {
-    if (!initialized || failures) {
+    if(!initialized || failures) {
       throw new IllegalStateException();
     }
     
     try {
       Text midKey = new Text();
       
-      if (region.needsSplit(midKey)) {
+      if(region.needsSplit(midKey)) {
         System.out.println("Needs split");
       }
       
@@ -482,15 +659,28 @@
 
       Text midkey = new Text("row_" + (Environment.debugging ? (N_ROWS / 2) : (NUM_VALS/2)));
       Path oldRegionPath = region.getRegionDir();
+      
+      long startTime = System.currentTimeMillis();
+      
       HRegion subregions[] = region.closeAndSplit(midkey);
+      
+      System.out.println("Split region elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+      
       assertEquals("Number of subregions", subregions.length, 2);
 
       // Now merge it back together
 
       Path oldRegion1 = subregions[0].getRegionDir();
       Path oldRegion2 = subregions[1].getRegionDir();
+      
+      startTime = System.currentTimeMillis();
+      
       region = HRegion.closeAndMerge(subregions[0], subregions[1]);
 
+      System.out.println("Merge regions elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+      
       fs.delete(oldRegionPath);
       fs.delete(oldRegion1);
       fs.delete(oldRegion2);
@@ -504,17 +694,19 @@
   // This test verifies that everything is still there after splitting and merging
   
   public void testRead() throws IOException {
-    if (!initialized || failures) {
+    if(!initialized || failures) {
       throw new IllegalStateException();
     }
 
     // First verify the data written by testBasic()
 
     Text[] cols = new Text[] {
-      new Text(ANCHORNUM + "[0-9]+"),
-      new Text(CONTENTS_BASIC)
+        new Text(ANCHORNUM + "[0-9]+"),
+        new Text(CONTENTS_BASIC)
     };
     
+    long startTime = System.currentTimeMillis();
+    
     HScannerInterface s = region.getScanner(cols, new Text());
 
     try {
@@ -525,23 +717,23 @@
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           String curval = new String(val).trim();
 
-          if (col.compareTo(CONTENTS_BASIC) == 0) {
+          if(col.compareTo(CONTENTS_BASIC) == 0) {
             assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
-                       + ", Value for " + col + " should start with: " + CONTENTSTR
-                       + ", but was fetched as: " + curval,
-                       curval.startsWith(CONTENTSTR));
+                + ", Value for " + col + " should start with: " + CONTENTSTR
+                + ", but was fetched as: " + curval,
+                curval.startsWith(CONTENTSTR));
             contentsFetched++;
             
-          } else if (col.toString().startsWith(ANCHORNUM)) {
+          } else if(col.toString().startsWith(ANCHORNUM)) {
             assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
-                       + ", Value for " + col + " should start with: " + ANCHORSTR
-                       + ", but was fetched as: " + curval,
-                       curval.startsWith(ANCHORSTR));
+                + ", Value for " + col + " should start with: " + ANCHORSTR
+                + ", but was fetched as: " + curval,
+                curval.startsWith(ANCHORSTR));
             anchorFetched++;
             
           } else {
@@ -554,6 +746,10 @@
       assertEquals("Expected " + NUM_VALS + " " + CONTENTS_BASIC + " values, but fetched " + contentsFetched, NUM_VALS, contentsFetched);
       assertEquals("Expected " + NUM_VALS + " " + ANCHORNUM + " values, but fetched " + anchorFetched, NUM_VALS, anchorFetched);
 
+      System.out.println("Scanned " + NUM_VALS
+          + " rows from disk. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+      
     } finally {
       s.close();
     }
@@ -561,9 +757,11 @@
     // Verify testScan data
     
     cols = new Text[] {
-      CONTENTS_FIRSTCOL,
-      ANCHOR_SECONDCOL
+        CONTENTS_FIRSTCOL,
+        ANCHOR_SECONDCOL
     };
+    
+    startTime = System.currentTimeMillis();
 
     s = region.getScanner(cols, new Text());
     try {
@@ -572,7 +770,7 @@
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
@@ -580,7 +778,7 @@
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
               assertEquals("Value for " + col + " should be: " + k
-                           + ", but was fetched as: " + curval, curval, k);
+                  + ", but was fetched as: " + curval, curval, k);
               numFetched++;
             }
           }
@@ -590,13 +788,19 @@
       }
       assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
 
+      System.out.println("Scanned " + (numFetched / 2)
+          + " rows from disk. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+      
     } finally {
       s.close();
     }
     
     // Verify testBatchWrite data
 
-    if (Environment.debugging) {
+    if(Environment.debugging) {
+      startTime = System.currentTimeMillis();
+      
       s = region.getScanner(new Text[] { CONTENTS_BODY }, new Text());
       try {
         int numFetched = 0;
@@ -604,7 +808,7 @@
         TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
         int k = 0;
         while(s.next(curKey, curVals)) {
-          for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+          for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
             Text col = it.next();
             byte val[] = curVals.get(col);
 
@@ -617,6 +821,10 @@
         }
         assertEquals("Inserted " + N_ROWS + " values, but fetched " + numFetched, N_ROWS, numFetched);
 
+        System.out.println("Scanned " + N_ROWS
+            + " rows from disk. Elapsed time: "
+            + ((System.currentTimeMillis() - startTime) / 1000.0));
+        
       } finally {
         s.close();
       }
@@ -625,9 +833,11 @@
     // Test a scanner which only specifies the column family name
     
     cols = new Text[] {
-      new Text("anchor:")
+        new Text("anchor:")
     };
     
+    startTime = System.currentTimeMillis();
+    
     s = region.getScanner(cols, new Text());
 
     try {
@@ -635,7 +845,7 @@
       HStoreKey curKey = new HStoreKey();
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           it.next();
           fetched++;
         }
@@ -643,6 +853,10 @@
       }
       assertEquals("Inserted " + (NUM_VALS + numInserted/2) + " values, but fetched " + fetched, (NUM_VALS + numInserted/2), fetched);
 
+      System.out.println("Scanned " + fetched
+          + " rows from disk. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+      
     } finally {
       s.close();
     }
@@ -650,7 +864,7 @@
 
   
   private static void deleteFile(File f) {
-    if (f.isDirectory()) {
+    if(f.isDirectory()) {
       File[] children = f.listFiles();
       for(int i = 0; i < children.length; i++) {
         deleteFile(children[i]);
@@ -660,7 +874,7 @@
   }
   
   public void testCleanup() throws IOException {
-    if (!initialized) {
+    if(!initialized) {
       throw new IllegalStateException();
     }
 
@@ -672,5 +886,5 @@
     
     deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
     
-  }
+    }
 }


