hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From j...@apache.org
Subject svn commit: r564780 [3/3] - in /lucene/hadoop/trunk/src/contrib/hbase: ./ src/java/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/
Date Fri, 10 Aug 2007 22:11:06 GMT
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java Fri
Aug 10 15:11:05 2007
@@ -275,6 +275,7 @@
   int compactionThreshold = 0;
   private final HLocking lock = new HLocking();
   private long desiredMaxFileSize;
+  private final long maxSequenceId;
 
   //////////////////////////////////////////////////////////////////////////////
   // Constructor
@@ -324,12 +325,26 @@
     }
 
     // Load in all the HStores.
+
+    long maxSeqId = -1;
     for(Map.Entry<Text, HColumnDescriptor> e :
         this.regionInfo.tableDesc.families().entrySet()) {
       Text colFamily = HStoreKey.extractFamily(e.getKey());
-      stores.put(colFamily,
-        new HStore(rootDir, this.regionInfo.regionName, e.getValue(), fs,
-          oldLogFile, conf));
+      
+      HStore store = new HStore(rootDir, this.regionInfo.regionName, 
+          e.getValue(), fs, oldLogFile, conf); 
+      
+      stores.put(colFamily, store);
+      
+      long storeSeqId = store.getMaxSequenceId();
+      if (storeSeqId > maxSeqId) {
+        maxSeqId = storeSeqId;
+      }
+    }
+    this.maxSequenceId = maxSeqId;
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("maximum sequence id for region " + regionInfo.getRegionName() +
+          " is " + this.maxSequenceId);
     }
 
     // Get rid of any splits or merges that were lost in-progress
@@ -361,6 +376,10 @@
     this.writestate.writesOngoing = false;
     LOG.info("region " + this.regionInfo.regionName + " available");
   }
+  
+  long getMaxSequenceId() {
+    return this.maxSequenceId;
+  }
 
   /** Returns a HRegionInfo object for this region */
   HRegionInfo getRegionInfo() {
@@ -464,8 +483,8 @@
    * @throws IOException
    */
   HRegion[] closeAndSplit(final Text midKey,
-      final RegionUnavailableListener listener)
-  throws IOException {
+      final RegionUnavailableListener listener) throws IOException {
+    
     checkMidKey(midKey);
     long startTime = System.currentTimeMillis();
     Path splits = getSplitsDir();
@@ -496,6 +515,7 @@
     Vector<HStoreFile> hstoreFilesToSplit = close();
     if (hstoreFilesToSplit == null) {
       LOG.warn("Close came back null (Implement abort of close?)");
+      throw new RuntimeException("close returned empty vector of HStoreFiles");
     }
     
     // Tell listener that region is now closed and that they can therefore
@@ -690,8 +710,11 @@
           biggest = size;
         }
       }
-      biggest.setSplitable(splitable);
+      if (biggest != null) {
+        biggest.setSplitable(splitable);
+      }
       return biggest;
+      
     } finally {
       lock.releaseReadLock();
     }
@@ -1405,6 +1428,7 @@
     }
   }
   
+  /** {@inheritDoc} */
   @Override
   public String toString() {
     return getRegionName().toString();
@@ -1842,9 +1866,7 @@
     if (bytes == null || bytes.length == 0) {
       return null;
     }
-    return (HRegionInfo)((bytes == null || bytes.length == 0)?
-      null:
-      Writables.getWritable(bytes, new HRegionInfo()));
+    return (HRegionInfo) Writables.getWritable(bytes, new HRegionInfo());
   }
   
   /**
@@ -1905,6 +1927,13 @@
     return startCode;
   }
 
+  /**
+   * Computes the Path of the HRegion
+   * 
+   * @param dir parent directory
+   * @param regionName name of the region
+   * @return Path of HRegion directory
+   */
   public static Path getRegionDir(final Path dir, final Text regionName) {
     return new Path(dir, new Path(HREGIONDIR_PREFIX + regionName));
   }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
Fri Aug 10 15:11:05 2007
@@ -32,6 +32,7 @@
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
@@ -391,6 +392,9 @@
   
   // Leases
   private Leases leases;
+  
+  // Request counter
+  private AtomicInteger requestCount;
 
   /**
    * Starts a HRegionServer at the default location
@@ -424,6 +428,7 @@
       Collections.synchronizedSortedMap(new TreeMap<Text, HRegion>());
     
     this.outboundMsgs = new Vector<HMsg>();
+    this.requestCount = new AtomicInteger();
 
     // Config'ed params
     this.numRetries =  conf.getInt("hbase.client.retries.number", 2);
@@ -597,6 +602,8 @@
         if (LOG.isDebugEnabled()) {
           LOG.debug("Telling master we are up");
         }
+        requestCount.set(0);
+        serverInfo.setLoad(new HServerLoad(0, onlineRegions.size()));
         hbaseMaster.regionServerStartup(serverInfo);
         if (LOG.isDebugEnabled()) {
           LOG.debug("Done telling master we are up");
@@ -626,6 +633,10 @@
           }
 
           try {
+            serverInfo.setLoad(new HServerLoad(requestCount.get(),
+                onlineRegions.size()));
+            requestCount.set(0);
+            
             HMsg msgs[] =
               hbaseMaster.regionServerReport(serverInfo, outboundArray);
             lastMsg = System.currentTimeMillis();
@@ -897,6 +908,7 @@
 
       this.lock.writeLock().lock();
       try {
+        this.log.setSequenceNumber(region.getMaxSequenceId());
         this.onlineRegions.put(region.getRegionName(), region);
       } finally {
         this.lock.writeLock().unlock();
@@ -963,6 +975,7 @@
    */
   public HRegionInfo getRegionInfo(final Text regionName)
   throws NotServingRegionException {
+    requestCount.incrementAndGet();
     return getRegion(regionName).getRegionInfo();
   }
 
@@ -971,6 +984,7 @@
    */
   public void batchUpdate(Text regionName, long timestamp, BatchUpdate b)
   throws IOException {
+    requestCount.incrementAndGet();
     long clientid = rand.nextLong();
     long lockid = startUpdate(regionName, clientid, b.getRow());
     for(BatchOperation op: b) {
@@ -993,6 +1007,7 @@
   public byte [] get(final Text regionName, final Text row,
       final Text column)
   throws IOException {
+    requestCount.incrementAndGet();
     return getRegion(regionName).get(row, column);
   }
 
@@ -1002,6 +1017,7 @@
   public byte [][] get(final Text regionName, final Text row,
       final Text column, final int numVersions)
   throws IOException {  
+    requestCount.incrementAndGet();
     return getRegion(regionName).get(row, column, numVersions);
   }
 
@@ -1010,6 +1026,7 @@
    */
   public byte [][] get(final Text regionName, final Text row, final Text column, 
       final long timestamp, final int numVersions) throws IOException {
+    requestCount.incrementAndGet();
     return getRegion(regionName).get(row, column, timestamp, numVersions);
   }
 
@@ -1018,6 +1035,7 @@
    */
   public KeyedData[] getRow(final Text regionName, final Text row)
   throws IOException {
+    requestCount.incrementAndGet();
     HRegion region = getRegion(regionName);
     TreeMap<Text, byte[]> map = region.getFull(row);
     KeyedData result[] = new KeyedData[map.size()];
@@ -1034,6 +1052,7 @@
    */
   public KeyedData[] next(final long scannerId)
   throws IOException {
+    requestCount.incrementAndGet();
     String scannerName = String.valueOf(scannerId);
     HInternalScannerInterface s = scanners.get(scannerName);
     if (s == null) {
@@ -1077,6 +1096,7 @@
    */
   public long startUpdate(Text regionName, long clientid, Text row) 
       throws IOException {
+    requestCount.incrementAndGet();
     HRegion region = getRegion(regionName);
     long lockid = region.startUpdate(row);
     this.leases.createLease(clientid, lockid,
@@ -1120,6 +1140,7 @@
   public void put(final Text regionName, final long clientid,
       final long lockid, final Text column, final byte [] val)
   throws IOException {
+    requestCount.incrementAndGet();
     HRegion region = getRegion(regionName, true);
     leases.renewLease(clientid, lockid);
     region.put(lockid, column, val);
@@ -1130,6 +1151,7 @@
    */
   public void delete(Text regionName, long clientid, long lockid, Text column) 
   throws IOException {
+    requestCount.incrementAndGet();
     HRegion region = getRegion(regionName);
     leases.renewLease(clientid, lockid);
     region.delete(lockid, column);
@@ -1140,6 +1162,7 @@
    */
   public void abort(Text regionName, long clientid, long lockid) 
   throws IOException {
+    requestCount.incrementAndGet();
     HRegion region = getRegion(regionName, true);
     leases.cancelLease(clientid, lockid);
     region.abort(lockid);
@@ -1150,6 +1173,7 @@
    */
   public void commit(Text regionName, final long clientid, final long lockid,
       final long timestamp) throws IOException {
+    requestCount.incrementAndGet();
     HRegion region = getRegion(regionName, true);
     leases.cancelLease(clientid, lockid);
     region.commit(lockid, timestamp);
@@ -1159,6 +1183,7 @@
    * {@inheritDoc}
    */
   public void renewLease(long lockid, long clientid) throws IOException {
+    requestCount.incrementAndGet();
     leases.renewLease(clientid, lockid);
   }
 
@@ -1247,6 +1272,7 @@
   public long openScanner(Text regionName, Text[] cols, Text firstRow,
       final long timestamp, final RowFilterInterface filter)
   throws IOException {
+    requestCount.incrementAndGet();
     HRegion r = getRegion(regionName);
     long scannerId = -1L;
     try {
@@ -1277,6 +1303,7 @@
    * {@inheritDoc}
    */
   public void close(final long scannerId) throws IOException {
+    requestCount.incrementAndGet();
     String scannerName = String.valueOf(scannerId);
     HInternalScannerInterface s = null;
     synchronized(scanners) {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerInfo.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerInfo.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerInfo.java
Fri Aug 10 15:11:05 2007
@@ -33,21 +33,24 @@
 public class HServerInfo implements Writable {
   private HServerAddress serverAddress;
   private long startCode;
+  private HServerLoad load;
 
   /** default constructor - used by Writable */
   public HServerInfo() {
     this.serverAddress = new HServerAddress();
     this.startCode = 0;
+    this.load = new HServerLoad();
   }
   
   /**
-   * Constructs a fully initialized object
+   * Constructor
    * @param serverAddress
    * @param startCode
    */
   public HServerInfo(HServerAddress serverAddress, long startCode) {
     this.serverAddress = new HServerAddress(serverAddress);
     this.startCode = startCode;
+    this.load = new HServerLoad();
   }
   
   /**
@@ -57,6 +60,21 @@
   public HServerInfo(HServerInfo other) {
     this.serverAddress = new HServerAddress(other.getServerAddress());
     this.startCode = other.getStartCode();
+    this.load = other.getLoad();
+  }
+  
+  /**
+   * @return the load
+   */
+  public HServerLoad getLoad() {
+    return load;
+  }
+
+  /**
+   * @param load the load to set
+   */
+  public void setLoad(HServerLoad load) {
+    this.load = load;
   }
 
   /** @return the server address */
@@ -72,7 +90,8 @@
   /** {@inheritDoc} */
   @Override
   public String toString() {
-    return "address: " + this.serverAddress + ", startcode: " + this.startCode;
+    return "address: " + this.serverAddress + ", startcode: " + this.startCode
+    + ", load: (" + this.load.toString() + ")";
   }
 
   // Writable
@@ -81,11 +100,13 @@
   public void readFields(DataInput in) throws IOException {
     this.serverAddress.readFields(in);
     this.startCode = in.readLong();
+    this.load.readFields(in);
   }
 
   /** {@inheritDoc} */
   public void write(DataOutput out) throws IOException {
     this.serverAddress.write(out);
     out.writeLong(this.startCode);
+    this.load.write(out);
   }
 }

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerLoad.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerLoad.java?view=auto&rev=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerLoad.java
(added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerLoad.java
Fri Aug 10 15:11:05 2007
@@ -0,0 +1,136 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.WritableComparable;
+
+/**
+ * This class encapsulates metrics for determining the load on a HRegionServer
+ */
+public class HServerLoad implements WritableComparable {
+  private int numberOfRequests;         // number of requests since last report
+  private int numberOfRegions;          // number of regions being served
+  
+  /*
+   * TODO: Other metrics that might be considered when the master is actually
+   * doing load balancing instead of merely trying to decide where to assign
+   * a region:
+   * <ul>
+   *   <li># of CPUs, heap size (to determine the "class" of machine). For
+   *       now, we consider them to be homogeneous.</li>
+   *   <li>#requests per region (Map<{String|HRegionInfo}, Integer>)</li>
+   *   <li>#compactions and/or #splits (churn)</li>
+   *   <li>server death rate (maybe there is something wrong with this server)</li>
+   * </ul>
+   */
+  
+  /** default constructor (used by Writable) */
+  public HServerLoad() {}
+  
+  /**
+   * Constructor
+   * @param numberOfRequests
+   * @param numberOfRegions
+   */
+  public HServerLoad(int numberOfRequests, int numberOfRegions) {
+    this.numberOfRequests = numberOfRequests;
+    this.numberOfRegions = numberOfRegions;
+  }
+  
+  /**
+   * @return load factor for this server
+   */
+  public int getLoad() {
+    int load = numberOfRequests == 0 ? 1 : numberOfRequests;
+    load *= numberOfRegions == 0 ? 1 : numberOfRegions;
+    return load;
+  }
+  
+  /** {@inheritDoc} */
+  @Override
+  public String toString() {
+    return "requests: " + numberOfRequests + " regions: " + numberOfRegions;
+  }
+  
+  /** {@inheritDoc} */
+  @Override
+  public boolean equals(Object o) {
+    return compareTo(o) == 0;
+  }
+  
+  /** {@inheritDoc} */
+  @Override
+  public int hashCode() {
+    int result = Integer.valueOf(numberOfRequests).hashCode();
+    result ^= Integer.valueOf(numberOfRegions).hashCode();
+    return result;
+  }
+  
+  // Getters
+  
+  /**
+   * @return the numberOfRegions
+   */
+  public int getNumberOfRegions() {
+    return numberOfRegions;
+  }
+
+  /**
+   * @return the numberOfRequests
+   */
+  public int getNumberOfRequests() {
+    return numberOfRequests;
+  }
+
+  // Setters
+  
+  /**
+   * @param numberOfRegions the numberOfRegions to set
+   */
+  public void setNumberOfRegions(int numberOfRegions) {
+    this.numberOfRegions = numberOfRegions;
+  }
+
+  // Writable
+
+  /** {@inheritDoc} */
+  public void readFields(DataInput in) throws IOException {
+    numberOfRequests = in.readInt();
+    numberOfRegions = in.readInt();
+  }
+
+  /** {@inheritDoc} */
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(numberOfRequests);
+    out.writeInt(numberOfRegions);
+  }
+  
+  // Comparable
+
+  /** {@inheritDoc} */
+  public int compareTo(Object o) {
+    HServerLoad other = (HServerLoad) o;
+    return this.getLoad() - other.getLoad();
+  }
+}

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java Fri
Aug 10 15:11:05 2007
@@ -90,6 +90,8 @@
   TreeMap<Long, MapFile.Reader> readers = new TreeMap<Long, MapFile.Reader>();
 
   Random rand = new Random();
+  
+  private long maxSeqId;
 
   /**
    * An HStore is a set of zero or more MapFiles, which stretch backwards over 
@@ -196,6 +198,7 @@
     // If the HSTORE_LOGINFOFILE doesn't contain a number, just ignore it. That
     // means it was built prior to the previous run of HStore, and so it cannot 
     // contain any updates also contained in the log.
+    
     long maxSeqID = -1;
     for (HStoreFile hsf: hstoreFiles) {
       long seqid = hsf.loadInfo(fs);
@@ -205,8 +208,14 @@
         }
       }
     }
-
-    doReconstructionLog(reconstructionLog, maxSeqID);
+    this.maxSeqId = maxSeqID;
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("maximum sequence id for hstore " + storeName + " is " +
+          this.maxSeqId);
+    }
+    
+    doReconstructionLog(reconstructionLog, maxSeqId);
+    this.maxSeqId += 1;
 
     // Compact all the MapFiles into a single file.  The resulting MapFile 
     // should be "timeless"; that is, it should not have an associated seq-ID, 
@@ -228,6 +237,10 @@
     }
   }
   
+  long getMaxSequenceId() {
+    return this.maxSeqId;
+  }
+  
   /*
    * Read the reconstructionLog to see whether we need to build a brand-new 
    * MapFile out of non-flushed log entries.  
@@ -258,6 +271,11 @@
       while (login.next(key, val)) {
         maxSeqIdInLog = Math.max(maxSeqIdInLog, key.getLogSeqNum());
         if (key.getLogSeqNum() <= maxSeqID) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Skipping edit <" + key.toString() + "=" +
+                val.toString() + "> key sequence: " + key.getLogSeqNum() +
+                " max sequence: " + maxSeqID);
+          }
           continue;
         }
         // Check this edit is for me. Also, guard against writing
@@ -277,7 +295,8 @@
         }
         HStoreKey k = new HStoreKey(key.getRow(), column, val.getTimestamp());
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Applying edit " + k.toString());
+          LOG.debug("Applying edit <" + k.toString() + "=" + val.toString() +
+              ">");
         }
         reconstructedCache.put(k, val.getVal());
       }
@@ -428,16 +447,12 @@
       String name = flushedFile.toString();
       MapFile.Writer out = flushedFile.getWriter(this.fs, this.compression,
         this.bloomFilter);
-      int count = 0;
-      int total = 0;
       try {
         for (Map.Entry<HStoreKey, byte []> es: inputCache.entrySet()) {
           HStoreKey curkey = es.getKey();
-          total++;
           if (this.familyName.
               equals(HStoreKey.extractFamily(curkey.getColumn()))) {
             out.append(curkey, new ImmutableBytesWritable(es.getValue()));
-            count++;
           }
         }
       } finally {
@@ -1030,6 +1045,7 @@
   //////////////////////////////////////////////////////////////////////////////
   
   class HStoreScanner extends HAbstractScanner {
+    @SuppressWarnings("hiding")
     private MapFile.Reader[] readers;
     
     HStoreScanner(long timestamp, Text[] targetCols, Text firstRow)

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
Fri Aug 10 15:11:05 2007
@@ -94,7 +94,17 @@
   static final String HSTORE_DATFILE_DIR = "mapfiles";
   static final String HSTORE_INFO_DIR = "info";
   static final String HSTORE_FILTER_DIR = "filter";
-  public static enum Range {top, bottom}
+  
+  /** 
+   * For split HStoreFiles, specifies if the file covers the lower half or
+   * the upper half of the key range
+   */
+  public static enum Range {
+    /** HStoreFile contains upper half of key range */
+    top,
+    /** HStoreFile contains lower half of key range */
+    bottom
+  }
   
   /*
    * Regex that will work for straight filenames and for reference names.
@@ -156,7 +166,7 @@
   /*
    * Data structure to hold reference to a store file over in another region.
    */
-  static class Reference {
+  static class Reference implements Writable {
     Text regionName;
     long fileid;
     Range region;
@@ -190,11 +200,15 @@
       return this.regionName;
     }
    
+    /** {@inheritDoc} */
+    @Override
     public String toString() {
       return this.regionName + "/" + this.fileid + "/" + this.region;
     }
 
     // Make it serializable.
+
+    /** {@inheritDoc} */
     public void write(DataOutput out) throws IOException {
       this.regionName.write(out);
       out.writeLong(this.fileid);
@@ -203,6 +217,7 @@
       this.midkey.write(out);
     }
 
+    /** {@inheritDoc} */
     public void readFields(DataInput in) throws IOException {
       this.regionName = new Text();
       this.regionName.readFields(in);
@@ -417,6 +432,8 @@
   private static boolean isReference(final Path p, final Matcher m) {
     if (m == null || !m.matches()) {
       LOG.warn("Failed match of store file name " + p.toString());
+      throw new RuntimeException("Failed match of store file name " +
+          p.toString());
     }
     return m.groupCount() > 1 && m.group(2) != null;
   }
@@ -662,6 +679,7 @@
       }
     }
 
+    /** {@inheritDoc} */
     @SuppressWarnings({ "unused"})
     @Override
     public synchronized void finalKey(WritableComparable key)
@@ -669,6 +687,7 @@
       throw new UnsupportedOperationException("Unsupported");
     }
 
+    /** {@inheritDoc} */
     @Override
     public synchronized Writable get(WritableComparable key, Writable val)
         throws IOException {
@@ -676,6 +695,7 @@
       return super.get(key, val);
     }
 
+    /** {@inheritDoc} */
     @SuppressWarnings("unchecked")
     @Override
     public synchronized WritableComparable getClosest(WritableComparable key,
@@ -692,6 +712,7 @@
       return super.getClosest(key, val);
     }
 
+    /** {@inheritDoc} */
     @SuppressWarnings("unused")
     @Override
     public synchronized WritableComparable midKey() throws IOException {
@@ -699,6 +720,7 @@
       return null;
     }
 
+    /** {@inheritDoc} */
     @SuppressWarnings("unchecked")
     @Override
     public synchronized boolean next(WritableComparable key, Writable val)
@@ -727,6 +749,7 @@
       return false;
     }
 
+    /** {@inheritDoc} */
     @Override
     public synchronized void reset() throws IOException {
       if (top) {
@@ -737,6 +760,7 @@
       super.reset();
     }
 
+    /** {@inheritDoc} */
     @Override
     public synchronized boolean seek(WritableComparable key)
     throws IOException {
@@ -758,6 +782,15 @@
     static class Reader extends MapFile.Reader {
       private final Filter bloomFilter;
 
+      /**
+       * Constructor
+       * 
+       * @param fs
+       * @param dirName
+       * @param conf
+       * @param filter
+       * @throws IOException
+       */
       public Reader(FileSystem fs, String dirName, Configuration conf,
           final Filter filter)
       throws IOException {
@@ -810,6 +843,18 @@
       private final Filter bloomFilter;
       
 
+      /**
+       * Constructor
+       * 
+       * @param conf
+       * @param fs
+       * @param dirName
+       * @param keyClass
+       * @param valClass
+       * @param compression
+       * @param filter
+       * @throws IOException
+       */
       @SuppressWarnings("unchecked")
       public Writer(Configuration conf, FileSystem fs, String dirName,
           Class keyClass, Class valClass,
@@ -905,6 +950,7 @@
     return (isReference())? l / 2: l;
   }
 
+  /** {@inheritDoc} */
   @Override
   public String toString() {
     return this.regionName.toString() + "/" + this.colFamily.toString() +
@@ -912,6 +958,7 @@
       (isReference()? "/" + this.reference.toString(): "");
   }
   
+  /** {@inheritDoc} */
   @Override
   public boolean equals(Object o) {
     return this.compareTo(o) == 0;

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java Fri
Aug 10 15:11:05 2007
@@ -133,6 +133,7 @@
   }
   
 
+  /** @return the table name */
   public Text getTableName() {
     return this.tableName;
   }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
Fri Aug 10 15:11:05 2007
@@ -41,13 +41,13 @@
   
   /*
    * Legal table names can only contain 'word characters':
-   * i.e. <code>[a-zA-Z_0-9]</code>.
+   * i.e. <code>[a-zA-Z_0-9-.]</code>.
    * Lets be restrictive until a reason to be otherwise. One reason to limit
    * characters in table name is to ensure table regions as entries in META
    * regions can be found (See HADOOP-1581 'HBASE: Un-openable tablename bug').
    */
   private static final Pattern LEGAL_TABLE_NAME =
-    Pattern.compile("[\\w-]+");
+    Pattern.compile("^[\\w-.]+$");
 
   /** Constructs an empty object */
   public HTableDescriptor() {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
Fri Aug 10 15:11:05 2007
@@ -143,33 +143,43 @@
     }
   }
   
+  /** runs the master server */
   public static class MasterThread extends Thread {
     private final HMaster master;
     MasterThread(final HMaster m) {
       super(m, "Master:" + m.getMasterAddress().toString());
       this.master = m;
     }
+    
+    /** {@inheritDoc} */
     @Override
     public void run() {
       LOG.info("Starting " + getName());
       super.run();
     }
+    
+    /** @return master server */
     public HMaster getMaster() {
       return this.master;
     }
   }
   
+  /** runs region servers */
   public static class RegionServerThread extends Thread {
     private final HRegionServer regionServer;
     RegionServerThread(final HRegionServer r, final int index) {
       super(r, "RegionServer:" + index);
       this.regionServer = r;
     }
+    
+    /** {@inheritDoc} */
     @Override
     public void run() {
       LOG.info("Starting " + getName());
       super.run();
     }
+    
+    /** @return the region server */
     public HRegionServer getRegionServer() {
       return this.regionServer;
     }
@@ -227,6 +237,11 @@
     return threads;
   }
   
+  /**
+   * Starts a region server thread running
+   * 
+   * @throws IOException
+   */
   public void startRegionServer() throws IOException {
     RegionServerThread t =
       startRegionServer(this.conf, this.regionThreads.size());
@@ -275,6 +290,7 @@
    * Shut down the specified region server cleanly
    * 
    * @param serverNumber
+   * @return the region server that was stopped
    */
   public HRegionServer stopRegionServer(int serverNumber) {
     HRegionServer server =

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java
Fri Aug 10 15:11:05 2007
@@ -34,10 +34,10 @@
 
   /** constructor */
   public TestCleanRegionServerExit() {
-    super();
+    super(2);
     conf.setInt("ipc.client.timeout", 5000);            // reduce ipc client timeout
     conf.setInt("ipc.client.connect.max.retries", 5);   // and number of retries
-    conf.setInt("hbase.client.retries.number", 2);      // reduce HBase retries
+    conf.setInt("hbase.client.retries.number", 3);      // reduce HBase retries
     Logger.getRootLogger().setLevel(Level.WARN);
     Logger.getLogger(this.getClass().getPackage().getName()).setLevel(Level.DEBUG);
   }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
Fri Aug 10 15:11:05 2007
@@ -40,7 +40,7 @@
  * HRegions or in the HBaseMaster, so only basic testing is possible.
  */
 public class TestHRegion extends HBaseTestCase implements RegionUnavailableListener {
-  private static final Logger LOG =
+  static final Logger LOG =
     Logger.getLogger(TestHRegion.class.getName());
   
   /** Constructor */

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHStoreFile.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHStoreFile.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHStoreFile.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHStoreFile.java
Fri Aug 10 15:11:05 2007
@@ -34,6 +34,9 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
 
+/**
+ * Test HStoreFile
+ */
 public class TestHStoreFile extends TestCase {
   static final Log LOG = LogFactory.getLog(TestHStoreFile.class);
   private static String DIR = System.getProperty("test.build.data", ".");
@@ -226,7 +229,9 @@
         }
         assertTrue(key.compareTo(midkey) < 0);
       }
-      LOG.info("Last in bottom: " + previous.toString());
+      if (previous != null) {
+        LOG.info("Last in bottom: " + previous.toString());
+      }
       // Now test reading from the top.
       top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(), this.conf,
           HStoreFile.Range.top, midkey);
@@ -249,17 +254,17 @@
       // Next test using a midkey that does not exist in the file.
       // First, do a key that is < than first key. Ensure splits behave
       // properly.
-      midkey = new HStoreKey(new Text("   "));
+      WritableComparable badkey = new HStoreKey(new Text("   "));
       bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
-          this.conf, HStoreFile.Range.bottom, midkey);
-      // When midkey is < than the bottom, should return no values.
+          this.conf, HStoreFile.Range.bottom, badkey);
+      // When badkey is < than the bottom, should return no values.
       assertFalse(bottom.next(key, value));
       // Now read from the top.
       top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(), this.conf,
-          HStoreFile.Range.top, midkey);
+          HStoreFile.Range.top, badkey);
       first = true;
       while (top.next(key, value)) {
-        assertTrue(key.compareTo(midkey) >= 0);
+        assertTrue(key.compareTo(badkey) >= 0);
         if (first) {
           first = false;
           LOG.info("First top when key < bottom: " + key.toString());
@@ -275,10 +280,10 @@
         assertTrue(tmp.charAt(i) == 'z');
       }
 
-      // Test when midkey is > than last key in file ('||' > 'zz').
-      midkey = new HStoreKey(new Text("|||"));
+      // Test when badkey is > than last key in file ('||' > 'zz').
+      badkey = new HStoreKey(new Text("|||"));
       bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
-          this.conf, HStoreFile.Range.bottom, midkey);
+          this.conf, HStoreFile.Range.bottom, badkey);
       first = true;
       while (bottom.next(key, value)) {
         if (first) {
@@ -297,7 +302,7 @@
       }
       // Now look at top. Should not return any values.
       top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(), this.conf,
-          HStoreFile.Range.top, midkey);
+          HStoreFile.Range.top, badkey);
       assertFalse(top.next(key, value));
       
     } finally {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java
Fri Aug 10 15:11:05 2007
@@ -34,10 +34,10 @@
 
   /** constructor */
   public TestRegionServerAbort() {
-    super();
+    super(2);
     conf.setInt("ipc.client.timeout", 5000);            // reduce client timeout
     conf.setInt("ipc.client.connect.max.retries", 5);   // and number of retries
-    conf.setInt("hbase.client.retries.number", 2);      // reduce HBase retries
+    conf.setInt("hbase.client.retries.number", 3);      // reduce HBase retries
     Logger.getRootLogger().setLevel(Level.WARN);
     Logger.getLogger(this.getClass().getPackage().getName()).setLevel(Level.DEBUG);
   }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
Fri Aug 10 15:11:05 2007
@@ -206,7 +206,8 @@
       HRegionLocation rl = t.getRegionLocation(table);
       regionServer = t.getConnection().getHRegionConnection(rl.getServerAddress());
       scannerId = regionServer.openScanner(rl.getRegionInfo().getRegionName(),
-          HMaster.METACOLUMNS, new Text(), System.currentTimeMillis(), null);
+          HConstants.COLUMN_FAMILY_ARRAY, new Text(),
+          System.currentTimeMillis(), null);
       while (true) {
         TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
         KeyedData[] values = regionServer.next(scannerId);

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestSplit.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestSplit.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestSplit.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestSplit.java
Fri Aug 10 15:11:05 2007
@@ -46,6 +46,7 @@
   private static final char FIRST_CHAR = 'a';
   private static final char LAST_CHAR = 'z';
   
+  /** {@inheritDoc} */
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -59,6 +60,7 @@
     conf.setLong("hbase.hregion.max.filesize", 1024 * 128);
   }
   
+  /** {@inheritDoc} */
   @Override
   public void tearDown() throws Exception {
     try {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java?view=diff&rev=564780&r1=564779&r2=564780
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java
(original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java
Fri Aug 10 15:11:05 2007
@@ -35,8 +35,8 @@
     HServerAddress address = new HServerAddress(hostport);
     assertEquals("HServerAddress toString", address.toString(), hostport);
     HServerInfo info = new HServerInfo(address, -1);
-    assertEquals("HServerInfo", info.toString(),
-        "address: " + hostport + ", startcode: " + -1);
+    assertEquals("HServerInfo", "address: " + hostport + ", startcode: -1" +
+        ", load: (requests: 0 regions: 0)", info.toString());
   }
   
   /**



Mime
View raw message