hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r583963 - in /lucene/hadoop/trunk/src/contrib/hbase/src: java/org/apache/hadoop/hbase/HLog.java java/org/apache/hadoop/hbase/HRegionServer.java test/org/apache/hadoop/hbase/MultiRegionTable.java
Date Thu, 11 Oct 2007 21:49:41 GMT
Author: stack
Date: Thu Oct 11 14:49:40 2007
New Revision: 583963

URL: http://svn.apache.org/viewvc?rev=583963&view=rev
Log:
HADOOP-2029 TestLogRolling fails too often in patch and nightlies
This is the second commit against this issue.

Modified:
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java?rev=583963&r1=583962&r2=583963&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java Thu Oct 11 14:49:40 2007
@@ -319,8 +319,10 @@
           // Now remove old log files (if any)
           LOG.debug("Found " + sequenceNumbers.size() + " logs to remove " +
             "using oldest outstanding seqnum of " + oldestOutstandingSeqNum);
-          for (Long seq : sequenceNumbers) {
-            deleteLogFile(this.outputfiles.remove(seq), seq);
+          if (sequenceNumbers.size() > 0) {
+            for (Long seq : sequenceNumbers) {
+              deleteLogFile(this.outputfiles.remove(seq), seq);
+            }
           }
         }
       }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java?rev=583963&r1=583962&r2=583963&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java Thu Oct 11 14:49:40 2007
@@ -1104,10 +1104,9 @@
 
   /** {@inheritDoc} */
   public void batchUpdate(Text regionName, long timestamp, BatchUpdate b)
-    throws IOException {
-
+  throws IOException {
     checkOpen();
-    requestCount.incrementAndGet();
+    this.requestCount.incrementAndGet();
     // If timestamp == LATEST_TIMESTAMP and we have deletes, then they need
     // special treatment.  For these we need to first find the latest cell so
     // when we write the delete, we write it with the latest cells' timestamp
@@ -1116,7 +1115,7 @@
     List<Text> deletes = null;
     try {
       long lockid = startUpdate(regionName, b.getRow());
-      for(BatchOperation op: b) {
+      for (BatchOperation op: b) {
         switch(op.getOp()) {
         case PUT:
           put(regionName, lockid, op.getColumn(), op.getValue());
@@ -1239,20 +1238,19 @@
   //
   
   protected long startUpdate(Text regionName, Text row) throws IOException {
-    
-    HRegion region = getRegion(regionName);
+    HRegion region = getRegion(regionName, false);
     return region.startUpdate(row);
   }
 
   protected void put(final Text regionName, final long lockid,
-      final Text column, final byte [] val) throws IOException {
-
+      final Text column, final byte [] val)
+  throws IOException {
     HRegion region = getRegion(regionName, true);
     region.put(lockid, column, val);
   }
 
   protected void delete(Text regionName, long lockid, Text column) 
-    throws IOException {
+  throws IOException {
     HRegion region = getRegion(regionName);
     region.delete(lockid, column);
   }
@@ -1297,8 +1295,7 @@
    * @throws NotServingRegionException
    */
   protected HRegion getRegion(final Text regionName)
-    throws NotServingRegionException {
-    
+  throws NotServingRegionException {
     return getRegion(regionName, false);
   }
   
@@ -1311,8 +1308,8 @@
    * @throws NotServingRegionException
    */
   protected HRegion getRegion(final Text regionName,
-      final boolean checkRetiringRegions) throws NotServingRegionException {
-    
+      final boolean checkRetiringRegions)
+  throws NotServingRegionException {
     HRegion region = null;
     this.lock.readLock().lock();
     try {
@@ -1342,7 +1339,7 @@
    * @throws IOException
    */
   private void checkOpen() throws IOException {
-    if (stopRequested.get() || abortRequested) {
+    if (this.stopRequested.get() || this.abortRequested) {
       throw new IOException("Server not running");
     }
     if (!fsOk) {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java?rev=583963&r1=583962&r2=583963&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java Thu Oct 11 14:49:40 2007
@@ -53,8 +53,8 @@
    */
   public static void makeMultiRegionTable(Configuration conf,
       MiniHBaseCluster cluster, FileSystem localFs, String tableName,
-      String columnName) throws IOException {
-    
+      String columnName)
+  throws IOException {  
     final int retries = 10; 
     final long waitTime =
       conf.getLong("hbase.master.meta.thread.rescanfrequency", 10L * 1000L);
@@ -62,7 +62,6 @@
     // This size should make it so we always split using the addContent
     // below.  After adding all data, the first region is 1.3M. Should
     // set max filesize to be <= 1M.
-    
     assertTrue(conf.getLong("hbase.hregion.max.filesize",
       HConstants.DEFAULT_MAX_FILE_SIZE) <= 1024 * 1024);
 
@@ -72,29 +71,25 @@
     Path d = fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
 
     // Get connection on the meta table and get count of rows.
-    
     HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
-    int count = count(meta, HConstants.COLUMN_FAMILY_STR);
+    int count = count(meta, tableName);
     HTable t = new HTable(conf, new Text(tableName));
     addContent(new HTableIncommon(t), columnName);
     
     // All is running in the one JVM so I should be able to get the single
     // region instance and bring on a split.
-    
     HRegionInfo hri =
       t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
     HRegion r = cluster.regionThreads.get(0).getRegionServer().
-    onlineRegions.get(hri.getRegionName());
+      onlineRegions.get(hri.getRegionName());
     
     // Flush will provoke a split next time the split-checker thread runs.
-    
     r.flushcache(false);
     
     // Now, wait until split makes it into the meta table.
-    
     int oldCount = count;
     for (int i = 0; i < retries;  i++) {
-      count = count(meta, HConstants.COLUMN_FAMILY_STR);
+      count = count(meta, tableName);
       if (count > oldCount) {
         break;
       }
@@ -111,7 +106,6 @@
     // Get info on the parent from the meta table.  Pass in 'hri'. Its the
     // region we have been dealing with up to this. Its the parent of the
     // region split.
-    
     Map<Text, byte []> data = getSplitParentInfo(meta, hri);
     HRegionInfo parent =
       Writables.getHRegionInfoOrNull(data.get(HConstants.COL_REGIONINFO));
@@ -185,24 +179,28 @@
   }
 
   /*
-   * Count of rows in table for given column. 
+   * Count of regions in passed meta table.
    * @param t
    * @param column
    * @return
    * @throws IOException
    */
-  private static int count(final HTable t, final String column)
+  private static int count(final HTable t, final String tableName)
     throws IOException {
     
     int size = 0;
-    Text [] cols = new Text[] {new Text(column)};
+    Text [] cols = new Text[] {HConstants.COLUMN_FAMILY};
     HScannerInterface s = t.obtainScanner(cols, HConstants.EMPTY_START_ROW,
       System.currentTimeMillis(), null);
     try {
       HStoreKey curKey = new HStoreKey();
       TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
       while(s.next(curKey, curVals)) {
-        size++;
+        HRegionInfo hri = Writables.
+          getHRegionInfoOrNull(curVals.get(HConstants.COL_REGIONINFO));
+        if (hri.getTableDesc().getName().toString().equals(tableName)) {
+          size++;
+        }
       }
       return size;
     } finally {
@@ -214,8 +212,8 @@
    * @return Return row info for passed in region or null if not found in scan.
    */
   private static Map<Text, byte []> getSplitParentInfo(final HTable t,
-      final HRegionInfo parent) throws IOException {
-    
+      final HRegionInfo parent)
+  throws IOException {  
     HScannerInterface s = t.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY,
         HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null);
     try {
@@ -223,10 +221,11 @@
       TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
       while(s.next(curKey, curVals)) {
         HRegionInfo hri = Writables.
-        getHRegionInfoOrNull(curVals.get(HConstants.COL_REGIONINFO));
+          getHRegionInfoOrNull(curVals.get(HConstants.COL_REGIONINFO));
         if (hri == null) {
           continue;
         }
+        // Make sure I get the parent.
         if (hri.getRegionName().toString().
             equals(parent.getRegionName().toString())) {
           return curVals;



Mime
View raw message