hbase-commits mailing list archives

From apurt...@apache.org
Subject svn commit: r928031 [3/5] - in /hadoop/hbase/trunk: contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ core/src/main/java/org/apache/hadoop/hbase/regionserver/...
Date Fri, 26 Mar 2010 19:33:28 GMT
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java Fri Mar 26 19:33:27 2010
@@ -1,111 +1,111 @@
-/*
- * Copyright 2009 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * This class is responsible for the tracking and enforcement of Deletes
- * during the course of a Get operation.
- * <p>
- * This class is utilized through three methods:
- * <ul><li>{@link #add} when encountering a Delete
- * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
- * <li>{@link #update} when reaching the end of a StoreFile
- * <p>
- * This class is NOT thread-safe as queries are never multi-threaded 
- */
-public class GetDeleteTracker implements DeleteTracker {
-  private static long UNSET = -1L;
-  private long familyStamp = UNSET;
-  protected List<Delete> deletes = null;
-  private List<Delete> newDeletes = new ArrayList<Delete>();
-  private Iterator<Delete> iterator;
-  private Delete delete = null;
-
-  /**
-   * Constructor
-   */
-  public GetDeleteTracker() {}
-
-  /**
-   * Add the specified KeyValue to the list of deletes to check against for
-   * this row operation.
-   * <p>
-   * This is called when a Delete is encountered in a StoreFile.
-   * @param buffer
-   * @param qualifierOffset
-   * @param qualifierLength
-   * @param timestamp
-   * @param type
-   */
-  @Override
-  public void add(byte [] buffer, int qualifierOffset, int qualifierLength,
-      long timestamp, byte type) {
-    if (type == KeyValue.Type.DeleteFamily.getCode()) {
-      if(timestamp > familyStamp) {
-        familyStamp = timestamp;
-      }
-      return;
-    }
-    if(timestamp > familyStamp) {
-      this.newDeletes.add(new Delete(buffer, qualifierOffset, qualifierLength,
-          type, timestamp));
-    }
-  }
-
-  /** 
-   * Check if the specified KeyValue buffer has been deleted by a previously
-   * seen delete.
-   * @param buffer KeyValue buffer
-   * @param qualifierOffset column qualifier offset
-   * @param qualifierLength column qualifier length
-   * @param timestamp timestamp
-   * @return true if the specified KeyValue is deleted, false if not
-   */
-  @Override
-  public boolean isDeleted(byte [] buffer, int qualifierOffset,
-      int qualifierLength, long timestamp) {
-    // Check against DeleteFamily
-    if (timestamp <= familyStamp) {
-      return true;
-    }
-
-    // Check if there are other deletes
-    if (this.delete == null) {
-      return false;
-    }
-
-    // Check column
-    int ret = Bytes.compareTo(buffer, qualifierOffset, qualifierLength,
-        this.delete.buffer, this.delete.qualifierOffset, 
-        this.delete.qualifierLength);
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * This class is responsible for the tracking and enforcement of Deletes
+ * during the course of a Get operation.
+ * <p>
+ * This class is utilized through three methods:
+ * <ul><li>{@link #add} when encountering a Delete
+ * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
+ * <li>{@link #update} when reaching the end of a StoreFile
+ * <p>
+ * This class is NOT thread-safe as queries are never multi-threaded 
+ */
+public class GetDeleteTracker implements DeleteTracker {
+  private static long UNSET = -1L;
+  private long familyStamp = UNSET;
+  protected List<Delete> deletes = null;
+  private List<Delete> newDeletes = new ArrayList<Delete>();
+  private Iterator<Delete> iterator;
+  private Delete delete = null;
+
+  /**
+   * Constructor
+   */
+  public GetDeleteTracker() {}
+
+  /**
+   * Add the specified KeyValue to the list of deletes to check against for
+   * this row operation.
+   * <p>
+   * This is called when a Delete is encountered in a StoreFile.
+   * @param buffer
+   * @param qualifierOffset
+   * @param qualifierLength
+   * @param timestamp
+   * @param type
+   */
+  @Override
+  public void add(byte [] buffer, int qualifierOffset, int qualifierLength,
+      long timestamp, byte type) {
+    if (type == KeyValue.Type.DeleteFamily.getCode()) {
+      if(timestamp > familyStamp) {
+        familyStamp = timestamp;
+      }
+      return;
+    }
+    if(timestamp > familyStamp) {
+      this.newDeletes.add(new Delete(buffer, qualifierOffset, qualifierLength,
+          type, timestamp));
+    }
+  }
+
+  /** 
+   * Check if the specified KeyValue buffer has been deleted by a previously
+   * seen delete.
+   * @param buffer KeyValue buffer
+   * @param qualifierOffset column qualifier offset
+   * @param qualifierLength column qualifier length
+   * @param timestamp timestamp
+   * @return true if the specified KeyValue is deleted, false if not
+   */
+  @Override
+  public boolean isDeleted(byte [] buffer, int qualifierOffset,
+      int qualifierLength, long timestamp) {
+    // Check against DeleteFamily
+    if (timestamp <= familyStamp) {
+      return true;
+    }
+
+    // Check if there are other deletes
+    if (this.delete == null) {
+      return false;
+    }
+
+    // Check column
+    int ret = Bytes.compareTo(buffer, qualifierOffset, qualifierLength,
+        this.delete.buffer, this.delete.qualifierOffset, 
+        this.delete.qualifierLength);
     while (ret != 0) {
       if (ret <= -1) {
         // Have not reached the next delete yet
-        return false;
+        return false;
       } else if (ret >= 1) {
         // Deletes an earlier column, need to move down deletes
         if (this.iterator.hasNext()) {
@@ -118,287 +118,287 @@ public class GetDeleteTracker implements
             this.delete.buffer, this.delete.qualifierOffset,
             this.delete.qualifierLength);
 
-      }
-    }
+      }
+    }
    
-    // Check Timestamp
-    if(timestamp > this.delete.timestamp) {
-      return false;
-    }
-
-    // Check Type
-    switch(KeyValue.Type.codeToType(this.delete.type)) {
-    case Delete:
-      boolean equal = timestamp == this.delete.timestamp;
-
-      if(this.iterator.hasNext()) {
-        this.delete = this.iterator.next();
-      } else {
-        this.delete = null;
-      }
-
-      if(equal){
-        return true;
-      }
-      // timestamp < this.delete.timestamp
-      // Delete of an explicit column newer than current
-      return isDeleted(buffer, qualifierOffset, qualifierLength, timestamp);
-    case DeleteColumn:
-      return true;
-    }
-
-    // should never reach this
-    return false;
-  }
-
-  @Override
-  public boolean isEmpty() {
-    return this.familyStamp == UNSET && this.delete == null &&
-      this.newDeletes.isEmpty();
-  }
-
-  @Override
-  public void reset() {
-    this.deletes = null;
-    this.delete = null;
-    this.newDeletes = new ArrayList<Delete>();
-    this.familyStamp = UNSET;
-    this.iterator = null;
-  }
-
-  /**
-   * Called at the end of every StoreFile.
-   * <p>
-   * Many optimized implementations of Trackers will require an update when
-   * the end of each StoreFile is reached.
-   */
-  @Override
-  public void update() {
-    // If no previous deletes, use new deletes and return
-    if (this.deletes == null || this.deletes.size() == 0) {
-      finalize(this.newDeletes);
-      return;
-    }
-
-    // If no new delete, retain previous deletes and return
-    if(this.newDeletes.size() == 0) {
-      return;
-    }
-
-    // Merge previous deletes with new deletes
-    List<Delete> mergeDeletes = 
-      new ArrayList<Delete>(this.newDeletes.size());
-    int oldIndex = 0;
-    int newIndex = 0;
-
-    Delete newDelete = newDeletes.get(oldIndex);
-    Delete oldDelete = deletes.get(oldIndex);
-    while(true) {
-      switch(compareDeletes(oldDelete,newDelete)) {
-      case NEXT_NEW: {
-        if(++newIndex == newDeletes.size()) {
-          // Done with new, add the rest of old to merged and return
-          mergeDown(mergeDeletes, deletes, oldIndex);
-          finalize(mergeDeletes);
-          return;
-        }
-        newDelete = this.newDeletes.get(newIndex);
-        break;
-      }
-
-      case INCLUDE_NEW_NEXT_NEW: {
-        mergeDeletes.add(newDelete);
-        if(++newIndex == newDeletes.size()) {
-          // Done with new, add the rest of old to merged and return
-          mergeDown(mergeDeletes, deletes, oldIndex);
-          finalize(mergeDeletes);
-          return;
-        }
-        newDelete = this.newDeletes.get(newIndex);
-        break;
-      }
-
-      case INCLUDE_NEW_NEXT_BOTH: {
-        mergeDeletes.add(newDelete);
-        ++oldIndex;
-        ++newIndex;
-        if(oldIndex == deletes.size()) {
-          if(newIndex == newDeletes.size()) {
-            finalize(mergeDeletes);
-            return;
-          }
-          mergeDown(mergeDeletes, newDeletes, newIndex);
-          finalize(mergeDeletes);
-          return;
-        } else if(newIndex == newDeletes.size()) {
-          mergeDown(mergeDeletes, deletes, oldIndex);
-          finalize(mergeDeletes);
-          return;
-        }
-        oldDelete = this.deletes.get(oldIndex);
-        newDelete = this.newDeletes.get(newIndex);
-        break;
-      }
-
-      case INCLUDE_OLD_NEXT_BOTH: {
-        mergeDeletes.add(oldDelete);
-        ++oldIndex;
-        ++newIndex;
-        if(oldIndex == deletes.size()) {
-          if(newIndex == newDeletes.size()) {
-            finalize(mergeDeletes);
-            return;
-          }
-          mergeDown(mergeDeletes, newDeletes, newIndex);
-          finalize(mergeDeletes);
-          return;
-        } else if(newIndex == newDeletes.size()) {
-          mergeDown(mergeDeletes, deletes, oldIndex);
-          finalize(mergeDeletes);
-          return;
-        }
-        oldDelete = this.deletes.get(oldIndex);
-        newDelete = this.newDeletes.get(newIndex);
-        break;
-      }
-
-      case INCLUDE_OLD_NEXT_OLD: {
-        mergeDeletes.add(oldDelete);
-        if(++oldIndex == deletes.size()) {
-          mergeDown(mergeDeletes, newDeletes, newIndex);
-          finalize(mergeDeletes);
-          return;
-        }
-        oldDelete = this.deletes.get(oldIndex);
-        break;
-      }
-
-      case NEXT_OLD: {
-        if(++oldIndex == deletes.size()) {
-          // Done with old, add the rest of new to merged and return
-          mergeDown(mergeDeletes, newDeletes, newIndex);
-          finalize(mergeDeletes);
-          return;
-        }
-        oldDelete = this.deletes.get(oldIndex);
-      }
-      }
-    }
-  }
-
-  private void finalize(List<Delete> mergeDeletes) {
-    this.deletes = mergeDeletes;
-    this.newDeletes = new ArrayList<Delete>();
-    if(this.deletes.size() > 0){
-      this.iterator = deletes.iterator();
-      this.delete = iterator.next();
-    }
-  }
-
-  private void mergeDown(List<Delete> mergeDeletes, List<Delete> srcDeletes,
-      int srcIndex) {
-    int index = srcIndex;
-    while(index < srcDeletes.size()) {
-      mergeDeletes.add(srcDeletes.get(index++));
-    }
-  }
-
-
-  protected DeleteCompare compareDeletes(Delete oldDelete, Delete newDelete) {
-
-    // Compare columns
-    // Just comparing qualifier portion, can keep on using Bytes.compareTo().
-    int ret = Bytes.compareTo(oldDelete.buffer, oldDelete.qualifierOffset,
-        oldDelete.qualifierLength, newDelete.buffer, newDelete.qualifierOffset,
-        newDelete.qualifierLength);
-
-    if(ret <= -1) {
-      return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
-    } else if(ret >= 1) {
-      return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
-    }
-
-    // Same column
-
-    // Branches below can be optimized.  Keeping like this until testing
-    // is complete.
-    if(oldDelete.type == newDelete.type) {
-      // the one case where we can merge 2 deletes -> 1 delete.
-      if(oldDelete.type == KeyValue.Type.Delete.getCode()){
-        if(oldDelete.timestamp > newDelete.timestamp) {
-          return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
-        } else if(oldDelete.timestamp < newDelete.timestamp) {
-          return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
-        } else {
-          return DeleteCompare.INCLUDE_OLD_NEXT_BOTH;
-        }
-      }
-      if(oldDelete.timestamp < newDelete.timestamp) {
-        return DeleteCompare.INCLUDE_NEW_NEXT_BOTH;
-      } 
-      return DeleteCompare.INCLUDE_OLD_NEXT_BOTH;
-    }
-
-    // old delete is more specific than the new delete.
-    // if the olddelete is newer than the newdelete, we have to
-    //  keep it
-    if(oldDelete.type < newDelete.type) {
-      if(oldDelete.timestamp > newDelete.timestamp) {
-        return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
-      } else if(oldDelete.timestamp < newDelete.timestamp) {
-        return DeleteCompare.NEXT_OLD;
-      } else {
-        return DeleteCompare.NEXT_OLD;
-      }
-    }
-
-    // new delete is more specific than the old delete.
-    if(oldDelete.type > newDelete.type) {
-      if(oldDelete.timestamp > newDelete.timestamp) {
-        return DeleteCompare.NEXT_NEW;
-      } else if(oldDelete.timestamp < newDelete.timestamp) {
-        return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
-      } else {
-        return DeleteCompare.NEXT_NEW;
-      }
-    }
-
-    // Should never reach,
-    // throw exception for assertion?
-    throw new RuntimeException("GetDeleteTracker:compareDelete reached terminal state");
-  }
-
-  /**
-   * Internal class used to store the necessary information for a Delete.
-   * <p>
-   * Rather than reparsing the KeyValue, or copying fields, this class points
-   * to the underlying KeyValue buffer with pointers to the qualifier and fields
-   * for type and timestamp.  No parsing work is done in DeleteTracker now.
-   * <p>
-   * Fields are public because they are accessed often, directly, and only
-   * within this class.
-   */
-  protected static class Delete {
-    byte [] buffer;
-    int qualifierOffset;
-    int qualifierLength;
-    byte type;
-    long timestamp;
-    /**
-     * Constructor
-     * @param buffer
-     * @param qualifierOffset
-     * @param qualifierLength
-     * @param type
-     * @param timestamp
-     */
-    public Delete(byte [] buffer, int qualifierOffset, int qualifierLength,
-        byte type, long timestamp) {
-      this.buffer = buffer;
-      this.qualifierOffset = qualifierOffset;
-      this.qualifierLength = qualifierLength;
-      this.type = type;
-      this.timestamp = timestamp;
-    }
-  }
-}
+    // Check Timestamp
+    if(timestamp > this.delete.timestamp) {
+      return false;
+    }
+
+    // Check Type
+    switch(KeyValue.Type.codeToType(this.delete.type)) {
+    case Delete:
+      boolean equal = timestamp == this.delete.timestamp;
+
+      if(this.iterator.hasNext()) {
+        this.delete = this.iterator.next();
+      } else {
+        this.delete = null;
+      }
+
+      if(equal){
+        return true;
+      }
+      // timestamp < this.delete.timestamp
+      // Delete of an explicit column newer than current
+      return isDeleted(buffer, qualifierOffset, qualifierLength, timestamp);
+    case DeleteColumn:
+      return true;
+    }
+
+    // should never reach this
+    return false;
+  }
+
+  @Override
+  public boolean isEmpty() {
+    return this.familyStamp == UNSET && this.delete == null &&
+      this.newDeletes.isEmpty();
+  }
+
+  @Override
+  public void reset() {
+    this.deletes = null;
+    this.delete = null;
+    this.newDeletes = new ArrayList<Delete>();
+    this.familyStamp = UNSET;
+    this.iterator = null;
+  }
+
+  /**
+   * Called at the end of every StoreFile.
+   * <p>
+   * Many optimized implementations of Trackers will require an update when
+   * the end of each StoreFile is reached.
+   */
+  @Override
+  public void update() {
+    // If no previous deletes, use new deletes and return
+    if (this.deletes == null || this.deletes.size() == 0) {
+      finalize(this.newDeletes);
+      return;
+    }
+
+    // If no new delete, retain previous deletes and return
+    if(this.newDeletes.size() == 0) {
+      return;
+    }
+
+    // Merge previous deletes with new deletes
+    List<Delete> mergeDeletes = 
+      new ArrayList<Delete>(this.newDeletes.size());
+    int oldIndex = 0;
+    int newIndex = 0;
+
+    Delete newDelete = newDeletes.get(oldIndex);
+    Delete oldDelete = deletes.get(oldIndex);
+    while(true) {
+      switch(compareDeletes(oldDelete,newDelete)) {
+      case NEXT_NEW: {
+        if(++newIndex == newDeletes.size()) {
+          // Done with new, add the rest of old to merged and return
+          mergeDown(mergeDeletes, deletes, oldIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        newDelete = this.newDeletes.get(newIndex);
+        break;
+      }
+
+      case INCLUDE_NEW_NEXT_NEW: {
+        mergeDeletes.add(newDelete);
+        if(++newIndex == newDeletes.size()) {
+          // Done with new, add the rest of old to merged and return
+          mergeDown(mergeDeletes, deletes, oldIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        newDelete = this.newDeletes.get(newIndex);
+        break;
+      }
+
+      case INCLUDE_NEW_NEXT_BOTH: {
+        mergeDeletes.add(newDelete);
+        ++oldIndex;
+        ++newIndex;
+        if(oldIndex == deletes.size()) {
+          if(newIndex == newDeletes.size()) {
+            finalize(mergeDeletes);
+            return;
+          }
+          mergeDown(mergeDeletes, newDeletes, newIndex);
+          finalize(mergeDeletes);
+          return;
+        } else if(newIndex == newDeletes.size()) {
+          mergeDown(mergeDeletes, deletes, oldIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        oldDelete = this.deletes.get(oldIndex);
+        newDelete = this.newDeletes.get(newIndex);
+        break;
+      }
+
+      case INCLUDE_OLD_NEXT_BOTH: {
+        mergeDeletes.add(oldDelete);
+        ++oldIndex;
+        ++newIndex;
+        if(oldIndex == deletes.size()) {
+          if(newIndex == newDeletes.size()) {
+            finalize(mergeDeletes);
+            return;
+          }
+          mergeDown(mergeDeletes, newDeletes, newIndex);
+          finalize(mergeDeletes);
+          return;
+        } else if(newIndex == newDeletes.size()) {
+          mergeDown(mergeDeletes, deletes, oldIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        oldDelete = this.deletes.get(oldIndex);
+        newDelete = this.newDeletes.get(newIndex);
+        break;
+      }
+
+      case INCLUDE_OLD_NEXT_OLD: {
+        mergeDeletes.add(oldDelete);
+        if(++oldIndex == deletes.size()) {
+          mergeDown(mergeDeletes, newDeletes, newIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        oldDelete = this.deletes.get(oldIndex);
+        break;
+      }
+
+      case NEXT_OLD: {
+        if(++oldIndex == deletes.size()) {
+          // Done with old, add the rest of new to merged and return
+          mergeDown(mergeDeletes, newDeletes, newIndex);
+          finalize(mergeDeletes);
+          return;
+        }
+        oldDelete = this.deletes.get(oldIndex);
+      }
+      }
+    }
+  }
+
+  private void finalize(List<Delete> mergeDeletes) {
+    this.deletes = mergeDeletes;
+    this.newDeletes = new ArrayList<Delete>();
+    if(this.deletes.size() > 0){
+      this.iterator = deletes.iterator();
+      this.delete = iterator.next();
+    }
+  }
+
+  private void mergeDown(List<Delete> mergeDeletes, List<Delete> srcDeletes,
+      int srcIndex) {
+    int index = srcIndex;
+    while(index < srcDeletes.size()) {
+      mergeDeletes.add(srcDeletes.get(index++));
+    }
+  }
+
+
+  protected DeleteCompare compareDeletes(Delete oldDelete, Delete newDelete) {
+
+    // Compare columns
+    // Just comparing qualifier portion, can keep on using Bytes.compareTo().
+    int ret = Bytes.compareTo(oldDelete.buffer, oldDelete.qualifierOffset,
+        oldDelete.qualifierLength, newDelete.buffer, newDelete.qualifierOffset,
+        newDelete.qualifierLength);
+
+    if(ret <= -1) {
+      return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
+    } else if(ret >= 1) {
+      return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
+    }
+
+    // Same column
+
+    // Branches below can be optimized.  Keeping like this until testing
+    // is complete.
+    if(oldDelete.type == newDelete.type) {
+      // the one case where we can merge 2 deletes -> 1 delete.
+      if(oldDelete.type == KeyValue.Type.Delete.getCode()){
+        if(oldDelete.timestamp > newDelete.timestamp) {
+          return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
+        } else if(oldDelete.timestamp < newDelete.timestamp) {
+          return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
+        } else {
+          return DeleteCompare.INCLUDE_OLD_NEXT_BOTH;
+        }
+      }
+      if(oldDelete.timestamp < newDelete.timestamp) {
+        return DeleteCompare.INCLUDE_NEW_NEXT_BOTH;
+      } 
+      return DeleteCompare.INCLUDE_OLD_NEXT_BOTH;
+    }
+
+    // old delete is more specific than the new delete.
+    // if the olddelete is newer than the newdelete, we have to
+    //  keep it
+    if(oldDelete.type < newDelete.type) {
+      if(oldDelete.timestamp > newDelete.timestamp) {
+        return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
+      } else if(oldDelete.timestamp < newDelete.timestamp) {
+        return DeleteCompare.NEXT_OLD;
+      } else {
+        return DeleteCompare.NEXT_OLD;
+      }
+    }
+
+    // new delete is more specific than the old delete.
+    if(oldDelete.type > newDelete.type) {
+      if(oldDelete.timestamp > newDelete.timestamp) {
+        return DeleteCompare.NEXT_NEW;
+      } else if(oldDelete.timestamp < newDelete.timestamp) {
+        return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
+      } else {
+        return DeleteCompare.NEXT_NEW;
+      }
+    }
+
+    // Should never reach,
+    // throw exception for assertion?
+    throw new RuntimeException("GetDeleteTracker:compareDelete reached terminal state");
+  }
+
+  /**
+   * Internal class used to store the necessary information for a Delete.
+   * <p>
+   * Rather than reparsing the KeyValue, or copying fields, this class points
+   * to the underlying KeyValue buffer with pointers to the qualifier and fields
+   * for type and timestamp.  No parsing work is done in DeleteTracker now.
+   * <p>
+   * Fields are public because they are accessed often, directly, and only
+   * within this class.
+   */
+  protected static class Delete {
+    byte [] buffer;
+    int qualifierOffset;
+    int qualifierLength;
+    byte type;
+    long timestamp;
+    /**
+     * Constructor
+     * @param buffer
+     * @param qualifierOffset
+     * @param qualifierLength
+     * @param type
+     * @param timestamp
+     */
+    public Delete(byte [] buffer, int qualifierOffset, int qualifierLength,
+        byte type, long timestamp) {
+      this.buffer = buffer;
+      this.qualifierOffset = qualifierOffset;
+      this.qualifierLength = qualifierLength;
+      this.type = type;
+      this.timestamp = timestamp;
+    }
+  }
+}
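
For readers skimming the diff, the class javadoc above describes a three-step lifecycle: add() as Deletes are encountered in a StoreFile, update() at each StoreFile boundary, and isDeleted() to test candidate Put KeyValues. Below is a minimal, hedged usage sketch of that lifecycle; it is illustrative only and not part of this commit, the qualifier and timestamp values are invented, and the sketch class (GetDeleteTrackerSketch) is assumed to sit in the same org.apache.hadoop.hbase.regionserver package purely to keep access simple.

    // Illustrative sketch only (not from the commit): drives GetDeleteTracker
    // through the add() -> update() -> isDeleted() lifecycle its javadoc describes.
    package org.apache.hadoop.hbase.regionserver;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetDeleteTrackerSketch {
      public static void main(String[] args) {
        GetDeleteTracker tracker = new GetDeleteTracker();
        byte[] qual = Bytes.toBytes("q1");   // invented qualifier

        // While scanning the newest StoreFile, a column Delete is seen at ts=100.
        tracker.add(qual, 0, qual.length, 100L, KeyValue.Type.Delete.getCode());

        // End of that StoreFile: fold the newly seen deletes into the running set.
        tracker.update();

        // In an older StoreFile, a Put on the same qualifier at ts=100 is masked.
        boolean masked = tracker.isDeleted(qual, 0, qual.length, 100L);   // true
        System.out.println("masked=" + masked);

        // reset() clears all state before the next Get/row.
        tracker.reset();
      }
    }
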

Modified: hadoop/hbase/trunk/core/src/main/resources/webapps/master/master.jsp
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/resources/webapps/master/master.jsp?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/resources/webapps/master/master.jsp (original)
+++ hadoop/hbase/trunk/core/src/main/resources/webapps/master/master.jsp Fri Mar 26 19:33:27 2010
@@ -1,120 +1,120 @@
-<%@ page contentType="text/html;charset=UTF-8"
-  import="java.util.*"
-  import="java.net.URLEncoder" 
+<%@ page contentType="text/html;charset=UTF-8"
+  import="java.util.*"
+  import="java.net.URLEncoder" 
   import="org.apache.hadoop.conf.Configuration"
-  import="org.apache.hadoop.io.Text"
-  import="org.apache.hadoop.hbase.util.Bytes"
-  import="org.apache.hadoop.hbase.util.FSUtils"
-  import="org.apache.hadoop.hbase.master.HMaster"
-  import="org.apache.hadoop.hbase.HConstants"
-  import="org.apache.hadoop.hbase.master.MetaRegion"
-  import="org.apache.hadoop.hbase.client.HBaseAdmin"
-  import="org.apache.hadoop.hbase.io.ImmutableBytesWritable"
-  import="org.apache.hadoop.hbase.HServerInfo"
-  import="org.apache.hadoop.hbase.HServerAddress"
-  import="org.apache.hadoop.hbase.HBaseConfiguration"
-  import="org.apache.hadoop.hbase.HColumnDescriptor" 
-  import="org.apache.hadoop.hbase.HTableDescriptor" %><%
-  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
-  Configuration conf = master.getConfiguration();
-  HServerAddress rootLocation = master.getRegionManager().getRootRegionLocation();
-  Map<byte [], MetaRegion> onlineRegions = master.getRegionManager().getOnlineMetaRegions();
-  Map<String, HServerInfo> serverToServerInfos =
-    master.getServerManager().getServersToServerInfo();
-  int interval = conf.getInt("hbase.regionserver.msginterval", 1000)/1000;
-  if (interval == 0) {
-      interval = 1;
-  }
-  Map<String, Integer> frags = master.getTableFragmentation();
-%><?xml version="1.0" encoding="UTF-8" ?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
-  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
-<title>HBase Master: <%= master.getMasterAddress().getHostname()%>:<%= master.getMasterAddress().getPort() %></title>
-<link rel="stylesheet" type="text/css" href="/static/hbase.css" />
-</head>
-<body>
-
-<a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
-<h1 id="page_title">Master: <%=master.getMasterAddress().getHostname()%>:<%=master.getMasterAddress().getPort()%></h1>
-<p id="links_menu"><a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p>
-<hr id="head_rule" />
-
-<h2>Master Attributes</h2>
-<table>
-<tr><th>Attribute Name</th><th>Value</th><th>Description</th></tr>
-<tr><td>HBase Version</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.hbase.util.VersionInfo.getRevision() %></td><td>HBase version and svn revision</td></tr>
-<tr><td>HBase Compiled</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.hbase.util.VersionInfo.getUser() %></td><td>When HBase version was compiled and by whom</td></tr>
-<tr><td>Hadoop Version</td><td><%= org.apache.hadoop.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.util.VersionInfo.getRevision() %></td><td>Hadoop version and svn revision</td></tr>
-<tr><td>Hadoop Compiled</td><td><%= org.apache.hadoop.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.util.VersionInfo.getUser() %></td><td>When Hadoop version was compiled and by whom</td></tr>
-<tr><td>HBase Root Directory</td><td><%= master.getRootDir().toString() %></td><td>Location of HBase home directory</td></tr>
-<tr><td>Load average</td><td><%= master.getServerManager().getAverageLoad() %></td><td>Average number of regions per regionserver. Naive computation.</td></tr>
-<tr><td>Regions On FS</td><td><%= master.getRegionManager().countRegionsOnFS() %></td><td>Number of regions on FileSystem. Rough count.</td></tr>
-<tr><td>Fragmentation</td><td><%= frags.get("-TOTAL-") != null ? frags.get("-TOTAL-").intValue() + "%" : "n/a" %></td><td>Overall fragmentation of all tables, including .META. and -ROOT-.</td></tr>
-<tr><td>Zookeeper Quorum</td><td><%= master.getZooKeeperWrapper().getQuorumServers() %></td><td>Addresses of all registered ZK servers. For more, see <a href="/zk.jsp">zk dump</a>.</td></tr>
-</table>
-
-<h2>Catalog Tables</h2>
-<% 
-  if (rootLocation != null) { %>
-<table>
-<tr><th>Table</th><th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th><th>Description</th></tr>
-<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %>"><%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %></a></td>
-<td align="center"><%= frags.get("-ROOT-") != null ? frags.get("-ROOT-").intValue() + "%" : "n/a" %></td>
-<td>The -ROOT- table holds references to all .META. regions.</td>
-</tr>
-<%
-    if (onlineRegions != null && onlineRegions.size() > 0) { %>
-<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.META_TABLE_NAME) %>"><%= Bytes.toString(HConstants.META_TABLE_NAME) %></a></td>
-<td align="center"><%= frags.get(".META.") != null ? frags.get(".META.").intValue() + "%" : "n/a" %></td>
-<td>The .META. table holds references to all User Table regions</td>
-</tr>
-  
-<%  } %>
-</table>
-<%} %>
-
-<h2>User Tables</h2>
-<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables(); 
-   if(tables != null && tables.length > 0) { %>
-<table>
-<tr><th>Table</th><th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th><th>Description</th></tr>
-<%   for(HTableDescriptor htDesc : tables ) { %>
-<tr><td><a href=/table.jsp?name=<%= htDesc.getNameAsString() %>><%= htDesc.getNameAsString() %></a> </td>
-<td align="center"><%= frags.get(htDesc.getNameAsString()) != null ? frags.get(htDesc.getNameAsString()).intValue() + "%" : "n/a" %></td>
-<td><%= htDesc.toString() %></td>
-</tr>
-<%   }  %>
-
-<p> <%= tables.length %> table(s) in set.</p>
-</table>
-<% } %>
-
-<h2>Region Servers</h2>
-<% if (serverToServerInfos != null && serverToServerInfos.size() > 0) { %>
-<%   int totalRegions = 0;
-     int totalRequests = 0; 
-%>
-
-<table>
-<tr><th rowspan="<%= serverToServerInfos.size() + 1%>"></th><th>Address</th><th>Start Code</th><th>Load</th></tr>
-<%   String[] serverNames = serverToServerInfos.keySet().toArray(new String[serverToServerInfos.size()]);
-     Arrays.sort(serverNames);
-     for (String serverName: serverNames) {
-       HServerInfo hsi = serverToServerInfos.get(serverName);
-       String hostname = hsi.getServerAddress().getHostname() + ":" + hsi.getInfoPort();
-       String url = "http://" + hostname + "/";
-       totalRegions += hsi.getLoad().getNumberOfRegions();
-       totalRequests += hsi.getLoad().getNumberOfRequests() / interval;
-       long startCode = hsi.getStartCode();
-%>
-<tr><td><a href="<%= url %>"><%= hostname %></a></td><td><%= startCode %></td><td><%= hsi.getLoad().toString(interval) %></td></tr>
-<%   } %>
-<tr><th>Total: </th><td>servers: <%= serverToServerInfos.size() %></td><td>&nbsp;</td><td>requests=<%= totalRequests %>, regions=<%= totalRegions %></td></tr>
-</table>
-
-<p>Load is requests per second and count of regions loaded</p>
-<% } %>
-</body>
-</html>
+  import="org.apache.hadoop.io.Text"
+  import="org.apache.hadoop.hbase.util.Bytes"
+  import="org.apache.hadoop.hbase.util.FSUtils"
+  import="org.apache.hadoop.hbase.master.HMaster"
+  import="org.apache.hadoop.hbase.HConstants"
+  import="org.apache.hadoop.hbase.master.MetaRegion"
+  import="org.apache.hadoop.hbase.client.HBaseAdmin"
+  import="org.apache.hadoop.hbase.io.ImmutableBytesWritable"
+  import="org.apache.hadoop.hbase.HServerInfo"
+  import="org.apache.hadoop.hbase.HServerAddress"
+  import="org.apache.hadoop.hbase.HBaseConfiguration"
+  import="org.apache.hadoop.hbase.HColumnDescriptor" 
+  import="org.apache.hadoop.hbase.HTableDescriptor" %><%
+  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
+  Configuration conf = master.getConfiguration();
+  HServerAddress rootLocation = master.getRegionManager().getRootRegionLocation();
+  Map<byte [], MetaRegion> onlineRegions = master.getRegionManager().getOnlineMetaRegions();
+  Map<String, HServerInfo> serverToServerInfos =
+    master.getServerManager().getServersToServerInfo();
+  int interval = conf.getInt("hbase.regionserver.msginterval", 1000)/1000;
+  if (interval == 0) {
+      interval = 1;
+  }
+  Map<String, Integer> frags = master.getTableFragmentation();
+%><?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
+<title>HBase Master: <%= master.getMasterAddress().getHostname()%>:<%= master.getMasterAddress().getPort() %></title>
+<link rel="stylesheet" type="text/css" href="/static/hbase.css" />
+</head>
+<body>
+
+<a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
+<h1 id="page_title">Master: <%=master.getMasterAddress().getHostname()%>:<%=master.getMasterAddress().getPort()%></h1>
+<p id="links_menu"><a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p>
+<hr id="head_rule" />
+
+<h2>Master Attributes</h2>
+<table>
+<tr><th>Attribute Name</th><th>Value</th><th>Description</th></tr>
+<tr><td>HBase Version</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.hbase.util.VersionInfo.getRevision() %></td><td>HBase version and svn revision</td></tr>
+<tr><td>HBase Compiled</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.hbase.util.VersionInfo.getUser() %></td><td>When HBase version was compiled and by whom</td></tr>
+<tr><td>Hadoop Version</td><td><%= org.apache.hadoop.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.util.VersionInfo.getRevision() %></td><td>Hadoop version and svn revision</td></tr>
+<tr><td>Hadoop Compiled</td><td><%= org.apache.hadoop.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.util.VersionInfo.getUser() %></td><td>When Hadoop version was compiled and by whom</td></tr>
+<tr><td>HBase Root Directory</td><td><%= master.getRootDir().toString() %></td><td>Location of HBase home directory</td></tr>
+<tr><td>Load average</td><td><%= master.getServerManager().getAverageLoad() %></td><td>Average number of regions per regionserver. Naive computation.</td></tr>
+<tr><td>Regions On FS</td><td><%= master.getRegionManager().countRegionsOnFS() %></td><td>Number of regions on FileSystem. Rough count.</td></tr>
+<tr><td>Fragmentation</td><td><%= frags.get("-TOTAL-") != null ? frags.get("-TOTAL-").intValue() + "%" : "n/a" %></td><td>Overall fragmentation of all tables, including .META. and -ROOT-.</td></tr>
+<tr><td>Zookeeper Quorum</td><td><%= master.getZooKeeperWrapper().getQuorumServers() %></td><td>Addresses of all registered ZK servers. For more, see <a href="/zk.jsp">zk dump</a>.</td></tr>
+</table>
+
+<h2>Catalog Tables</h2>
+<% 
+  if (rootLocation != null) { %>
+<table>
+<tr><th>Table</th><th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th><th>Description</th></tr>
+<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %>"><%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %></a></td>
+<td align="center"><%= frags.get("-ROOT-") != null ? frags.get("-ROOT-").intValue() + "%" : "n/a" %></td>
+<td>The -ROOT- table holds references to all .META. regions.</td>
+</tr>
+<%
+    if (onlineRegions != null && onlineRegions.size() > 0) { %>
+<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.META_TABLE_NAME) %>"><%= Bytes.toString(HConstants.META_TABLE_NAME) %></a></td>
+<td align="center"><%= frags.get(".META.") != null ? frags.get(".META.").intValue() + "%" : "n/a" %></td>
+<td>The .META. table holds references to all User Table regions</td>
+</tr>
+  
+<%  } %>
+</table>
+<%} %>
+
+<h2>User Tables</h2>
+<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables(); 
+   if(tables != null && tables.length > 0) { %>
+<table>
+<tr><th>Table</th><th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th><th>Description</th></tr>
+<%   for(HTableDescriptor htDesc : tables ) { %>
+<tr><td><a href=/table.jsp?name=<%= htDesc.getNameAsString() %>><%= htDesc.getNameAsString() %></a> </td>
+<td align="center"><%= frags.get(htDesc.getNameAsString()) != null ? frags.get(htDesc.getNameAsString()).intValue() + "%" : "n/a" %></td>
+<td><%= htDesc.toString() %></td>
+</tr>
+<%   }  %>
+
+<p> <%= tables.length %> table(s) in set.</p>
+</table>
+<% } %>
+
+<h2>Region Servers</h2>
+<% if (serverToServerInfos != null && serverToServerInfos.size() > 0) { %>
+<%   int totalRegions = 0;
+     int totalRequests = 0; 
+%>
+
+<table>
+<tr><th rowspan="<%= serverToServerInfos.size() + 1%>"></th><th>Address</th><th>Start Code</th><th>Load</th></tr>
+<%   String[] serverNames = serverToServerInfos.keySet().toArray(new String[serverToServerInfos.size()]);
+     Arrays.sort(serverNames);
+     for (String serverName: serverNames) {
+       HServerInfo hsi = serverToServerInfos.get(serverName);
+       String hostname = hsi.getServerAddress().getHostname() + ":" + hsi.getInfoPort();
+       String url = "http://" + hostname + "/";
+       totalRegions += hsi.getLoad().getNumberOfRegions();
+       totalRequests += hsi.getLoad().getNumberOfRequests() / interval;
+       long startCode = hsi.getStartCode();
+%>
+<tr><td><a href="<%= url %>"><%= hostname %></a></td><td><%= startCode %></td><td><%= hsi.getLoad().toString(interval) %></td></tr>
+<%   } %>
+<tr><th>Total: </th><td>servers: <%= serverToServerInfos.size() %></td><td>&nbsp;</td><td>requests=<%= totalRequests %>, regions=<%= totalRegions %></td></tr>
+</table>
+
+<p>Load is requests per second and count of regions loaded</p>
+<% } %>
+</body>
+</html>
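
The Region Servers section of this page turns the per-interval request counters reported by each regionserver into a requests-per-second figure before summing them. A small, hedged sketch of that arithmetic follows; it is illustrative only and not part of this commit, and the interval and per-server counter values are invented.

    // Illustrative sketch only (not from the commit): the load math master.jsp uses.
    public class MasterLoadMathSketch {
      public static void main(String[] args) {
        // hbase.regionserver.msginterval, in milliseconds (the JSP reads it with a default of 1000).
        int msgIntervalMillis = 3000;
        int interval = msgIntervalMillis / 1000;  // seconds between reports
        if (interval == 0) {
          interval = 1;                           // same guard the JSP applies for sub-second intervals
        }

        // Invented per-server counters for the last reporting interval.
        int[] requestsPerServer = {900, 1500};
        int[] regionsPerServer  = {42, 58};

        int totalRequests = 0;
        int totalRegions = 0;
        for (int i = 0; i < requestsPerServer.length; i++) {
          totalRequests += requestsPerServer[i] / interval;  // requests per second
          totalRegions  += regionsPerServer[i];
        }
        // Corresponds to the "requests=..., regions=..." totals row the page renders.
        System.out.println("requests=" + totalRequests + ", regions=" + totalRegions);
      }
    }
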


