hbase-commits mailing list archives

From apurt...@apache.org
Subject svn commit: r785076 [18/18] - in /hadoop/hbase/trunk_on_hadoop-0.18.3: ./ bin/ conf/ src/java/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/io/ src/...
Date Tue, 16 Jun 2009 04:34:02 GMT
Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,336 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class TestWildcardColumnTracker extends HBaseTestCase
+implements HConstants {
+  private boolean PRINT = false; 
+  
+  public void testGet_SingleVersion() {
+    if(PRINT) {
+      System.out.println("SingleVersion");
+    }
+    byte [] col1 = Bytes.toBytes("col1");
+    byte [] col2 = Bytes.toBytes("col2");
+    byte [] col3 = Bytes.toBytes("col3");
+    byte [] col4 = Bytes.toBytes("col4");
+    byte [] col5 = Bytes.toBytes("col5");
+    
+    //Create tracker
+    List<MatchCode> expected = new ArrayList<MatchCode>();
+    expected.add(MatchCode.INCLUDE);
+    expected.add(MatchCode.INCLUDE);
+    expected.add(MatchCode.INCLUDE);
+    expected.add(MatchCode.INCLUDE);
+    expected.add(MatchCode.INCLUDE);
+    int maxVersions = 1;
+    
+    ColumnTracker exp = new WildcardColumnTracker(maxVersions);
+        
+    //Create "Scanner"
+    List<byte[]> scanner = new ArrayList<byte[]>();
+    scanner.add(col1);
+    scanner.add(col2);
+    scanner.add(col3);
+    scanner.add(col4);
+    scanner.add(col5);
+    
+    //Initialize result
+    List<MatchCode> result = new ArrayList<MatchCode>(); 
+    
+    //"Match"
+    for(byte [] col : scanner){
+      result.add(exp.checkColumn(col, 0, col.length));
+    }
+    
+    assertEquals(expected.size(), result.size());
+    for(int i=0; i< expected.size(); i++){
+      assertEquals(expected.get(i), result.get(i));
+      if(PRINT){
+        System.out.println("Expected " +expected.get(i) + ", actual " +
+            result.get(i));
+      }
+    }
+  }
+
+  
+  public void testGet_MultiVersion() {
+    if(PRINT) {
+      System.out.println("\nMultiVersion");
+    }
+    byte [] col1 = Bytes.toBytes("col1");
+    byte [] col2 = Bytes.toBytes("col2");
+    byte [] col3 = Bytes.toBytes("col3");
+    byte [] col4 = Bytes.toBytes("col4");
+    byte [] col5 = Bytes.toBytes("col5");
+    
+    //Create tracker
+    List<MatchCode> expected = new ArrayList<MatchCode>();
+    int size = 5;
+    for(int i=0; i<size; i++){
+      expected.add(MatchCode.INCLUDE);
+      expected.add(MatchCode.INCLUDE);
+      expected.add(MatchCode.SKIP);
+    }
+    int maxVersions = 2;
+    
+    ColumnTracker exp = new WildcardColumnTracker(maxVersions);
+        
+    //Create "Scanner"
+    List<byte[]> scanner = new ArrayList<byte[]>();
+    scanner.add(col1);
+    scanner.add(col1);
+    scanner.add(col1);
+    scanner.add(col2);
+    scanner.add(col2);
+    scanner.add(col2);
+    scanner.add(col3);
+    scanner.add(col3);
+    scanner.add(col3);
+    scanner.add(col4);
+    scanner.add(col4);
+    scanner.add(col4);
+    scanner.add(col5);
+    scanner.add(col5);
+    scanner.add(col5);
+    
+    //Initialize result
+    List<MatchCode> result = new ArrayList<MatchCode>(); 
+    
+    //"Match"
+    for(byte [] col : scanner){
+      result.add(exp.checkColumn(col, 0, col.length));
+    }
+    
+    assertEquals(expected.size(), result.size());
+    for(int i=0; i< expected.size(); i++){
+      assertEquals(expected.get(i), result.get(i));
+      if(PRINT){
+        System.out.println("Expected " +expected.get(i) + ", actual " +
+            result.get(i));
+      }
+    }
+  }
+  
+  public void testUpdate_SameColumns(){
+    if(PRINT) {
+      System.out.println("\nUpdate_SameColumns");
+    }
+    byte [] col1 = Bytes.toBytes("col1");
+    byte [] col2 = Bytes.toBytes("col2");
+    byte [] col3 = Bytes.toBytes("col3");
+    byte [] col4 = Bytes.toBytes("col4");
+    byte [] col5 = Bytes.toBytes("col5");
+    
+    //Create tracker
+    List<MatchCode> expected = new ArrayList<MatchCode>();
+    int size = 10;
+    for(int i=0; i<size; i++){
+      expected.add(MatchCode.INCLUDE);
+    }
+    for(int i=0; i<5; i++){
+      expected.add(MatchCode.SKIP);
+    }
+    
+    int maxVersions = 2;
+    
+    ColumnTracker wild = new WildcardColumnTracker(maxVersions);
+        
+    //Create "Scanner"
+    List<byte[]> scanner = new ArrayList<byte[]>();
+    scanner.add(col1);
+    scanner.add(col2);
+    scanner.add(col3);
+    scanner.add(col4);
+    scanner.add(col5);
+    
+    //Initialize result
+    List<MatchCode> result = new ArrayList<MatchCode>(); 
+    
+    //"Match"
+    for(int i=0; i<3; i++){
+      for(byte [] col : scanner){
+        result.add(wild.checkColumn(col, 0, col.length));
+      }
+      wild.update();
+    }
+    
+    assertEquals(expected.size(), result.size());
+    for(int i=0; i<expected.size(); i++){
+      assertEquals(expected.get(i), result.get(i));
+      if(PRINT){
+        System.out.println("Expected " +expected.get(i) + ", actual " +
+            result.get(i));
+      }
+    }
+  }
+  
+  
+  public void testUpdate_NewColumns(){
+    if(PRINT) {
+      System.out.println("\nUpdate_NewColumns");
+    }
+    byte [] col1 = Bytes.toBytes("col1");
+    byte [] col2 = Bytes.toBytes("col2");
+    byte [] col3 = Bytes.toBytes("col3");
+    byte [] col4 = Bytes.toBytes("col4");
+    byte [] col5 = Bytes.toBytes("col5");
+    
+    byte [] col6 = Bytes.toBytes("col6");
+    byte [] col7 = Bytes.toBytes("col7");
+    byte [] col8 = Bytes.toBytes("col8");
+    byte [] col9 = Bytes.toBytes("col9");
+    byte [] col0 = Bytes.toBytes("col0");
+    
+    //Create tracker
+    List<MatchCode> expected = new ArrayList<MatchCode>();
+    int size = 10;
+    for(int i=0; i<size; i++){
+      expected.add(MatchCode.INCLUDE);
+    }
+    for(int i=0; i<5; i++){
+      expected.add(MatchCode.SKIP);
+    }
+    
+    int maxVersions = 1;
+    
+    ColumnTracker wild = new WildcardColumnTracker(maxVersions);
+        
+    //Create "Scanner"
+    List<byte[]> scanner = new ArrayList<byte[]>();
+    scanner.add(col0);
+    scanner.add(col1);
+    scanner.add(col2);
+    scanner.add(col3);
+    scanner.add(col4);
+    
+    //Initialize result
+    List<MatchCode> result = new ArrayList<MatchCode>(); 
+    
+    for(byte [] col : scanner){
+      result.add(wild.checkColumn(col, 0, col.length));
+    }
+    wild.update();
+
+    //Create "Scanner1"
+    List<byte[]> scanner1 = new ArrayList<byte[]>();
+    scanner1.add(col5);
+    scanner1.add(col6);
+    scanner1.add(col7);
+    scanner1.add(col8);
+    scanner1.add(col9);
+    for(byte [] col : scanner1){
+      result.add(wild.checkColumn(col, 0, col.length));
+    }
+    wild.update();
+
+    //Scanner again
+    for(byte [] col : scanner){
+      result.add(wild.checkColumn(col, 0, col.length));
+    }  
+      
+    //"Match"
+    assertEquals(expected.size(), result.size());
+    for(int i=0; i<expected.size(); i++){
+      assertEquals(expected.get(i), result.get(i));
+      if(PRINT){
+        System.out.println("Expected " +expected.get(i) + ", actual " +
+            result.get(i));
+      }
+    }
+  }
+  
+  
+  public void testUpdate_MixedColumns(){
+    if(PRINT) {
+      System.out.println("\nUpdate_MixedColumns");
+    }
+    byte [] col0 = Bytes.toBytes("col0");
+    byte [] col1 = Bytes.toBytes("col1");
+    byte [] col2 = Bytes.toBytes("col2");
+    byte [] col3 = Bytes.toBytes("col3");
+    byte [] col4 = Bytes.toBytes("col4");
+    
+    byte [] col5 = Bytes.toBytes("col5");
+    byte [] col6 = Bytes.toBytes("col6");
+    byte [] col7 = Bytes.toBytes("col7");
+    byte [] col8 = Bytes.toBytes("col8");
+    byte [] col9 = Bytes.toBytes("col9");
+    
+    //Create tracker
+    List<MatchCode> expected = new ArrayList<MatchCode>();
+    int size = 5;
+    for(int i=0; i<size; i++){
+      expected.add(MatchCode.INCLUDE);
+    }
+    for(int i=0; i<size; i++){
+      expected.add(MatchCode.SKIP);
+    }
+    for(int i=0; i<size; i++){
+      expected.add(MatchCode.INCLUDE);
+    }
+    for(int i=0; i<size; i++){
+      expected.add(MatchCode.SKIP);
+    }
+    
+    int maxVersions = 1;
+    
+    ColumnTracker wild = new WildcardColumnTracker(maxVersions);
+        
+    //Create "Scanner"
+    List<byte[]> scanner = new ArrayList<byte[]>();
+    scanner.add(col0);
+    scanner.add(col2);
+    scanner.add(col4);
+    scanner.add(col6);
+    scanner.add(col8);
+    
+    //Initialize result
+    List<MatchCode> result = new ArrayList<MatchCode>(); 
+    
+    for(int i=0; i<2; i++){
+      for(byte [] col : scanner){
+        result.add(wild.checkColumn(col, 0, col.length));
+      }
+      wild.update();
+    }
+
+    //Create "Scanner1"
+    List<byte[]> scanner1 = new ArrayList<byte[]>();
+    scanner1.add(col1);
+    scanner1.add(col3);
+    scanner1.add(col5);
+    scanner1.add(col7);
+    scanner1.add(col9);
+    for(byte [] col : scanner1){
+      result.add(wild.checkColumn(col, 0, col.length));
+    }
+    wild.update();
+
+    //Scanner again
+    for(byte [] col : scanner){
+      result.add(wild.checkColumn(col, 0, col.length));
+    }  
+      
+    //"Match"
+    assertEquals(expected.size(), result.size());
+    
+    for(int i=0; i<expected.size(); i++){
+      assertEquals(expected.get(i), result.get(i));
+      if(PRINT){
+        System.out.println("Expected " +expected.get(i) + ", actual " +
+            result.get(i));
+      }
+    }
+  }
+  
+  
+  
+}

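The tests above all drive the tracker the same way: one checkColumn() call per qualifier seen while scanning, then an update() call between passes over the data. Below is a minimal sketch of that calling pattern; it is not part of this commit, and the class name, qualifiers, and maxVersions value are illustrative only.

package org.apache.hadoop.hbase.regionserver;

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
import org.apache.hadoop.hbase.util.Bytes;

public class WildcardTrackerSketch {
  public static void main(String[] args) {
    // Keep at most 2 versions per column, as in testGet_MultiVersion above.
    ColumnTracker tracker = new WildcardColumnTracker(2);
    List<byte[]> qualifiers = Arrays.asList(
        Bytes.toBytes("col1"), Bytes.toBytes("col1"), Bytes.toBytes("col1"));
    for (byte[] q : qualifiers) {
      // Expect INCLUDE, INCLUDE, SKIP: the third copy of "col1" exceeds maxVersions.
      MatchCode code = tracker.checkColumn(q, 0, q.length);
      System.out.println(Bytes.toString(q) + " -> " + code);
    }
    // Called between passes over the data, as the tests above do.
    tracker.update();
  }
}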
Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/thrift/DisabledTestThriftServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/thrift/DisabledTestThriftServer.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/thrift/DisabledTestThriftServer.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/thrift/DisabledTestThriftServer.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,380 @@
+/**
+ * Copyright 2008-2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
+import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
+import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
+import org.apache.hadoop.hbase.thrift.generated.Mutation;
+import org.apache.hadoop.hbase.thrift.generated.TCell;
+import org.apache.hadoop.hbase.thrift.generated.TRowResult;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Unit testing for ThriftServer.HBaseHandler, a part of the 
+ * org.apache.hadoop.hbase.thrift package.  
+ */
+public class DisabledTestThriftServer extends HBaseClusterTestCase {
+
+  // Static names for tables, columns, rows, and values
+  private static byte[] tableAname = Bytes.toBytes("tableA");
+  private static byte[] tableBname = Bytes.toBytes("tableB");
+  private static byte[] columnAname = Bytes.toBytes("columnA:");
+  private static byte[] columnBname = Bytes.toBytes("columnB:");
+  private static byte[] badColumnName = Bytes.toBytes("noColon:");
+  private static byte[] rowAname = Bytes.toBytes("rowA");
+  private static byte[] rowBname = Bytes.toBytes("rowB");
+  private static byte[] valueAname = Bytes.toBytes("valueA");
+  private static byte[] valueBname = Bytes.toBytes("valueB");
+  private static byte[] valueCname = Bytes.toBytes("valueC");
+  private static byte[] valueDname = Bytes.toBytes("valueD");
+
+  /**
+   * Runs all of the tests under a single JUnit test method.  We
+   * consolidate all testing into one method because HBaseClusterTestCase
+   * is prone to OutOfMemoryErrors when there are three or more
+   * JUnit test methods.
+   * 
+   * @throws Exception
+   */
+  public void testAll() throws Exception {
+    // Run all tests
+    doTestTableCreateDrop();
+    doTestTableMutations();
+    doTestTableTimestampsAndColumns();
+    doTestTableScanners();
+  }
+
+  /**
+   * Tests for creating, enabling, disabling, and deleting tables.  Also 
+   * tests that creating a table with an invalid column name yields an 
+   * IllegalArgument exception.
+   * 
+   * @throws Exception
+   */
+  public void doTestTableCreateDrop() throws Exception {
+    ThriftServer.HBaseHandler handler = new ThriftServer.HBaseHandler();
+
+    // Create/enable/disable/delete tables, ensure methods act correctly
+    assertEquals(handler.getTableNames().size(), 0);
+    handler.createTable(tableAname, getColumnDescriptors());
+    assertEquals(handler.getTableNames().size(), 1);
+    assertEquals(handler.getColumnDescriptors(tableAname).size(), 2);
+    assertTrue(handler.isTableEnabled(tableAname));
+    handler.createTable(tableBname, new ArrayList<ColumnDescriptor>());
+    assertEquals(handler.getTableNames().size(), 2);
+    handler.disableTable(tableBname);
+    assertFalse(handler.isTableEnabled(tableBname));
+    handler.deleteTable(tableBname);
+    assertEquals(handler.getTableNames().size(), 1);
+    handler.disableTable(tableAname);
+    assertFalse(handler.isTableEnabled(tableAname));
+    handler.enableTable(tableAname);
+    assertTrue(handler.isTableEnabled(tableAname));
+    handler.disableTable(tableAname);
+    handler.deleteTable(tableAname);
+  }
+
+  /**
+   * Tests adding a series of Mutations and BatchMutations, including a 
+   * delete mutation.  Also tests data retrieval, and getting back multiple 
+   * versions.  
+   * 
+   * @throws Exception
+   */
+  public void doTestTableMutations() throws Exception {
+    // Setup
+    ThriftServer.HBaseHandler handler = new ThriftServer.HBaseHandler();
+    handler.createTable(tableAname, getColumnDescriptors());
+
+    // Apply a few Mutations to rowA
+    //     mutations.add(new Mutation(false, columnAname, valueAname));
+    //     mutations.add(new Mutation(false, columnBname, valueBname));
+    handler.mutateRow(tableAname, rowAname, getMutations());
+
+    // Assert that the changes were made
+    assertTrue(Bytes.equals(valueAname,
+      handler.get(tableAname, rowAname, columnAname).get(0).value));
+    TRowResult rowResult1 = handler.getRow(tableAname, rowAname).get(0);
+    assertTrue(Bytes.equals(rowAname, rowResult1.row));
+    assertTrue(Bytes.equals(valueBname,
+      rowResult1.columns.get(columnBname).value));
+
+    // Apply a few BatchMutations for rowA and rowB
+    // rowAmutations.add(new Mutation(true, columnAname, null));
+    // rowAmutations.add(new Mutation(false, columnBname, valueCname));
+    // batchMutations.add(new BatchMutation(rowAname, rowAmutations));
+    // Mutations to rowB
+    // rowBmutations.add(new Mutation(false, columnAname, valueCname));
+    // rowBmutations.add(new Mutation(false, columnBname, valueDname));
+    // batchMutations.add(new BatchMutation(rowBname, rowBmutations));
+    handler.mutateRows(tableAname, getBatchMutations());
+
+    // Assert that changes were made to rowA
+    List<TCell> cells = handler.get(tableAname, rowAname, columnAname);
+    assertFalse(cells.size() > 0);
+    assertTrue(Bytes.equals(valueCname, handler.get(tableAname, rowAname, columnBname).get(0).value));
+    List<TCell> versions = handler.getVer(tableAname, rowAname, columnBname, MAXVERSIONS);
+    assertTrue(Bytes.equals(valueCname, versions.get(0).value));
+    assertTrue(Bytes.equals(valueBname, versions.get(1).value));
+
+    // Assert that changes were made to rowB
+    TRowResult rowResult2 = handler.getRow(tableAname, rowBname).get(0);
+    assertTrue(Bytes.equals(rowBname, rowResult2.row));
+    assertTrue(Bytes.equals(valueCname, rowResult2.columns.get(columnAname).value));
+    assertTrue(Bytes.equals(valueDname, rowResult2.columns.get(columnBname).value));
+
+    // Apply some deletes
+    handler.deleteAll(tableAname, rowAname, columnBname);
+    handler.deleteAllRow(tableAname, rowBname);
+
+    // Assert that the deletes were applied
+    int size = handler.get(tableAname, rowAname, columnBname).size();
+    assertEquals(0, size);
+    size = handler.getRow(tableAname, rowBname).size();
+    assertEquals(0, size);
+
+    // Teardown
+    handler.disableTable(tableAname);
+    handler.deleteTable(tableAname);
+  }
+
+  /**
+   * Similar to testTableMutations(), except Mutations are applied with 
+   * specific timestamps and data retrieval uses these timestamps to 
+   * extract specific versions of data.  
+   * 
+   * @throws Exception
+   */
+  public void doTestTableTimestampsAndColumns() throws Exception {
+    // Setup
+    ThriftServer.HBaseHandler handler = new ThriftServer.HBaseHandler();
+    handler.createTable(tableAname, getColumnDescriptors());
+
+    // Apply timestamped Mutations to rowA
+    long time1 = System.currentTimeMillis();
+    handler.mutateRowTs(tableAname, rowAname, getMutations(), time1);
+
+    // Sleep to ensure that 'time1' and 'time2' will be different even with a
+    // coarse-grained system timer.
+    Thread.sleep(1000);
+
+    // Apply timestamped BatchMutations for rowA and rowB
+    long time2 = System.currentTimeMillis();
+    handler.mutateRowsTs(tableAname, getBatchMutations(), time2);
+
+    // Apply an overlapping timestamped mutation to rowB
+    handler.mutateRowTs(tableAname, rowBname, getMutations(), time2);
+
+    // Assert that the timestamp-related methods retrieve the correct data
+    assertEquals(handler.getVerTs(tableAname, rowAname, columnBname, time2,
+      MAXVERSIONS).size(), 2);
+    assertEquals(handler.getVerTs(tableAname, rowAname, columnBname, time1,
+      MAXVERSIONS).size(), 1);
+
+    TRowResult rowResult1 = handler.getRowTs(tableAname, rowAname, time1).get(0);
+    TRowResult rowResult2 = handler.getRowTs(tableAname, rowAname, time2).get(0);
+    assertTrue(Bytes.equals(rowResult1.columns.get(columnAname).value, valueAname));
+    assertTrue(Bytes.equals(rowResult1.columns.get(columnBname).value, valueBname));
+    assertTrue(Bytes.equals(rowResult2.columns.get(columnBname).value, valueCname));
+    
+    // Maybe I'm reading this wrong, but at line #187 above the BatchMutations
+    // add columnAname at time2, so the assertion below should be true, not false.
+    // -- St.Ack
+    assertTrue(rowResult2.columns.containsKey(columnAname));
+    
+    List<byte[]> columns = new ArrayList<byte[]>();
+    columns.add(columnBname);
+
+    rowResult1 = handler.getRowWithColumns(tableAname, rowAname, columns).get(0);
+    assertTrue(Bytes.equals(rowResult1.columns.get(columnBname).value, valueCname));
+    assertFalse(rowResult1.columns.containsKey(columnAname));
+
+    rowResult1 = handler.getRowWithColumnsTs(tableAname, rowAname, columns, time1).get(0);
+    assertTrue(Bytes.equals(rowResult1.columns.get(columnBname).value, valueBname));
+    assertFalse(rowResult1.columns.containsKey(columnAname));
+    
+    // Apply some timestamped deletes
+    handler.deleteAllTs(tableAname, rowAname, columnBname, time1);
+    handler.deleteAllRowTs(tableAname, rowBname, time2);
+
+    // Assert that the timestamp-related methods retrieve the correct data
+    int size = handler.getVerTs(tableAname, rowAname, columnBname, time1, MAXVERSIONS).size();
+    assertFalse(size > 0);
+    assertTrue(Bytes.equals(handler.get(tableAname, rowAname, columnBname).get(0).value, valueCname));
+    assertFalse(handler.getRow(tableAname, rowBname).size() > 0);
+
+    // Teardown
+    handler.disableTable(tableAname);
+    handler.deleteTable(tableAname);
+  }
+
+  /**
+   * Tests the four different scanner-opening methods (with and without 
+   * a stoprow, with and without a timestamp).  
+   * 
+   * @throws Exception
+   */
+  public void doTestTableScanners() throws Exception {
+    // Setup
+    ThriftServer.HBaseHandler handler = new ThriftServer.HBaseHandler();
+    handler.createTable(tableAname, getColumnDescriptors());
+
+    // Apply timestamped Mutations to rowA
+    long time1 = System.currentTimeMillis();
+    handler.mutateRowTs(tableAname, rowAname, getMutations(), time1);
+
+    // Sleep to ensure that 'time1' and 'time2' will be different even with a
+    // coarse-grained system timer.
+    Thread.sleep(1000);
+
+    // Apply timestamped BatchMutations for rowA and rowB
+    long time2 = System.currentTimeMillis();
+    handler.mutateRowsTs(tableAname, getBatchMutations(), time2);
+
+    // Test a scanner on all rows and all columns, no timestamp
+    int scanner1 = handler.scannerOpen(tableAname, rowAname, getColumnList(true, true));
+    TRowResult rowResult1a = handler.scannerGet(scanner1).get(0);
+    assertTrue(Bytes.equals(rowResult1a.row, rowAname));
+    // This used to be '1'.  I don't know why, since we are asking for two
+    // columns and the mutations above would seem to add two columns to the row.
+    // -- St.Ack 05/12/2009
+    assertEquals(rowResult1a.columns.size(), 2);
+    assertTrue(Bytes.equals(rowResult1a.columns.get(columnBname).value, valueCname));
+    TRowResult rowResult1b = handler.scannerGet(scanner1).get(0);
+    assertTrue(Bytes.equals(rowResult1b.row, rowBname));
+    assertEquals(rowResult1b.columns.size(), 2);
+    assertTrue(Bytes.equals(rowResult1b.columns.get(columnAname).value, valueCname));
+    assertTrue(Bytes.equals(rowResult1b.columns.get(columnBname).value, valueDname));
+    closeScanner(scanner1, handler);
+
+    // Test a scanner on all rows and all columns, with timestamp
+    int scanner2 = handler.scannerOpenTs(tableAname, rowAname, getColumnList(true, true), time1);
+    TRowResult rowResult2a = handler.scannerGet(scanner2).get(0);
+    assertEquals(rowResult2a.columns.size(), 2);
+    assertTrue(Bytes.equals(rowResult2a.columns.get(columnAname).value, valueAname));
+    assertTrue(Bytes.equals(rowResult2a.columns.get(columnBname).value, valueBname));
+    closeScanner(scanner2, handler);
+
+    // Test a scanner on the first row and first column only, no timestamp
+    int scanner3 = handler.scannerOpenWithStop(tableAname, rowAname, rowBname, 
+        getColumnList(true, false));
+    closeScanner(scanner3, handler);
+
+    // Test a scanner on the first row and second column only, with timestamp
+    int scanner4 = handler.scannerOpenWithStopTs(tableAname, rowAname, rowBname, 
+        getColumnList(false, true), time1);
+    TRowResult rowResult4a = handler.scannerGet(scanner4).get(0);
+    assertEquals(rowResult4a.columns.size(), 1);
+    assertTrue(Bytes.equals(rowResult4a.columns.get(columnBname).value, valueBname));
+
+    // Teardown
+    handler.disableTable(tableAname);
+    handler.deleteTable(tableAname);
+  }
+
+  /**
+   * 
+   * @return a List of ColumnDescriptors for use in creating a table.  Has one 
+   * default ColumnDescriptor and one ColumnDescriptor with fewer versions
+   */
+  private List<ColumnDescriptor> getColumnDescriptors() {
+    ArrayList<ColumnDescriptor> cDescriptors = new ArrayList<ColumnDescriptor>();
+
+    // A default ColumnDescriptor
+    ColumnDescriptor cDescA = new ColumnDescriptor();
+    cDescA.name = columnAname;
+    cDescriptors.add(cDescA);
+
+    // A slightly customized ColumnDescriptor (only 2 versions)
+    ColumnDescriptor cDescB = new ColumnDescriptor(columnBname, 2, "NONE", 
+        false, 2147483647, "NONE", 0, 0, false, -1);
+    cDescriptors.add(cDescB);
+
+    return cDescriptors;
+  }
+
+  /**
+   * 
+   * @param includeA whether or not to include columnA
+   * @param includeB whether or not to include columnB
+   * @return a List of column names for use in retrieving a scanner
+   */
+  private List<byte[]> getColumnList(boolean includeA, boolean includeB) {
+    List<byte[]> columnList = new ArrayList<byte[]>();
+    if (includeA) columnList.add(columnAname);
+    if (includeB) columnList.add(columnBname);
+    return columnList;
+  }
+
+  /**
+   * 
+   * @return a List of Mutations for a row, with columnA having valueA 
+   * and columnB having valueB
+   */
+  private List<Mutation> getMutations() {
+    List<Mutation> mutations = new ArrayList<Mutation>();
+    mutations.add(new Mutation(false, columnAname, valueAname));
+    mutations.add(new Mutation(false, columnBname, valueBname));
+    return mutations;
+  }
+
+  /**
+   * 
+   * @return a List of BatchMutations with the following effects:
+   * (rowA, columnA): delete
+   * (rowA, columnB): place valueC
+   * (rowB, columnA): place valueC
+   * (rowB, columnB): place valueD  
+   */
+  private List<BatchMutation> getBatchMutations() {
+    List<BatchMutation> batchMutations = new ArrayList<BatchMutation>();
+    // Mutations to rowA.  You can't mix delete and put anymore.
+    List<Mutation> rowAmutations = new ArrayList<Mutation>();
+    rowAmutations.add(new Mutation(true, columnAname, null));
+    batchMutations.add(new BatchMutation(rowAname, rowAmutations));
+    rowAmutations = new ArrayList<Mutation>();
+    rowAmutations.add(new Mutation(false, columnBname, valueCname));
+    batchMutations.add(new BatchMutation(rowAname, rowAmutations));
+    // Mutations to rowB
+    List<Mutation> rowBmutations = new ArrayList<Mutation>();
+    rowBmutations.add(new Mutation(false, columnAname, valueCname));
+    rowBmutations.add(new Mutation(false, columnBname, valueDname));
+    batchMutations.add(new BatchMutation(rowBname, rowBmutations));
+    return batchMutations;
+  }
+
+  /**
+   * Exhausts the passed scanner with a final scannerGet call, and then
+   * closes the scanner.
+   * 
+   * @param scannerId the scanner to close
+   * @param handler the HBaseHandler interfacing to HBase
+   * @throws Exception
+   */
+  private void closeScanner(int scannerId, ThriftServer.HBaseHandler handler) throws Exception {
+    handler.scannerGet(scannerId);
+    handler.scannerClose(scannerId);
+  }
+}

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/DisabledTestMergeTool.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/DisabledTestMergeTool.java?rev=785076&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/DisabledTestMergeTool.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/DisabledTestMergeTool.java Tue Jun 16 04:33:56 2009
@@ -0,0 +1,271 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.regionserver.HLog;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.util.ToolRunner;
+
+/** Test stand alone merge tool that can merge arbitrary regions */
+public class DisabledTestMergeTool extends HBaseTestCase {
+  static final Log LOG = LogFactory.getLog(DisabledTestMergeTool.class);
+//  static final byte [] COLUMN_NAME = Bytes.toBytes("contents:");
+  static final byte [] FAMILY = Bytes.toBytes("contents");
+  static final byte [] QUALIFIER = Bytes.toBytes("dc");
+  
+  private final HRegionInfo[] sourceRegions = new HRegionInfo[5];
+  private final HRegion[] regions = new HRegion[5];
+  private HTableDescriptor desc;
+  private byte [][][] rows;
+  private MiniDFSCluster dfsCluster = null;
+  
+  @Override
+  public void setUp() throws Exception {
+    this.conf.set("hbase.hstore.compactionThreshold", "2");
+
+    // Create table description
+    this.desc = new HTableDescriptor("TestMergeTool");
+    this.desc.addFamily(new HColumnDescriptor(FAMILY));
+
+    /*
+     * Create the HRegionInfos for the regions.
+     */
+    // Region 0 will contain the key range [row_0200,row_0300)
+    sourceRegions[0] = new HRegionInfo(this.desc, Bytes.toBytes("row_0200"),
+      Bytes.toBytes("row_0300"));
+    
+    // Region 1 will contain the key range [row_0250,row_0400) and overlaps
+    // with Region 0
+    sourceRegions[1] =
+      new HRegionInfo(this.desc, Bytes.toBytes("row_0250"),
+          Bytes.toBytes("row_0400"));
+    
+    // Region 2 will contain the key range [row_0100,row_0200) and is adjacent
+    // to Region 0 or the region resulting from the merge of Regions 0 and 1
+    sourceRegions[2] =
+      new HRegionInfo(this.desc, Bytes.toBytes("row_0100"), 
+          Bytes.toBytes("row_0200"));
+    
+    // Region 3 will contain the key range [row_0500,row_0600) and is not
+    // adjacent to any of Regions 0, 1, 2 or the merged result of any or all
+    // of those regions
+    sourceRegions[3] =
+      new HRegionInfo(this.desc, Bytes.toBytes("row_0500"), 
+          Bytes.toBytes("row_0600"));
+    
+    // Region 4 will have empty start and end keys and overlaps all regions.
+    sourceRegions[4] =
+      new HRegionInfo(this.desc, HConstants.EMPTY_BYTE_ARRAY, 
+          HConstants.EMPTY_BYTE_ARRAY);
+    
+    /*
+     * Now create some row keys
+     */
+    this.rows = new byte [5][][];
+    this.rows[0] = Bytes.toByteArrays(new String[] { "row_0210", "row_0280" });
+    this.rows[1] = Bytes.toByteArrays(new String[] { "row_0260", "row_0350", 
+        "row_035" });
+    this.rows[2] = Bytes.toByteArrays(new String[] { "row_0110", "row_0175", 
+        "row_0175", "row_0175"});
+    this.rows[3] = Bytes.toByteArrays(new String[] { "row_0525", "row_0560", 
+        "row_0560", "row_0560", "row_0560"});
+    this.rows[4] = Bytes.toByteArrays(new String[] { "row_0050", "row_1000", 
+        "row_1000", "row_1000", "row_1000", "row_1000" });
+    
+    // Start up dfs
+    this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    this.fs = this.dfsCluster.getFileSystem();
+    conf.set("fs.default.name", fs.getUri().toString());
+    Path parentdir = fs.getHomeDirectory();
+    conf.set(HConstants.HBASE_DIR, parentdir.toString());
+    fs.mkdirs(parentdir);
+    FSUtils.setVersion(fs, parentdir);
+
+    // Note: we must call super.setUp after starting the mini cluster or
+    // we will end up with a local file system
+    
+    super.setUp();
+    try {
+      // Create root and meta regions
+      createRootAndMetaRegions();
+      /*
+       * Create the regions we will merge
+       */
+      for (int i = 0; i < sourceRegions.length; i++) {
+        regions[i] =
+          HRegion.createHRegion(this.sourceRegions[i], this.testDir, this.conf);
+        /*
+         * Insert data
+         */
+        for (int j = 0; j < rows[i].length; j++) {
+          byte [] row = rows[i][j];
+          Put put = new Put(row);
+          put.add(FAMILY, QUALIFIER, row);
+          regions[i].put(put);
+        }
+        HRegion.addRegionToMETA(meta, regions[i]);
+      }
+      // Close root and meta regions
+      closeRootAndMeta();
+      
+    } catch (Exception e) {
+      shutdownDfs(dfsCluster);
+      throw e;
+    }
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+    shutdownDfs(dfsCluster);
+  }
+  
+  /*
+   * @param msg Message that describes this merge
+   * @param regionName1
+   * @param regionName2
+   * @param log Log to use while merging.
+   * @param upperbound When verifying, how high up in this.rows to go.
+   * @return Merged region.
+   * @throws Exception
+   */
+  private HRegion mergeAndVerify(final String msg, final String regionName1,
+    final String regionName2, final HLog log, final int upperbound)
+  throws Exception {
+    Merge merger = new Merge(this.conf);
+    LOG.info(msg);
+    int errCode = ToolRunner.run(merger,
+      new String[] {this.desc.getNameAsString(), regionName1, regionName2}
+    );
+    assertTrue("'" + msg + "' failed", errCode == 0);
+    HRegionInfo mergedInfo = merger.getMergedHRegionInfo();
+  
+    // Now verify that we can read all the rows from regions 0, 1
+    // in the new merged region.
+    HRegion merged =
+      HRegion.openHRegion(mergedInfo, this.testDir, log, this.conf);
+    verifyMerge(merged, upperbound);
+    merged.close();
+    LOG.info("Verified " + msg);
+    return merged;
+  }
+  
+  private void verifyMerge(final HRegion merged, final int upperbound)
+  throws IOException {
+    //Test
+    Scan scan = new Scan();
+    scan.addFamily(FAMILY);
+    InternalScanner scanner = merged.getScanner(scan);
+    List<KeyValue> testRes = null;
+    while(true) {
+      testRes = new ArrayList<KeyValue>();
+      boolean hasNext = scanner.next(testRes);
+      if(!hasNext) {
+        break;
+      }
+    }
+    
+    //!Test
+    
+    for (int i = 0; i < upperbound; i++) {
+      for (int j = 0; j < rows[i].length; j++) {
+        Get get = new Get(rows[i][j]);
+        get.addFamily(FAMILY);
+        Result result = merged.get(get, null);
+        assertEquals(1, result.size());
+        byte [] bytes = result.sorted()[0].getValue();
+        assertNotNull(rows[i][j].toString(), bytes);
+        assertTrue(Bytes.equals(bytes, rows[i][j]));
+      }
+    }
+  }
+
+  /**
+   * Test merge tool.
+   * @throws Exception
+   */
+  public void testMergeTool() throws Exception {
+    // First verify we can read the rows from the source regions and that they
+    // contain the right data.
+    for (int i = 0; i < regions.length; i++) {
+      for (int j = 0; j < rows[i].length; j++) {
+        Get get = new Get(rows[i][j]);
+        get.addFamily(FAMILY);
+        Result result = regions[i].get(get, null);
+        byte [] bytes = result.sorted()[0].getValue();
+        assertNotNull(bytes);
+        assertTrue(Bytes.equals(bytes, rows[i][j]));
+      }
+      // Close the region and delete the log
+      regions[i].close();
+      regions[i].getLog().closeAndDelete();
+    }
+
+    // Create a log that we can reuse when we need to open regions
+    Path logPath = new Path("/tmp", HConstants.HREGION_LOGDIR_NAME + "_" +
+      System.currentTimeMillis());
+    LOG.info("Creating log " + logPath.toString());
+    HLog log = new HLog(this.fs, logPath, this.conf, null);
+    try {
+       // Merge Region 0 and Region 1
+      HRegion merged = mergeAndVerify("merging regions 0 and 1",
+        this.sourceRegions[0].getRegionNameAsString(),
+        this.sourceRegions[1].getRegionNameAsString(), log, 2);
+
+      // Merge the result of merging regions 0 and 1 with region 2
+      merged = mergeAndVerify("merging regions 0+1 and 2",
+        merged.getRegionInfo().getRegionNameAsString(),
+        this.sourceRegions[2].getRegionNameAsString(), log, 3);
+
+      // Merge the result of merging regions 0, 1 and 2 with region 3
+      merged = mergeAndVerify("merging regions 0+1+2 and 3",
+        merged.getRegionInfo().getRegionNameAsString(),
+        this.sourceRegions[3].getRegionNameAsString(), log, 4);
+      
+      // Merge the result of merging regions 0, 1, 2 and 3 with region 4
+      merged = mergeAndVerify("merging regions 0+1+2+3 and 4",
+        merged.getRegionInfo().getRegionNameAsString(),
+        this.sourceRegions[4].getRegionNameAsString(), log, rows.length);
+    } finally {
+      log.closeAndDelete();
+    }
+  }
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/MigrationTest.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/MigrationTest.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/MigrationTest.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/MigrationTest.java Tue Jun 16 04:33:56 2009
@@ -35,12 +35,15 @@
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
-import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
 
 /**
  * Runs migration of filesystem from hbase 0.x to 0.x
@@ -128,9 +131,11 @@
       HTable t = new HTable(this.conf, TABLENAME);
       int count = 0;
       LOG.info("OPENING SCANNER");
-      Scanner s = t.getScanner(TABLENAME_COLUMNS);
+      Scan scan = new Scan();
+      scan.addColumns(TABLENAME_COLUMNS);
+      ResultScanner s = t.getScanner(scan);
       try {
-        for (RowResult r: s) {
+        for (Result r: s) {
           if (r == null || r.size() == 0) {
             break;
           }
@@ -168,7 +173,15 @@
     long startcode = -1;
     boolean changed = false;
     for (int i = 0; i < retries; i++) {
-      startcode = Writables.cellToLong(m.get(row, HConstants.COL_STARTCODE));
+      Get get = new Get(row);
+      get.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
+      Result res = m.get(get);
+      KeyValue [] kvs = res.raw();
+      if(kvs.length <= 0){
+        return;
+      }
+      byte [] value = kvs[0].getValue();
+      startcode = Bytes.toLong(value);
       if (startcode != oldStartCode) {
         changed = true;
         break;

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/SoftValueSortedMapTest.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/SoftValueSortedMapTest.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/SoftValueSortedMapTest.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/SoftValueSortedMapTest.java Tue Jun 16 04:33:56 2009
@@ -17,7 +17,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.util;
 
 import java.util.SortedMap;
@@ -30,6 +29,7 @@
       map.put(new Integer(i), new Integer(i));
     }
     System.out.println(map.size());
+    @SuppressWarnings("unused")
     byte[] block = new byte[849*1024*1024]; // 849 MB
     System.out.println(map.size());
   }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/TestRootPath.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/TestRootPath.java?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/TestRootPath.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/TestRootPath.java Tue Jun 16 04:33:56 2009
@@ -60,4 +60,4 @@
       LOG.info("Got expected exception when checking invalid path:", e);
     }
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/master.jsp
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/master.jsp?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/master.jsp (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/master.jsp Tue Jun 16 04:33:56 2009
@@ -147,7 +147,7 @@
      Arrays.sort(serverNames);
      for (String serverName: serverNames) {
        HServerInfo hsi = serverToServerInfos.get(serverName);
-       String hostname = hsi.getName() + ":" + hsi.getInfoPort();
+       String hostname = hsi.getServerAddress().getInetSocketAddress().getAddress().getHostAddress() + ":" + hsi.getInfoPort();
        String url = "http://" + hostname + "/";
        totalRegions += hsi.getLoad().getNumberOfRegions();
        totalRequests += hsi.getLoad().getNumberOfRequests() / interval;

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/regionhistorian.jsp
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/regionhistorian.jsp?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/regionhistorian.jsp (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/regionhistorian.jsp Tue Jun 16 04:33:56 2009
@@ -5,10 +5,12 @@
   import="org.apache.hadoop.hbase.RegionHistorian"
   import="org.apache.hadoop.hbase.master.HMaster"
   import="org.apache.hadoop.hbase.RegionHistorian.RegionHistoryInformation"
-  import="org.apache.hadoop.hbase.HConstants"%><%
-  String regionName = request.getParameter("regionname");
+  import="org.apache.hadoop.hbase.HConstants"%>
+<%@ page import="org.apache.hadoop.hbase.util.Bytes" %>
+<%
+    String regionName = request.getParameter("regionname");
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
-  List<RegionHistoryInformation> informations = RegionHistorian.getInstance().getRegionHistory(regionName);
+  List<RegionHistoryInformation> informations = RegionHistorian.getInstance().getRegionHistory(Bytes.toBytesBinary(regionName));
   // Pattern used so we can wrap a regionname in an href.
   Pattern pattern = Pattern.compile(RegionHistorian.SPLIT_PREFIX + "(.*)$");
 %><?xml version="1.0" encoding="UTF-8" ?>

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/table.jsp
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/table.jsp?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/table.jsp (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/master/table.jsp Tue Jun 16 04:33:56 2009
@@ -15,6 +15,8 @@
   import="org.apache.hadoop.hbase.master.HMaster" 
   import="org.apache.hadoop.hbase.master.MetaRegion"
   import="org.apache.hadoop.hbase.util.Bytes"
+  import="java.io.IOException"
+  import="java.util.Map"
   import="org.apache.hadoop.hbase.HConstants"%><%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
   HBaseConfiguration conf = master.getConfiguration();
@@ -38,7 +40,6 @@
   if ( action != null ) {
 %>
 <head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
-      <meta http-equiv="refresh" content="5; url=/"/>
 <link rel="stylesheet" type="text/css" href="/static/hbase.css" />
 </head>
 <body>
@@ -50,18 +51,18 @@
     if (key != null && key.length() > 0) {
       Writable[] arr = new Writable[1];
       arr[0] = new ImmutableBytesWritable(Bytes.toBytes(key));
-      master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_SPLIT, arr);
+      master.modifyTable(Bytes.toBytes(tableName), HConstants.Modify.TABLE_SPLIT, arr);
     } else {
-      master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_SPLIT, null);
+      master.modifyTable(Bytes.toBytes(tableName), HConstants.Modify.TABLE_SPLIT, null);
     }
     %> Split request accepted. <%
   } else if (action.equals("compact")) {
     if (key != null && key.length() > 0) {
       Writable[] arr = new Writable[1];
       arr[0] = new ImmutableBytesWritable(Bytes.toBytes(key));
-      master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_COMPACT, arr);
+      master.modifyTable(Bytes.toBytes(tableName), HConstants.Modify.TABLE_COMPACT, arr);
     } else {
-      master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_COMPACT, null);
+      master.modifyTable(Bytes.toBytes(tableName), HConstants.Modify.TABLE_COMPACT, null);
     }
     %> Compact request accepted. <%
   }
@@ -132,17 +133,17 @@
         hriEntry.getValue()).getInfoPort();
     
     String urlRegionHistorian =
-        "/regionhistorian.jsp?regionname=" + 
-        URLEncoder.encode(hriEntry.getKey().getRegionNameAsString(), "UTF-8");
+        "/regionhistorian.jsp?regionname="+
+                Bytes.toStringBinary(hriEntry.getKey().getRegionName());
 
     String urlRegionServer =
         "http://" + hriEntry.getValue().getHostname().toString() + ":" + infoPort + "/";
 %>
 <tr>
-  <td><a href="<%= urlRegionHistorian %>"><%= hriEntry.getKey().getRegionNameAsString()%></a></td>
+  <td><a href="<%= urlRegionHistorian %>"><%= Bytes.toStringBinary(hriEntry.getKey().getRegionName())%></a></td>
   <td><a href="<%= urlRegionServer %>"><%= hriEntry.getValue().toString() %></a></td>
-  <td><%= hriEntry.getKey().getEncodedName()%></td> <td><%= Bytes.toString(hriEntry.getKey().getStartKey())%></td>
-  <td><%= Bytes.toString(hriEntry.getKey().getEndKey())%></td>
+  <td><%= hriEntry.getKey().getEncodedName()%></td> <td><%= Bytes.toStringBinary(hriEntry.getKey().getStartKey())%></td>
+  <td><%= Bytes.toStringBinary(hriEntry.getKey().getEndKey())%></td>
 </tr>
 <% } %>
 </table>

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/regionserver/regionserver.jsp
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/regionserver/regionserver.jsp?rev=785076&r1=785075&r2=785076&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/regionserver/regionserver.jsp (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/regionserver/regionserver.jsp Tue Jun 16 04:33:56 2009
@@ -48,7 +48,7 @@
         HServerLoad.RegionLoad load = regionServer.createRegionLoad(r.getRegionName());
  %>
 <tr><td><%= r.getRegionNameAsString() %></td><td><%= r.getEncodedName() %></td>
-    <td><%= Bytes.toString(r.getStartKey()) %></td><td><%= Bytes.toString(r.getEndKey()) %></td>
+    <td><%= Bytes.toStringBinary(r.getStartKey()) %></td><td><%= Bytes.toStringBinary(r.getEndKey()) %></td>
     <td><%= load.toString() %></td>
     </tr>
 <%   } %>


