hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r774595 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/regionserver/ src/java/org/apache/hadoop/hbase/regionserver/transactional/ src/test/org/apache/hadoop/hbase/client/transactional/...
Date Wed, 13 May 2009 23:27:08 GMT
Author: stack
Date: Wed May 13 23:27:07 2009
New Revision: 774595

URL: http://svn.apache.org/viewvc?rev=774595&view=rev
Log:
HBASE-1411 Remove HLogEdit

Added:
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/transactional/DisabledTestTransactions.java
Removed:
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/transactional/TestTransactions.java
Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalHLogManager.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRegExpRowFilter.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=774595&r1=774594&r2=774595&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed May 13 23:27:07 2009
@@ -18,6 +18,7 @@
    HBASE-1361  Disable bloom filters
    HBASE-1367  Get rid of Thrift exception 'NotFound'
    HBASE-1381  Remove onelab and bloom filters files from hbase
+   HBASE-1411  Remove HLogEdit.
 
   BUG FIXES
    HBASE-1140  "ant clean test" fails (Nitay Joffe via Stack)

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java?rev=774595&r1=774594&r2=774595&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java Wed May
13 23:27:07 2009
@@ -33,7 +33,6 @@
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.regionserver.HLogEdit;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java?rev=774595&r1=774594&r2=774595&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java Wed May 13
23:27:07 2009
@@ -22,6 +22,7 @@
 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.UnsupportedEncodingException;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -147,6 +148,15 @@
    */
   private final int maxLogs;
 
+  static byte [] COMPLETE_CACHE_FLUSH;
+  static {
+    try {
+      COMPLETE_CACHE_FLUSH = "HBASE::CACHEFLUSH".getBytes(UTF8_ENCODING);
+    } catch (UnsupportedEncodingException e) {
+      assert(false);
+    }
+  }
+
   /**
    * Create an edit log at the given <code>dir</code> location.
    *
@@ -199,6 +209,10 @@
 
   /**
    * Get the compression type for the hlog files.
+   * Commit logs SHOULD NOT be compressed.  You'll lose edits if the compression
+   * record is not complete.  In gzip, record is 32k so you could lose up to
+   * 32k of edits (All of this is moot till we have sync/flush in hdfs but
+   * still...).
    * @param c Configuration to use.
    * @return the kind of compression to use
    */
@@ -266,7 +280,7 @@
         Path newPath = computeFilename(this.filenum);
 
         this.writer = SequenceFile.createWriter(this.fs, this.conf, newPath,
-          HLogKey.class, HLogEdit.class,
+          HLogKey.class, KeyValue.class,
           fs.getConf().getInt("io.file.buffer.size", 4096),
           fs.getDefaultReplication(), this.blocksize,
           SequenceFile.CompressionType.NONE, new DefaultCodec(), null,
@@ -441,7 +455,7 @@
    * @param logEdit
    * @throws IOException
    */
-  public void append(HRegionInfo regionInfo, HLogEdit logEdit)
+  public void append(HRegionInfo regionInfo, KeyValue logEdit)
   throws IOException {
     this.append(regionInfo, new byte[0], logEdit);
   }
@@ -453,7 +467,7 @@
    * @param logEdit
    * @throws IOException
    */
-  public void append(HRegionInfo regionInfo, byte [] row, HLogEdit logEdit)
+  public void append(HRegionInfo regionInfo, byte [] row, KeyValue logEdit)
   throws IOException {
     if (this.closed) {
       throw new IOException("Cannot append; log is closed");
@@ -520,7 +534,7 @@
       int counter = 0;
       for (KeyValue kv: edits) {
         HLogKey logKey = new HLogKey(regionName, tableName, seqNum[counter++]);
-        doWrite(logKey, new HLogEdit(kv), sync);
+        doWrite(logKey, kv, sync);
         this.numEntries.incrementAndGet();
       }
       updateLock.notifyAll();
@@ -563,7 +577,7 @@
     }
   }
   
-  private void doWrite(HLogKey logKey, HLogEdit logEdit, boolean sync)
+  private void doWrite(HLogKey logKey, KeyValue logEdit, boolean sync)
   throws IOException {
     try {
       long now = System.currentTimeMillis();
@@ -663,9 +677,9 @@
     }
   }
 
-  private HLogEdit completeCacheFlushLogEdit() {
-    return new HLogEdit(new KeyValue(METAROW, METACOLUMN,
-      System.currentTimeMillis(), HLogEdit.COMPLETE_CACHE_FLUSH));
+  private KeyValue completeCacheFlushLogEdit() {
+    return new KeyValue(METAROW, METACOLUMN, System.currentTimeMillis(),
+      COMPLETE_CACHE_FLUSH);
   }
 
   /**
@@ -747,7 +761,7 @@
         // HADOOP-4751 is committed.
         long length = logfiles[i].getLen();
         HLogKey key = new HLogKey();
-        HLogEdit val = new HLogEdit();
+        KeyValue val = new KeyValue();
         try {
           SequenceFile.Reader in =
             new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
@@ -773,7 +787,7 @@
                   old = new SequenceFile.Reader(fs, oldlogfile, conf);
                 }
                 w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class,
-                    HLogEdit.class, getCompressionType(conf));
+                  KeyValue.class, getCompressionType(conf));
                 // Use copy of regionName; regionName object is reused inside in
                 // HStoreKey.getRegionName so its content changes as we iterate.
                 logWriters.put(regionName, w);
@@ -785,7 +799,7 @@
                 if (old != null) {
                   // Copy from existing log file
                   HLogKey oldkey = new HLogKey();
-                  HLogEdit oldval = new HLogEdit();
+                  KeyValue oldval = new KeyValue();
                   for (; old.next(oldkey, oldval); count++) {
                     if (LOG.isDebugEnabled() && count > 0 && count % 10000
== 0) {
                       LOG.debug("Copied " + count + " edits");
@@ -918,7 +932,7 @@
         Reader log = new SequenceFile.Reader(fs, logPath, conf);
         try {
           HLogKey key = new HLogKey();
-          HLogEdit val = new HLogEdit();
+          KeyValue val = new KeyValue();
           while (log.next(key, val)) {
             System.out.println(key.toString() + " " + val.toString());
           }

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=774595&r1=774594&r2=774595&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java Wed May 13
23:27:07 2009
@@ -290,7 +290,7 @@
       reconstructionLog, this.conf);
     try {
       HLogKey key = new HLogKey();
-      HLogEdit val = new HLogEdit();
+      KeyValue val = new KeyValue();
       long skippedEdits = 0;
       long editsCount = 0;
       // How many edits to apply before we send a progress report.
@@ -304,15 +304,14 @@
         }
         // Check this edit is for me. Also, guard against writing the special
         // METACOLUMN info such as HBASE::CACHEFLUSH entries
-        KeyValue kv = val.getKeyValue();
-        if (val.isTransactionEntry() ||
-            kv.matchingColumnNoDelimiter(HLog.METACOLUMN,
+        if (/* Commented out for now -- St.Ack val.isTransactionEntry() ||*/
+            val.matchingColumnNoDelimiter(HLog.METACOLUMN,
               HLog.METACOLUMN.length - 1) ||
           !Bytes.equals(key.getRegionName(), regioninfo.getRegionName()) ||
-          !kv.matchingFamily(family.getName())) {
+          !val.matchingFamily(family.getName())) {
           continue;
         }
-        reconstructedCache.add(kv);
+        reconstructedCache.add(val);
         editsCount++;
         // Every 2k edits, tell the reporter we're making progress.
         // Have seen 60k edits taking 3minutes to complete.

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalHLogManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalHLogManager.java?rev=774595&r1=774594&r2=774595&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalHLogManager.java
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalHLogManager.java
Wed May 13 23:27:07 2009
@@ -37,10 +37,10 @@
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.BatchOperation;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.regionserver.HLog;
-import org.apache.hadoop.hbase.regionserver.HLogEdit;
 import org.apache.hadoop.hbase.regionserver.HLogKey;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.SequenceFile;
@@ -51,6 +51,18 @@
  * to/from the HLog.
  */
 class TransactionalHLogManager {
+  /** If transactional log entry, these are the op codes */
+  // TODO: Make these into types on the KeyValue!!! -- St.Ack
+  public enum TransactionalOperation {
+    /** start transaction */
+    START,
+    /** Equivalent to append in non-transactional environment */
+    WRITE,
+    /** Transaction commit entry */
+    COMMIT,
+    /** Abort transaction entry */
+    ABORT
+  }
 
   private static final Log LOG = LogFactory
       .getLog(TransactionalHLogManager.class);
@@ -84,10 +96,11 @@
    * @throws IOException
    */
   public void writeStartToLog(final long transactionId) throws IOException {
+    /*
     HLogEdit logEdit;
-    logEdit = new HLogEdit(transactionId, HLogEdit.TransactionalOperation.START);
-
-    hlog.append(regionInfo, logEdit);
+    logEdit = new HLogEdit(transactionId, TransactionalOperation.START);
+*/
+    hlog.append(regionInfo, null/*logEdit*/);
   }
 
   /**
@@ -103,8 +116,8 @@
         : update.getTimestamp();
 
     for (BatchOperation op : update) {
-      HLogEdit logEdit = new HLogEdit(transactionId, update.getRow(), op, commitTime);
-      hlog.append(regionInfo, update.getRow(), logEdit);
+      // COMMENTED OUT  HLogEdit logEdit = new HLogEdit(transactionId, update.getRow(), op,
commitTime);
+      hlog.append(regionInfo, update.getRow(), null /*logEdit*/);
     }
   }
 
@@ -113,11 +126,11 @@
    * @throws IOException
    */
   public void writeCommitToLog(final long transactionId) throws IOException {
-    HLogEdit logEdit;
+    /*HLogEdit logEdit;
     logEdit = new HLogEdit(transactionId,
         HLogEdit.TransactionalOperation.COMMIT);
-
-    hlog.append(regionInfo, logEdit);
+*/
+    hlog.append(regionInfo, null /*logEdit*/);
   }
 
   /**
@@ -125,10 +138,10 @@
    * @throws IOException
    */
   public void writeAbortToLog(final long transactionId) throws IOException {
-    HLogEdit logEdit;
+    /*HLogEdit logEdit;
     logEdit = new HLogEdit(transactionId, HLogEdit.TransactionalOperation.ABORT);
-
-    hlog.append(regionInfo, logEdit);
+*/
+    hlog.append(regionInfo, null /*logEdit*/);
   }
 
   /**
@@ -161,10 +174,10 @@
 
     SequenceFile.Reader logReader = new SequenceFile.Reader(fileSystem,
         reconstructionLog, conf);
-
+    /*
     try {
       HLogKey key = new HLogKey();
-      HLogEdit val = new HLogEdit();
+      KeyValue val = new KeyValue();
       long skippedEdits = 0;
       long totalEdits = 0;
       long startCount = 0;
@@ -174,6 +187,7 @@
       // How many edits to apply before we send a progress report.
       int reportInterval = conf.getInt("hbase.hstore.report.interval.edits",
           2000);
+
       while (logReader.next(key, val)) {
         LOG.debug("Processing edit: key: " + key.toString() + " val: "
             + val.toString());
@@ -185,6 +199,7 @@
         // against a KeyValue.  Each invocation creates a new instance.  St.Ack.
 
         // Check this edit is for me.
+
         byte[] column = val.getKeyValue().getColumn();
         Long transactionId = val.getTransactionId();
         if (!val.isTransactionEntry() || HLog.isMetaColumn(column)
@@ -194,6 +209,7 @@
 
         List<BatchUpdate> updates = pendingTransactionsById.get(transactionId);
         switch (val.getOperation()) {
+
         case START:
           if (updates != null || abortedTransactions.contains(transactionId)
               || commitedTransactionsById.containsKey(transactionId)) {
@@ -259,6 +275,7 @@
           pendingTransactionsById.remove(transactionId);
           commitedTransactionsById.put(transactionId, updates);
           commitCount++;
+
         }
         totalEdits++;
 
@@ -283,6 +300,7 @@
               + " unfinished transactions. Going to the transaction log to resolve");
       throw new RuntimeException("Transaction log not yet implemented");
     }
+              */
 
     return commitedTransactionsById;
   }

Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/transactional/DisabledTestTransactions.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/transactional/DisabledTestTransactions.java?rev=774595&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/transactional/DisabledTestTransactions.java
(added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/transactional/DisabledTestTransactions.java
Wed May 13 23:27:07 2009
@@ -0,0 +1,143 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client.transactional;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.ipc.TransactionalRegionInterface;
+import org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Test the transaction functionality. This requires to run an
+ * {@link TransactionalRegionServer}.
+ */
+public class DisabledTestTransactions extends HBaseClusterTestCase {
+
+  private static final String TABLE_NAME = "table1";
+
+  private static final byte[] FAMILY = Bytes.toBytes("family:");
+  private static final byte[] COL_A = Bytes.toBytes("family:a");
+
+  private static final byte[] ROW1 = Bytes.toBytes("row1");
+  private static final byte[] ROW2 = Bytes.toBytes("row2");
+  private static final byte[] ROW3 = Bytes.toBytes("row3");
+
+  private HBaseAdmin admin;
+  private TransactionalTable table;
+  private TransactionManager transactionManager;
+
+  /** constructor */
+  public DisabledTestTransactions() {
+    conf.set(HConstants.REGION_SERVER_CLASS, TransactionalRegionInterface.class
+        .getName());
+    conf.set(HConstants.REGION_SERVER_IMPL, TransactionalRegionServer.class
+        .getName());
+  }
+
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+
+    HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
+    desc.addFamily(new HColumnDescriptor(FAMILY));
+    admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    table = new TransactionalTable(conf, desc.getName());
+
+    transactionManager = new TransactionManager(conf);
+    writeInitalRow();
+  }
+
+  private void writeInitalRow() throws IOException {
+    BatchUpdate update = new BatchUpdate(ROW1);
+    update.put(COL_A, Bytes.toBytes(1));
+    table.commit(update);
+  }
+
+  public void testSimpleTransaction() throws IOException,
+      CommitUnsuccessfulException {
+    TransactionState transactionState = makeTransaction1();
+    transactionManager.tryCommit(transactionState);
+  }
+
+  public void testTwoTransactionsWithoutConflict() throws IOException,
+      CommitUnsuccessfulException {
+    TransactionState transactionState1 = makeTransaction1();
+    TransactionState transactionState2 = makeTransaction2();
+
+    transactionManager.tryCommit(transactionState1);
+    transactionManager.tryCommit(transactionState2);
+  }
+
+  public void TestTwoTransactionsWithConflict() throws IOException,
+      CommitUnsuccessfulException {
+    TransactionState transactionState1 = makeTransaction1();
+    TransactionState transactionState2 = makeTransaction2();
+
+    transactionManager.tryCommit(transactionState2);
+
+    try {
+      transactionManager.tryCommit(transactionState1);
+      fail();
+    } catch (CommitUnsuccessfulException e) {
+      // Good
+    }
+  }
+
+  // Read from ROW1,COL_A and put it in ROW2_COLA and ROW3_COLA
+  private TransactionState makeTransaction1() throws IOException {
+    TransactionState transactionState = transactionManager.beginTransaction();
+
+    Cell row1_A = table.get(transactionState, ROW1, COL_A);
+
+    BatchUpdate write1 = new BatchUpdate(ROW2);
+    write1.put(COL_A, row1_A.getValue());
+    table.commit(transactionState, write1);
+
+    BatchUpdate write2 = new BatchUpdate(ROW3);
+    write2.put(COL_A, row1_A.getValue());
+    table.commit(transactionState, write2);
+
+    return transactionState;
+  }
+
+  // Read ROW1,COL_A, increment its (integer) value, write back
+  private TransactionState makeTransaction2() throws IOException {
+    TransactionState transactionState = transactionManager.beginTransaction();
+
+    Cell row1_A = table.get(transactionState, ROW1, COL_A);
+
+    int value = Bytes.toInt(row1_A.getValue());
+
+    BatchUpdate write = new BatchUpdate(ROW1);
+    write.put(COL_A, Bytes.toBytes(value + 1));
+    table.commit(transactionState, write);
+
+    return transactionState;
+  }
+}

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRegExpRowFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRegExpRowFilter.java?rev=774595&r1=774594&r2=774595&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRegExpRowFilter.java
(original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRegExpRowFilter.java
Wed May 13 23:27:07 2009
@@ -31,7 +31,6 @@
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.regionserver.HLogEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java?rev=774595&r1=774594&r2=774595&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java Wed May
13 23:27:07 2009
@@ -119,23 +119,23 @@
       // Now open a reader on the log and assert append worked.
       reader = new SequenceFile.Reader(fs, filename, conf);
       HLogKey key = new HLogKey();
-      HLogEdit val = new HLogEdit();
+      KeyValue val = new KeyValue();
       for (int i = 0; i < COL_COUNT; i++) {
         reader.next(key, val);
         assertTrue(Bytes.equals(regionName, key.getRegionName()));
         assertTrue(Bytes.equals(tableName, key.getTablename()));
-        assertTrue(Bytes.equals(row, val.getKeyValue().getRow()));
-        assertEquals((byte)(i + '0'), val.getKeyValue().getValue()[0]);
+        assertTrue(Bytes.equals(row, val.getRow()));
+        assertEquals((byte)(i + '0'), val.getValue()[0]);
         System.out.println(key + " " + val);
       }
       while (reader.next(key, val)) {
         // Assert only one more row... the meta flushed row.
         assertTrue(Bytes.equals(regionName, key.getRegionName()));
         assertTrue(Bytes.equals(tableName, key.getTablename()));
-        assertTrue(Bytes.equals(HLog.METAROW, val.getKeyValue().getRow()));
-        assertTrue(Bytes.equals(HLog.METACOLUMN, val.getKeyValue().getColumn()));
-        assertEquals(0, Bytes.compareTo(HLogEdit.COMPLETE_CACHE_FLUSH,
-          val.getKeyValue().getValue()));
+        assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
+        assertTrue(Bytes.equals(HLog.METACOLUMN, val.getColumn()));
+        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
+          val.getValue()));
         System.out.println(key + " " + val);
       }
     } finally {



Mime
View raw message