hbase-commits mailing list archives

From apurt...@apache.org
Subject svn commit: r785008 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
Date Mon, 15 Jun 2009 22:20:55 GMT
Author: apurtell
Date: Mon Jun 15 22:20:55 2009
New Revision: 785008

URL: http://svn.apache.org/viewvc?rev=785008&view=rev
Log:
HBASE-1526 mapreduce fixup

Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=785008&r1=785007&r2=785008&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Jun 15 22:20:55 2009
@@ -88,7 +88,9 @@
    HBASE-1310  Off by one error in Bytes.vintToBytes
    HBASE-1202  getRow does not always work when specifying number of versions
    HBASE-1324  hbase-1234 broke testget2 unit test (and broke the build)
-   HBASE-1321  hbase-1234 broke TestCompaction; fix and reenable HBASE-1330  binary keys broken on trunk (Ryan Rawson via Stack) HBASE-1332  regionserver carrying .META. starts sucking all cpu, drives load
+   HBASE-1321  hbase-1234 broke TestCompaction; fix and reenable
+   HBASE-1330  binary keys broken on trunk (Ryan Rawson via Stack)
+   HBASE-1332  regionserver carrying .META. starts sucking all cpu, drives load
                up - infinite loop? (Ryan Rawson via Stack)
    HBASE-1334  .META. region running into hfile errors (Ryan Rawson via Stack)
    HBASE-1338  lost use of compaction.dir; we were compacting into live store
@@ -168,8 +170,10 @@
    HBASE-1493  New TableMapReduceUtil methods should be static (Billy Pearson
                via Andrew Purtell)
    HBASE-1486  BLOCKCACHE always on even when disabled (Lars George via Stack)
-   HBASE-1491  ZooKeeper errors: "Client has seen zxid 0xe our last zxid is 0xd"
-   HBASE-1499  Fix javadoc warnings after HBASE-1304 commit (Lars George via Stack)
+   HBASE-1491  ZooKeeper errors: "Client has seen zxid 0xe our last zxid
+               is 0xd"
+   HBASE-1499  Fix javadoc warnings after HBASE-1304 commit (Lars George via
+               Stack)
    HBASE-1504  Remove left-over debug from 1304 commit
    HBASE-1518  Delete Trackers using compareRow, should just use raw
                binary comparator (Jon Gray via Stack)
@@ -181,7 +185,7 @@
    HBASE-1522  We delete splits before their time occasionally
    HBASE-1523  NPE in BaseScanner
    HBASE-1525  HTable.incrementColumnValue hangs()
-
+   HBASE-1526  mapreduce fixup
 
   IMPROVEMENTS
    HBASE-1089  Add count of regions on filesystem to master UI; add percentage

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java?rev=785008&r1=785007&r2=785008&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java Mon Jun 15 22:20:55 2009
@@ -75,7 +75,7 @@
  * </pre>
  */
 public abstract class TableInputFormatBase
-implements InputFormat<ImmutableBytesWritable, Result> {
+implements InputFormat<ImmutableBytesWritable, RowResult> {
   final Log LOG = LogFactory.getLog(TableInputFormatBase.class);
   private byte [][] inputColumns;
   private HTable table;
@@ -86,7 +86,7 @@
    * Iterate over an HBase table data, return (Text, RowResult) pairs
    */
   protected class TableRecordReader
-  implements RecordReader<ImmutableBytesWritable, Result> {
+  implements RecordReader<ImmutableBytesWritable, RowResult> {
     private byte [] startRow;
     private byte [] endRow;
     private byte [] lastRow;
@@ -189,8 +189,8 @@
      *
      * @see org.apache.hadoop.mapred.RecordReader#createValue()
      */
-    public Result createValue() {
-      return new Result();
+    public RowResult createValue() {
+      return new RowResult();
     }
 
     public long getPos() {
@@ -210,7 +210,7 @@
      * @return true if there was more data
      * @throws IOException
      */
-    public boolean next(ImmutableBytesWritable key, Result value)
+    public boolean next(ImmutableBytesWritable key, RowResult value)
     throws IOException {
       Result result;
       try {
@@ -225,7 +225,7 @@
       if (result != null && result.size() > 0) {
         key.set(result.getRow());
         lastRow = key.get();
-        Writables.copyWritable(result, value);
+        Writables.copyWritable(result.getRowResult(), value);
         return true;
       }
       return false;
@@ -239,7 +239,7 @@
    * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit,
    *      JobConf, Reporter)
    */
-  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
+  public RecordReader<ImmutableBytesWritable, RowResult> getRecordReader(
       InputSplit split, JobConf job, Reporter reporter)
   throws IOException {
     TableSplit tSplit = (TableSplit) split;
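With this change TableInputFormatBase goes back to handing map tasks (ImmutableBytesWritable, RowResult) pairs instead of the newer Result type. Below is a minimal map-side sketch of a consumer, assuming the pre-0.20 mapred API and that RowResult and Cell expose get(byte[]) and getValue() as before; the class name and column below are made up for illustration only.

import java.io.IOException;

import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

/**
 * Sketch of a mapper over the reverted input types: keys are row keys,
 * values are the RowResult instances produced by TableInputFormatBase.
 */
public class RowResultMapper extends MapReduceBase
implements Mapper<ImmutableBytesWritable, RowResult, Text, Text> {

  // Hypothetical column, for illustration only.
  private static final byte [] COLUMN = Bytes.toBytes("info:name");

  public void map(ImmutableBytesWritable key, RowResult value,
      OutputCollector<Text, Text> output, Reporter reporter)
  throws IOException {
    Cell cell = value.get(COLUMN);
    if (cell != null) {
      // Emit (row key, cell value) as text pairs.
      output.collect(new Text(Bytes.toString(key.get())),
        new Text(Bytes.toString(cell.getValue())));
    }
  }
}

Note that the record reader reuses the RowResult it hands to next(), as is usual for the old mapred API, so a mapper should copy anything it needs to keep across calls.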

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java?rev=785008&r1=785007&r2=785008&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java Mon Jun 15 22:20:55 2009
@@ -26,7 +26,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.mapred.FileAlreadyExistsException;
@@ -41,7 +40,7 @@
  * Convert Map/Reduce output and write it to an HBase table
  */
 public class TableOutputFormat extends
-FileOutputFormat<ImmutableBytesWritable, Put> {
+FileOutputFormat<ImmutableBytesWritable, BatchUpdate> {
 
   /** JobConf parameter that specifies the output table */
   public static final String OUTPUT_TABLE = "hbase.mapred.outputtable";
@@ -52,7 +51,7 @@
    * and write to an HBase table
    */
   protected static class TableRecordWriter
-    implements RecordWriter<ImmutableBytesWritable, Put> {
+    implements RecordWriter<ImmutableBytesWritable, BatchUpdate> {
     private HTable m_table;
 
     /**
@@ -70,8 +69,8 @@
     }
 
     public void write(ImmutableBytesWritable key,
-        Put value) throws IOException {
-      m_table.put(new Put(value));
+        BatchUpdate value) throws IOException {
+      m_table.commit(new BatchUpdate(value));
     }
   }
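On the output side, TableOutputFormat once again takes (ImmutableBytesWritable, BatchUpdate) pairs and its record writer commits each BatchUpdate to the table. A minimal reduce-side sketch of a producer follows, assuming the pre-0.20 BatchUpdate(byte[]) constructor and put(byte[], byte[]) method; the class name and column are hypothetical.

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

/**
 * Sketch of a reducer emitting BatchUpdate values for TableOutputFormat.
 */
public class BatchUpdateReducer extends MapReduceBase
implements Reducer<Text, Text, ImmutableBytesWritable, BatchUpdate> {

  // Hypothetical column, for illustration only.
  private static final byte [] COLUMN = Bytes.toBytes("info:name");

  public void reduce(Text key, Iterator<Text> values,
      OutputCollector<ImmutableBytesWritable, BatchUpdate> output,
      Reporter reporter)
  throws IOException {
    // Concatenate the grouped values into a single cell value.
    StringBuilder sb = new StringBuilder();
    while (values.hasNext()) {
      if (sb.length() > 0) sb.append(",");
      sb.append(values.next().toString());
    }
    byte [] row = Bytes.toBytes(key.toString());
    BatchUpdate update = new BatchUpdate(row);
    update.put(COLUMN, Bytes.toBytes(sb.toString()));
    // TableOutputFormat's record writer commits this BatchUpdate.
    output.collect(new ImmutableBytesWritable(row), update);
  }
}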
   


