hadoop-hive-commits mailing list archives

From: zs...@apache.org
Subject: svn commit: r901644 [10/37] - in /hadoop/hive/trunk: ./ ql/src/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/ ql/src/java/org/apache/hadoop/hive/ql/history/ ql/src/jav...
Date: Thu, 21 Jan 2010 10:38:15 GMT
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java Thu Jan 21 10:37:58 2010
@@ -20,7 +20,6 @@
 
 import java.io.Externalizable;
 import java.io.IOException;
-import java.lang.Exception;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import java.util.ArrayList;
@@ -30,7 +29,6 @@
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
-import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Writable;
 
 /**
@@ -38,8 +36,8 @@
  */
 public class MapJoinObjectKey implements Externalizable {
 
-  transient protected int     metadataTag;
-  transient protected ArrayList<Object>  obj;
+  transient protected int metadataTag;
+  transient protected ArrayList<Object> obj;
 
   public MapJoinObjectKey() {
   }
@@ -53,20 +51,25 @@
     this.obj = obj;
   }
 
+  @Override
   public boolean equals(Object o) {
     if (o instanceof MapJoinObjectKey) {
-      MapJoinObjectKey mObj = (MapJoinObjectKey)o;
+      MapJoinObjectKey mObj = (MapJoinObjectKey) o;
       if (mObj.getMetadataTag() == metadataTag) {
-        if ((obj == null) && (mObj.getObj() == null))
+        if ((obj == null) && (mObj.getObj() == null)) {
           return true;
-        if ((obj != null) && (mObj.getObj() != null) && (mObj.getObj().equals(obj)))
+        }
+        if ((obj != null) && (mObj.getObj() != null)
+            && (mObj.getObj().equals(obj))) {
           return true;
+        }
       }
     }
 
     return false;
   }
 
+  @Override
   public int hashCode() {
     return (obj == null) ? metadataTag : obj.hashCode();
   }
@@ -78,16 +81,14 @@
       metadataTag = in.readInt();
 
       // get the tableDesc from the map stored in the mapjoin operator
-      MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get(Integer.valueOf(metadataTag));
+      MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get(
+          Integer.valueOf(metadataTag));
 
       Writable val = ctx.getSerDe().getSerializedClass().newInstance();
       val.readFields(in);
-      obj =
-        (ArrayList<Object>)
-        ObjectInspectorUtils.copyToStandardObject(
-            ctx.getSerDe().deserialize(val),
-            ctx.getSerDe().getObjectInspector(),
-            ObjectInspectorCopyOption.WRITABLE);
+      obj = (ArrayList<Object>) ObjectInspectorUtils.copyToStandardObject(ctx
+          .getSerDe().deserialize(val), ctx.getSerDe().getObjectInspector(),
+          ObjectInspectorCopyOption.WRITABLE);
     } catch (Exception e) {
       throw new IOException(e);
     }
@@ -100,13 +101,13 @@
       out.writeInt(metadataTag);
 
       // get the tableDesc from the map stored in the mapjoin operator
-      MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get(Integer.valueOf(metadataTag));
+      MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get(
+          Integer.valueOf(metadataTag));
 
       // Different processing for key and value
       Writable outVal = ctx.getSerDe().serialize(obj, ctx.getStandardOI());
       outVal.write(out);
-    }
-    catch (SerDeException e) {
+    } catch (SerDeException e) {
       throw new IOException(e);
     }
   }
@@ -119,7 +120,8 @@
   }
 
   /**
-   * @param metadataTag the metadataTag to set
+   * @param metadataTag
+   *          the metadataTag to set
    */
   public void setMetadataTag(int metadataTag) {
     this.metadataTag = metadataTag;
@@ -133,7 +135,8 @@
   }
 
   /**
-   * @param obj the obj to set
+   * @param obj
+   *          the obj to set
    */
   public void setObj(ArrayList<Object> obj) {
     this.obj = obj;
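
For readers skimming this hunk: MapJoinObjectKey is an Externalizable wrapper around a metadata tag plus a row, with equals() and hashCode() defined over both fields. The following self-contained sketch mirrors that shape. It is illustrative only: it uses plain Java serialization in place of the SerDe lookup through MapJoinOperator.getMapMetadata() that the committed class performs, and the class name is made up.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutput;
    import java.io.ObjectOutputStream;
    import java.util.ArrayList;

    // Simplified stand-in for MapJoinObjectKey: an int tag plus a row.
    public class SimpleJoinKey implements Externalizable {
      private int metadataTag;
      private ArrayList<Object> obj;

      public SimpleJoinKey() {
      } // no-arg constructor required by Externalizable

      public SimpleJoinKey(int metadataTag, ArrayList<Object> obj) {
        this.metadataTag = metadataTag;
        this.obj = obj;
      }

      @Override
      public boolean equals(Object o) {
        if (!(o instanceof SimpleJoinKey)) {
          return false;
        }
        SimpleJoinKey other = (SimpleJoinKey) o;
        if (other.metadataTag != metadataTag) {
          return false;
        }
        return (obj == null) ? other.obj == null : obj.equals(other.obj);
      }

      @Override
      public int hashCode() {
        return (obj == null) ? metadataTag : obj.hashCode();
      }

      // The real class serializes obj through the table's SerDe; plain Java
      // serialization keeps this sketch self-contained.
      @Override
      public void writeExternal(ObjectOutput out) throws IOException {
        out.writeInt(metadataTag);
        out.writeObject(obj);
      }

      @SuppressWarnings("unchecked")
      @Override
      public void readExternal(ObjectInput in) throws IOException,
          ClassNotFoundException {
        metadataTag = in.readInt();
        obj = (ArrayList<Object>) in.readObject();
      }

      public static void main(String[] args) throws Exception {
        ArrayList<Object> row = new ArrayList<Object>();
        row.add("k1");
        SimpleJoinKey key = new SimpleJoinKey(0, row);

        // Round-trip through Externalizable and check key equality survives.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        ObjectOutputStream oos = new ObjectOutputStream(bos);
        oos.writeObject(key);
        oos.close();
        SimpleJoinKey copy = (SimpleJoinKey) new ObjectInputStream(
            new ByteArrayInputStream(bos.toByteArray())).readObject();
        System.out.println(key.equals(copy)); // prints true
      }
    }

A serialized copy comparing equal to the original, as main() demonstrates, is the property a persisted hash table keyed by these objects depends on when keys are written out and read back.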

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java Thu Jan 21 10:37:58 2010
@@ -28,18 +28,17 @@
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator.MapJoinObjectCtx;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
+import org.apache.hadoop.io.Writable;
 
 /**
  * Map Join Object used for both key and value
  */
 public class MapJoinObjectValue implements Externalizable {
 
-  transient protected int     metadataTag;
+  transient protected int metadataTag;
   transient protected RowContainer obj;
   transient protected Configuration conf;
 
@@ -54,21 +53,26 @@
     this.metadataTag = metadataTag;
     this.obj = obj;
   }
-  
+
+  @Override
   public boolean equals(Object o) {
     if (o instanceof MapJoinObjectValue) {
-      MapJoinObjectValue mObj = (MapJoinObjectValue)o;
+      MapJoinObjectValue mObj = (MapJoinObjectValue) o;
       if (mObj.getMetadataTag() == metadataTag) {
-        if ((this.obj == null) && (mObj.getObj() == null))
+        if ((obj == null) && (mObj.getObj() == null)) {
           return true;
-        if ((obj != null) && (mObj.getObj() != null) && (mObj.getObj().equals(obj)))
+        }
+        if ((obj != null) && (mObj.getObj() != null)
+            && (mObj.getObj().equals(obj))) {
           return true;
+        }
       }
     }
 
     return false;
   }
 
+  @Override
   public int hashCode() {
     return (obj == null) ? 0 : obj.hashCode();
   }
@@ -77,10 +81,11 @@
   public void readExternal(ObjectInput in) throws IOException,
       ClassNotFoundException {
     try {
-      metadataTag   = in.readInt();
+      metadataTag = in.readInt();
 
       // get the tableDesc from the map stored in the mapjoin operator
-      MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get(Integer.valueOf(metadataTag));
+      MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get(
+          Integer.valueOf(metadataTag));
       int sz = in.readInt();
 
       RowContainer res = new RowContainer(ctx.getConf());
@@ -90,11 +95,10 @@
         Writable val = ctx.getSerDe().getSerializedClass().newInstance();
         val.readFields(in);
 
-        ArrayList<Object> memObj = (ArrayList<Object>)
-          ObjectInspectorUtils.copyToStandardObject(
-              ctx.getSerDe().deserialize(val),
-              ctx.getSerDe().getObjectInspector(),
-              ObjectInspectorCopyOption.WRITABLE);
+        ArrayList<Object> memObj = (ArrayList<Object>) ObjectInspectorUtils
+            .copyToStandardObject(ctx.getSerDe().deserialize(val), ctx
+                .getSerDe().getObjectInspector(),
+                ObjectInspectorCopyOption.WRITABLE);
 
         res.add(memObj);
       }
@@ -111,23 +115,20 @@
       out.writeInt(metadataTag);
 
       // get the tableDesc from the map stored in the mapjoin operator
-      MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get(Integer.valueOf(metadataTag));
+      MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get(
+          Integer.valueOf(metadataTag));
 
       // Different processing for key and value
       RowContainer<ArrayList<Object>> v = obj;
       out.writeInt(v.size());
 
-      for (ArrayList<Object> row = v.first();
-           row != null;
-           row = v.next() ) {
+      for (ArrayList<Object> row = v.first(); row != null; row = v.next()) {
         Writable outVal = ctx.getSerDe().serialize(row, ctx.getStandardOI());
         outVal.write(out);
       }
-    }
-    catch (SerDeException e) {
+    } catch (SerDeException e) {
       throw new IOException(e);
-    }
-    catch (HiveException e) {
+    } catch (HiveException e) {
       throw new IOException(e);
     }
   }
@@ -140,7 +141,8 @@
   }
 
   /**
-   * @param metadataTag the metadataTag to set
+   * @param metadataTag
+   *          the metadataTag to set
    */
   public void setMetadataTag(int metadataTag) {
     this.metadataTag = metadataTag;
@@ -154,7 +156,8 @@
   }
 
   /**
-   * @param obj the obj to set
+   * @param obj
+   *          the obj to set
    */
   public void setObj(RowContainer obj) {
     this.obj = obj;

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java Thu Jan 21 10:37:58 2010
@@ -73,133 +73,143 @@
  * 
  */
 public class RowContainer<Row extends List<Object>> {
-  
+
   protected Log LOG = LogFactory.getLog(this.getClass().getName());
-  
+
   // max # of rows can be put into one block
-  private static final int BLOCKSIZE   = 25000;
-  
-  private Row[] currentWriteBlock;   // the last block that add() should append to 
-  private Row[] currentReadBlock;   // the current block where the cursor is in
+  private static final int BLOCKSIZE = 25000;
+
+  private Row[] currentWriteBlock; // the last block that add() should append to
+  private Row[] currentReadBlock; // the current block where the cursor is in
   // since currentReadBlock may assigned to currentWriteBlock, we need to store
   // orginal read block
-  private Row[] firstReadBlockPointer;  
-  private int blockSize;     // number of objects in the block before it is spilled to disk
-  private int numFlushedBlocks;     // total # of blocks
-  private int size;          // total # of elements in the RowContainer
-  private File tmpFile;            // temporary file holding the spilled blocks
+  private Row[] firstReadBlockPointer;
+  private int blockSize; // number of objects in the block before it is spilled
+                         // to disk
+  private int numFlushedBlocks; // total # of blocks
+  private int size; // total # of elements in the RowContainer
+  private File tmpFile; // temporary file holding the spilled blocks
   Path tempOutPath = null;
   private File parentFile;
-  private int itrCursor;     // iterator cursor in the currBlock
-  private int readBlockSize; //size of current read block
-  private int addCursor;     // append cursor in the lastBlock
-  private SerDe serde;       // serialization/deserialization for the row
-  private ObjectInspector standardOI;  // object inspector for the row
-  
+  private int itrCursor; // iterator cursor in the currBlock
+  private int readBlockSize; // size of current read block
+  private int addCursor; // append cursor in the lastBlock
+  private SerDe serde; // serialization/deserialization for the row
+  private ObjectInspector standardOI; // object inspector for the row
+
   private List<Object> keyObject;
 
   private tableDesc tblDesc;
-  
-  boolean firstCalled = false; //once called first, it will never be able to write again.
+
+  boolean firstCalled = false; // once called first, it will never be able to
+                               // write again.
   int acutalSplitNum = 0;
   int currentSplitPointer = 0;
-  org.apache.hadoop.mapred.RecordReader rr = null; //record reader
+  org.apache.hadoop.mapred.RecordReader rr = null; // record reader
   RecordWriter rw = null;
   InputFormat<WritableComparable, Writable> inputFormat = null;
   InputSplit[] inputSplits = null;
   private Row dummyRow = null;
-  
-  Writable val = null; //cached to use serialize data
-  
+
+  Writable val = null; // cached to use serialize data
+
   JobConf jobCloneUsingLocalFs = null;
   private LocalFileSystem localFs;
 
   public RowContainer(Configuration jc) throws HiveException {
     this(BLOCKSIZE, jc);
   }
-  
+
   public RowContainer(int blockSize, Configuration jc) throws HiveException {
     // no 0-sized block
     this.blockSize = blockSize == 0 ? BLOCKSIZE : blockSize;
-    this.size      = 0;
+    this.size = 0;
     this.itrCursor = 0;
     this.addCursor = 0;
     this.numFlushedBlocks = 0;
-    this.tmpFile   = null;
+    this.tmpFile = null;
     this.currentWriteBlock = (Row[]) new ArrayList[blockSize];
     this.currentReadBlock = this.currentWriteBlock;
     this.firstReadBlockPointer = currentReadBlock;
-    this.serde     = null;
-    this.standardOI= null;
+    this.serde = null;
+    this.standardOI = null;
     try {
       this.localFs = FileSystem.getLocal(jc);
     } catch (IOException e) {
-     throw new HiveException(e);
+      throw new HiveException(e);
     }
     this.jobCloneUsingLocalFs = new JobConf(jc);
-    HiveConf.setVar(jobCloneUsingLocalFs, HiveConf.ConfVars.HADOOPFS, Utilities.HADOOP_LOCAL_FS);
+    HiveConf.setVar(jobCloneUsingLocalFs, HiveConf.ConfVars.HADOOPFS,
+        Utilities.HADOOP_LOCAL_FS);
   }
-  
-  public RowContainer(int blockSize, SerDe sd, ObjectInspector oi, Configuration jc) throws HiveException {
+
+  public RowContainer(int blockSize, SerDe sd, ObjectInspector oi,
+      Configuration jc) throws HiveException {
     this(blockSize, jc);
     setSerDe(sd, oi);
   }
-  
+
   public void setSerDe(SerDe sd, ObjectInspector oi) {
     this.serde = sd;
     this.standardOI = oi;
   }
-  
+
   public void add(Row t) throws HiveException {
-    if(this.tblDesc != null) {
-      if ( addCursor >= blockSize ) { // spill the current block to tmp file
+    if (this.tblDesc != null) {
+      if (addCursor >= blockSize) { // spill the current block to tmp file
         spillBlock(currentWriteBlock, addCursor);
         addCursor = 0;
-        if ( numFlushedBlocks == 1 )
+        if (numFlushedBlocks == 1) {
           currentWriteBlock = (Row[]) new ArrayList[blockSize];
-      } 
+        }
+      }
       currentWriteBlock[addCursor++] = t;
-    } else if(t != null) {
+    } else if (t != null) {
       // the tableDesc will be null in the case that all columns in that table
       // is not used. we use a dummy row to denote all rows in that table, and
-      // the dummy row is added by caller. 
+      // the dummy row is added by caller.
       this.dummyRow = t;
     }
     ++size;
   }
-  
+
   public Row first() throws HiveException {
-    if ( size == 0 )
+    if (size == 0) {
       return null;
-    
+    }
+
     try {
       firstCalled = true;
       // when we reach here, we must have some data already (because size >0).
-      // We need to see if there are any data flushed into file system. If not, we can
+      // We need to see if there are any data flushed into file system. If not,
+      // we can
       // directly read from the current write block. Otherwise, we need to read
       // from the beginning of the underlying file.
       this.itrCursor = 0;
       closeWriter();
       closeReader();
-      
-      if(tblDesc == null) {
-        this.itrCursor ++;
+
+      if (tblDesc == null) {
+        this.itrCursor++;
         return dummyRow;
       }
-      
+
       this.currentReadBlock = this.firstReadBlockPointer;
       if (this.numFlushedBlocks == 0) {
         this.readBlockSize = this.addCursor;
         this.currentReadBlock = this.currentWriteBlock;
       } else {
         if (inputSplits == null) {
-          if (this.inputFormat == null)
-            inputFormat = (InputFormat<WritableComparable, Writable>) ReflectionUtils.newInstance(tblDesc.getInputFileFormatClass(),
+          if (this.inputFormat == null) {
+            inputFormat = (InputFormat<WritableComparable, Writable>) ReflectionUtils
+                .newInstance(tblDesc.getInputFileFormatClass(),
                     jobCloneUsingLocalFs);
+          }
 
           HiveConf.setVar(jobCloneUsingLocalFs,
               HiveConf.ConfVars.HADOOPMAPREDINPUTDIR,
-              org.apache.hadoop.util.StringUtils.escapeString(parentFile.getAbsolutePath()));
+              org.apache.hadoop.util.StringUtils.escapeString(parentFile
+                  .getAbsolutePath()));
           inputSplits = inputFormat.getSplits(jobCloneUsingLocalFs, 1);
           acutalSplitNum = inputSplits.length;
         }
@@ -217,34 +227,35 @@
     } catch (Exception e) {
       throw new HiveException(e);
     }
-    
+
   }
 
   public Row next() throws HiveException {
-    
-    if(!firstCalled)
+
+    if (!firstCalled) {
       throw new RuntimeException("Call first() then call next().");
-    
-    if ( size == 0 )
+    }
+
+    if (size == 0) {
       return null;
-    
-    if(tblDesc == null) {
-      if(this.itrCursor < size) {
+    }
+
+    if (tblDesc == null) {
+      if (this.itrCursor < size) {
         this.itrCursor++;
         return dummyRow;
       }
-      return null;  
+      return null;
     }
-    
+
     Row ret;
-    if(itrCursor < this.readBlockSize) {
+    if (itrCursor < this.readBlockSize) {
       ret = this.currentReadBlock[itrCursor++];
       removeKeys(ret);
       return ret;
-    }
-    else {
+    } else {
       nextBlock();
-      if ( this.readBlockSize == 0) {
+      if (this.readBlockSize == 0) {
         if (currentWriteBlock != null && currentReadBlock != currentWriteBlock) {
           this.itrCursor = 0;
           this.readBlockSize = this.addCursor;
@@ -262,51 +273,58 @@
     if (this.keyObject != null
         && this.currentReadBlock != this.currentWriteBlock) {
       int len = this.keyObject.size();
-      int rowSize = ((ArrayList)ret).size();
-      for(int i=0;i<len;i++) {
+      int rowSize = ((ArrayList) ret).size();
+      for (int i = 0; i < len; i++) {
         ((ArrayList) ret).remove(rowSize - i - 1);
       }
     }
   }
-  
+
   ArrayList<Object> row = new ArrayList<Object>(2);
+
   private void spillBlock(Row[] block, int length) throws HiveException {
     try {
-      if ( tmpFile == null ) {
+      if (tmpFile == null) {
 
         String suffix = ".tmp";
-        if(this.keyObject != null)
+        if (this.keyObject != null) {
           suffix = "." + this.keyObject.toString() + suffix;
-        
-        while(true) {
+        }
+
+        while (true) {
           String parentId = "hive-rowcontainer" + Utilities.randGen.nextInt();
-          parentFile = new File("/tmp/"+ parentId);
+          parentFile = new File("/tmp/" + parentId);
           boolean success = parentFile.mkdir();
-          if(success)
+          if (success) {
             break;
+          }
           LOG.debug("retry creating tmp row-container directory...");
         }
-        
+
         tmpFile = File.createTempFile("RowContainer", suffix, parentFile);
         LOG.info("RowContainer created temp file " + tmpFile.getAbsolutePath());
-        // Delete the temp file if the JVM terminate normally through Hadoop job kill command.
+        // Delete the temp file if the JVM terminate normally through Hadoop job
+        // kill command.
         // Caveat: it won't be deleted if JVM is killed by 'kill -9'.
         parentFile.deleteOnExit();
-        tmpFile.deleteOnExit(); 
-        
+        tmpFile.deleteOnExit();
+
         // rFile = new RandomAccessFile(tmpFile, "rw");
-        HiveOutputFormat<?, ?> hiveOutputFormat = tblDesc.getOutputFileFormatClass().newInstance();
+        HiveOutputFormat<?, ?> hiveOutputFormat = tblDesc
+            .getOutputFileFormatClass().newInstance();
         tempOutPath = new Path(tmpFile.toString());
-        rw = HiveFileFormatUtils.getRecordWriter(this.jobCloneUsingLocalFs, hiveOutputFormat, 
-            serde.getSerializedClass(), false, tblDesc.getProperties(), tempOutPath);
+        rw = HiveFileFormatUtils.getRecordWriter(this.jobCloneUsingLocalFs,
+            hiveOutputFormat, serde.getSerializedClass(), false, tblDesc
+                .getProperties(), tempOutPath);
       } else if (rw == null) {
-        throw new HiveException("RowContainer has already been closed for writing.");
+        throw new HiveException(
+            "RowContainer has already been closed for writing.");
       }
-      
+
       row.clear();
       row.add(null);
       row.add(null);
-      
+
       if (this.keyObject != null) {
         row.set(1, this.keyObject);
         for (int i = 0; i < length; ++i) {
@@ -315,18 +333,19 @@
           Writable outVal = serde.serialize(row, standardOI);
           rw.write(outVal);
         }
-      }else {
-        for ( int i = 0; i < length; ++i ) {
+      } else {
+        for (int i = 0; i < length; ++i) {
           Row currentValRow = block[i];
           Writable outVal = serde.serialize(currentValRow, standardOI);
           rw.write(outVal);
         }
       }
-      
-      if(block == this.currentWriteBlock)
+
+      if (block == this.currentWriteBlock) {
         this.addCursor = 0;
-      
-      this.numFlushedBlocks ++;
+      }
+
+      this.numFlushedBlocks++;
     } catch (Exception e) {
       clear();
       LOG.error(e.toString(), e);
@@ -336,6 +355,7 @@
 
   /**
    * Get the number of elements in the RowContainer.
+   * 
    * @return number of elements in the RowContainer
    */
   public int size() {
@@ -345,57 +365,64 @@
   private boolean nextBlock() throws HiveException {
     itrCursor = 0;
     this.readBlockSize = 0;
-    if (this.numFlushedBlocks == 0) return false;
-    
+    if (this.numFlushedBlocks == 0) {
+      return false;
+    }
+
     try {
-      if(val == null)
+      if (val == null) {
         val = serde.getSerializedClass().newInstance();
+      }
       boolean nextSplit = true;
       int i = 0;
-      
-      if(rr != null) {
+
+      if (rr != null) {
         Object key = rr.createKey();
         while (i < this.currentReadBlock.length && rr.next(key, val)) {
           nextSplit = false;
-          this.currentReadBlock[i++] = (Row) ObjectInspectorUtils.copyToStandardObject(
-              serde.deserialize(val),
-              serde.getObjectInspector(),
-              ObjectInspectorCopyOption.WRITABLE);
+          this.currentReadBlock[i++] = (Row) ObjectInspectorUtils
+              .copyToStandardObject(serde.deserialize(val), serde
+                  .getObjectInspector(), ObjectInspectorCopyOption.WRITABLE);
         }
       }
-      
+
       if (nextSplit && this.currentSplitPointer < this.acutalSplitNum) {
-        //open record reader to read next split
-        rr = inputFormat.getRecordReader(inputSplits[currentSplitPointer], jobCloneUsingLocalFs,
-            Reporter.NULL);
+        // open record reader to read next split
+        rr = inputFormat.getRecordReader(inputSplits[currentSplitPointer],
+            jobCloneUsingLocalFs, Reporter.NULL);
         currentSplitPointer++;
         return nextBlock();
       }
-      
+
       this.readBlockSize = i;
       return this.readBlockSize > 0;
     } catch (Exception e) {
-      LOG.error(e.getMessage(),e);
+      LOG.error(e.getMessage(), e);
       try {
         this.clear();
       } catch (HiveException e1) {
-        LOG.error(e.getMessage(),e);
+        LOG.error(e.getMessage(), e);
       }
       throw new HiveException(e);
     }
   }
 
-  public void copyToDFSDirecory(FileSystem destFs, Path destPath) throws IOException, HiveException {
-    if (addCursor > 0)
+  public void copyToDFSDirecory(FileSystem destFs, Path destPath)
+      throws IOException, HiveException {
+    if (addCursor > 0) {
       this.spillBlock(this.currentWriteBlock, addCursor);
-    if(tempOutPath == null || tempOutPath.toString().trim().equals(""))
+    }
+    if (tempOutPath == null || tempOutPath.toString().trim().equals("")) {
       return;
+    }
     this.closeWriter();
-    LOG.info("RowContainer copied temp file " + tmpFile.getAbsolutePath()+ " to dfs directory " + destPath.toString());
-    destFs.copyFromLocalFile(true,tempOutPath, new Path(destPath, new Path(tempOutPath.getName())));
+    LOG.info("RowContainer copied temp file " + tmpFile.getAbsolutePath()
+        + " to dfs directory " + destPath.toString());
+    destFs.copyFromLocalFile(true, tempOutPath, new Path(destPath, new Path(
+        tempOutPath.getName())));
     clear();
   }
-  
+
   /**
    * Remove all elements in the RowContainer.
    */
@@ -410,13 +437,15 @@
     this.inputSplits = null;
     tempOutPath = null;
     addCursor = 0;
-    
+
     size = 0;
     try {
-      if (rw != null)
+      if (rw != null) {
         rw.close(false);
-      if (rr != null)
+      }
+      if (rr != null) {
         rr.close();
+      }
     } catch (Exception e) {
       LOG.error(e.toString());
       throw new HiveException(e);
@@ -430,38 +459,41 @@
   }
 
   private void deleteLocalFile(File file, boolean recursive) {
-    try{
+    try {
       if (file != null) {
-        if(!file.exists())
+        if (!file.exists()) {
           return;
-        if(file.isDirectory() && recursive) {
+        }
+        if (file.isDirectory() && recursive) {
           File[] files = file.listFiles();
-          for (int i = 0; i < files.length; i++)
-            deleteLocalFile(files[i], true);
+          for (File file2 : files) {
+            deleteLocalFile(file2, true);
+          }
         }
         boolean deleteSuccess = file.delete();
-        if(!deleteSuccess)
+        if (!deleteSuccess) {
           LOG.error("Error deleting tmp file:" + file.getAbsolutePath());
+        }
       }
     } catch (Exception e) {
       LOG.error("Error deleting tmp file:" + file.getAbsolutePath(), e);
     }
   }
-  
+
   private void closeWriter() throws IOException {
     if (this.rw != null) {
       this.rw.close(false);
       this.rw = null;
     }
   }
-  
+
   private void closeReader() throws IOException {
     if (this.rr != null) {
       this.rr.close();
       this.rr = null;
     }
   }
-  
+
   public void setKeyObject(List<Object> dummyKey) {
     this.keyObject = dummyKey;
   }
@@ -469,5 +501,5 @@
   public void setTableDesc(tableDesc tblDesc) {
     this.tblDesc = tblDesc;
   }
-  
+
 }
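
The RowContainer changes above are formatting only, but the class itself implements a spill-to-disk pattern: rows are appended to an in-memory block of up to BLOCKSIZE entries, full blocks are flushed to a temp file, and first()/next() then replay the flushed rows followed by the current write block. Below is a minimal sketch of that pattern, using plain Java serialization instead of Hive's SerDe and Hadoop record readers/writers, and assuming no rows are added after first() is called; it is an illustration, not the committed code.

    import java.io.EOFException;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.util.ArrayList;
    import java.util.List;

    // Append rows, spill full blocks to a temp file, replay with first()/next().
    public class SpillingRowContainer {
      private final int blockSize;
      private final List<List<Object>> writeBlock;
      private File tmpFile; // holds flushed blocks, if any
      private ObjectOutputStream writer;
      private ObjectInputStream reader;
      private int memCursor;
      private int size;

      public SpillingRowContainer(int blockSize) {
        this.blockSize = blockSize;
        this.writeBlock = new ArrayList<List<Object>>(blockSize);
      }

      public void add(List<Object> row) throws IOException {
        if (writeBlock.size() >= blockSize) {
          spillBlock(); // current block is full
        }
        writeBlock.add(row);
        size++;
      }

      private void spillBlock() throws IOException {
        if (writer == null) {
          tmpFile = File.createTempFile("row-container", ".tmp");
          tmpFile.deleteOnExit();
          writer = new ObjectOutputStream(new FileOutputStream(tmpFile));
        }
        for (List<Object> row : writeBlock) {
          writer.writeObject(new ArrayList<Object>(row));
        }
        writeBlock.clear();
      }

      // Reset the cursor to the first row; must be called before next().
      public List<Object> first() throws IOException, ClassNotFoundException {
        if (writer != null) {
          writer.close(); // no more writes once reading starts
          writer = null;
        }
        if (reader != null) {
          reader.close();
        }
        reader = (tmpFile == null) ? null
            : new ObjectInputStream(new FileInputStream(tmpFile));
        memCursor = 0;
        return next();
      }

      @SuppressWarnings("unchecked")
      public List<Object> next() throws IOException, ClassNotFoundException {
        if (reader != null) { // drain spilled rows first
          try {
            return (List<Object>) reader.readObject();
          } catch (EOFException endOfSpillFile) {
            reader.close();
            reader = null;
          }
        }
        return (memCursor < writeBlock.size()) ? writeBlock.get(memCursor++) : null;
      }

      public int size() {
        return size;
      }
    }

The committed class additionally handles a dummy row for tables whose columns are all unused, an optional key object appended to each spilled row, and re-reading the spill file through the table's InputFormat; none of that is modeled here.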

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java Thu Jan 21 10:37:58 2010
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hive.ql.history;
 
 import java.io.BufferedReader;
-import java.io.FileInputStream;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -43,7 +43,6 @@
 import org.apache.hadoop.mapred.Counters.Counter;
 import org.apache.hadoop.mapred.Counters.Group;
 
-
 public class HiveHistory {
 
   PrintWriter histStream; // History File stream
@@ -54,14 +53,13 @@
 
   private LogHelper console;
 
-  private Map<String,String> idToTableMap = null;
-  
+  private Map<String, String> idToTableMap = null;
+
   // Job Hash Map
-  private HashMap<String, QueryInfo> queryInfoMap = new HashMap<String, QueryInfo>();
+  private final HashMap<String, QueryInfo> queryInfoMap = new HashMap<String, QueryInfo>();
 
   // Task Hash Map
-  private HashMap<String, TaskInfo> taskInfoMap = new HashMap<String, TaskInfo>();
-  
+  private final HashMap<String, TaskInfo> taskInfoMap = new HashMap<String, TaskInfo>();
 
   private static final String DELIMITER = " ";
 
@@ -70,8 +68,7 @@
   };
 
   public static enum Keys {
-    SESSION_ID, QUERY_ID, TASK_ID, QUERY_RET_CODE, QUERY_NUM_TASKS, QUERY_STRING, TIME,
-    TASK_RET_CODE, TASK_NAME, TASK_HADOOP_ID, TASK_HADOOP_PROGRESS, TASK_COUNTERS, TASK_NUM_REDUCERS, ROWS_INSERTED
+    SESSION_ID, QUERY_ID, TASK_ID, QUERY_RET_CODE, QUERY_NUM_TASKS, QUERY_STRING, TIME, TASK_RET_CODE, TASK_NAME, TASK_HADOOP_ID, TASK_HADOOP_PROGRESS, TASK_COUNTERS, TASK_NUM_REDUCERS, ROWS_INSERTED
   };
 
   private static final String KEY = "(\\w+)";
@@ -80,9 +77,9 @@
 
   private static final Pattern pattern = Pattern.compile(KEY + "=" + "\""
       + VALUE + "\"");
-  
-  private static final Pattern rowCountPattern = Pattern.compile(ROW_COUNT_PATTERN);
-  
+
+  private static final Pattern rowCountPattern = Pattern
+      .compile(ROW_COUNT_PATTERN);
 
   // temp buffer for parsed dataa
   private static Map<String, String> parseBuffer = new HashMap<String, String>();
@@ -112,7 +109,7 @@
       StringBuffer buf = new StringBuffer();
       while ((line = reader.readLine()) != null) {
         buf.append(line);
-        //if it does not end with " then it is line continuation
+        // if it does not end with " then it is line continuation
         if (!line.trim().endsWith("\"")) {
           continue;
         }
@@ -183,23 +180,22 @@
       console = new LogHelper(LOG);
       String conf_file_loc = ss.getConf().getVar(
           HiveConf.ConfVars.HIVEHISTORYFILELOC);
-      if ((conf_file_loc == null) || conf_file_loc.length() == 0)
-      {
+      if ((conf_file_loc == null) || conf_file_loc.length() == 0) {
         console.printError("No history file location given");
         return;
       }
-      
-      //Create directory 
+
+      // Create directory
       File f = new File(conf_file_loc);
-      if (!f.exists()){
-        if (!f.mkdir()){
-          console.printError("Unable to create log directory "+conf_file_loc );
+      if (!f.exists()) {
+        if (!f.mkdir()) {
+          console.printError("Unable to create log directory " + conf_file_loc);
           return;
         }
       }
       Random randGen = new Random();
-      histFileName = conf_file_loc + "/hive_job_log_" + ss.getSessionId()
-         +"_" + Math.abs(randGen.nextInt()) + ".txt";
+      histFileName = conf_file_loc + "/hive_job_log_" + ss.getSessionId() + "_"
+          + Math.abs(randGen.nextInt()) + ".txt";
       console.printInfo("Hive history file=" + histFileName);
       histStream = new PrintWriter(histFileName);
 
@@ -207,7 +203,8 @@
       hm.put(Keys.SESSION_ID.name(), ss.getSessionId());
       log(RecordTypes.SessionStart, hm);
     } catch (FileNotFoundException e) {
-      console.printError("FAILED: Failed to open Query Log : " +histFileName+ " "+  e.getMessage(), "\n"
+      console.printError("FAILED: Failed to open Query Log : " + histFileName
+          + " " + e.getMessage(), "\n"
           + org.apache.hadoop.util.StringUtils.stringifyException(e));
     }
 
@@ -228,14 +225,15 @@
    */
   void log(RecordTypes rt, Map<String, String> keyValMap) {
 
-    if (histStream == null)
+    if (histStream == null) {
       return;
+    }
 
     StringBuffer sb = new StringBuffer();
     sb.append(rt.name());
 
     for (Map.Entry<String, String> ent : keyValMap.entrySet()) {
-      
+
       sb.append(DELIMITER);
       String key = ent.getKey();
       String val = ent.getValue();
@@ -255,16 +253,15 @@
    */
   public void startQuery(String cmd, String id) {
     SessionState ss = SessionState.get();
-    if (ss == null)
+    if (ss == null) {
       return;
+    }
     QueryInfo ji = new QueryInfo();
 
     ji.hm.put(Keys.QUERY_ID.name(), id);
     ji.hm.put(Keys.QUERY_STRING.name(), cmd);
-    
+
     queryInfoMap.put(id, ji);
-    
-    
 
     log(RecordTypes.QueryStart, ji.hm);
 
@@ -279,8 +276,9 @@
    */
   public void setQueryProperty(String queryId, Keys propName, String propValue) {
     QueryInfo ji = queryInfoMap.get(queryId);
-    if (ji == null)
+    if (ji == null) {
       return;
+    }
     ji.hm.put(propName.name(), propValue);
   }
 
@@ -295,8 +293,9 @@
       String propValue) {
     String id = queryId + ":" + taskId;
     TaskInfo ti = taskInfoMap.get(id);
-    if (ti == null)
+    if (ti == null) {
       return;
+    }
     ti.hm.put(propName.name(), propValue);
   }
 
@@ -311,8 +310,9 @@
     QueryInfo ji = queryInfoMap.get(queryId);
     StringBuilder sb1 = new StringBuilder("");
     TaskInfo ti = taskInfoMap.get(id);
-    if (ti == null)
+    if (ti == null) {
       return;
+    }
     StringBuilder sb = new StringBuilder("");
     try {
 
@@ -330,39 +330,39 @@
           sb.append(':');
           sb.append(counter.getCounter());
           String tab = getRowCountTableName(counter.getDisplayName());
-          if (tab != null){
-            if (sb1.length() > 0)
+          if (tab != null) {
+            if (sb1.length() > 0) {
               sb1.append(",");
+            }
             sb1.append(tab);
             sb1.append('~');
             sb1.append(counter.getCounter());
             ji.rowCountMap.put(tab, counter.getCounter());
-            
-            
+
           }
         }
       }
-     
-     
 
     } catch (Exception e) {
       e.printStackTrace();
     }
-    if (sb1.length()>0)
-    {
+    if (sb1.length() > 0) {
       taskInfoMap.get(id).hm.put(Keys.ROWS_INSERTED.name(), sb1.toString());
-      queryInfoMap.get(queryId).hm.put(Keys.ROWS_INSERTED.name(), sb1.toString());
+      queryInfoMap.get(queryId).hm.put(Keys.ROWS_INSERTED.name(), sb1
+          .toString());
     }
-    if (sb.length() > 0)
+    if (sb.length() > 0) {
       taskInfoMap.get(id).hm.put(Keys.TASK_COUNTERS.name(), sb.toString());
+    }
   }
 
-  public void printRowCount(String queryId){
+  public void printRowCount(String queryId) {
     QueryInfo ji = queryInfoMap.get(queryId);
-    for (String tab: ji.rowCountMap.keySet()){
-      console.printInfo(ji.rowCountMap.get(tab)+" Rows loaded to "+ tab);
+    for (String tab : ji.rowCountMap.keySet()) {
+      console.printInfo(ji.rowCountMap.get(tab) + " Rows loaded to " + tab);
     }
   }
+
   /**
    * Called at the end of Job. A Job is sql query.
    * 
@@ -371,8 +371,9 @@
   public void endQuery(String queryId) {
 
     QueryInfo ji = queryInfoMap.get(queryId);
-    if (ji == null)
+    if (ji == null) {
       return;
+    }
     log(RecordTypes.QueryEnd, ji.hm);
   }
 
@@ -385,8 +386,9 @@
   public void startTask(String queryId, Task<? extends Serializable> task,
       String taskName) {
     SessionState ss = SessionState.get();
-    if (ss == null)
+    if (ss == null) {
       return;
+    }
     TaskInfo ti = new TaskInfo();
 
     ti.hm.put(Keys.QUERY_ID.name(), ss.getQueryId());
@@ -409,8 +411,9 @@
     String id = queryId + ":" + task.getId();
     TaskInfo ti = taskInfoMap.get(id);
 
-    if (ti == null)
+    if (ti == null) {
       return;
+    }
     log(RecordTypes.TaskEnd, ti.hm);
   }
 
@@ -422,8 +425,9 @@
   public void progressTask(String queryId, Task<? extends Serializable> task) {
     String id = queryId + ":" + task.getId();
     TaskInfo ti = taskInfoMap.get(id);
-    if (ti == null)
+    if (ti == null) {
       return;
+    }
     log(RecordTypes.TaskProgress, ti.hm);
 
   }
@@ -432,6 +436,7 @@
    * write out counters
    */
   static Map<String, String> ctrmap = null;
+
   public void logPlanProgress(QueryPlan plan) throws IOException {
     if (ctrmap == null) {
       ctrmap = new HashMap<String, String>();
@@ -439,25 +444,29 @@
     ctrmap.put("plan", plan.toString());
     log(RecordTypes.Counters, ctrmap);
   }
-  
+
   /**
    * Set the table to id map
+   * 
    * @param map
    */
-  public void setIdToTableMap(Map<String, String> map){
+  public void setIdToTableMap(Map<String, String> map) {
     idToTableMap = map;
   }
 
   /**
-   *  Returns table name for the counter name
+   * Returns table name for the counter name
+   * 
    * @param name
    * @return tableName
    */
-  String getRowCountTableName(String name){
-    if (idToTableMap == null) return null;
+  String getRowCountTableName(String name) {
+    if (idToTableMap == null) {
+      return null;
+    }
     Matcher m = rowCountPattern.matcher(name);
 
-    if (m.find()){
+    if (m.find()) {
       String tuple = m.group(1);
       return idToTableMap.get(tuple);
     }
@@ -465,5 +474,4 @@
 
   }
 
-
 }
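
HiveHistory writes one record per line: a record type followed by space-delimited KEY="value" pairs (see DELIMITER and log() above), and reads them back with the KEY + "=" + "\"" + VALUE + "\"" pattern. Below is a small stand-alone parser for that line format; the value regex is an approximation, since the exact VALUE pattern is defined outside the lines shown in this diff, and the class name is made up.

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Parses the space-delimited KEY="value" records that HiveHistory writes.
    public class HistoryLineParser {
      // Approximation of HiveHistory's key/value pattern: (\w+)="..."
      private static final Pattern KV = Pattern.compile("(\\w+)=\"([^\"]*)\"");

      public static Map<String, String> parse(String line) {
        Map<String, String> values = new LinkedHashMap<String, String>();
        Matcher m = KV.matcher(line);
        while (m.find()) {
          values.put(m.group(1), m.group(2));
        }
        return values;
      }

      public static void main(String[] args) {
        String line = "SessionStart SESSION_ID=\"hive_20100121\" TIME=\"1264070275000\"";
        System.out.println(parse(line));
        // prints {SESSION_ID=hive_20100121, TIME=1264070275000}
      }
    }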

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java Thu Jan 21 10:37:58 2010
@@ -22,8 +22,8 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.hadoop.hive.ql.history.HiveHistory.QueryInfo;
 import org.apache.hadoop.hive.ql.history.HiveHistory.Keys;
+import org.apache.hadoop.hive.ql.history.HiveHistory.QueryInfo;
 import org.apache.hadoop.hive.ql.history.HiveHistory.RecordTypes;
 import org.apache.hadoop.hive.ql.history.HiveHistory.TaskInfo;
 
@@ -35,10 +35,10 @@
   String sessionId;
 
   // Job Hash Map
-  private HashMap<String, QueryInfo> jobInfoMap = new HashMap<String, QueryInfo>();
+  private final HashMap<String, QueryInfo> jobInfoMap = new HashMap<String, QueryInfo>();
 
   // Task Hash Map
-  private HashMap<String, TaskInfo> taskInfoMap = new HashMap<String, TaskInfo>();
+  private final HashMap<String, TaskInfo> taskInfoMap = new HashMap<String, TaskInfo>();
 
   public HiveHistoryViewer(String path) {
     historyFile = path;
@@ -81,7 +81,8 @@
 
     if (recType == RecordTypes.SessionStart) {
       sessionId = values.get(Keys.SESSION_ID.name());
-    } else if (recType == RecordTypes.QueryStart || recType == RecordTypes.QueryEnd) {
+    } else if (recType == RecordTypes.QueryStart
+        || recType == RecordTypes.QueryEnd) {
       String key = values.get(Keys.QUERY_ID.name());
       QueryInfo ji;
       if (jobInfoMap.containsKey(key)) {

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecute.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecute.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecute.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecute.java Thu Jan 21 10:37:58 2010
@@ -19,27 +19,29 @@
 package org.apache.hadoop.hive.ql.hooks;
 
 import java.util.Set;
-import org.apache.hadoop.security.UserGroupInformation;
+
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
- * The post execute hook interface. A list of such hooks can
- * be configured to be called after compilation and before
- * execution.
+ * The post execute hook interface. A list of such hooks can be configured to be
+ * called after compilation and before execution.
  */
 public interface PostExecute {
 
   /**
-   * The run command that is called just before the execution of the
-   * query.
-   *
-   * @param sess The session state.
-   * @param inputs The set of input tables and partitions.
-   * @param outputs The set of output tables, partitions, local and hdfs directories.
-   * @param ugi The user group security information.
+   * The run command that is called just before the execution of the query.
+   * 
+   * @param sess
+   *          The session state.
+   * @param inputs
+   *          The set of input tables and partitions.
+   * @param outputs
+   *          The set of output tables, partitions, local and hdfs directories.
+   * @param ugi
+   *          The user group security information.
    */
   public void run(SessionState sess, Set<ReadEntity> inputs,
-      Set<WriteEntity> outputs, UserGroupInformation ugi)
-    throws Exception;
+      Set<WriteEntity> outputs, UserGroupInformation ugi) throws Exception;
 
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java Thu Jan 21 10:37:58 2010
@@ -19,27 +19,29 @@
 package org.apache.hadoop.hive.ql.hooks;
 
 import java.util.Set;
-import org.apache.hadoop.security.UserGroupInformation;
+
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
- * The pre execute hook interface. A list of such hooks can
- * be configured to be called after compilation and before 
- * execution.
+ * The pre execute hook interface. A list of such hooks can be configured to be
+ * called after compilation and before execution.
  */
 public interface PreExecute {
 
   /**
-   * The run command that is called just before the execution of the
-   * query.
+   * The run command that is called just before the execution of the query.
    * 
-   * @param sess The session state.
-   * @param inputs The set of input tables and partitions.
-   * @param outputs The set of output tables, partitions, local and hdfs directories.
-   * @param ugi The user group security information.
+   * @param sess
+   *          The session state.
+   * @param inputs
+   *          The set of input tables and partitions.
+   * @param outputs
+   *          The set of output tables, partitions, local and hdfs directories.
+   * @param ugi
+   *          The user group security information.
    */
-  public void run(SessionState sess, Set<ReadEntity> inputs, 
-      Set<WriteEntity> outputs, UserGroupInformation ugi)
-    throws Exception;
-  
+  public void run(SessionState sess, Set<ReadEntity> inputs,
+      Set<WriteEntity> outputs, UserGroupInformation ugi) throws Exception;
+
 }
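
Since the full PreExecute signature appears in this diff, here is a hypothetical implementation that simply prints the entities a query reads and writes; only the interface and the entity classes come from Hive, the hook class itself is illustrative.

    import java.util.Set;

    import org.apache.hadoop.hive.ql.hooks.PreExecute;
    import org.apache.hadoop.hive.ql.hooks.ReadEntity;
    import org.apache.hadoop.hive.ql.hooks.WriteEntity;
    import org.apache.hadoop.hive.ql.session.SessionState;
    import org.apache.hadoop.security.UserGroupInformation;

    // Hypothetical pre-execution hook: logs what the query touches, then returns.
    public class LoggingPreExecuteHook implements PreExecute {

      public void run(SessionState sess, Set<ReadEntity> inputs,
          Set<WriteEntity> outputs, UserGroupInformation ugi) throws Exception {
        System.out.println("session=" + sess.getSessionId() + " user="
            + ugi.getUserName());
        for (ReadEntity input : inputs) {
          System.out.println("  reads  " + input); // db@table or db@table@partition
        }
        for (WriteEntity output : outputs) {
          System.out.println("  writes " + output); // table, partition, or directory
        }
      }
    }

A hook like this is wired in through Hive configuration rather than constructed directly; the relevant configuration property is not part of this diff.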

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java Thu Jan 21 10:37:58 2010
@@ -18,50 +18,56 @@
 
 package org.apache.hadoop.hive.ql.hooks;
 
+import java.net.URI;
+import java.util.Map;
+
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import java.util.Map;
-import java.net.URI;
 
 /**
- * This class encapsulates the information on the partition and
- * tables that are read by the query.
+ * This class encapsulates the information on the partition and tables that are
+ * read by the query.
  */
 public class ReadEntity {
 
   /**
    * The partition. This is null for a non partitioned table.
    */
-  private Partition p;
+  private final Partition p;
 
   /**
    * The table.
    */
-  private Table t;
+  private final Table t;
 
   /**
    * Constructor.
-   *
-   * @param t The Table that the query reads from.
+   * 
+   * @param t
+   *          The Table that the query reads from.
    */
   public ReadEntity(Table t) {
     this.t = t;
-    this.p = null;
+    p = null;
   }
 
   /**
    * Constructor given a partiton.
-   *
-   * @param p The partition that the query reads from.
+   * 
+   * @param p
+   *          The partition that the query reads from.
    */
   public ReadEntity(Partition p) {
-    this.t = p.getTable();
+    t = p.getTable();
     this.p = p;
   }
+
   /**
    * Enum that tells what time of a read entity this is.
    */
-  public static enum Type {TABLE, PARTITION};
+  public static enum Type {
+    TABLE, PARTITION
+  };
 
   /**
    * Get the type.
@@ -76,8 +82,7 @@
   public Map<String, String> getParameters() {
     if (p != null) {
       return p.getTPartition().getParameters();
-    }
-    else {
+    } else {
       return t.getTTable().getParameters();
     }
   }
@@ -88,8 +93,7 @@
   public URI getLocation() {
     if (p != null) {
       return p.getDataLocation();
-    }
-    else {
+    } else {
       return t.getDataLocation();
     }
   }
@@ -114,9 +118,9 @@
   @Override
   public String toString() {
     if (p != null) {
-      return p.getTable().getDbName() + "@" + p.getTable().getName() + "@" + p.getName();
-    }
-    else {
+      return p.getTable().getDbName() + "@" + p.getTable().getName() + "@"
+          + p.getName();
+    } else {
       return t.getDbName() + "@" + t.getName();
     }
   }
@@ -126,15 +130,16 @@
    */
   @Override
   public boolean equals(Object o) {
-    if (o == null)
+    if (o == null) {
       return false;
+    }
 
     if (o instanceof ReadEntity) {
-      ReadEntity ore = (ReadEntity)o;
+      ReadEntity ore = (ReadEntity) o;
       return (toString().equalsIgnoreCase(ore.toString()));
-    }
-    else
+    } else {
       return false;
+    }
   }
 
   /**

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java Thu Jan 21 10:37:58 2010
@@ -18,21 +18,23 @@
 
 package org.apache.hadoop.hive.ql.hooks;
 
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.metadata.Partition;
 import java.net.URI;
 
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+
 /**
- * This class encapsulates an object that is being written to
- * by the query. This object may be a table, partition, dfs
- * directory or a local directory.
+ * This class encapsulates an object that is being written to by the query. This
+ * object may be a table, partition, dfs directory or a local directory.
  */
 public class WriteEntity {
 
   /**
    * The type of the write entity.
    */
-  public static enum Type {TABLE, PARTITION, DFS_DIR, LOCAL_DIR};
+  public static enum Type {
+    TABLE, PARTITION, DFS_DIR, LOCAL_DIR
+  };
 
   /**
    * The type.
@@ -42,57 +44,60 @@
   /**
    * The table. This is null if this is a directory.
    */
-  private Table t;
+  private final Table t;
 
   /**
    * The partition.This is null if this object is not a partition.
    */
-  private Partition p;
+  private final Partition p;
 
   /**
    * The directory if this is a directory.
    */
-  private String d;
+  private final String d;
 
   /**
    * Constructor for a table.
-   *
-   * @param t Table that is written to.
+   * 
+   * @param t
+   *          Table that is written to.
    */
   public WriteEntity(Table t) {
-    this.d = null;
-    this.p = null;
+    d = null;
+    p = null;
     this.t = t;
-    this.typ = Type.TABLE;
+    typ = Type.TABLE;
   }
 
   /**
    * Constructor for a partition.
-   *
-   * @param p Partition that is written to.
+   * 
+   * @param p
+   *          Partition that is written to.
    */
   public WriteEntity(Partition p) {
-    this.d = null;
+    d = null;
     this.p = p;
-    this.t = p.getTable();
-    this.typ = Type.PARTITION;
+    t = p.getTable();
+    typ = Type.PARTITION;
   }
 
   /**
    * Constructor for a file.
-   *
-   * @param d The name of the directory that is being written to.
-   * @param islocal Flag to decide whether this directory is local or in dfs.
+   * 
+   * @param d
+   *          The name of the directory that is being written to.
+   * @param islocal
+   *          Flag to decide whether this directory is local or in dfs.
    */
   public WriteEntity(String d, boolean islocal) {
     this.d = d;
-    this.p = null;
-    this.t = null;
+    p = null;
+    t = null;
     if (islocal) {
-      this.typ = Type.LOCAL_DIR;
-    }
-    else {
-      this.typ = Type.DFS_DIR;
+      typ = Type.LOCAL_DIR;
+    } else {
+      typ = Type.DFS_DIR;
     }
   }
 
@@ -107,14 +112,17 @@
    * Get the location of the entity.
    */
   public URI getLocation() throws Exception {
-    if (typ == Type.TABLE)
+    if (typ == Type.TABLE) {
       return t.getDataLocation();
+    }
 
-    if (typ == Type.PARTITION)
+    if (typ == Type.PARTITION) {
       return p.getDataLocation();
+    }
 
-    if (typ == Type.DFS_DIR || typ == Type.LOCAL_DIR)
+    if (typ == Type.DFS_DIR || typ == Type.LOCAL_DIR) {
       return new URI(d);
+    }
 
     return null;
   }
@@ -136,8 +144,9 @@
   /**
    * toString function.
    */
+  @Override
   public String toString() {
-    switch(typ) {
+    switch (typ) {
     case TABLE:
       return t.getDbName() + "@" + t.getName();
     case PARTITION:
@@ -152,15 +161,16 @@
    */
   @Override
   public boolean equals(Object o) {
-    if (o == null)
+    if (o == null) {
       return false;
+    }
 
     if (o instanceof WriteEntity) {
-      WriteEntity ore = (WriteEntity)o;
+      WriteEntity ore = (WriteEntity) o;
       return (toString().equalsIgnoreCase(ore.toString()));
-    }
-    else
+    } else {
       return false;
+    }
   }
 
   /**

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CodecPool.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CodecPool.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CodecPool.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CodecPool.java Thu Jan 21 10:37:58 2010
@@ -29,45 +29,43 @@
 import org.apache.hadoop.io.compress.Decompressor;
 
 /**
- * A global compressor/decompressor pool used to save and reuse 
- * (possibly native) compression/decompression codecs.
+ * A global compressor/decompressor pool used to save and reuse (possibly
+ * native) compression/decompression codecs.
  */
 public class CodecPool {
   private static final Log LOG = LogFactory.getLog(CodecPool.class);
-  
+
   /**
-   * A global compressor pool used to save the expensive 
+   * A global compressor pool used to save the expensive
    * construction/destruction of (possibly native) decompression codecs.
    */
-  private static final Map<Class<Compressor>, List<Compressor>> compressorPool = 
-    new HashMap<Class<Compressor>, List<Compressor>>();
-  
+  private static final Map<Class<Compressor>, List<Compressor>> compressorPool = new HashMap<Class<Compressor>, List<Compressor>>();
+
   /**
-   * A global decompressor pool used to save the expensive 
+   * A global decompressor pool used to save the expensive
    * construction/destruction of (possibly native) decompression codecs.
    */
-  private static final Map<Class<Decompressor>, List<Decompressor>> decompressorPool = 
-    new HashMap<Class<Decompressor>, List<Decompressor>>();
+  private static final Map<Class<Decompressor>, List<Decompressor>> decompressorPool = new HashMap<Class<Decompressor>, List<Decompressor>>();
 
   private static <T> T borrow(Map<Class<T>, List<T>> pool,
-                             Class<? extends T> codecClass) {
+      Class<? extends T> codecClass) {
     T codec = null;
-    
+
     // Check if an appropriate codec is available
     synchronized (pool) {
       if (pool.containsKey(codecClass)) {
         List<T> codecList = pool.get(codecClass);
-        
+
         if (codecList != null) {
           synchronized (codecList) {
             if (!codecList.isEmpty()) {
-              codec = codecList.remove(codecList.size()-1);
+              codec = codecList.remove(codecList.size() - 1);
             }
           }
         }
       }
     }
-    
+
     return codec;
   }
 
@@ -86,18 +84,19 @@
       }
     }
   }
-  
+
   /**
-   * Get a {@link Compressor} for the given {@link CompressionCodec} from the 
+   * Get a {@link Compressor} for the given {@link CompressionCodec} from the
    * pool or a new one.
-   *
-   * @param codec the <code>CompressionCodec</code> for which to get the 
-   *              <code>Compressor</code>
-   * @return <code>Compressor</code> for the given 
-   *         <code>CompressionCodec</code> from the pool or a new one
+   * 
+   * @param codec
+   *          the <code>CompressionCodec</code> for which to get the
+   *          <code>Compressor</code>
+   * @return <code>Compressor</code> for the given <code>CompressionCodec</code>
+   *         from the pool or a new one
    */
   public static Compressor getCompressor(CompressionCodec codec) {
-    Compressor compressor = (Compressor) borrow(compressorPool, codec.getCompressorType());
+    Compressor compressor = borrow(compressorPool, codec.getCompressorType());
     if (compressor == null) {
       compressor = codec.createCompressor();
       LOG.info("Got brand-new compressor");
@@ -106,18 +105,20 @@
     }
     return compressor;
   }
-  
+
   /**
    * Get a {@link Decompressor} for the given {@link CompressionCodec} from the
    * pool or a new one.
-   *  
-   * @param codec the <code>CompressionCodec</code> for which to get the 
-   *              <code>Decompressor</code>
-   * @return <code>Decompressor</code> for the given 
+   * 
+   * @param codec
+   *          the <code>CompressionCodec</code> for which to get the
+   *          <code>Decompressor</code>
+   * @return <code>Decompressor</code> for the given
    *         <code>CompressionCodec</code> the pool or a new one
    */
   public static Decompressor getDecompressor(CompressionCodec codec) {
-    Decompressor decompressor = (Decompressor) borrow(decompressorPool, codec.getDecompressorType());
+    Decompressor decompressor = borrow(decompressorPool, codec
+        .getDecompressorType());
     if (decompressor == null) {
       decompressor = codec.createDecompressor();
       LOG.info("Got brand-new decompressor");
@@ -126,11 +127,12 @@
     }
     return decompressor;
   }
-  
+
   /**
    * Return the {@link Compressor} to the pool.
    * 
-   * @param compressor the <code>Compressor</code> to be returned to the pool
+   * @param compressor
+   *          the <code>Compressor</code> to be returned to the pool
    */
   public static void returnCompressor(Compressor compressor) {
     if (compressor == null) {
@@ -139,12 +141,12 @@
     compressor.reset();
     payback(compressorPool, compressor);
   }
-  
+
   /**
    * Return the {@link Decompressor} to the pool.
    * 
-   * @param decompressor the <code>Decompressor</code> to be returned to the 
-   *                     pool
+   * @param decompressor
+   *          the <code>Decompressor</code> to be returned to the pool
    */
   public static void returnDecompressor(Decompressor decompressor) {
     if (decompressor == null) {
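
CodecPool exposes a borrow/return API around Hadoop compressors and decompressors. A minimal usage sketch follows, assuming Hadoop's DefaultCodec is on the classpath; it is an example of calling the API shown above, not code from this commit.

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.io.CodecPool;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.Compressor;
    import org.apache.hadoop.io.compress.DefaultCodec;
    import org.apache.hadoop.util.ReflectionUtils;

    // Borrow a compressor from the pool, compress a few bytes, hand it back.
    public class CodecPoolExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        CompressionCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);

        Compressor compressor = CodecPool.getCompressor(codec);
        try {
          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
          OutputStream out = codec.createOutputStream(bytes, compressor);
          out.write("hello codec pool".getBytes("UTF-8"));
          out.close(); // finishes the compressed stream
          System.out.println("compressed to " + bytes.size() + " bytes");
        } finally {
          // returnCompressor() resets the instance and makes it reusable.
          CodecPool.returnCompressor(compressor);
        }
      }
    }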

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java Thu Jan 21 10:37:58 2010
@@ -18,105 +18,103 @@
 
 package org.apache.hadoop.hive.ql.io;
 
-import java.io.File;
 import java.io.DataInput;
 import java.io.DataOutput;
+import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.Map;
-import java.util.List;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.io.Serializable;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
 import org.apache.hadoop.hive.ql.plan.partitionDesc;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.shims.HadoopShims.CombineFileInputFormatShim;
+import org.apache.hadoop.hive.shims.HadoopShims.InputSplitShim;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobConfigurable;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.FileInputFormat;
-
-import org.apache.hadoop.hive.shims.HadoopShims.CombineFileInputFormatShim;
-import org.apache.hadoop.hive.shims.HadoopShims.InputSplitShim;
-import org.apache.hadoop.hive.shims.ShimLoader;
-
-import org.apache.hadoop.util.ReflectionUtils;
 
 /**
- * CombineHiveInputFormat is a parameterized InputFormat which looks at the path name and determine
- * the correct InputFormat for that path name from mapredPlan.pathToPartitionInfo().
- * It can be used to read files with different input format in the same map-reduce job.
+ * CombineHiveInputFormat is a parameterized InputFormat which looks at the path
+ * name and determines the correct InputFormat for that path name from
+ * mapredPlan.pathToPartitionInfo(). It can be used to read files with different
+ * input formats in the same map-reduce job.
  */
-public class CombineHiveInputFormat<K extends WritableComparable,
-                             V extends Writable> extends HiveInputFormat<K, V> {
+public class CombineHiveInputFormat<K extends WritableComparable, V extends Writable>
+    extends HiveInputFormat<K, V> {
 
-  public static final Log LOG =
-    LogFactory.getLog("org.apache.hadoop.hive.ql.io.CombineHiveInputFormat");
+  public static final Log LOG = LogFactory
+      .getLog("org.apache.hadoop.hive.ql.io.CombineHiveInputFormat");
 
   /**
-   * CombineHiveInputSplit encapsulates an InputSplit with its corresponding inputFormatClassName.
-   * A CombineHiveInputSplit comprises of multiple chunks from different files. Since, they belong
-   * to a single directory, there is a single inputformat for all the chunks.
+   * CombineHiveInputSplit encapsulates an InputSplit with its corresponding
+   * inputFormatClassName. A CombineHiveInputSplit comprises multiple chunks
+   * from different files. Since they belong to a single directory, there is a
+   * single input format for all the chunks.
    */
   public static class CombineHiveInputSplit implements InputSplitShim {
 
-    String           inputFormatClassName;
-    InputSplitShim   inputSplitShim;
+    String inputFormatClassName;
+    InputSplitShim inputSplitShim;
 
     public CombineHiveInputSplit() throws IOException {
-      this(ShimLoader.getHadoopShims().getCombineFileInputFormat().getInputSplitShim());
+      this(ShimLoader.getHadoopShims().getCombineFileInputFormat()
+          .getInputSplitShim());
     }
 
-    public CombineHiveInputSplit(InputSplitShim inputSplitShim) throws IOException {
+    public CombineHiveInputSplit(InputSplitShim inputSplitShim)
+        throws IOException {
       this(inputSplitShim.getJob(), inputSplitShim);
     }
 
-    public CombineHiveInputSplit(JobConf job, InputSplitShim inputSplitShim) throws IOException {
+    public CombineHiveInputSplit(JobConf job, InputSplitShim inputSplitShim)
+        throws IOException {
       this.inputSplitShim = inputSplitShim;
       if (job != null) {
-        Map<String, partitionDesc> pathToPartitionInfo = 
-          Utilities.getMapRedWork(job).getPathToPartitionInfo();
+        Map<String, partitionDesc> pathToPartitionInfo = Utilities
+            .getMapRedWork(job).getPathToPartitionInfo();
 
-        // extract all the inputFormatClass names for each chunk in the CombinedSplit.
+        // extract all the inputFormatClass names for each chunk in the
+        // CombinedSplit.
         Path[] ipaths = inputSplitShim.getPaths();
         for (int i = 0; i < ipaths.length; i++) {
-        	partitionDesc part = null;
+          partitionDesc part = null;
           try {
-          	part = getPartitionDescFromPath(pathToPartitionInfo, ipaths[i].getParent());
+            part = getPartitionDescFromPath(pathToPartitionInfo, ipaths[i]
+                .getParent());
           } catch (IOException e) {
             // The file path may be present in case of sampling - so ignore that
-          	part = null;
+            part = null;
           }
 
           if (part == null) {
             try {
-            	part = getPartitionDescFromPath(pathToPartitionInfo, ipaths[i]);
+              part = getPartitionDescFromPath(pathToPartitionInfo, ipaths[i]);
             } catch (IOException e) {
-              LOG.warn("CombineHiveInputSplit unable to find table description for " +
-                       ipaths[i].getParent());
+              LOG
+                  .warn("CombineHiveInputSplit unable to find table description for "
+                      + ipaths[i].getParent());
               continue;
             }
           }
-          
-          // create a new InputFormat instance if this is the first time to see this class
-          if (i == 0)
+
+          // create a new InputFormat instance if this is the first time to see
+          // this class
+          if (i == 0) {
             inputFormatClassName = part.getInputFileFormatClass().getName();
-          else
-            assert inputFormatClassName.equals(part.getInputFileFormatClass().getName());
+          } else {
+            assert inputFormatClassName.equals(part.getInputFileFormatClass()
+                .getName());
+          }
         }
       }
     }
@@ -124,7 +122,7 @@
     public InputSplitShim getInputSplitShim() {
       return inputSplitShim;
     }
-    
+
     /**
      * Returns the inputFormat class name for the i-th chunk
      */
@@ -135,58 +133,59 @@
     public void setInputFormatClassName(String inputFormatClassName) {
       this.inputFormatClassName = inputFormatClassName;
     }
-    
+
     public JobConf getJob() {
       return inputSplitShim.getJob();
     }
-    
+
     public long getLength() {
       return inputSplitShim.getLength();
     }
-    
-    /** Returns an array containing the startoffsets of the files in the split*/ 
+
+    /** Returns an array containing the start offsets of the files in the split */
     public long[] getStartOffsets() {
       return inputSplitShim.getStartOffsets();
     }
-    
-    /** Returns an array containing the lengths of the files in the split*/ 
+
+    /** Returns an array containing the lengths of the files in the split */
     public long[] getLengths() {
       return inputSplitShim.getLengths();
     }
-    
+
     /** Returns the start offset of the i<sup>th</sup> Path */
     public long getOffset(int i) {
       return inputSplitShim.getOffset(i);
     }
-    
+
     /** Returns the length of the i<sup>th</sup> Path */
     public long getLength(int i) {
       return inputSplitShim.getLength(i);
     }
-    
+
     /** Returns the number of Paths in the split */
     public int getNumPaths() {
       return inputSplitShim.getNumPaths();
     }
-    
+
     /** Returns the i<sup>th</sup> Path */
     public Path getPath(int i) {
       return inputSplitShim.getPath(i);
     }
-    
+
     /** Returns all the Paths in the split */
     public Path[] getPaths() {
       return inputSplitShim.getPaths();
     }
-    
+
     /** Returns all the Paths where this input-split resides */
     public String[] getLocations() throws IOException {
       return inputSplitShim.getLocations();
     }
-    
+
     /**
      * Returns this object as a string.
      */
+    @Override
     public String toString() {
       StringBuffer sb = new StringBuffer();
       sb.append(inputSplitShim.toString());
@@ -210,22 +209,27 @@
       inputSplitShim.write(out);
 
       if (inputFormatClassName == null) {
-        Map<String, partitionDesc> pathToPartitionInfo = 
-          Utilities.getMapRedWork(getJob()).getPathToPartitionInfo();
-        
-        // extract all the inputFormatClass names for each chunk in the CombinedSplit.
+        Map<String, partitionDesc> pathToPartitionInfo = Utilities
+            .getMapRedWork(getJob()).getPathToPartitionInfo();
+
+        // extract all the inputFormatClass names for each chunk in the
+        // CombinedSplit.
         partitionDesc part = null;
         try {
-        	part = getPartitionDescFromPath(pathToPartitionInfo, inputSplitShim.getPath(0).getParent());
+          part = getPartitionDescFromPath(pathToPartitionInfo, inputSplitShim
+              .getPath(0).getParent());
         } catch (IOException e) {
           // The file path may be present in case of sampling - so ignore that
-        	part = null;
+          part = null;
         }
 
-        if (part == null)
-        	part = getPartitionDescFromPath(pathToPartitionInfo, inputSplitShim.getPath(0));
+        if (part == null) {
+          part = getPartitionDescFromPath(pathToPartitionInfo, inputSplitShim
+              .getPath(0));
+        }
 
-        // create a new InputFormat instance if this is the first time to see this class
+        // create a new InputFormat instance if this is the first time to see
+        // this class
         inputFormatClassName = part.getInputFileFormatClass().getName();
       }
 
@@ -236,40 +240,45 @@
   /**
    * Create Hive splits based on CombineFileSplit
    */
+  @Override
   public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
 
     init(job);
-    CombineFileInputFormatShim combine = ShimLoader.getHadoopShims().getCombineFileInputFormat();
+    CombineFileInputFormatShim combine = ShimLoader.getHadoopShims()
+        .getCombineFileInputFormat();
 
     if (combine.getInputPathsShim(job).length == 0) {
       throw new IOException("No input paths specified in job");
     }
     ArrayList<InputSplit> result = new ArrayList<InputSplit>();
 
-    // combine splits only from same tables. Do not combine splits from multiple tables.
+    // Combine splits only from the same table. Do not combine splits from
+    // multiple tables.
     Path[] paths = combine.getInputPathsShim(job);
-    for (int i = 0; i < paths.length; i++) {
-      LOG.info("CombineHiveInputSplit creating pool for " + paths[i]);
-      combine.createPool(job, new CombineFilter(paths[i]));
+    for (Path path : paths) {
+      LOG.info("CombineHiveInputSplit creating pool for " + path);
+      combine.createPool(job, new CombineFilter(path));
     }
 
-    InputSplitShim[] iss = (InputSplitShim[])combine.getSplits(job, 1);
-    for (InputSplitShim is: iss) {
+    InputSplitShim[] iss = combine.getSplits(job, 1);
+    for (InputSplitShim is : iss) {
       CombineHiveInputSplit csplit = new CombineHiveInputSplit(job, is);
       result.add(csplit);
     }
-    
+
     LOG.info("number of splits " + result.size());
 
     return result.toArray(new CombineHiveInputSplit[result.size()]);
   }
 
   /**
-   * Create a generic Hive RecordReader than can iterate over all chunks in 
-   * a CombinedFileSplit
+   * Create a generic Hive RecordReader that can iterate over all chunks in a
+   * CombineFileSplit
    */
-  public RecordReader getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
-    CombineHiveInputSplit hsplit = (CombineHiveInputSplit)split;
+  @Override
+  public RecordReader getRecordReader(InputSplit split, JobConf job,
+      Reporter reporter) throws IOException {
+    CombineHiveInputSplit hsplit = (CombineHiveInputSplit) split;
 
     String inputFormatClassName = null;
     Class inputFormatClass = null;
@@ -280,33 +289,35 @@
       throw new IOException("cannot find class " + inputFormatClassName);
     }
 
-    initColumnsNeeded(job, inputFormatClass, hsplit.getPath(0).toString(), 
-                      hsplit.getPath(0).toUri().getPath());
+    initColumnsNeeded(job, inputFormatClass, hsplit.getPath(0).toString(),
+        hsplit.getPath(0).toUri().getPath());
 
-    return 
-      ShimLoader.getHadoopShims().getCombineFileInputFormat().getRecordReader(job, 
-        ((CombineHiveInputSplit)split).getInputSplitShim(), 
-        reporter, CombineHiveRecordReader.class);
+    return ShimLoader.getHadoopShims().getCombineFileInputFormat()
+        .getRecordReader(job,
+            ((CombineHiveInputSplit) split).getInputSplitShim(), reporter,
+            CombineHiveRecordReader.class);
   }
 
   protected static partitionDesc getPartitionDescFromPath(
-      Map<String, partitionDesc> pathToPartitionInfo, Path dir) throws IOException {
-	// The format of the keys in pathToPartitionInfo sometimes contains a port
-	// and sometimes doesn't, so we just compare paths.
-    for (Map.Entry<String, partitionDesc> entry : pathToPartitionInfo.entrySet()) {
+      Map<String, partitionDesc> pathToPartitionInfo, Path dir)
+      throws IOException {
+    // The format of the keys in pathToPartitionInfo sometimes contains a port
+    // and sometimes doesn't, so we just compare paths.
+    for (Map.Entry<String, partitionDesc> entry : pathToPartitionInfo
+        .entrySet()) {
       try {
-        if (new URI(entry.getKey()).getPath().equals(dir.toUri().getPath())) {			
+        if (new URI(entry.getKey()).getPath().equals(dir.toUri().getPath())) {
           return entry.getValue();
         }
+      } catch (URISyntaxException e2) {
       }
-      catch (URISyntaxException e2) {}
     }
     throw new IOException("cannot find dir = " + dir.toString()
-      + " in partToPartitionInfo!");
+        + " in pathToPartitionInfo!");
   }
 
   static class CombineFilter implements PathFilter {
-    private String pString;
+    private final String pString;
 
     // store a path prefix in this TestFilter
     public CombineFilter(Path p) {
@@ -322,6 +333,7 @@
       return false;
     }
 
+    @Override
     public String toString() {
       return "PathFilter:" + pString;
     }
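
To make the lookup in getPartitionDescFromPath concrete: keys in pathToPartitionInfo sometimes carry a port and sometimes do not, so only the URI path component is compared. A small self-contained sketch of that comparison (the class name, map contents, and hostnames are invented for illustration):

import java.net.URI;
import java.net.URISyntaxException;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.fs.Path;

public class PathMatchSketch {
  // Return the value whose key has the same URI path as dir, ignoring
  // scheme/authority differences such as a missing port.
  static String lookup(Map<String, String> pathToInfo, Path dir) {
    for (Map.Entry<String, String> entry : pathToInfo.entrySet()) {
      try {
        if (new URI(entry.getKey()).getPath().equals(dir.toUri().getPath())) {
          return entry.getValue();
        }
      } catch (URISyntaxException ignored) {
        // malformed keys are simply skipped, as in the loop above
      }
    }
    return null;
  }

  public static void main(String[] args) {
    Map<String, String> m = new LinkedHashMap<String, String>();
    m.put("hdfs://nn:8020/warehouse/t", "org.apache.hadoop.mapred.TextInputFormat");
    // Matches even though this Path omits the port.
    System.out.println(lookup(m, new Path("hdfs://nn/warehouse/t")));
  }
}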

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java Thu Jan 21 10:37:58 2010
@@ -17,74 +17,75 @@
  */
 
 package org.apache.hadoop.hive.ql.io;
+
 import java.io.IOException;
 
-import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.FileSplit;
 import org.apache.hadoop.hive.ql.exec.ExecMapper;
-
 import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.CombineHiveInputSplit;
 import org.apache.hadoop.hive.shims.HadoopShims.InputSplitShim;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapred.FileSplit;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+
+public class CombineHiveRecordReader<K extends WritableComparable, V extends Writable>
+    implements RecordReader<K, V> {
+
+  private final RecordReader recordReader;
 
-public class CombineHiveRecordReader<K extends WritableComparable, V extends Writable>  
-  implements RecordReader<K, V> {
-  
-  private RecordReader recordReader;
-  
-  public CombineHiveRecordReader(InputSplit split, Configuration conf, 
-                                 Reporter reporter, Integer partition) 
-    throws IOException {
-    JobConf job = (JobConf)conf;
-    CombineHiveInputSplit hsplit = new CombineHiveInputSplit(job, (InputSplitShim)split);
-    String inputFormatClassName = hsplit.inputFormatClassName(); 
+  public CombineHiveRecordReader(InputSplit split, Configuration conf,
+      Reporter reporter, Integer partition) throws IOException {
+    JobConf job = (JobConf) conf;
+    CombineHiveInputSplit hsplit = new CombineHiveInputSplit(job,
+        (InputSplitShim) split);
+    String inputFormatClassName = hsplit.inputFormatClassName();
     Class inputFormatClass = null;
     try {
       inputFormatClass = Class.forName(inputFormatClassName);
     } catch (ClassNotFoundException e) {
-      throw new IOException ("CombineHiveRecordReader: class not found " + inputFormatClassName);
+      throw new IOException("CombineHiveRecordReader: class not found "
+          + inputFormatClassName);
     }
-    InputFormat inputFormat = CombineHiveInputFormat.getInputFormatFromCache(inputFormatClass, job);
-    
+    InputFormat inputFormat = HiveInputFormat.getInputFormatFromCache(
+        inputFormatClass, job);
+
     // create a split for the given partition
-    FileSplit fsplit = new FileSplit(hsplit.getPaths()[partition],
-                                     hsplit.getStartOffsets()[partition],
-                                     hsplit.getLengths()[partition],
-                                     hsplit.getLocations());
-    
+    FileSplit fsplit = new FileSplit(hsplit.getPaths()[partition], hsplit
+        .getStartOffsets()[partition], hsplit.getLengths()[partition], hsplit
+        .getLocations());
+
     this.recordReader = inputFormat.getRecordReader(fsplit, job, reporter);
   }
-  
-  public void close() throws IOException { 
-    recordReader.close(); 
-  }
-  
-  public K createKey() { 
-    return (K)recordReader.createKey();
-  }
-  
-  public V createValue() { 
-    return (V)recordReader.createValue();
+
+  public void close() throws IOException {
+    recordReader.close();
+  }
+
+  public K createKey() {
+    return (K) recordReader.createKey();
+  }
+
+  public V createValue() {
+    return (V) recordReader.createValue();
   }
-  
-  public long getPos() throws IOException { 
+
+  public long getPos() throws IOException {
     return recordReader.getPos();
   }
-  
-  public float getProgress() throws IOException { 
+
+  public float getProgress() throws IOException {
     return recordReader.getProgress();
   }
-  
-  public boolean  next(K key, V value) throws IOException { 
-    if (ExecMapper.getDone())
+
+  public boolean next(K key, V value) throws IOException {
+    if (ExecMapper.getDone()) {
       return false;
+    }
     return recordReader.next(key, value);
   }
 }
-
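
To illustrate the constructor above: each CombineHiveRecordReader handles exactly one chunk of the combined split, which it turns into an ordinary FileSplit before delegating to the underlying input format's reader. A hedged sketch of that per-chunk slicing (paths, offsets, lengths, and hosts are made-up sample data):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileSplit;

public class ChunkToFileSplitSketch {
  public static void main(String[] args) {
    // Parallel arrays as exposed by the combined split (sample values only).
    Path[] paths = { new Path("/warehouse/t/part-00000"), new Path("/warehouse/t/part-00001") };
    long[] startOffsets = { 0L, 0L };
    long[] lengths = { 134217728L, 67108864L };
    String[] locations = { "host1", "host2" };

    int partition = 1; // the chunk this reader is responsible for
    FileSplit fsplit = new FileSplit(paths[partition], startOffsets[partition],
        lengths[partition], locations);
    System.out.println(fsplit.getPath() + " @" + fsplit.getStart()
        + " len=" + fsplit.getLength());
  }
}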


