hadoop-hive-commits mailing list archives

From: zs...@apache.org
Subject: svn commit: r907950 [5/15] - in /hadoop/hive/trunk: ./ checkstyle/ cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ contrib/src/java/org/apache/hadoop/hive/contrib/file...
Date: Tue, 09 Feb 2010 07:55:50 GMT
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecute.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecute.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecute.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecute.java Tue Feb  9 07:55:30 2010
@@ -41,7 +41,7 @@
    * @param ugi
    *          The user group security information.
    */
-  public void run(SessionState sess, Set<ReadEntity> inputs,
+  void run(SessionState sess, Set<ReadEntity> inputs,
       Set<WriteEntity> outputs, UserGroupInformation ugi) throws Exception;
 
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java Tue Feb  9 07:55:30 2010
@@ -41,7 +41,7 @@
    * @param ugi
    *          The user group security information.
    */
-  public void run(SessionState sess, Set<ReadEntity> inputs,
+  void run(SessionState sess, Set<ReadEntity> inputs,
       Set<WriteEntity> outputs, UserGroupInformation ugi) throws Exception;
 
 }
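
The two hook changes above only drop the redundant public modifier from the
interface methods (interface members are implicitly public); the contract that
implementations satisfy is unchanged. As a rough illustration, not part of this
commit, a post-execution hook could look like the sketch below. The class name
and the println are invented for the example; hooks like this are registered
through Hive's pre/post execution hook configuration (hive.exec.pre.hooks /
hive.exec.post.hooks in current releases).

    import java.util.Set;

    import org.apache.hadoop.hive.ql.hooks.PostExecute;
    import org.apache.hadoop.hive.ql.hooks.ReadEntity;
    import org.apache.hadoop.hive.ql.hooks.WriteEntity;
    import org.apache.hadoop.hive.ql.session.SessionState;
    import org.apache.hadoop.security.UserGroupInformation;

    /** Illustrative only: reports what a finished query read and wrote. */
    public class LoggingPostExecHook implements PostExecute {

      // The implementation still declares the method public; only the
      // interface declaration above drops the redundant modifier.
      public void run(SessionState sess, Set<ReadEntity> inputs,
          Set<WriteEntity> outputs, UserGroupInformation ugi) throws Exception {
        System.out.println("query done: " + inputs.size() + " input(s), "
            + outputs.size() + " output(s), user info: " + ugi);
      }
    }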

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CodecPool.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CodecPool.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CodecPool.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CodecPool.java Tue Feb  9 07:55:30 2010
@@ -32,20 +32,22 @@
  * A global compressor/decompressor pool used to save and reuse (possibly
  * native) compression/decompression codecs.
  */
-public class CodecPool {
+public final class CodecPool {
   private static final Log LOG = LogFactory.getLog(CodecPool.class);
 
   /**
    * A global compressor pool used to save the expensive
    * construction/destruction of (possibly native) decompression codecs.
    */
-  private static final Map<Class<Compressor>, List<Compressor>> compressorPool = new HashMap<Class<Compressor>, List<Compressor>>();
+  private static final Map<Class<Compressor>, List<Compressor>> COMPRESSOR_POOL =
+      new HashMap<Class<Compressor>, List<Compressor>>();
 
   /**
    * A global decompressor pool used to save the expensive
    * construction/destruction of (possibly native) decompression codecs.
    */
-  private static final Map<Class<Decompressor>, List<Decompressor>> decompressorPool = new HashMap<Class<Decompressor>, List<Decompressor>>();
+  private static final Map<Class<Decompressor>, List<Decompressor>> DECOMPRESSOR_POOL =
+      new HashMap<Class<Decompressor>, List<Decompressor>>();
 
   private static <T> T borrow(Map<Class<T>, List<T>> pool,
       Class<? extends T> codecClass) {
@@ -96,7 +98,7 @@
    *         from the pool or a new one
    */
   public static Compressor getCompressor(CompressionCodec codec) {
-    Compressor compressor = borrow(compressorPool, codec.getCompressorType());
+    Compressor compressor = borrow(COMPRESSOR_POOL, codec.getCompressorType());
     if (compressor == null) {
       compressor = codec.createCompressor();
       LOG.info("Got brand-new compressor");
@@ -117,7 +119,7 @@
    *         <code>CompressionCodec</code> the pool or a new one
    */
   public static Decompressor getDecompressor(CompressionCodec codec) {
-    Decompressor decompressor = borrow(decompressorPool, codec
+    Decompressor decompressor = borrow(DECOMPRESSOR_POOL, codec
         .getDecompressorType());
     if (decompressor == null) {
       decompressor = codec.createDecompressor();
@@ -139,7 +141,7 @@
       return;
     }
     compressor.reset();
-    payback(compressorPool, compressor);
+    payback(COMPRESSOR_POOL, compressor);
   }
 
   /**
@@ -153,6 +155,10 @@
       return;
     }
     decompressor.reset();
-    payback(decompressorPool, decompressor);
+    payback(DECOMPRESSOR_POOL, decompressor);
+  }
+
+  private CodecPool() {
+    // prevent instantiation
   }
 }
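
CodecPool above becomes a final class with a private constructor, reflecting
that it is used purely through its static borrow/return methods. A hedged usage
sketch follows; it assumes the return methods are named returnCompressor /
returnDecompressor (the hunks above show their bodies but not their names), and
the codec, path, and payload are invented for the example.

    import java.io.OutputStream;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.CodecPool;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.Compressor;
    import org.apache.hadoop.io.compress.DefaultCodec;
    import org.apache.hadoop.util.ReflectionUtils;

    public class CodecPoolExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        CompressionCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);

        // Borrow a pooled compressor, or a brand-new one if the pool is empty.
        Compressor compressor = CodecPool.getCompressor(codec);
        try {
          FileSystem fs = FileSystem.getLocal(conf);
          OutputStream out = codec.createOutputStream(
              fs.create(new Path("/tmp/codecpool-example.deflate")), compressor);
          out.write("hello".getBytes());
          out.close();
        } finally {
          // Hand the compressor back; the pool resets it before caching it.
          CodecPool.returnCompressor(compressor);
        }
      }
    }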

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java Tue Feb  9 07:55:30 2010
@@ -71,8 +71,7 @@
           .getInputSplitShim());
     }
 
-    public CombineHiveInputSplit(InputSplitShim inputSplitShim)
-        throws IOException {
+    public CombineHiveInputSplit(InputSplitShim inputSplitShim) throws IOException {
       this(inputSplitShim.getJob(), inputSplitShim);
     }
 
@@ -102,7 +101,7 @@
             } catch (IOException e) {
               LOG
                   .warn("CombineHiveInputSplit unable to find table description for "
-                      + ipaths[i].getParent());
+                  + ipaths[i].getParent());
               continue;
             }
           }
@@ -124,7 +123,7 @@
     }
 
     /**
-     * Returns the inputFormat class name for the i-th chunk
+     * Returns the inputFormat class name for the i-th chunk.
      */
     public String inputFormatClassName() {
       return inputFormatClassName;
@@ -142,42 +141,42 @@
       return inputSplitShim.getLength();
     }
 
-    /** Returns an array containing the startoffsets of the files in the split */
+    /** Returns an array containing the startoffsets of the files in the split. */
     public long[] getStartOffsets() {
       return inputSplitShim.getStartOffsets();
     }
 
-    /** Returns an array containing the lengths of the files in the split */
+    /** Returns an array containing the lengths of the files in the split. */
     public long[] getLengths() {
       return inputSplitShim.getLengths();
     }
 
-    /** Returns the start offset of the i<sup>th</sup> Path */
+    /** Returns the start offset of the i<sup>th</sup> Path. */
     public long getOffset(int i) {
       return inputSplitShim.getOffset(i);
     }
 
-    /** Returns the length of the i<sup>th</sup> Path */
+    /** Returns the length of the i<sup>th</sup> Path. */
     public long getLength(int i) {
       return inputSplitShim.getLength(i);
     }
 
-    /** Returns the number of Paths in the split */
+    /** Returns the number of Paths in the split. */
     public int getNumPaths() {
       return inputSplitShim.getNumPaths();
     }
 
-    /** Returns the i<sup>th</sup> Path */
+    /** Returns the i<sup>th</sup> Path. */
     public Path getPath(int i) {
       return inputSplitShim.getPath(i);
     }
 
-    /** Returns all the Paths in the split */
+    /** Returns all the Paths in the split. */
     public Path[] getPaths() {
       return inputSplitShim.getPaths();
     }
 
-    /** Returns all the Paths where this input-split resides */
+    /** Returns all the Paths where this input-split resides. */
     public String[] getLocations() throws IOException {
       return inputSplitShim.getLocations();
     }
@@ -195,7 +194,7 @@
     }
 
     /**
-     * Writable interface
+     * Writable interface.
      */
     public void readFields(DataInput in) throws IOException {
       inputSplitShim.readFields(in);
@@ -203,7 +202,7 @@
     }
 
     /**
-     * Writable interface
+     * Writable interface.
      */
     public void write(DataOutput out) throws IOException {
       inputSplitShim.write(out);
@@ -238,7 +237,7 @@
   }
 
   /**
-   * Create Hive splits based on CombineFileSplit
+   * Create Hive splits based on CombineFileSplit.
    */
   @Override
   public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
@@ -273,7 +272,7 @@
 
   /**
    * Create a generic Hive RecordReader than can iterate over all chunks in a
-   * CombinedFileSplit
+   * CombinedFileSplit.
    */
   @Override
   public RecordReader getRecordReader(InputSplit split, JobConf job,
@@ -294,13 +293,12 @@
 
     return ShimLoader.getHadoopShims().getCombineFileInputFormat()
         .getRecordReader(job,
-            ((CombineHiveInputSplit) split).getInputSplitShim(), reporter,
-            CombineHiveRecordReader.class);
+        ((CombineHiveInputSplit) split).getInputSplitShim(), reporter,
+        CombineHiveRecordReader.class);
   }
 
   protected static PartitionDesc getPartitionDescFromPath(
-      Map<String, PartitionDesc> pathToPartitionInfo, Path dir)
-      throws IOException {
+      Map<String, PartitionDesc> pathToPartitionInfo, Path dir) throws IOException {
     // The format of the keys in pathToPartitionInfo sometimes contains a port
     // and sometimes doesn't, so we just compare paths.
     for (Map.Entry<String, PartitionDesc> entry : pathToPartitionInfo

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java Tue Feb  9 07:55:30 2010
@@ -33,6 +33,12 @@
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 
+/**
+ * CombineHiveRecordReader.
+ *
+ * @param <K>
+ * @param <V>
+ */
 public class CombineHiveRecordReader<K extends WritableComparable, V extends Writable>
     implements RecordReader<K, V> {
 

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java Tue Feb  9 07:55:30 2010
@@ -42,7 +42,7 @@
 
 /**
  * An {@link org.apache.hadoop.mapred.InputFormat} for Plain files with
- * {@link Deserializer} records
+ * {@link Deserializer} records.
  */
 public class FlatFileInputFormat<T> extends
     FileInputFormat<Void, FlatFileInputFormat.RowContainer<T>> {
@@ -53,7 +53,7 @@
    * Allows boolean next(k,v) to be called by reference but still allow the
    * deserializer to create a new object (i.e., row) on every call to next.
    */
-  static public class RowContainer<T> {
+  public static class RowContainer<T> {
     T row;
   }
 
@@ -69,25 +69,25 @@
    * not have a way of configuring the actual Java class being
    * serialized/deserialized.
    */
-  static public interface SerializationContext<S> extends Configurable {
+  public static interface SerializationContext<S> extends Configurable {
 
     /**
-     * An {@link Serialization} object for objects of type S
+     * An {@link Serialization} object for objects of type S.
      * 
      * @return a serialization object for this context
      */
-    public Serialization<S> getSerialization() throws IOException;
+    Serialization<S> getSerialization() throws IOException;
 
     /**
-     * Produces the specific class to deserialize
+     * Produces the specific class to deserialize.
      */
-    public Class<? extends S> getRealClass() throws IOException;
+    Class<? extends S> getRealClass() throws IOException;
   }
 
   /**
-   * The JobConf keys for the Serialization implementation
+   * The JobConf keys for the Serialization implementation.
    */
-  static public final String SerializationImplKey = "mapred.input.serialization.implKey";
+  public static final String SerializationImplKey = "mapred.input.serialization.implKey";
 
   /**
    * An implementation of {@link SerializationContext} that reads the
@@ -95,13 +95,13 @@
    * JobConf.
    * 
    */
-  static public class SerializationContextFromConf<S> implements
+  public static class SerializationContextFromConf<S> implements
       FlatFileInputFormat.SerializationContext<S> {
 
     /**
      * The JobConf keys for the Class that is being deserialized.
      */
-    static public final String SerializationSubclassKey = "mapred.input.serialization.subclassKey";
+    public static final String SerializationSubclassKey = "mapred.input.serialization.subclassKey";
 
     /**
      * Implements configurable so it can use the configuration to find the right
@@ -119,7 +119,7 @@
     }
 
     /**
-     * @return the actual class being deserialized
+     * @return the actual class being deserialized.
      * @exception does
      *              not currently throw IOException
      */
@@ -182,29 +182,29 @@
     private final FSDataInputStream fsin;
 
     /**
-     * For calculating progress
+     * For calculating progress.
      */
     private final long end;
 
     /**
-     * The constructed deserializer
+     * The constructed deserializer.
      */
     private final Deserializer<R> deserializer;
 
     /**
-     * Once EOF is reached, stop calling the deserializer
+     * Once EOF is reached, stop calling the deserializer.
      */
     private boolean isEOF;
 
     /**
      * The JobConf which contains information needed to instantiate the correct
-     * Deserializer
+     * Deserializer.
      */
     private final Configuration conf;
 
     /**
      * The actual class of the row's we are deserializing, not just the base
-     * class
+     * class.
      */
     private final Class<R> realRowClass;
 
@@ -217,8 +217,7 @@
      * @param split
      *          the split for this file
      */
-    public FlatFileRecordReader(Configuration conf, FileSplit split)
-        throws IOException {
+    public FlatFileRecordReader(Configuration conf, FileSplit split) throws IOException {
       final Path path = split.getPath();
       FileSystem fileSys = path.getFileSystem(conf);
       CompressionCodecFactory compressionCodecs = new CompressionCodecFactory(
@@ -244,10 +243,9 @@
       SerializationContext<R> sinfo;
       Class<SerializationContext<R>> sinfoClass = (Class<SerializationContext<R>>) conf
           .getClass(SerializationContextImplKey,
-              SerializationContextFromConf.class);
+          SerializationContextFromConf.class);
 
-      sinfo = (SerializationContext<R>) ReflectionUtils.newInstance(sinfoClass,
-          conf);
+      sinfo = (SerializationContext<R>)ReflectionUtils.newInstance(sinfoClass, conf);
 
       // Get the Serialization object and the class being deserialized
       Serialization<R> serialization = sinfo.getSerialization();
@@ -258,9 +256,10 @@
     }
 
     /**
-     * The JobConf key of the SerializationContext to use
+     * The JobConf key of the SerializationContext to use.
      */
-    static public final String SerializationContextImplKey = "mapred.input.serialization.context_impl";
+    public static final String SerializationContextImplKey =
+      "mapred.input.serialization.context_impl";
 
     /**
      * @return null
@@ -274,12 +273,12 @@
      */
     public RowContainer<R> createValue() {
       RowContainer<R> r = new RowContainer<R>();
-      r.row = (R) ReflectionUtils.newInstance(realRowClass, conf);
+      r.row = (R)ReflectionUtils.newInstance(realRowClass, conf);
       return r;
     }
 
     /**
-     * Returns the next row # and value
+     * Returns the next row # and value.
      * 
      * @param key
      *          - void as these files have a value only
@@ -291,8 +290,7 @@
      * @exception IOException
      *              from the deserializer
      */
-    public synchronized boolean next(Void key, RowContainer<R> value)
-        throws IOException {
+    public synchronized boolean next(Void key, RowContainer<R> value) throws IOException {
       if (isEOF || in.available() == 0) {
         isEOF = true;
         return false;

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java Tue Feb  9 07:55:30 2010
@@ -24,15 +24,12 @@
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
-import org.apache.hadoop.hive.serde.Constants;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.TextOutputFormat;
 import org.apache.hadoop.util.Progressable;
 
@@ -68,7 +65,7 @@
 
     FileSystem fs = outPath.getFileSystem(jc);
     final OutputStream outStream = fs.create(outPath);
-    
+
     return new RecordWriter() {
       public void write(Writable r) throws IOException {
         if (r instanceof Text) {

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java Tue Feb  9 07:55:30 2010
@@ -52,10 +52,11 @@
  * HiveOutputFormat for the older ones.
  * 
  */
-public class HiveFileFormatUtils {
+public final class HiveFileFormatUtils {
 
   static {
-    outputFormatSubstituteMap = new HashMap<Class<? extends OutputFormat>, Class<? extends HiveOutputFormat>>();
+    outputFormatSubstituteMap =
+        new HashMap<Class<? extends OutputFormat>, Class<? extends HiveOutputFormat>>();
     HiveFileFormatUtils.registerOutputFormatSubstitute(
         IgnoreKeyTextOutputFormat.class, HiveIgnoreKeyTextOutputFormat.class);
     HiveFileFormatUtils.registerOutputFormatSubstitute(
@@ -63,27 +64,28 @@
   }
 
   @SuppressWarnings("unchecked")
-  private static Map<Class<? extends OutputFormat>, Class<? extends HiveOutputFormat>> outputFormatSubstituteMap;
+  private static Map<Class<? extends OutputFormat>, Class<? extends HiveOutputFormat>>
+  outputFormatSubstituteMap;
 
   /**
-   * register a substitute
+   * register a substitute.
    * 
    * @param origin
    *          the class that need to be substituted
    * @param substitute
    */
   @SuppressWarnings("unchecked")
-  public synchronized static void registerOutputFormatSubstitute(
+  public static synchronized void registerOutputFormatSubstitute(
       Class<? extends OutputFormat> origin,
       Class<? extends HiveOutputFormat> substitute) {
     outputFormatSubstituteMap.put(origin, substitute);
   }
 
   /**
-   * get a OutputFormat's substitute HiveOutputFormat
+   * get a OutputFormat's substitute HiveOutputFormat.
    */
   @SuppressWarnings("unchecked")
-  public synchronized static Class<? extends HiveOutputFormat> getOutputFormatSubstitute(
+  public static synchronized Class<? extends HiveOutputFormat> getOutputFormatSubstitute(
       Class<?> origin) {
     if (HiveOutputFormat.class.isAssignableFrom(origin)) {
       return (Class<? extends HiveOutputFormat>) origin;
@@ -112,12 +114,14 @@
   }
 
   static {
-    inputFormatCheckerMap = new HashMap<Class<? extends InputFormat>, Class<? extends InputFormatChecker>>();
+    inputFormatCheckerMap =
+        new HashMap<Class<? extends InputFormat>, Class<? extends InputFormatChecker>>();
     HiveFileFormatUtils.registerInputFormatChecker(
         SequenceFileInputFormat.class, SequenceFileInputFormatChecker.class);
     HiveFileFormatUtils.registerInputFormatChecker(RCFileInputFormat.class,
         RCFileInputFormat.class);
-    inputFormatCheckerInstanceCache = new HashMap<Class<? extends InputFormatChecker>, InputFormatChecker>();
+    inputFormatCheckerInstanceCache =
+        new HashMap<Class<? extends InputFormatChecker>, InputFormatChecker>();
   }
 
   @SuppressWarnings("unchecked")
@@ -126,14 +130,14 @@
   private static Map<Class<? extends InputFormatChecker>, InputFormatChecker> inputFormatCheckerInstanceCache;
 
   /**
-   * register an InputFormatChecker for a given InputFormat
+   * register an InputFormatChecker for a given InputFormat.
    * 
    * @param format
    *          the class that need to be substituted
    * @param checker
    */
   @SuppressWarnings("unchecked")
-  public synchronized static void registerInputFormatChecker(
+  public static synchronized void registerInputFormatChecker(
       Class<? extends InputFormat> format,
       Class<? extends InputFormatChecker> checker) {
     inputFormatCheckerMap.put(format, checker);
@@ -142,7 +146,7 @@
   /**
    * get an InputFormatChecker for a file format.
    */
-  public synchronized static Class<? extends InputFormatChecker> getInputFormatChecker(
+  public static synchronized Class<? extends InputFormatChecker> getInputFormatChecker(
       Class<?> inputFormat) {
     Class<? extends InputFormatChecker> result = inputFormatCheckerMap
         .get(inputFormat);
@@ -150,7 +154,7 @@
   }
 
   /**
-   * checks if files are in same format as the given input format
+   * checks if files are in same format as the given input format.
    */
   @SuppressWarnings("unchecked")
   public static boolean checkInputFormat(FileSystem fs, HiveConf conf,
@@ -239,4 +243,7 @@
     return null;
   }
 
+  private HiveFileFormatUtils() {
+    // prevent instantiation
+  }
 }
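
HiveFileFormatUtils likewise becomes a final utility class; its substitute and
checker registries stay static and are reached through synchronized static
methods. A small sketch of the substitute registry, using classes already
referenced in this file (the TextOutputFormat pairing itself is only
illustrative, not something this commit registers):

    import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
    import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
    import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
    import org.apache.hadoop.mapred.TextOutputFormat;

    public class SubstituteExample {
      public static void main(String[] args) {
        // Map a plain MapReduce OutputFormat to the HiveOutputFormat that
        // should be used in its place when Hive writes the table.
        HiveFileFormatUtils.registerOutputFormatSubstitute(
            TextOutputFormat.class, HiveIgnoreKeyTextOutputFormat.class);

        Class<? extends HiveOutputFormat> substitute =
            HiveFileFormatUtils.getOutputFormatSubstitute(TextOutputFormat.class);
        System.out.println("substitute: " + substitute.getName());
      }
    }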

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java Tue Feb  9 07:55:30 2010
@@ -142,7 +142,7 @@
       } catch (Exception e) {
         throw new IOException(
             "Cannot create an instance of InputSplit class = "
-                + inputSplitClassName + ":" + e.getMessage());
+            + inputSplitClassName + ":" + e.getMessage());
       }
       inputSplit.readFields(in);
       inputFormatClassName = in.readUTF();

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java Tue Feb  9 07:55:30 2010
@@ -58,7 +58,7 @@
    *          progress used for status report
    * @return the RecordWriter for the output file
    */
-  public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
+  RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
       final Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException;
 

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java Tue Feb  9 07:55:30 2010
@@ -40,7 +40,7 @@
   BytesWritable EMPTY_KEY = new BytesWritable();
 
   /**
-   * create the final out file, and output an empty key as the key
+   * create the final out file, and output an empty key as the key.
    * 
    * @param jc
    *          the job configuration file

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/InputFormatChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/InputFormatChecker.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/InputFormatChecker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/InputFormatChecker.java Tue Feb  9 07:55:30 2010
@@ -31,10 +31,10 @@
 public interface InputFormatChecker {
 
   /**
-   * This method is used to validate the input files
+   * This method is used to validate the input files.
    * 
    */
-  public boolean validateInput(FileSystem fs, HiveConf conf,
+  boolean validateInput(FileSystem fs, HiveConf conf,
       ArrayList<FileStatus> files) throws IOException;
 
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java Tue Feb  9 07:55:30 2010
@@ -148,19 +148,20 @@
 
   private static final Log LOG = LogFactory.getLog(RCFile.class);
 
-  public static String RECORD_INTERVAL_CONF_STR = "hive.io.rcfile.record.interval";
+  public static final String RECORD_INTERVAL_CONF_STR = "hive.io.rcfile.record.interval";
 
-  public static String COLUMN_NUMBER_METADATA_STR = "hive.io.rcfile.column.number";
+  public static final String COLUMN_NUMBER_METADATA_STR = "hive.io.rcfile.column.number";
 
-  public static String COLUMN_NUMBER_CONF_STR = "hive.io.rcfile.column.number.conf";
+  public static final String COLUMN_NUMBER_CONF_STR = "hive.io.rcfile.column.number.conf";
 
   /*
    * these header and Sync are kept from SequenceFile, for compatible of
    * SequenceFile's format.
    */
   private static final byte VERSION_WITH_METADATA = (byte) 6;
-  private static byte[] VERSION = new byte[] { (byte) 'S', (byte) 'E',
-      (byte) 'Q', VERSION_WITH_METADATA };
+  private static final byte[] VERSION = new byte[] {
+      (byte) 'S', (byte) 'E', (byte) 'Q', VERSION_WITH_METADATA
+      };
 
   private static final int SYNC_ESCAPE = -1; // "length" of sync entries
   private static final int SYNC_HASH_SIZE = 16; // number of bytes in hash
@@ -263,7 +264,7 @@
     }
 
     /**
-     * get number of bytes to store the keyBuffer
+     * get number of bytes to store the keyBuffer.
      * 
      * @return number of bytes used to store this KeyBuffer on disk
      * @throws IOException
@@ -593,8 +594,7 @@
     }
 
     /** Constructs a RCFile Writer. */
-    public Writer(FileSystem fs, Configuration conf, Path name)
-        throws IOException {
+    public Writer(FileSystem fs, Configuration conf, Path name) throws IOException {
       this(fs, conf, name, null, new Metadata(), null);
     }
 
@@ -628,10 +628,9 @@
      * @throws IOException
      */
     public Writer(FileSystem fs, Configuration conf, Path name,
-        Progressable progress, Metadata metadata, CompressionCodec codec)
-        throws IOException {
-      this(fs, conf, name, fs.getConf().getInt("io.file.buffer.size", 4096), fs
-          .getDefaultReplication(), fs.getDefaultBlockSize(), progress,
+        Progressable progress, Metadata metadata, CompressionCodec codec) throws IOException {
+      this(fs, conf, name, fs.getConf().getInt("io.file.buffer.size", 4096),
+          fs.getDefaultReplication(), fs.getDefaultBlockSize(), progress,
           metadata, codec);
     }
 
@@ -746,7 +745,7 @@
       return codec;
     }
 
-    /** create a sync point */
+    /** create a sync point. */
     public void sync() throws IOException {
       if (sync != null && lastSyncPos != out.getPos()) {
         out.writeInt(SYNC_ESCAPE); // mark the start of the sync
@@ -769,8 +768,8 @@
     private int columnBufferSize = 0;
 
     /**
-     * append a row of values. Currently it only can accept <
-     * {@link BytesRefArrayWritable}. If its<code>size()</code> is less than the
+     * Append a row of values. Currently it only can accept <
+     * {@link BytesRefArrayWritable}. If its <code>size()</code> is less than the
      * column number in the file, zero bytes are appended for the empty columns.
      * If its size() is greater then the column number in the file, the exceeded
      * columns' bytes are ignored.
@@ -865,7 +864,7 @@
         out.writeInt(compressedKeyLen);
         out.write(keyCompressionBuffer.getData(), 0, compressedKeyLen);
       }
-      value.write(out);// value
+      value.write(out); // value
 
       // clear the columnBuffers
       clearColumnBuffers();
@@ -961,8 +960,7 @@
     int[] prjColIDs = null; // selected column IDs
 
     /** Create a new RCFile reader. */
-    public Reader(FileSystem fs, Path file, Configuration conf)
-        throws IOException {
+    public Reader(FileSystem fs, Path file, Configuration conf) throws IOException {
       this(fs, file, conf.getInt("io.file.buffer.size", 4096), conf, 0, fs
           .getFileStatus(file).getLen());
     }
@@ -1129,7 +1127,7 @@
         seek(end);
         return;
       }
-      
+
       //this is to handle syn(pos) where pos < headerEnd.
       if (position < headerEnd) {
         // seek directly to first record
@@ -1162,8 +1160,7 @@
       }
     }
 
-    private void handleChecksumException(ChecksumException e)
-        throws IOException {
+    private void handleChecksumException(ChecksumException e) throws IOException {
       if (conf.getBoolean("io.skip.checksum.errors", false)) {
         LOG.warn("Bad checksum at " + getPosition() + ". Skipping entries.");
         sync(getPosition() + conf.getInt("io.bytes.per.checksum", 512));
@@ -1392,8 +1389,7 @@
      * 
      * @throws IOException
      */
-    public synchronized void getCurrentRow(BytesRefArrayWritable ret)
-        throws IOException {
+    public synchronized void getCurrentRow(BytesRefArrayWritable ret) throws IOException {
 
       if (!keyInit || rowFetched) {
         return;
@@ -1453,8 +1449,8 @@
     public boolean syncSeen() {
       return syncSeen;
     }
-    
-    /** Returns the last seen sync position */
+
+    /** Returns the last seen sync position. */
     public long lastSeenSyncPos() {
       return lastSeenSyncPos;
     }
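
For context on the RCFile constants made final above, COLUMN_NUMBER_CONF_STR is
the setting a writer needs before appending rows. A rough write/read sketch
follows; it assumes the Writer picks the column count up from that property and
that append(Writable) and Reader.next(LongWritable) behave as the surrounding
code relies on. The path and cell values are invented for the example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.RCFile;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;
    import org.apache.hadoop.io.LongWritable;

    public class RCFileExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Tell the writer how many columns each appended row carries.
        conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, 2);

        FileSystem fs = FileSystem.getLocal(conf);
        Path file = new Path("/tmp/rcfile-example.rc");

        RCFile.Writer writer = new RCFile.Writer(fs, conf, file);
        BytesRefArrayWritable row = new BytesRefArrayWritable(2);
        row.set(0, new BytesRefWritable("a".getBytes()));
        row.set(1, new BytesRefWritable("b".getBytes()));
        writer.append(row);
        writer.close();

        RCFile.Reader reader = new RCFile.Reader(fs, file, conf);
        LongWritable rowId = new LongWritable();
        BytesRefArrayWritable cols = new BytesRefArrayWritable();
        while (reader.next(rowId)) {
          reader.getCurrentRow(cols);
          System.out.println("row " + rowId + ": " + cols.size() + " columns");
        }
        reader.close();
      }
    }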

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileInputFormat.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileInputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileInputFormat.java Tue Feb  9 07:55:30 2010
@@ -34,6 +34,12 @@
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 
+/**
+ * RCFileInputFormat.
+ *
+ * @param <K>
+ * @param <V>
+ */
 public class RCFileInputFormat<K extends LongWritable, V extends BytesRefArrayWritable>
     extends FileInputFormat<K, V> implements InputFormatChecker {
 

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java Tue Feb  9 07:55:30 2010
@@ -38,6 +38,10 @@
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 
+/**
+ * RCFileOutputFormat.
+ *
+ */
 public class RCFileOutputFormat extends
     FileOutputFormat<WritableComparable, BytesRefArrayWritable> implements
     HiveOutputFormat<WritableComparable, Writable> {
@@ -57,7 +61,7 @@
   }
 
   /**
-   * Returns the number of columns set in the conf for writers
+   * Returns the number of columns set in the conf for writers.
    * 
    * @param conf
    * @return number of columns for RCFile's writer
@@ -69,8 +73,7 @@
   /** {@inheritDoc} */
   @Override
   public RecordWriter<WritableComparable, BytesRefArrayWritable> getRecordWriter(
-      FileSystem ignored, JobConf job, String name, Progressable progress)
-      throws IOException {
+      FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException {
 
     Path outputPath = getWorkOutputPath(job);
     FileSystem fs = outputPath.getFileSystem(job);
@@ -101,7 +104,7 @@
   }
 
   /**
-   * create the final out file,
+   * create the final out file.
    * 
    * @param jc
    *          the job configuration file
@@ -120,8 +123,7 @@
   @Override
   public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(
       JobConf jc, Path finalOutPath, Class<? extends Writable> valueClass,
-      boolean isCompressed, Properties tableProperties, Progressable progress)
-      throws IOException {
+      boolean isCompressed, Properties tableProperties, Progressable progress) throws IOException {
 
     String[] cols = null;
     String columns = tableProperties.getProperty("columns");

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java Tue Feb  9 07:55:30 2010
@@ -30,6 +30,12 @@
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.util.ReflectionUtils;
 
+/**
+ * RCFileRecordReader.
+ *
+ * @param <K>
+ * @param <V>
+ */
 public class RCFileRecordReader<K extends LongWritable, V extends BytesRefArrayWritable>
     implements RecordReader<LongWritable, BytesRefArrayWritable> {
 
@@ -75,9 +81,9 @@
   @Override
   public boolean next(LongWritable key, BytesRefArrayWritable value)
       throws IOException {
-    
+
     more = next(key);
-    
+
     if (more) {
       in.getCurrentRow(value);
     }
@@ -88,14 +94,14 @@
     if (!more) {
       return false;
     }
-    
+
     more = in.next(key);
     if (!more) {
       return false;
     }
-    
+
     long lastSeenSyncPos = in.lastSeenSyncPos();
-    if(lastSeenSyncPos >= end) {
+    if (lastSeenSyncPos >= end) {
       more = false;
       return more;
     }
@@ -103,7 +109,7 @@
   }
 
   /**
-   * Return the progress within the input split
+   * Return the progress within the input split.
    * 
    * @return 0.0 to 1.0 of the input byte range
    */

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SequenceFileInputFormatChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SequenceFileInputFormatChecker.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SequenceFileInputFormatChecker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SequenceFileInputFormatChecker.java Tue Feb  9 07:55:30 2010
@@ -26,6 +26,10 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.io.SequenceFile;
 
+/**
+ * SequenceFileInputFormatChecker.
+ *
+ */
 public class SequenceFileInputFormatChecker implements InputFormatChecker {
 
   @Override

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java Tue Feb  9 07:55:30 2010
@@ -43,7 +43,7 @@
   private final Dispatcher dispatcher;
 
   /**
-   * Constructor
+   * Constructor.
    * 
    * @param disp
    *          dispatcher to call for each op encountered
@@ -68,7 +68,7 @@
   }
 
   /**
-   * Dispatch the current operator
+   * Dispatch the current operator.
    * 
    * @param nd
    *          node being walked
@@ -91,7 +91,7 @@
   }
 
   /**
-   * starting point for walking
+   * starting point for walking.
    * 
    * @throws SemanticException
    */
@@ -108,7 +108,7 @@
   }
 
   /**
-   * walk the current operator and its descendants
+   * walk the current operator and its descendants.
    * 
    * @param nd
    *          current operator in the graph

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java Tue Feb  9 07:55:30 2010
@@ -35,7 +35,7 @@
   private final NodeProcessor defaultProc;
 
   /**
-   * constructor
+   * Constructor.
    * 
    * @param defaultProc
    *          default processor to be fired if no rule matches
@@ -52,7 +52,7 @@
   }
 
   /**
-   * dispatcher function
+   * Dispatcher function.
    * 
    * @param nd
    *          operator to process

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java Tue Feb  9 07:55:30 2010
@@ -24,7 +24,7 @@
 
 /**
  * Dispatcher interface for Operators Used in operator graph walking to dispatch
- * process/visitor functions for operators
+ * process/visitor functions for operators.
  */
 public interface Dispatcher {
 
@@ -41,6 +41,6 @@
    * @return Object The return object from the processing call.
    * @throws SemanticException
    */
-  public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs)
+  Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs)
       throws SemanticException;
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java Tue Feb  9 07:55:30 2010
@@ -38,7 +38,7 @@
    *          the map from node to objects returned by the processors.
    * @throws SemanticException
    */
-  public void startWalking(Collection<Node> startNodes,
+  void startWalking(Collection<Node> startNodes,
       HashMap<Node, Object> nodeOutput) throws SemanticException;
 
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java Tue Feb  9 07:55:30 2010
@@ -32,12 +32,12 @@
    * 
    * @return List<? extends Node>
    */
-  public List<? extends Node> getChildren();
+  List<? extends Node> getChildren();
 
   /**
    * Gets the name of the node. This is used in the rule dispatchers.
    * 
    * @return String
    */
-  public String getName();
+  String getName();
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java Tue Feb  9 07:55:30 2010
@@ -28,7 +28,7 @@
 public interface NodeProcessor {
 
   /**
-   * generic process for all ops that don't have specific implementations
+   * Generic process for all ops that don't have specific implementations.
    * 
    * @param nd
    *          operator to process
@@ -39,6 +39,6 @@
    * @return Object to be returned by the process call
    * @throws SemanticException
    */
-  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+  Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
       Object... nodeOutputs) throws SemanticException;
 }
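
The lib/ changes in this commit are interface and javadoc cleanups to the
generic graph-walking framework the optimizer uses. A hedged sketch of how the
pieces are usually composed; the constructor argument order for
DefaultRuleDispatcher is assumed from its javadoc, and the processor body and
class name are invented for the example:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.Stack;

    import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
    import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
    import org.apache.hadoop.hive.ql.lib.Dispatcher;
    import org.apache.hadoop.hive.ql.lib.GraphWalker;
    import org.apache.hadoop.hive.ql.lib.Node;
    import org.apache.hadoop.hive.ql.lib.NodeProcessor;
    import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
    import org.apache.hadoop.hive.ql.lib.Rule;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    public class WalkerExample {
      public static void walk(ArrayList<Node> topNodes) throws SemanticException {
        // Fired for every node that no explicit rule claims.
        NodeProcessor defaultProc = new NodeProcessor() {
          public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
              Object... nodeOutputs) throws SemanticException {
            System.out.println("visited " + nd.getName());
            return null;
          }
        };

        // Rules map node-name patterns to processors; left empty here so the
        // default processor handles every node.
        Map<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();

        Dispatcher disp = new DefaultRuleDispatcher(defaultProc, rules, null);
        GraphWalker walker = new DefaultGraphWalker(disp);
        walker.startWalking(topNodes, new HashMap<Node, Object>());
      }
    }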

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java Tue Feb  9 07:55:30 2010
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hive.ql.lib;
 
 /**
- * Operator Processor Context
+ * Operator Processor Context.
  */
 public interface NodeProcessorCtx {
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java Tue Feb  9 07:55:30 2010
@@ -32,7 +32,7 @@
    */
 
   /**
-   * Constructor
+   * Constructor.
    * 
    * @param disp
    *          dispatcher to call for each op encountered
@@ -42,7 +42,7 @@
   }
 
   /**
-   * walk the current operator and its descendants
+   * Walk the current operator and its descendants.
    * 
    * @param nd
    *          current operator in the graph

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java Tue Feb  9 07:55:30 2010
@@ -24,7 +24,7 @@
 
 /**
  * Rule interface for Operators Used in operator dispatching to dispatch
- * process/visitor functions for operators
+ * process/visitor functions for operators.
  */
 public interface Rule {
 
@@ -33,10 +33,10 @@
    *         matches
    * @throws SemanticException
    */
-  public int cost(Stack<Node> stack) throws SemanticException;
+  int cost(Stack<Node> stack) throws SemanticException;
 
   /**
    * @return the name of the rule - may be useful for debugging
    */
-  public String getName();
+  String getName();
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java Tue Feb  9 07:55:30 2010
@@ -26,7 +26,7 @@
 
 /**
  * Rule interface for Nodes Used in Node dispatching to dispatch process/visitor
- * functions for Nodes
+ * functions for Nodes.
  */
 public class RuleRegExp implements Rule {
 

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java Tue Feb  9 07:55:30 2010
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hive.ql.metadata;
 
 /**
- * Generic exception class for Hive
+ * Generic exception class for Hive.
  */
 
 public class HiveException extends Exception {

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java Tue Feb  9 07:55:30 2010
@@ -258,7 +258,7 @@
   }
 
   /**
-   * Find partitions on the fs that are unknown to the metastore
+   * Find partitions on the fs that are unknown to the metastore.
    * 
    * @param table
    *          Table where the partitions would be located

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java Tue Feb  9 07:55:30 2010
@@ -19,10 +19,10 @@
 package org.apache.hadoop.hive.ql.metadata;
 
 /**
- * General collection of helper functions
+ * General collection of helper functions.
  * 
  */
-public class HiveUtils {
+public final class HiveUtils {
 
   public static final char QUOTE = '"';
   public static final char COLON = ':';
@@ -119,4 +119,8 @@
     // in identifier by doubling them up.
     return "`" + identifier + "`";
   }
+
+  private HiveUtils() {
+    // prevent instantiation
+  }
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/InvalidTableException.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/InvalidTableException.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/InvalidTableException.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/InvalidTableException.java Tue Feb  9 07:55:30 2010
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hive.ql.metadata;
 
 /**
- * Generic exception class for Hive
+ * Generic exception class for Hive.
  * 
  */
 

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Sample.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Sample.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Sample.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Sample.java Tue Feb  9 07:55:30 2010
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hive.ql.metadata;
 
 /**
- * A sample defines a subset of data based on sampling on a given dimension
+ * A sample defines a subset of data based on sampling on a given dimension.
  * 
  **/
 public class Sample {

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java Tue Feb  9 07:55:30 2010
@@ -53,7 +53,7 @@
   private HashMap<Operator<? extends Serializable>, OpParseContext> opToParseCtxMap;
 
   /**
-   * empty constructor
+   * empty constructor.
    */
   public ColumnPruner() {
     pGraphContext = null;
@@ -62,7 +62,7 @@
   /**
    * Transform the query tree. For each table under consideration, check if all
    * columns are needed. If not, only select the operators needed at the
-   * beginning and proceed
+   * beginning and proceed.
    * 
    * @param pactx
    *          the current parse context
@@ -108,7 +108,7 @@
 
   /**
    * Walks the op tree in post order fashion (skips selects with file sink or
-   * script op children)
+   * script op children).
    */
   public static class ColumnPrunerWalker extends DefaultGraphWalker {
 
@@ -117,7 +117,7 @@
     }
 
     /**
-     * Walk the given operator
+     * Walk the given operator.
      */
     @Override
     public void walk(Node nd) throws SemanticException {

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java Tue Feb  9 07:55:30 2010
@@ -50,12 +50,12 @@
 import org.apache.hadoop.hive.ql.parse.OpParseContext;
 import org.apache.hadoop.hive.ql.parse.RowResolver;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.plan.JoinDesc;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.SelectDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
@@ -63,7 +63,11 @@
 /**
  * Factory for generating the different node processors used by ColumnPruner.
  */
-public class ColumnPrunerProcFactory {
+public final class ColumnPrunerProcFactory {
+
+  private ColumnPrunerProcFactory() {
+    // prevent instantiation
+  }
 
   /**
    * Node Processor for Column Pruning on Filter Operators.
@@ -344,8 +348,7 @@
      * @throws SemanticException
      */
     private void handleChildren(SelectOperator op,
-        List<String> retainedSelOutputCols, ColumnPrunerProcCtx cppCtx)
-        throws SemanticException {
+        List<String> retainedSelOutputCols, ColumnPrunerProcCtx cppCtx) throws SemanticException {
       for (Operator<? extends Serializable> child : op.getChildOperators()) {
         if (child instanceof ReduceSinkOperator) {
           boolean[] flags = getPruneReduceSinkOpRetainFlags(
@@ -391,8 +394,7 @@
   }
 
   private static void pruneReduceSinkOperator(boolean[] retainFlags,
-      ReduceSinkOperator reduce, ColumnPrunerProcCtx cppCtx)
-      throws SemanticException {
+      ReduceSinkOperator reduce, ColumnPrunerProcCtx cppCtx) throws SemanticException {
     ReduceSinkDesc reduceConf = reduce.getConf();
     Map<String, ExprNodeDesc> oldMap = reduce.getColumnExprMap();
     Map<String, ExprNodeDesc> newMap = new HashMap<String, ExprNodeDesc>();
@@ -445,7 +447,7 @@
     reduceConf.setValueCols(newValueEval);
     TableDesc newValueTable = PlanUtils.getReduceValueTableDesc(PlanUtils
         .getFieldSchemasFromColumnList(reduceConf.getValueCols(),
-            newOutputColNames, 0, ""));
+        newOutputColNames, 0, ""));
     reduceConf.setValueSerializeInfo(newValueTable);
   }
 
@@ -496,8 +498,7 @@
   private static void pruneJoinOperator(NodeProcessorCtx ctx,
       CommonJoinOperator op, JoinDesc conf,
       Map<String, ExprNodeDesc> columnExprMap,
-      Map<Byte, List<Integer>> retainMap, boolean mapJoin)
-      throws SemanticException {
+      Map<Byte, List<Integer>> retainMap, boolean mapJoin) throws SemanticException {
     ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx;
     Map<Byte, List<String>> prunedColLists = new HashMap<Byte, List<String>>();
     List<Operator<? extends Serializable>> childOperators = op

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java Tue Feb  9 07:55:30 2010
@@ -49,7 +49,6 @@
 import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
 import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles;
 import org.apache.hadoop.hive.ql.plan.ConditionalWork;
-import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExtractDesc;
@@ -58,6 +57,7 @@
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
@@ -65,7 +65,7 @@
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
 /**
- * Processor for the rule - table scan followed by reduce sink
+ * Processor for the rule - table scan followed by reduce sink.
  */
 public class GenMRFileSink1 implements NodeProcessor {
 
@@ -73,7 +73,7 @@
   }
 
   /**
-   * File Sink Operator encountered
+   * File Sink Operator encountered.
    * 
    * @param nd
    *          the file sink operator encountered
@@ -108,8 +108,8 @@
               HiveConf.ConfVars.HIVEMERGEMAPFILES) && (((MapredWork) currTask
               .getWork()).getReducer() == null))
               || (parseCtx.getConf().getBoolVar(
-                  HiveConf.ConfVars.HIVEMERGEMAPREDFILES) && (((MapredWork) currTask
-                  .getWork()).getReducer() != null))) {
+              HiveConf.ConfVars.HIVEMERGEMAPREDFILES) && (((MapredWork) currTask
+              .getWork()).getReducer() != null))) {
             chDir = true;
           }
         }
@@ -176,18 +176,18 @@
 
     Operator extract = OperatorFactory.getAndMakeChild(new ExtractDesc(
         new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo,
-            Utilities.ReduceField.VALUE.toString(), "", false)), 
-            new RowSchema(out_rwsch.getColumnInfos()));
+        Utilities.ReduceField.VALUE.toString(), "", false)),
+        new RowSchema(out_rwsch.getColumnInfos()));
 
     TableDesc ts = (TableDesc) fsConf.getTableInfo().clone();
     fsConf
         .getTableInfo()
         .getProperties()
         .remove(
-            org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS);
+        org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS);
     FileSinkOperator newOutput = (FileSinkOperator) OperatorFactory
         .getAndMakeChild(new FileSinkDesc(finalName, ts, parseCtx.getConf()
-            .getBoolVar(HiveConf.ConfVars.COMPRESSRESULT)), fsRS, extract);
+        .getBoolVar(HiveConf.ConfVars.COMPRESSRESULT)), fsRS, extract);
 
     cplan.setReducer(extract);
     ArrayList<String> aliases = new ArrayList<String>();

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java Tue Feb  9 07:55:30 2010
@@ -30,7 +30,7 @@
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 
 /**
- * Processor for the rule - no specific rule fired
+ * Processor for the rule - no specific rule fired.
  */
 public class GenMROperator implements NodeProcessor {
 
@@ -38,7 +38,7 @@
   }
 
   /**
-   * Reduce Scan encountered
+   * Reduce Scan encountered.
    * 
    * @param nd
    *          the reduce sink operator encountered
@@ -54,7 +54,7 @@
     GenMapRedCtx mapredCtx = mapCurrCtx.get(stack.get(stack.size() - 2));
     mapCurrCtx.put((Operator<? extends Serializable>) nd, new GenMapRedCtx(
         mapredCtx.getCurrTask(), mapredCtx.getCurrTopOp(), mapredCtx
-            .getCurrAliasId()));
+        .getCurrAliasId()));
     return null;
   }
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java Tue Feb  9 07:55:30 2010
@@ -92,6 +92,10 @@
     }
   }
 
+  /**
+   * GenMRUnionCtx.
+   *
+   */
   public static class GenMRUnionCtx {
     Task<? extends Serializable> uTask;
     List<String> taskTmpDir;
@@ -128,6 +132,10 @@
     }
   }
 
+  /**
+   * GenMRMapJoinCtx.
+   *
+   */
   public static class GenMRMapJoinCtx {
     String taskTmpDir;
     TableDesc tt_desc;

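The two GenMRProcContext hunks only add the class-level javadoc that checkstyle's JavadocType rule requires on public nested types. The shape of the fix, sketched on a hypothetical class rather than the real context classes:

    /**
     * ExampleProcContext.
     *
     */
    public class ExampleProcContext {

      /**
       * ExampleUnionCtx. A brief sentence ending in a period satisfies the
       * JavadocType check for this public nested class.
       */
      public static class ExampleUnionCtx {
        private String taskTmpDir;

        public String getTaskTmpDir() {
          return taskTmpDir;
        }
      }
    }
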
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java Tue Feb  9 07:55:30 2010
@@ -34,7 +34,7 @@
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 
 /**
- * Processor for the rule - table scan followed by reduce sink
+ * Processor for the rule - table scan followed by reduce sink.
  */
 public class GenMRRedSink1 implements NodeProcessor {
 
@@ -42,7 +42,7 @@
   }
 
   /**
-   * Reduce Scan encountered
+   * Reduce Scan encountered.
    * 
    * @param nd
    *          the reduce sink operator encountered
@@ -77,13 +77,10 @@
       } else {
         GenMapRedUtils.splitPlan(op, ctx);
       }
-    }
-    // This will happen in case of joins. The current plan can be thrown away
-    // after being merged with the
-    // original plan
-    else {
-      GenMapRedUtils
-          .joinPlan(op, null, opMapTask, ctx, -1, false, false, false);
+    } else {
+      // This will happen in case of joins. The current plan can be thrown away
+      // after being merged with the original plan
+      GenMapRedUtils.joinPlan(op, null, opMapTask, ctx, -1, false, false, false);
       currTask = opMapTask;
       ctx.setCurrTask(currTask);
     }

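The GenMRRedSink1 hunk above (and the similar hunks in GenMRRedSink3, GenMRRedSink4 and MapJoinFactory further down) moves comments that used to sit between the closing brace and the else keyword into the body of the else branch, so that the brace and else share a line. A small stand-alone sketch of the before/after shape, with placeholder method names that are not Hive APIs:

    // Illustrative sketch only; splitPlan/joinPlan are placeholders here.
    public class ElseCommentExample {

      private static void splitPlan() {
        System.out.println("start a new task");
      }

      private static void joinPlan() {
        System.out.println("merge into the existing task");
      }

      public static void main(String[] args) {
        boolean taskExists = args.length > 0;
        if (!taskExists) {
          splitPlan();
        } else {
          // Comments about the else branch now live inside the branch
          // instead of floating between "}" and "else".
          joinPlan();
        }
      }
    }
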
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java Tue Feb  9 07:55:30 2010
@@ -32,7 +32,7 @@
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 
 /**
- * Processor for the rule - reduce sink followed by reduce sink
+ * Processor for the rule - reduce sink followed by reduce sink.
  */
 public class GenMRRedSink2 implements NodeProcessor {
 
@@ -40,7 +40,7 @@
   }
 
   /**
-   * Reduce Scan encountered
+   * Reduce Scan encountered.
    * 
    * @param nd
    *          the reduce sink operator encountered

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java Tue Feb  9 07:55:30 2010
@@ -36,7 +36,7 @@
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 
 /**
- * Processor for the rule - union followed by reduce sink
+ * Processor for the rule - union followed by reduce sink.
  */
 public class GenMRRedSink3 implements NodeProcessor {
 
@@ -44,7 +44,7 @@
   }
 
   /**
-   * Reduce Scan encountered
+   * Reduce Scan encountered.
    * 
    * @param nd
    *          the reduce sink operator encountered
@@ -87,11 +87,10 @@
       } else {
         GenMapRedUtils.splitPlan(op, ctx);
       }
-    }
-    // The union is already initialized. However, the union is walked from
-    // another input
-    // initUnionPlan is idempotent
-    else if (plan.getReducer() == reducer) {
+    } else if (plan.getReducer() == reducer) {
+      // The union is already initialized. However, the union is walked from
+      // another input
+      // initUnionPlan is idempotent
       GenMapRedUtils.initUnionPlan(op, ctx);
     } else {
       GenMapRedUtils.initUnionPlan(ctx, currTask, false);

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink4.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink4.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink4.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink4.java Tue Feb  9 07:55:30 2010
@@ -34,7 +34,7 @@
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 
 /**
- * Processor for the rule - map join followed by reduce sink
+ * Processor for the rule - map join followed by reduce sink.
  */
 public class GenMRRedSink4 implements NodeProcessor {
 
@@ -42,7 +42,7 @@
   }
 
   /**
-   * Reduce Scan encountered
+   * Reduce Scan encountered.
    * 
    * @param nd
    *          the reduce sink operator encountered
@@ -79,11 +79,10 @@
       } else {
         GenMapRedUtils.splitPlan(op, ctx);
       }
-    }
-    // There is a join after mapjoin. One of the branches of mapjoin has already
-    // been initialized.
-    // Initialize the current branch, and join with the original plan.
-    else {
+    } else {
+      // There is a join after mapjoin. One of the branches of mapjoin has already
+      // been initialized.
+      // Initialize the current branch, and join with the original plan.
       assert plan.getReducer() != reducer;
       GenMapRedUtils.joinPlan(op, currTask, opMapTask, ctx, -1, false, true,
           false);

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java Tue Feb  9 07:55:30 2010
@@ -34,14 +34,14 @@
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 
 /**
- * Processor for the rule - table scan
+ * Processor for the rule - table scan.
  */
 public class GenMRTableScan1 implements NodeProcessor {
   public GenMRTableScan1() {
   }
 
   /**
-   * Table Sink encountered
+   * Table Sink encountered.
    * 
    * @param nd
    *          the table sink operator encountered

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java Tue Feb  9 07:55:30 2010
@@ -44,14 +44,14 @@
 import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext.UnionParseContext;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 
 /**
- * Processor for the rule - TableScan followed by Union
+ * Processor for the rule - TableScan followed by Union.
  */
 public class GenMRUnion1 implements NodeProcessor {
 
@@ -175,12 +175,13 @@
     // Create a file sink operator for this file name
     Operator<? extends Serializable> fs_op = OperatorFactory.get(
         new FileSinkDesc(taskTmpDir, tt_desc, parseCtx.getConf().getBoolVar(
-            HiveConf.ConfVars.COMPRESSINTERMEDIATE)), parent.getSchema());
+        HiveConf.ConfVars.COMPRESSINTERMEDIATE)), parent.getSchema());
 
     assert parent.getChildOperators().size() == 1;
     parent.getChildOperators().set(0, fs_op);
 
-    List<Operator<? extends Serializable>> parentOpList = new ArrayList<Operator<? extends Serializable>>();
+    List<Operator<? extends Serializable>> parentOpList =
+      new ArrayList<Operator<? extends Serializable>>();
     parentOpList.add(parent);
     fs_op.setParentOperators(parentOpList);
 

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java Tue Feb  9 07:55:30 2010
@@ -20,6 +20,7 @@
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.ConcurrentModificationException;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
@@ -55,13 +56,13 @@
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.hadoop.hive.ql.parse.RowResolver;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
@@ -69,18 +70,17 @@
 
 /**
  * General utility common functions for the Processor to convert operator into
- * map-reduce tasks
+ * map-reduce tasks.
  */
-public class GenMapRedUtils {
+public final class GenMapRedUtils {
   private static Log LOG;
 
   static {
-    LOG = LogFactory
-        .getLog("org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils");
+    LOG = LogFactory.getLog("org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils");
   }
 
   /**
-   * Initialize the current plan by adding it to root tasks
+   * Initialize the current plan by adding it to root tasks.
    * 
    * @param op
    *          the reduce sink operator encountered
@@ -128,7 +128,7 @@
   }
 
   /**
-   * Initialize the current plan by adding it to root tasks
+   * Initialize the current plan by adding it to root tasks.
    * 
    * @param op
    *          the map join operator encountered
@@ -287,7 +287,7 @@
   }
 
   /**
-   * Merge the current task with the task for the current reducer
+   * Merge the current task with the task for the current reducer.
    * 
    * @param op
    *          operator being processed
@@ -388,7 +388,7 @@
   }
 
   /**
-   * Split the current plan by creating a temporary destination
+   * Split the current plan by creating a temporary destination.
    * 
    * @param op
    *          the reduce sink operator encountered
@@ -420,7 +420,7 @@
   }
 
   /**
-   * set the current task in the mapredWork
+   * set the current task in the mapredWork.
    * 
    * @param alias_id
    *          current alias
@@ -497,7 +497,7 @@
 
       // Later the properties have to come from the partition as opposed
       // to from the table in order to support versioning.
-      Path paths[];
+      Path[] paths;
       sampleDesc sampleDescr = parseCtx.getOpToSamplePruner().get(topOp);
 
       if (sampleDescr != null) {
@@ -567,9 +567,9 @@
       if (tblDir == null) {
         localPlan.getAliasToFetchWork()
             .put(
-                alias_id,
-                new FetchWork(FetchWork.convertPathToStringArray(partDir),
-                    partDesc));
+            alias_id,
+            new FetchWork(FetchWork.convertPathToStringArray(partDir),
+            partDesc));
       } else {
         localPlan.getAliasToFetchWork().put(alias_id,
             new FetchWork(tblDir.toString(), tblDesc));
@@ -579,7 +579,7 @@
   }
 
   /**
-   * set the current task in the mapredWork
+   * set the current task in the mapredWork.
    * 
    * @param alias
    *          current alias
@@ -621,7 +621,7 @@
   }
 
   /**
-   * set key and value descriptor
+   * set key and value descriptor.
    * 
    * @param plan
    *          current plan
@@ -655,7 +655,7 @@
   }
 
   /**
-   * create a new plan and return
+   * create a new plan and return.
    * 
    * @return the new plan
    */
@@ -671,7 +671,7 @@
   }
 
   /**
-   * insert in the map for the operator to row resolver
+   * insert in the map for the operator to row resolver.
    * 
    * @param op
    *          operator created
@@ -747,7 +747,8 @@
       }
     }
 
-    List<Operator<? extends Serializable>> parentOpList = new ArrayList<Operator<? extends Serializable>>();
+    List<Operator<? extends Serializable>> parentOpList =
+        new ArrayList<Operator<? extends Serializable>>();
     parentOpList.add(parent);
     fs_op.setParentOperators(parentOpList);
 
@@ -815,7 +816,7 @@
     opProcCtx.setCurrTask(childTask);
   }
 
-  static public void mergeMapJoinUnion(UnionOperator union,
+  public static void mergeMapJoinUnion(UnionOperator union,
       GenMRProcContext ctx, int pos) throws SemanticException {
     ParseContext parseCtx = ctx.getParseCtx();
     UnionProcContext uCtx = parseCtx.getUCtx();
@@ -866,7 +867,7 @@
           }
           notDone = false;
         }
-      } catch (java.util.ConcurrentModificationException e) {
+      } catch (ConcurrentModificationException e) {
       }
     } else {
       setTaskPlan(ctx.getCurrAliasId(), ctx.getCurrTopOp(), uPlan, false, ctx);
@@ -880,4 +881,8 @@
     ctx.getMapCurrCtx().put(union,
         new GenMapRedCtx(ctx.getCurrTask(), null, null));
   }
+
+  private GenMapRedUtils() {
+    // prevent instantiation
+  }
 }

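Besides making the class final and adding the trailing private constructor, the GenMapRedUtils hunks swap the C-style array declaration "Path paths[]" for "Path[] paths" and catch ConcurrentModificationException through the newly added import rather than its fully qualified name. Both cleanups appear in the small sketch below; the surrounding class is invented for illustration and is not code from this commit:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.ConcurrentModificationException;
    import java.util.List;

    // Illustrative sketch only; not code from this commit.
    public class StyleFixExample {

      public static void main(String[] args) {
        // Java-style array declaration: the brackets go with the type,
        // not the variable name (as in the Path[] paths change above).
        String[] names = {"a", "b", "c"};

        List<String> list = new ArrayList<String>(Arrays.asList(names));
        try {
          for (String name : list) {
            list.remove(name); // structural change during iteration
          }
        } catch (ConcurrentModificationException e) {
          // Caught via the imported simple name instead of the fully
          // qualified java.util.ConcurrentModificationException.
          System.out.println("caught: " + e);
        }
      }
    }
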
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java Tue Feb  9 07:55:30 2010
@@ -98,8 +98,7 @@
     return new NodeProcessor() {
       @Override
       public Object process(Node nd, Stack<Node> stack,
-          NodeProcessorCtx procCtx, Object... nodeOutputs)
-          throws SemanticException {
+          NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
         return null;
       }
     };
@@ -109,6 +108,10 @@
     return new BucketGroupByProcessor(pctx);
   }
 
+  /**
+   * BucketGroupByProcessor.
+   *
+   */
   public class BucketGroupByProcessor implements NodeProcessor {
 
     protected ParseContext pGraphContext;
@@ -177,7 +180,8 @@
         if (topOp == null || (!(topOp instanceof TableScanOperator))) {
           // this is in a sub-query.
           // In future, we need to infer subq's columns propery. For example
-          // "select key, count(1) from (from clustergroupbyselect key, value where ds='210') group by key, 3;",
+          // "select key, count(1) 
+          // from (from clustergroupbyselect key, value where ds='210') group by key, 3;",
           // even though the group by op is in a subquery, it can be changed to
           // bucket groupby.
           return;
@@ -245,8 +249,7 @@
      * @throws SemanticException
      */
     private boolean matchBucketOrSortedColumns(List<String> groupByCols,
-        List<String> bucketCols, List<String> sortCols)
-        throws SemanticException {
+        List<String> bucketCols, List<String> sortCols) throws SemanticException {
       boolean ret = false;
 
       if (sortCols == null || sortCols.size() == 0) {
@@ -294,6 +297,10 @@
     }
   }
 
+  /**
+   * GroupByOptProcCtx.
+   *
+   */
   public class GroupByOptProcCtx implements NodeProcessorCtx {
   }
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java Tue Feb  9 07:55:30 2010
@@ -86,7 +86,7 @@
   }
 
   /**
-   * Find all big tables from STREAMTABLE hints
+   * Find all big tables from STREAMTABLE hints.
    * 
    * @param joinCtx
    *          The join context
@@ -106,7 +106,7 @@
 
   /**
    * Reorder the tables in a join operator appropriately (by reordering the tags
-   * of the reduces sinks)
+   * of the reduces sinks).
    * 
    * @param joinOp
    *          The join operator to be processed

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java Tue Feb  9 07:55:30 2010
@@ -42,15 +42,15 @@
 import org.apache.hadoop.hive.ql.parse.ErrorMsg;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 
 /**
- * Operator factory for MapJoin processing
+ * Operator factory for MapJoin processing.
  */
-public class MapJoinFactory {
+public final class MapJoinFactory {
 
   public static int getPositionParent(MapJoinOperator op, Stack<Node> stack) {
     int pos = 0;
@@ -65,7 +65,7 @@
   }
 
   /**
-   * TableScan followed by MapJoin
+   * TableScan followed by MapJoin.
    */
   public static class TableScanMapJoin implements NodeProcessor {
 
@@ -99,10 +99,9 @@
       if (opMapTask == null) {
         assert currPlan.getReducer() == null;
         GenMapRedUtils.initMapJoinPlan(mapJoin, ctx, false, false, false, pos);
-      }
-      // The current plan can be thrown away after being merged with the
-      // original plan
-      else {
+      } else {
+        // The current plan can be thrown away after being merged with the
+        // original plan
         GenMapRedUtils.joinPlan(mapJoin, null, opMapTask, ctx, pos, false,
             false, false);
         currTask = opMapTask;
@@ -116,7 +115,7 @@
   }
 
   /**
-   * ReduceSink followed by MapJoin
+   * ReduceSink followed by MapJoin.
    */
   public static class ReduceSinkMapJoin implements NodeProcessor {
 
@@ -150,10 +149,9 @@
         assert cplan.getReducer() == null;
         opTaskMap.put(mapJoin, currTask);
         opProcCtx.setCurrMapJoinOp(null);
-      }
-      // The current plan can be thrown away after being merged with the
-      // original plan
-      else {
+      } else {
+        // The current plan can be thrown away after being merged with the
+        // original plan
         GenMapRedUtils.joinPlan(mapJoin, currTask, opMapTask, opProcCtx, pos,
             false, false, false);
         currTask = opMapTask;
@@ -165,7 +163,7 @@
   }
 
   /**
-   * MapJoin followed by Select
+   * MapJoin followed by Select.
    */
   public static class MapJoin implements NodeProcessor {
 
@@ -233,7 +231,7 @@
       // Create a file sink operator for this file name
       Operator<? extends Serializable> fs_op = OperatorFactory.get(
           new FileSinkDesc(taskTmpDir, tt_desc, parseCtx.getConf().getBoolVar(
-              HiveConf.ConfVars.COMPRESSINTERMEDIATE)), mapJoin.getSchema());
+          HiveConf.ConfVars.COMPRESSINTERMEDIATE)), mapJoin.getSchema());
 
       assert mapJoin.getChildOperators().size() == 1;
       mapJoin.getChildOperators().set(0, fs_op);
@@ -258,7 +256,7 @@
   }
 
   /**
-   * MapJoin followed by MapJoin
+   * MapJoin followed by MapJoin.
    */
   public static class MapJoinMapJoin implements NodeProcessor {
 
@@ -301,10 +299,9 @@
       if (opMapTask == null) {
         assert currPlan.getReducer() == null;
         GenMapRedUtils.initMapJoinPlan(mapJoin, ctx, true, false, false, pos);
-      }
-      // The current plan can be thrown away after being merged with the
-      // original plan
-      else {
+      } else {
+        // The current plan can be thrown away after being merged with the
+        // original plan
         GenMapRedUtils.joinPlan(mapJoin, currTask, opMapTask, ctx, pos, false,
             true, false);
         currTask = opMapTask;
@@ -317,7 +314,7 @@
   }
 
   /**
-   * Union followed by MapJoin
+   * Union followed by MapJoin.
    */
   public static class UnionMapJoin implements NodeProcessor {
 
@@ -367,10 +364,9 @@
         ctx.setCurrMapJoinOp(mapJoin);
         GenMapRedUtils.initMapJoinPlan(mapJoin, ctx, true, true, false, pos);
         ctx.setCurrUnionOp(null);
-      }
-      // The current plan can be thrown away after being merged with the
-      // original plan
-      else {
+      } else {
+        // The current plan can be thrown away after being merged with the
+        // original plan
         Task<? extends Serializable> uTask = ctx.getUnionTask(
             ctx.getCurrUnionOp()).getUTask();
         if (uTask.getId().equals(opMapTask.getId())) {
@@ -409,4 +405,8 @@
   public static NodeProcessor getMapJoinMapJoin() {
     return new MapJoinMapJoin();
   }
+
+  private MapJoinFactory() {
+    // prevent instantiation
+  }
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java Tue Feb  9 07:55:30 2010
@@ -51,11 +51,11 @@
 import org.apache.hadoop.hive.ql.parse.QBJoinTree;
 import org.apache.hadoop.hive.ql.parse.RowResolver;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.JoinDesc;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.SelectDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
@@ -71,7 +71,7 @@
   private ParseContext pGraphContext;
 
   /**
-   * empty constructor
+   * empty constructor.
    */
   public MapJoinProcessor() {
     pGraphContext = null;
@@ -192,7 +192,7 @@
             outputColumnNames.add(outputCol);
             ExprNodeDesc colDesc = new ExprNodeColumnDesc(valueInfo.getType(),
                 valueInfo.getInternalName(), valueInfo.getTabAlias(), valueInfo
-                    .getIsPartitionCol());
+                .getIsPartitionCol());
             values.add(colDesc);
             outputRS.put(key, field, new ColumnInfo(outputCol, valueInfo
                 .getType(), valueInfo.getTabAlias(), valueInfo
@@ -240,9 +240,9 @@
 
     MapJoinOperator mapJoinOp = (MapJoinOperator) putOpInsertMap(
         OperatorFactory.getAndMakeChild(new MapJoinDesc(keyExprMap,
-            keyTableDesc, valueExprMap, valueTableDescs, outputColumnNames,
-            mapJoinPos, joinCondns), new RowSchema(outputRS.getColumnInfos()),
-            newPar), outputRS);
+        keyTableDesc, valueExprMap, valueTableDescs, outputColumnNames,
+        mapJoinPos, joinCondns), new RowSchema(outputRS.getColumnInfos()),
+        newPar), outputRS);
 
     mapJoinOp.getConf().setReversedExprs(op.getConf().getReversedExprs());
     mapJoinOp.setColumnExprMap(colExprMap);
@@ -297,7 +297,7 @@
 
     SelectOperator sel = (SelectOperator) putOpInsertMap(
         OperatorFactory.getAndMakeChild(select, new RowSchema(inputRR
-            .getColumnInfos()), input), inputRR);
+        .getColumnInfos()), input), inputRR);
 
     sel.setColumnExprMap(colExprMap);
 
@@ -420,10 +420,14 @@
     return pGraphContext;
   }
 
+  /**
+   * CurrentMapJoin.
+   *
+   */
   public static class CurrentMapJoin implements NodeProcessor {
 
     /**
-     * Store the current mapjoin in the context
+     * Store the current mapjoin in the context.
      */
     @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -436,10 +440,14 @@
     }
   }
 
+  /**
+   * MapJoinFS.
+   *
+   */
   public static class MapJoinFS implements NodeProcessor {
 
     /**
-     * Store the current mapjoin in a list of mapjoins followed by a filesink
+     * Store the current mapjoin in a list of mapjoins followed by a filesink.
      */
     @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -466,10 +474,14 @@
     }
   }
 
+  /**
+   * MapJoinDefault.
+   *
+   */
   public static class MapJoinDefault implements NodeProcessor {
 
     /**
-     * Store the mapjoin in a rejected list
+     * Store the mapjoin in a rejected list.
      */
     @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -487,10 +499,14 @@
     }
   }
 
+  /**
+   * Default.
+   *
+   */
   public static class Default implements NodeProcessor {
 
     /**
-     * nothing to do
+     * Nothing to do.
      */
     @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -515,10 +531,14 @@
     return new CurrentMapJoin();
   }
 
+  /**
+   * MapJoinWalkerCtx.
+   *
+   */
   public static class MapJoinWalkerCtx implements NodeProcessorCtx {
-    List<MapJoinOperator> listMapJoinsNoRed;
-    List<MapJoinOperator> listRejectedMapJoins;
-    MapJoinOperator currMapJoinOp;
+    private List<MapJoinOperator> listMapJoinsNoRed;
+    private List<MapJoinOperator> listRejectedMapJoins;
+    private MapJoinOperator currMapJoinOp;
 
     /**
      * @param listMapJoinsNoRed

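The last MapJoinProcessor hunk tightens MapJoinWalkerCtx by making its fields private, in line with checkstyle's VisibilityModifier rule. A generic sketch of that encapsulation, on an invented class rather than the real walker context:

    import java.util.ArrayList;
    import java.util.List;

    // Illustrative sketch only; WalkerCtxExample is not a Hive class.
    public class WalkerCtxExample {

      // Private fields instead of package-private ones; callers go
      // through the accessors below.
      private List<String> visited = new ArrayList<String>();
      private String current;

      public List<String> getVisited() {
        return visited;
      }

      public String getCurrent() {
        return current;
      }

      public void setCurrent(String current) {
        this.current = current;
      }
    }
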

