hive-commits mailing list archives

From: ser...@apache.org
Subject: svn commit: r1573404 - in /hive/trunk: common/src/java/org/apache/hadoop/hive/conf/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/ ql/src/java/org/apach...
Date: Sun, 02 Mar 2014 23:42:51 GMT
Author: sershe
Date: Sun Mar  2 23:42:50 2014
New Revision: 1573404

URL: http://svn.apache.org/r1573404
Log:
HIVE-6429 MapJoinKey has large memory overhead in typical cases (Sergey Shelukhin, reviewed by Gunther Hagleitner, Jitendra Nath Pandey)
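
The gist of the change, for context: a map-join key previously stored an Object[] of writable values, which carries an object header and a reference for every field; the new MapJoinKeyBytes flattens the whole key into a single byte[], which still supports the equality and hashCode a hash table needs. Below is a hypothetical, standalone Java sketch (plain JDK, not Hive code; the class name KeyOverheadSketch is invented) showing that both representations behave identically as hash keys:

import java.util.Arrays;

// Hypothetical illustration, not Hive code: an Object[] key allocates one array plus
// one object per field, while the equivalent byte[] key is a single compact object.
// Both support the content-based equality and hashCode a hash-join table requires.
public class KeyOverheadSketch {
  public static void main(String[] args) {
    Object[] objKey1 = new Object[] { 148, "key" };   // array + boxed Integer + String
    Object[] objKey2 = new Object[] { 148, "key" };
    System.out.println(Arrays.equals(objKey1, objKey2));                          // true
    System.out.println(Arrays.hashCode(objKey1) == Arrays.hashCode(objKey2));     // true

    byte[] bytesKey1 = { 0x03, 0, 0, 0, (byte) 148, 'k', 'e', 'y' };  // one flat object
    byte[] bytesKey2 = bytesKey1.clone();
    System.out.println(Arrays.equals(bytesKey1, bytesKey2));                      // true
    System.out.println(Arrays.hashCode(bytesKey1) == Arrays.hashCode(bytesKey2)); // true
  }
}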

Added:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyBytes.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyObject.java
Modified:
    hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinEqualityTableContainer.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinKey.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinTableContainer.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/Utilities.java
    hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java
    hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/OutputByteBuffer.java
    hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/io/HiveDecimalWritable.java
    hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
    hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java

Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Sun Mar  2 23:42:50 2014
@@ -431,6 +431,7 @@ public class HiveConf extends Configurat
     // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row,
     // need to remove by hive .13. Also, do not change default (see SMB operator)
     HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100),
+    HIVEMAPJOINUSEOPTIMIZEDKEYS("hive.mapjoin.optimized.keys", true),
     HIVEMAPJOINLAZYHASHTABLE("hive.mapjoin.lazy.hashtable", true),
 
     HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000),
@@ -445,6 +446,7 @@ public class HiveConf extends Configurat
     HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false),
     HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30),
 
+
     // for hive udtf operator
     HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false),
 
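
The new hive.mapjoin.optimized.keys flag (default true) gates the byte-array keys. A minimal sketch of how a loader consults it, mirroring the Tez HashTableLoader change later in this commit (the class and method names here are invented for illustration):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject;

// Hypothetical sketch: seeding the "previous key" with a MapJoinKeyObject when the
// flag is off forces the object-array representation for every key read afterwards.
class OptimizedKeyFlagSketch {
  static MapJoinKey seedKey(HiveConf hconf) {
    if (!HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDKEYS)) {
      return new MapJoinKeyObject(); // disables MapJoinKeyBytes for this load
    }
    return null; // a null "previous key" lets the first key read pick the representation
  }
}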

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java Sun Mar  2 23:42:50 2014
@@ -121,35 +121,6 @@ public abstract class AbstractMapJoinOpe
     return OperatorType.MAPJOIN;
   }
 
-  // returns true if there are elements in key list and any of them is null
-  protected boolean hasAnyNulls(ArrayList<Object> key) {
-    if (key != null && key.size() > 0) {
-      for (int i = 0; i < key.size(); i++) {
-        if (key.get(i) == null && (nullsafes == null || !nullsafes[i])) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  // returns true if there are elements in key list and any of them is null
-  protected boolean hasAnyNulls(Object[] key) {
-    if (key != null && key.length> 0) {
-      for (int i = 0; i < key.length; i++) {
-        if (key[i] == null && (nullsafes == null || !nullsafes[i])) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  // returns true if there are elements in key list and any of them is null
-  protected boolean hasAnyNulls(MapJoinKey key) {
-    return key.hasAnyNulls(nullsafes);
-  }
-
   @Override
   public void closeOp(boolean abort) throws HiveException {
     super.closeOp(abort);

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java Sun Mar  2 23:42:50 2014
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.exec;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -34,4 +35,6 @@ public interface HashTableLoader {
 
   void load(MapJoinTableContainer[] mapJoinTables, MapJoinTableContainerSerDe[] mapJoinTableSerdes)
       throws HiveException;
+
+  MapJoinKey getKeyType();
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java Sun Mar  2 23:42:50 2014
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionHandler;
 import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectSerDeContext;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinEagerRowContainer;
@@ -228,9 +229,9 @@ public class HashTableSinkOperator exten
   @Override
   public void processOp(Object row, int tag) throws HiveException {
     alias = (byte)tag;
-    // compute keys and values as StandardObjects
-    MapJoinKey key = JoinUtil.computeMapJoinKeys(null, row, joinKeys[alias],
-        joinKeysObjectInspectors[alias]);
+    // compute keys and values as StandardObjects. Use non-optimized key (MR).
+    MapJoinKey key = MapJoinKey.readFromRow(null, new MapJoinKeyObject(),
+        row, joinKeys[alias], joinKeysObjectInspectors[alias], true);
     Object[] value = EMPTY_OBJECT_ARRAY;
     if((hasFilter(alias) && filterMaps[alias].length > 0) || joinValues[alias].size() > 0) {
       value = JoinUtil.computeMapJoinValues(row, joinValues[alias],

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java Sun Mar  2 23:42:50 2014
@@ -23,7 +23,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
 import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
 import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -149,30 +148,6 @@ public class JoinUtil {
   }
 
   /**
-   * Return the key as a standard object. StandardObject can be inspected by a
-   * standard ObjectInspector. The first parameter a MapJoinKey can
-   * be null if the caller would like a new object to be instantiated.
-   */
-  public static MapJoinKey computeMapJoinKeys(MapJoinKey key, Object row,
-      List<ExprNodeEvaluator> keyFields, List<ObjectInspector> keyFieldsOI)
-      throws HiveException {
-    int size = keyFields.size();
-    if(key == null || key.getKey().length != size) {
-      key = new MapJoinKey(new Object[size]);
-    }
-    Object[] array = key.getKey();
-    for (int keyIndex = 0; keyIndex < size; keyIndex++) {
-      array[keyIndex] = (ObjectInspectorUtils.copyToStandardObject(keyFields.get(keyIndex)
-          .evaluate(row), keyFieldsOI.get(keyIndex), ObjectInspectorCopyOption.WRITABLE));
-    }
-    return key;
-  }
-
-
-
-
-
-  /**
    * Return the value as a standard object. StandardObject can be inspected by a
    * standard ObjectInspector.
    */

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java Sun Mar  2 23:42:50 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
+import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.SerDe;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -53,7 +54,7 @@ public class MapJoinOperator extends Abs
   private transient String serdeKey;
   private transient ObjectCache cache;
 
-  private HashTableLoader loader;
+  protected HashTableLoader loader;
 
   protected transient MapJoinTableContainer[] mapJoinTables;
   private transient MapJoinTableContainerSerDe[] mapJoinTableSerdes;
@@ -167,9 +168,11 @@ public class MapJoinOperator extends Abs
     }
   }
 
+  protected transient final Output outputForMapJoinKey = new Output();
   protected MapJoinKey computeMapJoinKey(Object row, byte alias) throws HiveException {
-    return JoinUtil.computeMapJoinKeys(key, row, joinKeys[alias],
-        joinKeysObjectInspectors[alias]);
+    MapJoinKey refKey = (key == null ? loader.getKeyType() : key);
+    return MapJoinKey.readFromRow(outputForMapJoinKey,
+        refKey, row, joinKeys[alias], joinKeysObjectInspectors[alias], key == refKey);
   }
 
   @Override
@@ -184,12 +187,13 @@ public class MapJoinOperator extends Abs
 
       // compute keys and values as StandardObjects
       key = computeMapJoinKey(row, alias);
+      int fieldCount = joinKeys[alias].size();
       boolean joinNeeded = false;
       for (byte pos = 0; pos < order.length; pos++) {
         if (pos != alias) {
           MapJoinRowContainer rowContainer = mapJoinTables[pos].get(key);
           // there is no join-value or join-key has all null elements
-          if (rowContainer == null || key.hasAnyNulls(nullsafes)) {
+          if (rowContainer == null || key.hasAnyNulls(fieldCount, nullsafes)) {
             if (!noOuterJoin) {
               joinNeeded = true;
               storage[pos] = dummyObjVectors[pos];

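The handshake in computeMapJoinKey above is subtle: on the first row, key is null, so the loader's key type is passed only as a template (key == refKey is false, so readFromRow allocates a fresh key of the matching class); on later rows the previous key itself is the template and may be overwritten in place. A hypothetical, standalone model of that decision (plain JDK, not Hive code):

// Hypothetical model of the reuse flag passed to MapJoinKey.readFromRow: strings stand
// in for key objects, and reference equality mirrors the key == refKey test above.
public class KeyReuseSketch {
  static String read(String template, boolean mayReuse) {
    return mayReuse ? "overwrite " + template : "allocate new " + template;
  }

  public static void main(String[] args) {
    String key = null;
    String loaderKeyType = "MapJoinKeyBytes";        // what loader.getKeyType() returned
    String refKey = (key == null) ? loaderKeyType : key;
    System.out.println(read(refKey, key == refKey)); // first row: "allocate new ..."
    key = refKey;                                    // a key now exists
    refKey = (key == null) ? loaderKeyType : key;
    System.out.println(read(refKey, key == refKey)); // later rows: "overwrite ..."
  }
}
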
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java Sun Mar  2 23:42:50 2014
@@ -36,6 +36,8 @@ import org.apache.hadoop.hive.ql.exec.Op
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.TemporaryHashSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -154,4 +156,9 @@ public class HashTableLoader implements 
 
     Arrays.fill(tables, null);
   }
+
+  @Override
+  public MapJoinKey getKeyType() {
+    return new MapJoinKeyObject(); // always use Object-array keys
+  }
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java Sun Mar  2 23:42:50 2014
@@ -19,91 +19,173 @@
 package org.apache.hadoop.hive.ql.exec.persistence;
 
 import java.io.IOException;
-import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapper;
+import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapperBatch;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.SerDe;
 import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe;
+import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils;
+import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe.StringWrapper;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+
 import org.apache.hadoop.io.Writable;
 
-@SuppressWarnings("deprecation")
-public class MapJoinKey {
-  private static final Object[] EMPTY_OBJECT_ARRAY = new Object[0];
-
-  private Object[] key;
-  
-  public MapJoinKey(Object[] key) {
-    this.key = key;
-  }
-  public MapJoinKey() {
-    this(EMPTY_OBJECT_ARRAY);
-  }
-
-  public Object[] getKey() {
-    return key;
-  }
-  public boolean hasAnyNulls(boolean[] nullsafes){
-    if (key != null && key.length > 0) {
-      for (int i = 0; i < key.length; i++) {
-        if (key[i] == null && (nullsafes == null || !nullsafes[i])) {
-          return true;
-        }
+/**
+ * The base class for map-join keys; it also acts as a factory for creating and reading
+ * the keys, choosing whether the size-optimized, byte-array-based MapJoinKeyBytes can be used.
+ */
+public abstract class MapJoinKey {
+  private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
+
+  @SuppressWarnings("deprecation")
+  public static MapJoinKey read(Output output, MapJoinKey key, MapJoinObjectSerDeContext context,
+      Writable writable, boolean mayReuseKey) throws SerDeException {
+    SerDe serde = context.getSerDe();
+    Object obj = serde.deserialize(writable);
+    boolean useOptimized = useOptimizedKeyBasedOnPrev(key);
+    if (useOptimized || key == null) {
+      byte[] structBytes = serializeKey(output, obj, serde.getObjectInspector());
+      if (structBytes != null) {
+        return MapJoinKeyBytes.fromBytes(key, mayReuseKey, structBytes);
+      } else if (useOptimized) {
+        throw new SerDeException(
+            "Failed to serialize " + obj + " even though optimized keys are used");
       }
     }
-    return false;
+    MapJoinKeyObject result = mayReuseKey ? (MapJoinKeyObject)key : new MapJoinKeyObject();
+    result.read(serde.getObjectInspector(), obj);
+    return result;
   }
-  
-  
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + Arrays.hashCode(key);
+
+  private static byte[] serializeKey(
+      Output byteStream, Object obj, ObjectInspector oi) throws SerDeException {
+    if (null == obj || !(oi instanceof StructObjectInspector)) {
+      return null; // not supported
+    }
+    StructObjectInspector soi = (StructObjectInspector)oi;
+    List<? extends StructField> fields = soi.getAllStructFieldRefs();
+    int size = fields.size();
+    if (size > 8) {
+      return null; // not supported
+    } else if (size == 0) {
+      return EMPTY_BYTE_ARRAY; // shortcut for null keys
+    }
+    Object[] fieldData = new Object[size];
+    List<ObjectInspector> fieldOis = new ArrayList<ObjectInspector>(size);
+    for (int i = 0; i < size; ++i) {
+      StructField field = fields.get(i);
+      fieldData[i] = soi.getStructFieldData(obj, field);
+      fieldOis.add(field.getFieldObjectInspector());
+    }
+
+    return serializeRowCommon(byteStream, fieldData, fieldOis);
+  }
+
+  public static MapJoinKey readFromVector(Output output, MapJoinKey key, VectorHashKeyWrapper kw,
+      VectorExpressionWriter[] keyOutputWriters, VectorHashKeyWrapperBatch keyWrapperBatch,
+      boolean mayReuseKey) throws HiveException {
+    boolean useOptimized = useOptimizedKeyBasedOnPrev(key);
+    if (useOptimized || key == null) {
+      byte[] structBytes = null;
+      try {
+        structBytes = serializeVector(output, kw, keyOutputWriters, keyWrapperBatch);
+      } catch (SerDeException e) {
+        throw new HiveException(e);
+      }
+      if (structBytes != null) {
+        return MapJoinKeyBytes.fromBytes(key, mayReuseKey, structBytes);
+      } else if (useOptimized) {
+        throw new HiveException(
+            "Failed to serialize " + kw + " even though optimized keys are used");
+      }
+    }
+    MapJoinKeyObject result = mayReuseKey ? (MapJoinKeyObject)key : new MapJoinKeyObject();
+    result.readFromVector(kw, keyOutputWriters, keyWrapperBatch);
     return result;
   }
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj)
-      return true;
-    if (obj == null)
-      return false;
-    if (getClass() != obj.getClass())
-      return false;
-    MapJoinKey other = (MapJoinKey) obj;
-    if (!Arrays.equals(key, other.key))
-      return false;
-    return true;
-  }
-
-  public void read(MapJoinObjectSerDeContext context, ObjectInputStream in, Writable container)
-  throws IOException, SerDeException {
-    container.readFields(in);
-    read(context, container);
+
+  private static byte[] serializeVector(Output byteStream, VectorHashKeyWrapper kw,
+      VectorExpressionWriter[] keyOutputWriters, VectorHashKeyWrapperBatch keyWrapperBatch)
+          throws HiveException, SerDeException {
+    Object[] fieldData = new Object[keyOutputWriters.length];
+    List<ObjectInspector> fieldOis = new ArrayList<ObjectInspector>();
+    for (int i = 0; i < keyOutputWriters.length; ++i) {
+      VectorExpressionWriter writer = keyOutputWriters[i];
+      fieldOis.add(writer.getObjectInspector());
+      // This is rather convoluted... to simplify for perf, we could call getRawKeyValue
+      // instead of writable, and serialize based on Java type as opposed to OI.
+      fieldData[i] = keyWrapperBatch.getWritableKeyValue(kw, i, writer);
+    }
+    return serializeRowCommon(byteStream, fieldData, fieldOis);
   }
 
-  @SuppressWarnings("unchecked")
-  public void read(MapJoinObjectSerDeContext context, Writable container) throws SerDeException {
-    SerDe serde = context.getSerDe();
-    List<Object> value = (List<Object>)ObjectInspectorUtils.copyToStandardObject(serde.deserialize(container),
-        serde.getObjectInspector(), ObjectInspectorCopyOption.WRITABLE);
+  public static MapJoinKey readFromRow(Output output, MapJoinKey key, Object row,
+      List<ExprNodeEvaluator> fields, List<ObjectInspector> keyFieldsOI, boolean mayReuseKey)
+          throws HiveException {
+    Object[] fieldObjs = new Object[fields.size()];
+    for (int keyIndex = 0; keyIndex < fields.size(); ++keyIndex) {
+      fieldObjs[keyIndex] = fields.get(keyIndex).evaluate(row);
+    }
+    boolean useOptimized = useOptimizedKeyBasedOnPrev(key);
+    if (useOptimized || key == null) {
+      try {
+        byte[] structBytes = serializeRow(output, fieldObjs, keyFieldsOI);
+        if (structBytes != null) {
+          return MapJoinKeyBytes.fromBytes(key, mayReuseKey, structBytes);
+        } else if (useOptimized) {
+          throw new HiveException(
+              "Failed to serialize " + row + " even though optimized keys are used");
+        }
+      } catch (SerDeException ex) {
+        throw new HiveException("Serialization error", ex);
+      }
+    }
+    MapJoinKeyObject result = mayReuseKey ? (MapJoinKeyObject)key : new MapJoinKeyObject();
+    result.readFromRow(fieldObjs, keyFieldsOI);
+    return result;
+  }
+
+  private static byte[] serializeRow(Output byteStream,
+      Object[] fieldData, List<ObjectInspector> fieldOis) throws SerDeException {
+    if (fieldData.length > 8) {
+      return null; // not supported
+    } else if (fieldData.length == 0) {
+      return EMPTY_BYTE_ARRAY; // shortcut for null keys
+    }
+    assert fieldData.length == fieldOis.size();
+    return serializeRowCommon(byteStream, fieldData, fieldOis);
+  }
 
-    if(value == null) {
-      key = EMPTY_OBJECT_ARRAY;
+  private static byte[] serializeRowCommon(Output byteStream,
+      Object[] fieldData, List<ObjectInspector> fieldOis) throws SerDeException {
+    if (byteStream == null) {
+      byteStream = new Output();
     } else {
-      key = value.toArray();
+      byteStream.reset();
     }
+    LazyBinarySerDe.serializeStruct(byteStream, fieldData, fieldOis);
+    return Arrays.copyOf(byteStream.getData(), byteStream.getCount());
   }
 
-  public void write(MapJoinObjectSerDeContext context, ObjectOutputStream out)
-  throws IOException, SerDeException {
-    SerDe serde = context.getSerDe();
-    ObjectInspector objectInspector = context.getStandardOI();
-    Writable container = serde.serialize(key, objectInspector);
-    container.write(out);
+  private static boolean useOptimizedKeyBasedOnPrev(MapJoinKey key) {
+    return (key != null) && (key instanceof MapJoinKeyBytes);
   }
-}
+
+  public abstract void write(MapJoinObjectSerDeContext context, ObjectOutputStream out)
+      throws IOException, SerDeException;
+
+  public abstract boolean hasAnyNulls(int fieldCount, boolean[] nullsafes);
+}
\ No newline at end of file
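
One detail of serializeRowCommon worth noting: a single growable Output buffer is reset and reused across keys, and each key copies out exactly the bytes it wrote (Arrays.copyOf up to getCount()), so the stored byte[] is key-sized with no slack. A standalone analogue using the plain JDK (the class name is invented; Hive's Output plays the role of the stream here):

import java.io.ByteArrayOutputStream;
import java.util.Arrays;

// Hypothetical analogue of the buffer handling in serializeRowCommon: one scratch
// buffer serves all keys, and each serialized key is copied out at its exact size.
public class ScratchBufferSketch {
  private final ByteArrayOutputStream scratch = new ByteArrayOutputStream();

  byte[] serialize(String field) {
    scratch.reset();                      // reuse the same backing buffer
    byte[] data = field.getBytes();
    scratch.write(data, 0, data.length);
    return scratch.toByteArray();         // exact-size copy, like Arrays.copyOf(...)
  }

  public static void main(String[] args) {
    ScratchBufferSketch s = new ScratchBufferSketch();
    System.out.println(Arrays.toString(s.serialize("k1")));
    System.out.println(Arrays.toString(s.serialize("key2"))); // buffer was reused
  }
}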

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyBytes.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyBytes.java?rev=1573404&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyBytes.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyBytes.java Sun Mar  2 23:42:50 2014
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.persistence;
+
+import java.io.ObjectOutputStream;
+import java.util.Arrays;
+
+/**
+ * Size-optimized implementation of MapJoinKey. A MapJoinKey only needs to support equality
+ * and hashCode, so for simple cases we can write the requisite writables that make up the
+ * key into a byte array and retain the functionality without storing the writables themselves.
+ */
+@SuppressWarnings("deprecation")
+public class MapJoinKeyBytes extends MapJoinKey {
+  /**
+   * First byte is the null-flags byte; the rest is written using LazyBinarySerDe.
+   */
+  private byte[] array;
+
+  private void setBytes(byte[] array) {
+    this.array = array;
+  }
+
+  @Override
+  public void write(MapJoinObjectSerDeContext context, ObjectOutputStream out) {
+    throw new UnsupportedOperationException(this.getClass().getName() + " cannot be serialized");
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null || !(obj instanceof MapJoinKeyBytes)) return false;
+    MapJoinKeyBytes other = (MapJoinKeyBytes)obj;
+    return Arrays.equals(this.array, other.array);
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(array);
+  }
+
+  @Override
+  public boolean hasAnyNulls(int fieldCount, boolean[] nullsafes) {
+    if (this.array.length == 0) return false; // null key
+    byte nulls = (byte)(this.array[0]);
+    for (int i = 0; i < fieldCount; ++i) {
+      if (((nulls & 1) == 0) && (nullsafes == null || !nullsafes[i])) return true;
+      nulls >>>= 1;
+    }
+    return false;
+  }
+
+  public static MapJoinKey fromBytes(MapJoinKey key, boolean mayReuseKey, byte[] structBytes) {
+    MapJoinKeyBytes result = (mayReuseKey && key != null)
+        ? (MapJoinKeyBytes)key : new MapJoinKeyBytes();
+    result.setBytes(structBytes);
+    return result;
+  }
+}
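
To make the hasAnyNulls bit logic above concrete: assuming LazyBinarySerDe's convention that bit i of the leading null byte is set when field i is non-null (keys are capped at 8 fields, so one byte suffices), here is a standalone walk-through (plain JDK, not Hive code; only the null byte is populated, since the field data does not matter for this check):

// Hypothetical walk-through of the null-flag byte: 0b101 means fields 0 and 2 are
// non-null and field 1 is null.
public class NullBitsSketch {
  static boolean hasAnyNulls(byte[] array, int fieldCount, boolean[] nullsafes) {
    if (array.length == 0) return false;  // empty key: nothing to be null
    byte nulls = array[0];
    for (int i = 0; i < fieldCount; ++i) {
      if (((nulls & 1) == 0) && (nullsafes == null || !nullsafes[i])) return true;
      nulls >>>= 1;
    }
    return false;
  }

  public static void main(String[] args) {
    byte[] key = { 0b0000_0101 };
    System.out.println(hasAnyNulls(key, 3, null));                                  // true
    System.out.println(hasAnyNulls(key, 3, new boolean[] { false, true, false }));  // false: field 1 is null-safe
  }
}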

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyObject.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyObject.java?rev=1573404&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyObject.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyObject.java Sun Mar  2 23:42:50 2014
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.persistence;
+
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapper;
+import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapperBatch;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.SerDe;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
+import org.apache.hadoop.io.Writable;
+
+@SuppressWarnings("deprecation")
+public class MapJoinKeyObject extends MapJoinKey {
+  private static final Object[] EMPTY_OBJECT_ARRAY = new Object[0];
+
+  private Object[] key;
+
+  public MapJoinKeyObject(Object[] key) {
+    this.key = key;
+  }
+  public MapJoinKeyObject() {
+    this(EMPTY_OBJECT_ARRAY);
+  }
+
+  public int getKeyLength() {
+    return key.length;
+  }
+
+  @Override
+  public boolean hasAnyNulls(int fieldCount, boolean[] nullsafes) {
+    assert fieldCount == key.length;
+    if (key != null && key.length > 0) {
+      for (int i = 0; i < key.length; i++) {
+        if (key[i] == null && (nullsafes == null || !nullsafes[i])) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + Arrays.hashCode(key);
+    return result;
+  }
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (getClass() != obj.getClass())
+      return false;
+    MapJoinKeyObject other = (MapJoinKeyObject) obj;
+    if (!Arrays.equals(key, other.key))
+      return false;
+    return true;
+  }
+
+  public void read(MapJoinObjectSerDeContext context, ObjectInputStream in, Writable container)
+  throws IOException, SerDeException {
+    container.readFields(in);
+    read(context, container);
+  }
+
+  public void read(MapJoinObjectSerDeContext context, Writable container) throws SerDeException {
+    read(context.getSerDe().getObjectInspector(), context.getSerDe().deserialize(container));
+  }
+
+  protected void read(ObjectInspector oi, Object obj) throws SerDeException {
+    @SuppressWarnings("unchecked")
+    List<Object> value = (List<Object>)ObjectInspectorUtils.copyToStandardObject(
+        obj, oi, ObjectInspectorCopyOption.WRITABLE);
+    if (value == null) {
+      key = EMPTY_OBJECT_ARRAY;
+    } else {
+      key = value.toArray();
+    }
+  }
+
+  @Override
+  public void write(MapJoinObjectSerDeContext context, ObjectOutputStream out)
+      throws IOException, SerDeException {
+    SerDe serde = context.getSerDe();
+    ObjectInspector objectInspector = context.getStandardOI();
+    Writable container = serde.serialize(key, objectInspector);
+    container.write(out);
+  }
+
+  protected void readFromRow(Object[] fieldObjs, List<ObjectInspector> keyFieldsOI)
+      throws HiveException {
+    if (key == null || key.length != fieldObjs.length) {
+      key = new Object[fieldObjs.length];
+    }
+    for (int keyIndex = 0; keyIndex < fieldObjs.length; ++keyIndex) {
+      key[keyIndex] = (ObjectInspectorUtils.copyToStandardObject(fieldObjs[keyIndex],
+          keyFieldsOI.get(keyIndex), ObjectInspectorCopyOption.WRITABLE));
+    }
+  }
+
+  protected boolean[] getNulls() {
+    boolean[] nulls = null;
+    for (int i = 0; i < key.length; ++i) {
+      if (key[i] == null) {
+        if (nulls == null) {
+          nulls = new boolean[key.length];
+        }
+        nulls[i] = true;
+      }
+    }
+    return nulls;
+  }
+
+  public void readFromVector(VectorHashKeyWrapper kw, VectorExpressionWriter[] keyOutputWriters,
+      VectorHashKeyWrapperBatch keyWrapperBatch) throws HiveException {
+    if (key == null || key.length != keyOutputWriters.length) {
+      key = new Object[keyOutputWriters.length];
+    }
+    for (int keyIndex = 0; keyIndex < keyOutputWriters.length; ++keyIndex) {
+      key[keyIndex] = keyWrapperBatch.getWritableKeyValue(
+          kw, keyIndex, keyOutputWriters[keyIndex]);
+    }
+  }
+}

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java Sun Mar  2 23:42:50 2014
@@ -49,6 +49,11 @@ public class MapJoinTableContainerSerDe 
   }
 
   @SuppressWarnings({"unchecked"})
+  /**
+   * Loads the table container. Only used on MR path.
+   * @param in Input stream.
+   * @return Loaded table.
+   */
   public MapJoinTableContainer load(ObjectInputStream in) 
       throws HiveException {
     SerDe keySerDe = keyContext.getSerDe();
@@ -68,7 +73,7 @@ public class MapJoinTableContainerSerDe 
       Writable valueContainer = valueSerDe.getSerializedClass().newInstance();    
       int numKeys = in.readInt();
       for (int keyIndex = 0; keyIndex < numKeys; keyIndex++) {
-        MapJoinKey key = new MapJoinKey();
+        MapJoinKeyObject key = new MapJoinKeyObject();
         key.read(keyContext, in, keyContainer);
         MapJoinEagerRowContainer values = new MapJoinEagerRowContainer();
         values.read(valueContext, in, valueContainer);

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java Sun Mar  2 23:42:50 2014
@@ -28,13 +28,16 @@ import org.apache.hadoop.hive.ql.exec.Ma
 import org.apache.hadoop.hive.ql.exec.MapredContext;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
 import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject;
 import org.apache.hadoop.hive.ql.exec.persistence.LazyFlatRowContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.tez.runtime.api.LogicalInput;
@@ -51,6 +54,7 @@ public class HashTableLoader implements 
   private ExecMapperContext context;
   private Configuration hconf;
   private MapJoinDesc desc;
+  private MapJoinKey lastKey = null;
 
   @Override
   public void init(ExecMapperContext context, Configuration hconf, MapJoinOperator joinOp) {
@@ -71,6 +75,12 @@ public class HashTableLoader implements 
         HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR);
     boolean useLazyRows = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEMAPJOINLAZYHASHTABLE);
 
+    // We only check if we can use optimized keys here; that is ok because we don't
+    // create optimized keys in MapJoin if the hash map doesn't have optimized keys.
+    if (!HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDKEYS)) {
+      lastKey = new MapJoinKeyObject();
+    }
+    Output output = new Output(); // Reusable output for serialization.
     for (int pos = 0; pos < mapJoinTables.length; pos++) {
       if (pos == desc.getPosBigTable()) {
         continue;
@@ -85,14 +95,17 @@ public class HashTableLoader implements 
             hashTableLoadFactor);
 
         // simply read all the kv pairs into the hashtable.
+
         while (kvReader.next()) {
-          MapJoinKey key = new MapJoinKey();
-          key.read(mapJoinTableSerdes[pos].getKeyContext(), (Writable)kvReader.getCurrentKey());
+          // We pass the key in as a reference to find out quickly whether optimized keys
+          // can be used. However, we do not reuse the object, since the keys go into the hashmap.
+          lastKey = MapJoinKey.read(output, lastKey, mapJoinTableSerdes[pos].getKeyContext(),
+              (Writable)kvReader.getCurrentKey(), false);
 
-          LazyFlatRowContainer values = (LazyFlatRowContainer)tableContainer.get(key);
+          LazyFlatRowContainer values = (LazyFlatRowContainer)tableContainer.get(lastKey);
           if (values == null) {
             values = new LazyFlatRowContainer();
-            tableContainer.put(key, values);
+            tableContainer.put(lastKey, values);
           }
           values.add(mapJoinTableSerdes[pos].getValueContext(),
               (BytesWritable)kvReader.getCurrentValue(), useLazyRows);
@@ -107,5 +120,16 @@ public class HashTableLoader implements 
         throw new HiveException(e);
       }
     }
+    if (lastKey == null) {
+      lastKey = new MapJoinKeyObject(); // No rows in tables, the key type doesn't matter.
+    }
+  }
+
+  @Override
+  public MapJoinKey getKeyType() {
+    if (lastKey == null) {
+      throw new AssertionError("Should be called after loading tables");
+    }
+    return lastKey;
   }
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java Sun Mar  2 23:42:50 2014
@@ -24,6 +24,7 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
+import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe;
 
 /**
  * Class for handling vectorized hash map key wrappers. It evaluates the key columns in a
@@ -671,8 +672,8 @@ public class VectorHashKeyWrapperBatch {
     }
     else {
       throw new HiveException(String.format(
-          "Internal inconsistent KeyLookupHelper at index [%d]:%d %d %d",
-          i, klh.longIndex, klh.doubleIndex, klh.stringIndex));
+          "Internal inconsistent KeyLookupHelper at index [%d]:%d %d %d %d",
+          i, klh.longIndex, klh.doubleIndex, klh.stringIndex, klh.decimalIndex));
     }
   }
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java Sun Mar  2 23:42:50 2014
@@ -30,6 +30,7 @@ import org.apache.hadoop.hive.ql.exec.Ex
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hive.ql.plan.Ex
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
+import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 
@@ -88,7 +90,8 @@ public class VectorMapJoinOperator exten
   }
 
   private interface MapJoinKeyEvaluator {
-      MapJoinKey evaluate(VectorHashKeyWrapper kw) throws HiveException;
+      MapJoinKey evaluate(VectorHashKeyWrapper kw)
+          throws HiveException;
   }
 
   public VectorMapJoinOperator (VectorizationContext vContext, OperatorDesc conf)
@@ -147,21 +150,20 @@ public class VectorMapJoinOperator exten
     // This key evaluator translates from the vectorized VectorHashKeyWrapper format
     // into the row-mode MapJoinKey
     keyEvaluator = new MapJoinKeyEvaluator() {
-      private MapJoinKey key;
+      private MapJoinKey key = null;
+      private final Output output = new Output();
 
       public MapJoinKeyEvaluator init() {
-        key = new MapJoinKey(new Object[keyExpressions.length]);
         return this;
       }
 
       @Override
       public MapJoinKey evaluate(VectorHashKeyWrapper kw) throws HiveException {
-        Object[] keyValues = key.getKey();
-        for(int i=0; i<keyExpressions.length; ++i) {
-          keyValues[i] = keyWrapperBatch.getWritableKeyValue(kw, i, keyOutputWriters[i]);
-        }
+        MapJoinKey refKey = (key == null ? loader.getKeyType() : key);
+        key = MapJoinKey.readFromVector(
+            output, refKey, kw, keyOutputWriters, keyWrapperBatch, refKey == key);
         return key;
-      };
+      }
     }.init();
 
     Map<Byte, List<ExprNodeDesc>> valueExpressions = conf.getExprs();
@@ -257,7 +259,6 @@ public class VectorMapJoinOperator exten
 
   @Override
   protected MapJoinKey computeMapJoinKey(Object row, byte alias) throws HiveException {
-    VectorizedRowBatch inBatch = (VectorizedRowBatch) row;
     return keyEvaluator.evaluate(keyValues[batchIndex]);
   }
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java Sun Mar  2 23:42:50 2014
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.exec.Ex
 import org.apache.hadoop.hive.ql.exec.JoinUtil;
 import org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory;

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinEqualityTableContainer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinEqualityTableContainer.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinEqualityTableContainer.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinEqualityTableContainer.java Sun Mar  2 23:42:50 2014
@@ -29,10 +29,10 @@ import org.junit.Test;
 
 public class TestMapJoinEqualityTableContainer {
   
-  private static final MapJoinKey KEY1 = new MapJoinKey(new Object[] {new Text("key1")});
-  private static final MapJoinKey KEY2 = new MapJoinKey(new Object[] {new Text("key2")});
-  private static final MapJoinKey KEY3 = new MapJoinKey(new Object[] {new Text("key3")});
-  private static final MapJoinKey KEY4 = new MapJoinKey(new Object[] {new Text("key4")});
+  private static final MapJoinKeyObject KEY1 = new MapJoinKeyObject(new Object[] {new Text("key1")});
+  private static final MapJoinKeyObject KEY2 = new MapJoinKeyObject(new Object[] {new Text("key2")});
+  private static final MapJoinKeyObject KEY3 = new MapJoinKeyObject(new Object[] {new Text("key3")});
+  private static final MapJoinKeyObject KEY4 = new MapJoinKeyObject(new Object[] {new Text("key4")});
   private static final Object[] VALUE = new Object[] {new Text("value")};
   private MapJoinTableContainer container;
   private MapJoinRowContainer rowContainer;
@@ -49,7 +49,8 @@ public class TestMapJoinEqualityTableCon
     container.put(KEY3, rowContainer);
     container.put(KEY4, rowContainer);
     Assert.assertEquals(4, container.size());
-    Map<MapJoinKey, MapJoinRowContainer> localContainer = new HashMap<MapJoinKey, MapJoinRowContainer>();
+    Map<MapJoinKey, MapJoinRowContainer> localContainer =
+        new HashMap<MapJoinKey, MapJoinRowContainer>();
     for(Entry<MapJoinKey, MapJoinRowContainer> entry : container.entrySet()) {
       localContainer.put(entry.getKey(), entry.getValue());
     }

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinKey.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinKey.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinKey.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinKey.java Sun Mar  2 23:42:50 2014
@@ -26,30 +26,20 @@ public class TestMapJoinKey {
 
   @Test
   public void testEqualityHashCode() throws Exception {
-    MapJoinKey key1 = new MapJoinKey(new String[] {"key"});
-    MapJoinKey key2 = new MapJoinKey(new String[] {"key"});
+    MapJoinKeyObject key1 = new MapJoinKeyObject(new String[] {"key"});
+    MapJoinKeyObject key2 = new MapJoinKeyObject(new String[] {"key"});
     Utilities.testEquality(key1, key2);
-    key1 = new MapJoinKey(new Object[] {148, null});
-    key2 = new MapJoinKey(new Object[] {148, null});
+    key1 = new MapJoinKeyObject(new Object[] {148, null});
+    key2 = new MapJoinKeyObject(new Object[] {148, null});
     Utilities.testEquality(key1, key2);
-    key1 = new MapJoinKey(new Object[] {null, "key1"});
-    key2 = new MapJoinKey(new Object[] {null, "key2"});
+    key1 = new MapJoinKeyObject(new Object[] {null, "key1"});
+    key2 = new MapJoinKeyObject(new Object[] {null, "key2"});
     Assert.assertFalse(key1.equals(key2));
   }
   @Test
-  public void testHasAnyNulls() throws Exception {
-    MapJoinKey key = new MapJoinKey(new String[] {"key", null});
-    Assert.assertTrue(key.hasAnyNulls(null));
-    // field 1 is not null safe
-    Assert.assertTrue(key.hasAnyNulls(new boolean[] { false, false }));
-    // field 1 is null safe
-    Assert.assertFalse(key.hasAnyNulls(new boolean[] { false, true }));
-    Assert.assertFalse(key.hasAnyNulls(new boolean[] { true, true }));
-  }
-  @Test
   public void testSerialization() throws Exception {
-    MapJoinKey key1 = new MapJoinKey(new Object[] {new Text("field0"), null, new Text("field2")});
-    MapJoinKey key2 = Utilities.serde(key1, "f0,f1,f2", "string,string,string");
+    MapJoinKeyObject key1 = new MapJoinKeyObject(new Object[] {new Text("field0"), null, new Text("field2")});
+    MapJoinKeyObject key2 = Utilities.serde(key1, "f0,f1,f2", "string,string,string");
     Utilities.testEquality(key1, key2);
   }
 }

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinTableContainer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinTableContainer.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinTableContainer.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinTableContainer.java Sun Mar  2 23:42:50 2014
@@ -40,11 +40,11 @@ public class TestMapJoinTableContainer {
   private ObjectInputStream in;
   private MapJoinTableContainer container;
   private MapJoinTableContainerSerDe containerSerde;
-  private MapJoinKey key;
+  private MapJoinKeyObject key;
   private MapJoinRowContainer rowContainer;
   @Before
   public void setup() throws Exception {
-    key = new MapJoinKey(KEY);
+    key = new MapJoinKeyObject(KEY);
     rowContainer = new MapJoinEagerRowContainer();
     rowContainer.add(VALUE);
     baos = new ByteArrayOutputStream();

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/Utilities.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/Utilities.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/Utilities.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/Utilities.java Sun Mar  2 23:42:50 2014
@@ -33,19 +33,16 @@ import org.apache.hadoop.io.BytesWritabl
 
 class Utilities {
 
-  static void testEquality(MapJoinKey key1, MapJoinKey key2) {
+  static void testEquality(MapJoinKeyObject key1, MapJoinKeyObject key2) {
     Assert.assertEquals(key1.hashCode(), key2.hashCode());
     Assert.assertEquals(key1, key2);
-    Assert.assertEquals(key1.getKey().length, key2.getKey().length);
-    int length = key1.getKey().length;
-    for (int i = 0; i <length; i++) {
-      Assert.assertEquals(key1.getKey()[i], key2.getKey()[i]); 
-    }
+    Assert.assertEquals(key1.getKeyLength(), key2.getKeyLength());
+    Assert.assertTrue(key1.equals(key2));
   }
-  
-  static MapJoinKey serde(MapJoinKey key, String columns, String types) 
+
+  static MapJoinKeyObject serde(MapJoinKeyObject key, String columns, String types) 
   throws Exception {
-    MapJoinKey result = new MapJoinKey();
+    MapJoinKeyObject result = new MapJoinKeyObject();
     ByteArrayInputStream bais;
     ObjectInputStream in;
     ByteArrayOutputStream baos = new ByteArrayOutputStream();

Modified: hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java
URL: http://svn.apache.org/viewvc/hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java (original)
+++ hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java Sun Mar  2 23:42:50 2014
@@ -763,12 +763,12 @@ public class BinarySortableSerDe extends
         // get the sign of the big decimal
         int sign = dec.compareTo(HiveDecimal.ZERO);
 
-        // we'll encode the absolute value (sign is separate)
-        dec = dec.abs();
+    // we'll encode the absolute value (sign is separate)
+    dec = dec.abs();
 
-        // get the scale factor to turn big decimal into a decimal < 1
-        int factor = dec.precision() - dec.scale();
-        factor = sign == 1 ? factor : -factor;
+    // get the scale factor to turn big decimal into a decimal < 1
+    int factor = dec.precision() - dec.scale();
+    factor = sign == 1 ? factor : -factor;
 
         // convert the absolute big decimal to string
         dec.scaleByPowerOfTen(Math.abs(dec.scale()));

Modified: hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/OutputByteBuffer.java
URL: http://svn.apache.org/viewvc/hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/OutputByteBuffer.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/OutputByteBuffer.java (original)
+++ hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/OutputByteBuffer.java Sun Mar  2 23:42:50 2014
@@ -25,9 +25,17 @@ import java.util.Arrays;
  */
 public class OutputByteBuffer {
 
-  byte[] data = new byte[128];
+  byte[] data;
   int length;
 
+  public OutputByteBuffer(int size) {
+    data = new byte[size];
+  }
+
+  public OutputByteBuffer() {
+    this(128);
+  }
+
   /**
    * Reset the byte buffer.
    */

Modified: hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/io/HiveDecimalWritable.java
URL: http://svn.apache.org/viewvc/hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/io/HiveDecimalWritable.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/io/HiveDecimalWritable.java (original)
+++ hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/io/HiveDecimalWritable.java Sun Mar  2 23:42:50 2014
@@ -24,6 +24,7 @@ import java.math.BigInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.type.Decimal128;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils;
@@ -119,6 +120,14 @@ public class HiveDecimalWritable impleme
     return getHiveDecimal().compareTo(that.getHiveDecimal());
   }
 
+  public static void writeToByteStream(Decimal128 dec, Output byteStream) {
+    HiveDecimal hd = HiveDecimal.create(dec.toBigDecimal());
+    LazyBinaryUtils.writeVInt(byteStream, hd.scale());
+    byte[] bytes = hd.unscaledValue().toByteArray();
+    LazyBinaryUtils.writeVInt(byteStream, bytes.length);
+    byteStream.write(bytes, 0, bytes.length);
+  }
+
   public void writeToByteStream(Output byteStream) {
     LazyBinaryUtils.writeVInt(byteStream, scale);
     LazyBinaryUtils.writeVInt(byteStream, internalStorage.length);
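
The new static overload writes the same wire layout as the instance method below it: a VInt scale, a VInt byte count, then the unscaled value's two's-complement bytes. A short sketch built from the calls visible in this hunk (the input value is made up, and the no-arg ByteStream.Output constructor is assumed):

    import java.math.BigDecimal;
    import org.apache.hadoop.hive.common.type.HiveDecimal;
    import org.apache.hadoop.hive.serde2.ByteStream;
    import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils;

    public class DecimalStreamDemo {
      public static void main(String[] args) {
        ByteStream.Output byteStream = new ByteStream.Output();
        HiveDecimal hd = HiveDecimal.create(new BigDecimal("12.345"));
        LazyBinaryUtils.writeVInt(byteStream, hd.scale());    // 1. scale
        byte[] bytes = hd.unscaledValue().toByteArray();
        LazyBinaryUtils.writeVInt(byteStream, bytes.length);  // 2. byte count
        byteStream.write(bytes, 0, bytes.length);             // 3. unscaled digits
      }
    }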

Modified: hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
URL: http://svn.apache.org/viewvc/hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java (original)
+++ hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java Sun Mar  2 23:42:50 2014
@@ -27,6 +27,8 @@ import java.util.Properties;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.hive.serde2.ByteStream;
@@ -41,6 +43,7 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
@@ -73,9 +76,7 @@ import org.apache.hadoop.io.Writable;
  * compact format.
  */
 public class LazyBinarySerDe extends AbstractSerDe {
-
-  public static final Log LOG = LogFactory.getLog(LazyBinarySerDe.class
-      .getName());
+  public static final Log LOG = LogFactory.getLog(LazyBinarySerDe.class.getName());
 
   public LazyBinarySerDe() throws SerDeException {
   }
@@ -218,6 +219,42 @@ public class LazyBinarySerDe extends Abs
     return serializeBytesWritable;
   }
 
+  public static class StringWrapper {
+    public byte[] bytes;
+    public int start, length;
+
+    public void set(byte[] bytes, int start, int length) {
+      this.bytes = bytes;
+      this.start = start;
+      this.length = length;
+    }
+  }
+
+  private static boolean serializeStruct(Output byteStream, Object obj,
+      StructObjectInspector soi, boolean warnedOnceNullMapKey) throws SerDeException {
+    // do nothing for null struct
+    if (null == obj) {
+      return warnedOnceNullMapKey;
+    }
+
+    List<? extends StructField> fields = soi.getAllStructFieldRefs();
+    int size = fields.size();
+    Object[] fieldData = new Object[size];
+    List<ObjectInspector> fieldOis = new ArrayList<ObjectInspector>(size);
+    for (int i = 0; i < size; ++i) {
+      StructField field = fields.get(i);
+      fieldData[i] = soi.getStructFieldData(obj, field);
+      fieldOis.add(field.getFieldObjectInspector());
+    }
+
+    return serializeStruct(byteStream, fieldData, fieldOis, warnedOnceNullMapKey);
+  }
+
+  public static void serializeStruct(Output byteStream, Object[] fieldData,
+      List<ObjectInspector> fieldOis) throws SerDeException {
+    serializeStruct(byteStream, fieldData, fieldOis, true);
+  }
+
   /**
    * Serialize a struct object without writing the byte size. This function is
    * shared by both row serialization and struct serialization.
@@ -232,25 +269,18 @@ public class LazyBinarySerDe extends Abs
    *          has been issued once already when encountering null map keys
    * @return a boolean indicating whether a warning for null map keys has been issued
    *          once already
+   * @throws SerDeException 
    */
-  private static boolean serializeStruct(Output byteStream, Object obj,
-      StructObjectInspector soi, boolean warnedOnceNullMapKey) throws SerDeException {
-    // do nothing for null struct
-    if (null == obj) {
-      return warnedOnceNullMapKey;
-    }
-    /*
-     * Interleave serializing one null byte and 8 struct fields in each round,
-     * in order to support data deserialization with different table schemas
-     */
-    List<? extends StructField> fields = soi.getAllStructFieldRefs();
-    int size = fields.size();
+  private static boolean serializeStruct(Output byteStream, Object[] fieldData,
+      List<ObjectInspector> fieldOis, boolean warnedOnceNullMapKey) throws SerDeException {
+
     int lasti = 0;
     byte nullByte = 0;
+    int size = fieldData.length;
 
     for (int i = 0; i < size; i++) {
       // set bit to 1 if a field is not null
-      if (null != soi.getStructFieldData(obj, fields.get(i))) {
+      if (null != fieldData[i]) {
         nullByte |= 1 << (i % 8);
       }
       // write the null byte every eight elements or
@@ -259,8 +289,8 @@ public class LazyBinarySerDe extends Abs
       if (7 == i % 8 || i == size - 1) {
         byteStream.write(nullByte);
         for (int j = lasti; j <= i; j++) {
-          warnedOnceNullMapKey = serialize(byteStream, soi.getStructFieldData(obj, fields
-              .get(j)), fields.get(j).getFieldObjectInspector(), false, warnedOnceNullMapKey);
+          warnedOnceNullMapKey = serialize(
+              byteStream, fieldData[j], fieldOis.get(j), false, warnedOnceNullMapKey);
         }
         lasti = i + 1;
         nullByte = 0;
@@ -297,9 +327,8 @@ public class LazyBinarySerDe extends Abs
    * @return a boolean indicating whether a warning for null map keys has been issued
    *          once already
    */
-  public static boolean serialize(Output byteStream, Object obj,
-      ObjectInspector objInspector, boolean skipLengthPrefix, boolean warnedOnceNullMapKey)
-      throws SerDeException {
+  public static boolean serialize(Output byteStream, Object obj, ObjectInspector objInspector,
+      boolean skipLengthPrefix, boolean warnedOnceNullMapKey) throws SerDeException {
 
     // do nothing for null object
     if (null == obj) {
@@ -354,15 +383,7 @@ public class LazyBinarySerDe extends Abs
       }
       case DOUBLE: {
         DoubleObjectInspector doi = (DoubleObjectInspector) poi;
-        long v = Double.doubleToLongBits(doi.get(obj));
-        byteStream.write((byte) (v >> 56));
-        byteStream.write((byte) (v >> 48));
-        byteStream.write((byte) (v >> 40));
-        byteStream.write((byte) (v >> 32));
-        byteStream.write((byte) (v >> 24));
-        byteStream.write((byte) (v >> 16));
-        byteStream.write((byte) (v >> 8));
-        byteStream.write((byte) (v));
+        LazyBinaryUtils.writeDouble(byteStream, doi.get(obj));
         return warnedOnceNullMapKey;
       }
       case STRING: {
@@ -391,8 +412,8 @@ public class LazyBinarySerDe extends Abs
           LazyBinaryUtils.writeVInt(byteStream, length);
         } else {
           if (length == 0){
-            throw new RuntimeException("LazyBinaryColumnarSerde cannot serialize a non-null " +
-            		"zero length binary field. Consider using either LazyBinarySerde or ColumnarSerde.");
+            throw new RuntimeException("LazyBinaryColumnarSerde cannot serialize a non-null zero "
+                + "length binary field. Consider using either LazyBinarySerde or ColumnarSerde.");
           }
         }
         byteStream.write(bw.getBytes(),0,length);
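
The serializeStruct refactor above keeps the original wire behavior: one null byte precedes each run of up to eight fields, with bit i % 8 set when field i is non-null, which is what lets a reader with a shorter table schema stop at a field-group boundary. A self-contained sketch of just the null-byte bookkeeping (the field values are made up; the real code serializes fields lasti..i where the comment sits):

    public class NullByteDemo {
      public static void main(String[] args) {
        Object[] fieldData = {1, null, "x", null, 5, 6, 7, 8, 9, null};
        byte nullByte = 0;
        int lasti = 0;
        for (int i = 0; i < fieldData.length; i++) {
          if (fieldData[i] != null) {
            nullByte |= 1 << (i % 8);  // bit set means "field present"
          }
          // flush the null byte every eight fields, or at the last field
          if (7 == i % 8 || i == fieldData.length - 1) {
            System.out.printf("fields %d..%d -> null byte 0x%02x%n",
                lasti, i, nullByte & 0xff);
            // the real code serializes fields lasti..i here
            lasti = i + 1;
            nullByte = 0;
          }
        }
        // prints: fields 0..7 -> null byte 0xf5
        //         fields 8..9 -> null byte 0x01
      }
    }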

Modified: hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java?rev=1573404&r1=1573403&r2=1573404&view=diff
==============================================================================
--- hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java (original)
+++ hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java Sun Mar  2 23:42:50 2014
@@ -414,6 +414,18 @@ public final class LazyBinaryUtils {
     int len = LazyBinaryUtils.writeVLongToByteArray(vLongBytes, l);
     byteStream.write(vLongBytes, 0, len);
   }
+  
+  public static void writeDouble(Output byteStream, double d) {
+    long v = Double.doubleToLongBits(d);
+    byteStream.write((byte) (v >> 56));
+    byteStream.write((byte) (v >> 48));
+    byteStream.write((byte) (v >> 40));
+    byteStream.write((byte) (v >> 32));
+    byteStream.write((byte) (v >> 24));
+    byteStream.write((byte) (v >> 16));
+    byteStream.write((byte) (v >> 8));
+    byteStream.write((byte) (v));
+  }
 
   static HashMap<TypeInfo, ObjectInspector> cachedLazyBinaryObjectInspector = new HashMap<TypeInfo, ObjectInspector>();
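
The new writeDouble helper centralizes the byte-shifting that LazyBinarySerDe previously inlined: the IEEE 754 bits from Double.doubleToLongBits, written most significant byte first. A quick roundtrip sketch with plain JDK types:

    import java.nio.ByteBuffer;

    public class DoubleBitsDemo {
      public static void main(String[] args) {
        double d = 3.14159;
        long v = Double.doubleToLongBits(d);
        byte[] out = new byte[8];
        for (int i = 0; i < 8; i++) {
          out[i] = (byte) (v >> (56 - 8 * i));  // MSB first, as in writeDouble
        }
        // ByteBuffer is big-endian by default, so the bytes read back directly.
        double roundTripped = ByteBuffer.wrap(out).getDouble();
        System.out.println(d == roundTripped);  // true
      }
    }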
 


