hive-commits mailing list archives

From hashut...@apache.org
Subject svn commit: r1527793 [1/3] - in /hive/branches/vectorization: ./ common/src/java/org/apache/hadoop/hive/conf/ contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/ data/files/ hbase-handler/src/java/org/apache/hadoop/hive/hbase/ hcatalog/c...
Date Mon, 30 Sep 2013 21:58:32 GMT
Author: hashutosh
Date: Mon Sep 30 21:58:29 2013
New Revision: 1527793

URL: http://svn.apache.org/r1527793
Log:
Merged with latest trunk

Added:
    hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/jobstatus.conf
      - copied unchanged from r1527792, hive/trunk/hcatalog/src/test/e2e/templeton/tests/jobstatus.conf
    hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/jobsubmission_streaming.conf
      - copied unchanged from r1527792, hive/trunk/hcatalog/src/test/e2e/templeton/tests/jobsubmission_streaming.conf
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/FSRecordWriter.java
      - copied unchanged from r1527792, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/FSRecordWriter.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/LeadLagInfo.java
      - copied unchanged from r1527792, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LeadLagInfo.java
    hive/branches/vectorization/ql/src/test/queries/clientnegative/illegal_partition_type.q
      - copied unchanged from r1527792, hive/trunk/ql/src/test/queries/clientnegative/illegal_partition_type.q
    hive/branches/vectorization/ql/src/test/queries/clientnegative/illegal_partition_type2.q
      - copied unchanged from r1527792, hive/trunk/ql/src/test/queries/clientnegative/illegal_partition_type2.q
    hive/branches/vectorization/ql/src/test/queries/clientpositive/cast_to_int.q
      - copied unchanged from r1527792, hive/trunk/ql/src/test/queries/clientpositive/cast_to_int.q
    hive/branches/vectorization/ql/src/test/queries/clientpositive/partition_type_check.q
      - copied unchanged from r1527792, hive/trunk/ql/src/test/queries/clientpositive/partition_type_check.q
    hive/branches/vectorization/ql/src/test/results/clientnegative/illegal_partition_type.q.out
      - copied unchanged from r1527792, hive/trunk/ql/src/test/results/clientnegative/illegal_partition_type.q.out
    hive/branches/vectorization/ql/src/test/results/clientnegative/illegal_partition_type2.q.out
      - copied unchanged from r1527792, hive/trunk/ql/src/test/results/clientnegative/illegal_partition_type2.q.out
    hive/branches/vectorization/ql/src/test/results/clientpositive/cast_to_int.q.out
      - copied unchanged from r1527792, hive/trunk/ql/src/test/results/clientpositive/cast_to_int.q.out
    hive/branches/vectorization/ql/src/test/results/clientpositive/partition_type_check.q.out
      - copied unchanged from r1527792, hive/trunk/ql/src/test/results/clientpositive/partition_type_check.q.out
    hive/branches/vectorization/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testAlternativeTestJVM.approved.txt
      - copied unchanged from r1527792, hive/trunk/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testAlternativeTestJVM.approved.txt
Removed:
    hive/branches/vectorization/data/files/TestSerDe.jar
    hive/branches/vectorization/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/CleanupPhase.java
    hive/branches/vectorization/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestCleanupPhase.java
Modified:
    hive/branches/vectorization/   (props changed)
    hive/branches/vectorization/build-common.xml
    hive/branches/vectorization/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/vectorization/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextOutputFormat.java
    hive/branches/vectorization/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
    hive/branches/vectorization/hcatalog/core/src/test/java/org/apache/hcatalog/cli/DummyStorageHandler.java
    hive/branches/vectorization/hcatalog/src/test/e2e/templeton/build.xml
    hive/branches/vectorization/hcatalog/src/test/e2e/templeton/conf/default.conf
    hive/branches/vectorization/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm
    hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/ddl.conf
    hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf
    hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBaseOutputFormat.java
    hive/branches/vectorization/metastore/scripts/upgrade/postgres/014-HIVE-3764.postgres.sql
    hive/branches/vectorization/metastore/scripts/upgrade/postgres/hive-schema-0.12.0.postgres.sql
    hive/branches/vectorization/metastore/scripts/upgrade/postgres/hive-schema-0.13.0.postgres.sql
    hive/branches/vectorization/metastore/scripts/upgrade/postgres/upgrade-0.11.0-to-0.12.0.postgres.sql
    hive/branches/vectorization/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughOutputFormat.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughRecordWriter.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroContainerOutputFormat.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordWriter.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Writer.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingExprNodeEvaluatorFactory.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java
    hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java
    hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java
    hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
    hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/io/udf/Rot13OutputFormat.java
    hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/udf/TestToInteger.java
    hive/branches/vectorization/ql/src/test/queries/clientnegative/deletejar.q
    hive/branches/vectorization/ql/src/test/queries/clientnegative/invalid_columns.q
    hive/branches/vectorization/ql/src/test/queries/clientpositive/alter1.q
    hive/branches/vectorization/ql/src/test/queries/clientpositive/alter_partition_coltype.q
    hive/branches/vectorization/ql/src/test/queries/clientpositive/input16.q
    hive/branches/vectorization/ql/src/test/queries/clientpositive/input16_cc.q
    hive/branches/vectorization/ql/src/test/queries/clientpositive/reduce_deduplicate_extended.q
    hive/branches/vectorization/ql/src/test/queries/clientpositive/union_null.q
    hive/branches/vectorization/ql/src/test/results/beelinepositive/union_null.q.out
    hive/branches/vectorization/ql/src/test/results/clientnegative/alter_table_add_partition.q.out
    hive/branches/vectorization/ql/src/test/results/clientnegative/alter_view_failure5.q.out
    hive/branches/vectorization/ql/src/test/results/clientnegative/columnstats_tbllvl.q.out
    hive/branches/vectorization/ql/src/test/results/clientnegative/columnstats_tbllvl_incorrect_column.q.out
    hive/branches/vectorization/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
    hive/branches/vectorization/ql/src/test/results/clientpositive/reduce_deduplicate_extended.q.out
    hive/branches/vectorization/ql/src/test/results/clientpositive/union_null.q.out
    hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/SerDeStats.java
    hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyInteger.java
    hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyLong.java
    hive/branches/vectorization/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleSerDe.java
    hive/branches/vectorization/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
    hive/branches/vectorization/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/TestConfiguration.java
    hive/branches/vectorization/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/context/CloudExecutionContextProvider.java
    hive/branches/vectorization/testutils/ptest2/src/main/resources/batch-exec.vm
    hive/branches/vectorization/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.java
    hive/branches/vectorization/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testBatch.approved.txt

Propchange: hive/branches/vectorization/
------------------------------------------------------------------------------
  Merged /hive/trunk:r1526766-1527792

Modified: hive/branches/vectorization/build-common.xml
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/build-common.xml?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/build-common.xml (original)
+++ hive/branches/vectorization/build-common.xml Mon Sep 30 21:58:29 2013
@@ -294,7 +294,6 @@
      encoding="${build.encoding}"
      srcdir="${test.src.dir}"
      includes="org/apache/**/hive/**/*.java"
-     excludes="**/TestSerDe.java"
      destdir="${test.build.classes}"
      debug="${javac.debug}"
      optimize="${javac.optimize}"
@@ -329,8 +328,13 @@
     </jar>
     <delete file="${test.build.dir}/test-serdes.jar"/>
     <jar jarfile="${test.build.dir}/test-serdes.jar">
-        <fileset dir="${test.build.classes}" includes="**/serde2/*.class"/>
+        <fileset dir="${test.build.classes}" includes="**/serde2/*.class" excludes="**/serde2/TestSerDe.class"/>
     </jar>  	
+    <delete file="${test.build.dir}/TestSerDe.jar"/>
+    <jar jarfile="${test.build.dir}/TestSerDe.jar">
+        <fileset dir="${test.build.classes}" includes="**/serde2/TestSerDe.class"/>
+    </jar>
+    <delete file="${test.build.classes}/org/apache/hadoop/hive/serde2/TestSerDe.class"/> 
   </target>
 
   <target name="test-conditions">

Modified: hive/branches/vectorization/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/vectorization/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Mon Sep 30 21:58:29 2013
@@ -816,6 +816,8 @@ public class HiveConf extends Configurat
 
     //Vectorization enabled
     HIVE_VECTORIZATION_ENABLED("hive.vectorized.execution.enabled", false),
+
+    HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true),
     ;
 
     public final String varname;

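The HiveConf.java hunk above adds a single new flag, hive.typecheck.on.insert (default true). A minimal sketch of how a caller might read it follows; the ConfVars constant is taken from the hunk, while the surrounding class and method names are illustrative assumptions, not code from this commit:

    import org.apache.hadoop.hive.conf.HiveConf;

    class TypeCheckFlagExample {
      // Hypothetical helper: returns whether partition values should be
      // type-checked on INSERT. getBoolVar() yields the declared default
      // (true) unless hive.typecheck.on.insert is set in the configuration.
      static boolean typeCheckOnInsert(HiveConf conf) {
        return conf.getBoolVar(HiveConf.ConfVars.HIVE_TYPE_CHECK_ON_INSERT);
      }
    }
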
Modified: hive/branches/vectorization/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextOutputFormat.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextOutputFormat.java (original)
+++ hive/branches/vectorization/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextOutputFormat.java Mon Sep 30 21:58:29 2013
@@ -24,7 +24,7 @@ import java.util.Properties;
 
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
+import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
@@ -53,13 +53,13 @@ public class Base64TextOutputFormat<K ex
    * Base64RecordWriter.
    *
    */
-  public static class Base64RecordWriter implements RecordWriter,
+  public static class Base64RecordWriter implements FSRecordWriter,
       JobConfigurable {
 
-    RecordWriter writer;
+    FSRecordWriter writer;
     BytesWritable bytesWritable;
 
-    public Base64RecordWriter(RecordWriter writer) {
+    public Base64RecordWriter(FSRecordWriter writer) {
       this.writer = writer;
       bytesWritable = new BytesWritable();
     }
@@ -119,7 +119,7 @@ public class Base64TextOutputFormat<K ex
   }
 
   @Override
-  public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
+  public FSRecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
       Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException {
 

Modified: hive/branches/vectorization/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java (original)
+++ hive/branches/vectorization/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java Mon Sep 30 21:58:29 2013
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
+import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.Text;
@@ -71,7 +71,7 @@ public class HiveHFileOutputFormat exten
   }
 
   @Override
-  public RecordWriter getHiveRecordWriter(
+  public FSRecordWriter getHiveRecordWriter(
     final JobConf jc,
     final Path finalOutPath,
     Class<? extends Writable> valueClass,
@@ -120,7 +120,7 @@ public class HiveHFileOutputFormat exten
       ++i;
     }
 
-    return new RecordWriter() {
+    return new FSRecordWriter() {
 
       @Override
       public void close(boolean abort) throws IOException {

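The change repeated across the output-format classes in this revision replaces org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter with the new org.apache.hadoop.hive.ql.io.FSRecordWriter. Below is a minimal no-op implementation, assuming FSRecordWriter keeps the write(Writable)/close(boolean) contract of the interface it supersedes; close(boolean abort) is visible in the HiveHFileOutputFormat hunk above, while the write signature is an assumption:

    import java.io.IOException;

    import org.apache.hadoop.hive.ql.io.FSRecordWriter;
    import org.apache.hadoop.io.Writable;

    // Hypothetical writer that simply discards rows; a real implementation
    // would serialize each row to its destination and flush it on close.
    class NoOpFSRecordWriter implements FSRecordWriter {
      @Override
      public void write(Writable row) throws IOException {
        // intentionally drop the row
      }

      @Override
      public void close(boolean abort) throws IOException {
        // nothing buffered, so nothing to flush or abort
      }
    }
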
Modified: hive/branches/vectorization/hcatalog/core/src/test/java/org/apache/hcatalog/cli/DummyStorageHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/core/src/test/java/org/apache/hcatalog/cli/DummyStorageHandler.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/core/src/test/java/org/apache/hcatalog/cli/DummyStorageHandler.java (original)
+++ hive/branches/vectorization/hcatalog/core/src/test/java/org/apache/hcatalog/cli/DummyStorageHandler.java Mon Sep 30 21:58:29 2013
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.HiveMetaHook;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -285,7 +286,7 @@ class DummyStorageHandler extends HCatSt
      * org.apache.hadoop.util.Progressable)
      */
     @Override
-    public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(
+    public FSRecordWriter getHiveRecordWriter(
       JobConf jc, Path finalOutPath,
       Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress)

Modified: hive/branches/vectorization/hcatalog/src/test/e2e/templeton/build.xml
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/src/test/e2e/templeton/build.xml?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/src/test/e2e/templeton/build.xml (original)
+++ hive/branches/vectorization/hcatalog/src/test/e2e/templeton/build.xml Mon Sep 30 21:58:29 2013
@@ -94,6 +94,7 @@
 
         <exec executable="./test_harness.pl" dir="${test.location}" failonerror="true">
             <env key="HARNESS_ROOT" value="."/>
+            <env key="DRIVER_ROOT" value="${basedir}/drivers"/>
             <env key="TH_WORKING_DIR" value="${test.location}"/>
             <env key="TH_INPDIR_LOCAL" value="${inpdir.local}"/>
             <env key="TH_INPDIR_HDFS" value="${inpdir.hdfs}"/>
@@ -109,9 +110,10 @@
             <env key="SECURE_MODE" value="${secure.mode}"/>
             <arg line="${tests.to.run}"/>
             <arg value="${basedir}/tests/serverstatus.conf"/>
+            <arg value="${basedir}/tests/jobsubmission_streaming.conf"/>
             <arg value="${basedir}/tests/ddl.conf"/>
+            <arg value="${basedir}/tests/jobstatus.conf"/>
             <arg value="${basedir}/tests/jobsubmission.conf"/>
-            <arg value="${basedir}/tests/jobsubmission2.conf"/>
         </exec>
     </target>
 
@@ -124,6 +126,7 @@
         <property name="tests.to.run" value=""/>
         <exec executable="${harness.dir}/test_harness.pl" dir="${test.location}" failonerror="true">
             <env key="HARNESS_ROOT" value="${harness.dir}"/>
+            <env key="DRIVER_ROOT" value="${basedir}/drivers"/>
             <env key="TH_WORKING_DIR" value="${test.location}"/>
             <env key="TH_INPDIR_LOCAL" value="${inpdir.local}"/>
             <env key="TH_INPDIR_HDFS" value="${inpdir.hdfs}"/>

Modified: hive/branches/vectorization/hcatalog/src/test/e2e/templeton/conf/default.conf
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/src/test/e2e/templeton/conf/default.conf?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/src/test/e2e/templeton/conf/default.conf (original)
+++ hive/branches/vectorization/hcatalog/src/test/e2e/templeton/conf/default.conf Mon Sep 30 21:58:29 2013
@@ -1,3 +1,4 @@
+############################################################################           
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -14,7 +15,7 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-
+                                                                                       
 my $me = `whoami`;
 chomp $me;
 

Modified: hive/branches/vectorization/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm (original)
+++ hive/branches/vectorization/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm Mon Sep 30 21:58:29 2013
@@ -1,3 +1,4 @@
+############################################################################           
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -14,7 +15,7 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-
+                                                                                       
 package TestDriverCurl;
 
 ###########################################################################
@@ -35,6 +36,7 @@ use strict;
 use English;
 use Storable qw(dclone);
 use File::Glob ':glob';
+use JSON::Path;
 
 my $passedStr = 'passed';
 my $failedStr = 'failed';
@@ -150,13 +152,16 @@ sub new
 sub globalSetup
   {
     my ($self, $globalHash, $log) = @_;
+    my $subName = (caller(0))[3];
+
 
     # Setup the output path
     my $me = `whoami`;
     chomp $me;
-    my $jobId = $globalHash->{'job-id'};
-    my $timeId = time;
-    $globalHash->{'runid'} = $me . "-" . $timeId . "-" . $jobId;
+    #usernames on windows can be "domain\username" change the "\"
+    # as runid is used in file names
+    $me =~ s/\\/_/;
+    $globalHash->{'runid'} = $me . "." . time;
 
     # if "-ignore false" was provided on the command line,
     # it means do run tests even when marked as 'ignore'
@@ -170,7 +175,6 @@ sub globalSetup
 
     $globalHash->{'outpath'} = $globalHash->{'outpathbase'} . "/" . $globalHash->{'runid'} . "/";
     $globalHash->{'localpath'} = $globalHash->{'localpathbase'} . "/" . $globalHash->{'runid'} . "/";
-    $globalHash->{'tmpPath'} = $globalHash->{'tmpPath'} . "/" . $globalHash->{'runid'} . "/";
     $globalHash->{'webhdfs_url'} = $ENV{'WEBHDFS_URL'};
     $globalHash->{'templeton_url'} = $ENV{'TEMPLETON_URL'};
     $globalHash->{'current_user'} = $ENV{'USER_NAME'};
@@ -186,11 +190,6 @@ sub globalSetup
     $globalHash->{'inpdir_hdfs'} = $ENV{'TH_INPDIR_HDFS'};
 
     $globalHash->{'is_secure_mode'} = $ENV{'SECURE_MODE'};
-  }
-
-sub globalSetupConditional
-  {
-    my ($self, $globalHash, $log) = @_;
 
     # add libexec location to the path
     if (defined($ENV{'PATH'})) {
@@ -225,12 +224,6 @@ sub globalSetupConditional
 # None
 sub globalCleanup
   {
-    # noop there because the removal of temp directories, which are created in #globalSetupConditional(), is to be
-    # performed in method #globalCleanupConditional().
-  }
-
-sub globalCleanupConditional
-  {
     my ($self, $globalHash, $log) = @_;
 
     IPC::Run::run(['rm', '-rf', $globalHash->{'tmpPath'}], \undef, $log, $log) or 
@@ -292,6 +285,17 @@ sub replaceParameters
       my @new_options = ();
       foreach my $option (@options) {
         $option = $self->replaceParametersInArg($option, $testCmd, $log);
+        if (isWindows()) {
+          my $equal_pos = index($option, '=');
+          if ($equal_pos != -1) {
+            my $left = substr($option, 0, $equal_pos);
+            my $right = substr($option, $equal_pos+1);
+            if ($right =~ /=/) {
+              $right = '"'.$right.'"';
+              $option = $left . "=" . $right;
+            }
+          }
+        }
         push @new_options, ($option);
       }
       $testCmd->{$aPfix . 'post_options'} = \@new_options;
@@ -306,6 +310,15 @@ sub replaceParameters
       }
     }    
 
+    if (defined $testCmd->{$aPfix . 'json_path'}) {
+      my $json_path_matches = $testCmd->{$aPfix . 'json_path'};
+      my @keys = keys %{$json_path_matches};
+
+      foreach my $key (@keys) {
+        my $new_value = $self->replaceParametersInArg($json_path_matches->{$key}, $testCmd, $log);
+        $json_path_matches->{$key} = $new_value;
+      }
+    }
 
   }
 
@@ -498,7 +511,7 @@ sub execCurlCmd(){
     $testCmd->{'http_daemon'} = $d;
     $testCmd->{'callback_url'} = $d->url . 'templeton/$jobId';
     push @curl_cmd, ('-d', 'callback=' . $testCmd->{'callback_url'});
-    #	push ${testCmd->{'post_options'}}, ('callback=' . $testCmd->{'callback_url'});
+    push @{$testCmd->{$argPrefix . 'post_options'}}, ('callback=' . $testCmd->{'callback_url'});
     #	#my @options = @{$testCmd->{'post_options'}};
     #	print $log "post options  @options\n";
   }
@@ -510,7 +523,7 @@ sub execCurlCmd(){
   push @curl_cmd, ("-X", $method, "-o", $res_body, "-D", $res_header);  
   push @curl_cmd, ($url);
 
-  print $log "$0:$subName Going to run command : " .  join (' ', @curl_cmd);
+  print $log "$0:$subName Going to run command : " .  join (' , ', @curl_cmd);
   print $log "\n";
 
 
@@ -604,6 +617,37 @@ sub compare
 
     my $json_hash;
     my %json_info;
+    # for information on JSONPath, check http://goessner.net/articles/JsonPath/
+    if (defined $testCmd->{'json_path'}) {
+      my $json_matches = $testCmd->{'json_path'};
+      foreach my $key (keys %$json_matches) {
+        my $regex_expected_value = $json_matches->{$key};
+        my $path = JSON::Path->new($key);
+        my $value; 
+        # when filter_job_status is defined 
+        if (defined $testCmd->{'filter_job_status'}) {
+	        # decode $testResult->{'body'} to an array of hash
+	        my $body = decode_json $testResult->{'body'};
+	        # in the tests, we run this case with jobName = "PigLatin:loadstore.pig"
+	        # filter $body to leave only records with this jobName
+	        my @filtered_body = grep {($_->{detail}{profile}{jobName} eq "PigLatin:loadstore.pig")}  @$body;
+			my @sorted_filtered_body = sort { $a->{id} <=> $b->{id} } @filtered_body;
+        	$value = $path->value(\@sorted_filtered_body);
+        } else {
+        	$value = $path->value($testResult->{'body'});
+        }
+        
+        if ($value !~ /$regex_expected_value/s) {
+          print $log "$0::$subName INFO check failed:"
+            . " json pattern check failed. For field "
+              . "$key, regex <" . $regex_expected_value
+                . "> did not match the result <" . $value
+                  . ">\n";
+          $result = 0;
+          last;
+        }
+      }
+    } 
     if (defined $testCmd->{'json_field_substr_match'} || $testCmd->{'json_field_match_object'}) {
       my $json = new JSON;
       $json_hash = $json->utf8->decode($testResult->{'body'});
@@ -639,7 +683,7 @@ sub compare
         print $log "Comparing $key: $json_field_val with regex /$regex_expected_value/\n";
 
         if ($json_field_val !~ /$regex_expected_value/s) {
-          print $log "$0::$subName INFO check failed:" 
+          print $log "$0::$subName WARN check failed:" 
             . " json pattern check failed. For field "
               . "$key, regex <" . $regex_expected_value 
                 . "> did not match the result <" . $json_field_val
@@ -654,7 +698,7 @@ sub compare
         print $log "Comparing $key: " . dump($json_field_val) . ",expected value:  " . dump($regex_expected_obj);
 
         if (!Compare($json_field_val, $regex_expected_obj)) {
-          print $log "$0::$subName INFO check failed:" 
+          print $log "$0::$subName WARN check failed:" 
             . " json compare failed. For field "
               . "$key, regex <" . dump($regex_expected_obj)
                 . "> did not match the result <" . dump($json_field_val)
@@ -671,7 +715,7 @@ sub compare
       sleep $testCmd->{'kill_job_timeout'};
       my $jobid = $json_hash->{'id'};
       if (!defined $jobid) {
-        print $log "$0::$subName INFO check failed: " 
+        print $log "$0::$subName WARN check failed: " 
           . "no jobid (id field)found in result";
         $result = 0;
       } else {
@@ -682,13 +726,14 @@ sub compare
 
     #try to get the call back url request until timeout
     if ($result == 1 && defined $testCmd->{'check_call_back'}) {
-      my $d = $testCmd->{'http_daemon'};
-      if (defined $testCmd->{'timeout_seconds'}) {
-        $d->timeout($testCmd->{'timeout_seconds'})
-      }
-      else {      
-        $d->timeout(300);         #wait for 5 mins by default
+
+      my $timeout = 300; #wait for 5 mins for callback
+      if(defined $testCmd->{'timeout'}){
+        $timeout = $testCmd->{'timeout'};
       }
+
+      my $d = $testCmd->{'http_daemon'};
+      $d->timeout($timeout);
       my $url_requested;
       $testCmd->{'callback_url'} =~ s/\$jobId/$json_hash->{'id'}/g;
       print $log "Expanded callback url : <" . $testCmd->{'callback_url'} . ">\n";
@@ -717,13 +762,12 @@ sub compare
 
     }
 
-    
     if ( (defined $testCmd->{'check_job_created'})
          || (defined $testCmd->{'check_job_complete'})
-         || (defined $testCmd->{'check_job_exit_value'}) ) {
+         || (defined $testCmd->{'check_job_exit_value'}) ) {    
       my $jobid = $json_hash->{'id'};
       if (!defined $jobid) {
-        print $log "$0::$subName INFO check failed: " 
+        print $log "$0::$subName WARN check failed: " 
           . "no jobid (id field)found in result";
         $result = 0;
       } else {
@@ -731,7 +775,7 @@ sub compare
         my $json = new JSON;
         my $res_hash = $json->utf8->decode($jobResult->{'body'});
         if (! defined $res_hash->{'status'}) {
-          print $log "$0::$subName INFO check failed: " 
+          print $log "$0::$subName WARN check failed: " 
             . "jobresult not defined ";
           $result = 0;
         }
@@ -739,10 +783,6 @@ sub compare
           my $jobComplete;
           my $NUM_RETRIES = 60;
           my $SLEEP_BETWEEN_RETRIES = 5;
-          if (defined $testCmd->{'timeout_seconds'} && $testCmd->{'timeout_seconds'} > 0) {
-            $SLEEP_BETWEEN_RETRIES = ($testCmd->{'timeout_seconds'} / $NUM_RETRIES);
-            print $log "found timeout_seconds & set SLEEP_BETWEEN_RETRIES=$SLEEP_BETWEEN_RETRIES";
-          }
 
           #first wait for job completion
           while ($NUM_RETRIES-- > 0) {
@@ -756,7 +796,7 @@ sub compare
             $res_hash = $json->utf8->decode($jobResult->{'body'});
           }
           if ( (!defined $jobComplete) || lc($jobComplete) ne "true") {
-            print $log "$0::$subName INFO check failed: " 
+            print $log "$0::$subName WARN check failed: " 
               . " timeout on wait for job completion ";
             $result = 0;
           } else { 
@@ -772,12 +812,140 @@ sub compare
             if (defined($testCmd->{'check_job_exit_value'})) {
               my $exitValue = $res_hash->{'exitValue'};
               my $expectedExitValue = $testCmd->{'check_job_exit_value'};
-              if ( (!defined $exitValue) || $exitValue ne $expectedExitValue) {
+              if ( (!defined $exitValue) || $exitValue % 128 ne $expectedExitValue) {
                 print $log "check_job_exit_value failed. got exitValue $exitValue,  expected  $expectedExitValue";
                 $result = 0;
               }
             }
           }
+
+	  #Check userargs
+	  print $log "$0::$subName INFO Checking userargs";
+          my @options = @{$testCmd->{'post_options'}};
+          if( !defined $res_hash->{'userargs'}){
+            print $log "$0::$subName INFO expected userargs" 
+                . " but userargs not defined\n";
+            $result = 0;
+          }
+
+	  #create exp_userargs hash from @options
+          my %exp_userargs = ();
+          foreach my $opt ( @options ){
+            print $log "opt $opt";
+            my ($key, $val) = split q:=:, $opt, 2;   
+            if(defined $exp_userargs{$key}){
+
+              #if we have already seen this value
+              #then make the value an array and push new value in
+              if(ref($exp_userargs{$key}) eq ""){
+                my @ar = ($exp_userargs{$key});
+                $exp_userargs{$key} = \@ar;
+              }
+              my $ar = $exp_userargs{$key}; 
+              push @$ar, ($val); 
+            }
+            else{
+              $exp_userargs{$key} = $val;	
+            }
+          }
+
+          my %r_userargs = %{$res_hash->{'userargs'}};
+          foreach my $key( keys %exp_userargs){
+            if( !defined $r_userargs{$key}){
+              print $log "$0::$subName INFO $key not found in userargs \n";
+              $result = 0;
+              next;
+            }
+              
+            print $log "$0::$subName DEBUG comparing expected " 
+                . " $key ->" . dump($exp_userargs{$key})
+                . " With result $key ->" . dump($r_userargs{$key}) . "\n";
+
+            if (!Compare($exp_userargs{$key}, $r_userargs{$key})) {
+              print $log "$0::$subName WARN check failed:" 
+                  . " json compare failed. For field "
+                  . "$key, regex <" . dump($r_userargs{$key})
+                  . "> did not match the result <" . dump($exp_userargs{$key})
+                  . ">\n";
+              $result = 0;
+            }
+          }
+		  if ($result != 0 && $testCmd->{'check_logs'}) {
+            my $testCmdBasics = $self->copyTestBasicConfig($testCmd);
+            $testCmdBasics->{'method'} = 'GET';
+            $testCmdBasics->{'url'} = ':WEBHDFS_URL:/webhdfs/v1:OUTDIR:' . '/status/logs?op=LISTSTATUS';
+            my $curl_result = $self->execCurlCmd($testCmdBasics, "", $log);
+            my $path = JSON::Path->new("FileStatuses.FileStatus[*].pathSuffix");
+            my @value = $path->values($curl_result->{'body'});
+            if ($testCmd->{'check_logs'}->{'job_num'} && $testCmd->{'check_logs'}->{'job_num'} ne (scalar @value)-1) {
+              print $log "$0::$subName INFO check failed: "
+                . " Expect " . $testCmd->{'check_logs'}->{'job_num'} . " jobs in logs, but get " . scalar @value;
+              $result = 0;
+              return $result;
+            }
+            foreach my $jobid (@value) {
+              if ($jobid eq 'list.txt') {
+                next;
+              }
+              my $testCmdBasics = $self->copyTestBasicConfig($testCmd);
+              $testCmdBasics->{'method'} = 'GET';
+              $testCmdBasics->{'url'} = ':WEBHDFS_URL:/webhdfs/v1:OUTDIR:' . '/status/logs/' . $jobid . '?op=LISTSTATUS';
+              my $curl_result = $self->execCurlCmd($testCmdBasics, "", $log);
+
+              my $path = JSON::Path->new("FileStatuses.FileStatus[*]");
+              my @value = $path->values($curl_result->{'body'});
+
+              my $foundjobconf = 0;
+              foreach my $elem (@value) {
+                if ($elem->{'pathSuffix'} eq "job.xml.html") {
+                  $foundjobconf = 1;
+                  if ($elem->{'length'} eq "0") {
+                    print $log "$0::$subName INFO check failed: "
+                      . " job.xml.html for " . $jobid . " is empty";
+					$result = 0;
+					return $result;
+                  }
+                  next;
+                }
+                my $attempt = $elem->{'pathSuffix'};
+                my $testCmdBasics = $self->copyTestBasicConfig($testCmd);
+                $testCmdBasics->{'method'} = 'GET';
+                $testCmdBasics->{'url'} = ':WEBHDFS_URL:/webhdfs/v1:OUTDIR:' . '/status/logs/' . $jobid . '/' . $attempt . '?op=LISTSTATUS';
+                my $curl_result = $self->execCurlCmd($testCmdBasics, "", $log);
+                my $path = JSON::Path->new("FileStatuses.FileStatus[*].pathSuffix");
+                my @value = $path->values($curl_result->{'body'});
+                my @files = ('stderr', 'stdout', 'syslog');
+                foreach my $file (@files) {
+                  if ( !grep( /$file/, @value ) ) {
+                    print $log "$0::$subName INFO check failed: "
+                      . " Cannot find " . $file . " in logs/" . $attempt;
+                    $result = 0;
+                    return $result;
+                  }
+                }
+                $path = JSON::Path->new("FileStatuses.FileStatus[*].length");
+                @value = $path->values($curl_result->{'body'});
+                my $foundnonzerofile = 0;
+                foreach my $length (@value) {
+                  if ($length ne "0") {
+                    $foundnonzerofile = 1;
+                  }
+                }
+                if (!$foundnonzerofile) {
+                  print $log "$0::$subName INFO check failed: "
+                    . " All files in logs/" . $attempt . " are empty";
+                  $result = 0;
+                  return $result;
+                }
+              }
+              if (!$foundjobconf) {
+                print $log "$0::$subName INFO check failed: "
+                  . " Cannot find job.xml.html for " . $jobid;
+				$result = 0;
+				return $result;
+              }
+            }
+          }
         }
       }
     }
@@ -1357,5 +1525,13 @@ sub tmpIPCRunJoinStdoe {
   return ( $? );
 }
 
-
+sub isWindows
+{
+    if($^O =~ /mswin/i) {
+        return 1;
+    }
+    else {
+        return 0;
+    }
+}
 1;

Modified: hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/ddl.conf
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/ddl.conf?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/ddl.conf (original)
+++ hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/ddl.conf Mon Sep 30 21:58:29 2013
@@ -284,7 +284,7 @@ $cfg = 
                                 #	  'json_field_substr_match' => {'table-name' => 'templeton_testtab1'}, 
      'json_field_match_object' => { 'columns' => '[
                  { "name" : "i", "type" : "int", "comment" : "from deserializer" },
-                 { "name" : "j", "type" : "bigint", "comment" : "from deserializer"  }
+                 { "name" : "j", "type" : "bigint", "comment" : "from deserializer" }
            ]' },
     },
     {                           #drop table
@@ -354,7 +354,7 @@ $cfg = 
                       "fieldsTerminatedBy" : "\u0001",
                       "collectionItemsTerminatedBy" : "\u0002",
                       "mapKeysTerminatedBy" : "\u0003",
-                      "linesTerminatedBy" : "\n",
+                      "linesTerminatedBy" : "\\\n",
 
                       "serde" : {
                         "name" : "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe",
@@ -406,6 +406,7 @@ $cfg = 
                   "type" : "bigint", "comment" : "from deserializer"
               },
 	     {
+                  "comment" : "IP Address of the User",
                   "name" : "ip",
                   "type" : "string", "comment" : "from deserializer"
               }
@@ -516,15 +517,7 @@ $cfg = 
      'url' => ':TEMPLETON_URL:/templeton/v1/ddl',
      'status_code' => 200,
      'post_options' => ['user.name=:UNAME:',
-                        'exec=create table if not exists templetontest_parts (i int, j bigint, ip STRING COMMENT "IP Address of the User")
-COMMENT "This is the page view table"
- PARTITIONED BY(dt STRING, country STRING)
-ROW FORMAT DELIMITED
-  FIELDS TERMINATED BY "\001"
-  COLLECTION ITEMS TERMINATED BY "\002"
-  MAP KEYS TERMINATED BY "\003"
-STORED AS rcfile
---LOCATION "table1_location" '],
+                        'exec=create table if not exists templetontest_parts (i int, j bigint, ip STRING COMMENT \'IP Address of the User\') COMMENT \'This is the page view table\'  PARTITIONED BY(dt STRING, country STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY \'\001\'  COLLECTION ITEMS TERMINATED BY \'\002\'  MAP KEYS TERMINATED BY \'\003\' STORED AS rcfile --LOCATION \'table1_location\' '],
      'json_field_substr_match' => {'stderr' => 'OK'},
     },
     {
@@ -632,6 +625,7 @@ STORED AS rcfile
 		   "type" : "bigint", "comment" : "from deserializer"
 	       },
 	       {
+		   "comment" : "IP Address of the User",
 		   "name" : "ip",
 		   "type" : "string", "comment" : "from deserializer"
 	      }
@@ -684,6 +678,7 @@ STORED AS rcfile
 		   "type" : "bigint", "comment" : "from deserializer"
 	       },
 	       {
+		   "comment" : "IP Address of the User",
 		   "name" : "ip",
 		   "type" : "string", "comment" : "from deserializer"
 	      }
@@ -732,7 +727,7 @@ STORED AS rcfile
      'status_code' => 404,
      'json_field_substr_match' => 
        {
-        'error' => 'FAILED: SemanticException \[Error 10006\]: Partition not found \{dt=20120101\, country=IN\}'
+        'error' => 'Partition not found {dt=20120101, country=IN}'
        },
     },
 
@@ -761,7 +756,7 @@ STORED AS rcfile
      'method' => 'POST',
      'url' => ':TEMPLETON_URL:/templeton/v1/ddl?user.name=:UNAME:',
      'status_code' => 200,
-     'post_options' => ['user.name=:UNAME:','exec=create table if not exists templeton_testcol_tab (i int comment "column with comment", j bigint) STORED AS rcfile;'],
+     'post_options' => ['user.name=:UNAME:','exec=create table if not exists templeton_testcol_tab (i int comment \'column with comment\', j bigint) STORED AS rcfile;'],
      'json_field_substr_match' => {'stderr' => 'OK'},
     },
     {
@@ -779,7 +774,7 @@ STORED AS rcfile
      'json_field_match_object' => 
      {
       'columns' => '[
-                 { "name" : "i", "type" : "int", "comment" : "from deserializer"},
+                 { "name" : "i", "type" : "int", "comment" : "from deserializer" },
                  { "name" : "j", "type" : "bigint", "comment" : "from deserializer" }
            ]' 
      },
@@ -1088,9 +1083,7 @@ STORED AS rcfile
      'status_code' => 200,
      'post_options' => ['user.name=:UNAME:',
                         'permissions=---------',
-                        'exec=create table templetontest_hcatgp(i int, j bigint)  
-                         PARTITIONED BY(dt STRING, country STRING)
-                         STORED AS rcfile;'
+                        'exec=create table templetontest_hcatgp(i int, j bigint) PARTITIONED BY(dt STRING, country STRING) STORED AS rcfile;'
                        ],
      'json_field_substr_match' => {'stderr' => 'OK', 'exitcode' => '^0$'}
     },

Modified: hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf (original)
+++ hive/branches/vectorization/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf Mon Sep 30 21:58:29 2013
@@ -35,79 +35,76 @@ $cfg = 
  [
 ##=============================================================================================================
   {
-   'name' => 'TestStreaming',
+   'name' => 'TestKillJob',
    'tests' => 
    [
     {
      'num' => 1,
      'method' => 'POST',
      'url' => ':TEMPLETON_URL:/templeton/v1/mapreduce/streaming',
-     'post_options' => ['user.name=:UNAME:','input=:INPDIR_HDFS:/nums.txt','output=:OUTDIR:/mycounts', 
-                        'mapper=/bin/cat', 'reducer=/usr/bin/wc'],
-     'json_field_substr_match' => { 'id' => '\d+'},
-                                #results
-     'status_code' => 200,
-     'check_job_created' => 1,
-     'check_job_complete' => 'SUCCESS',
-     'check_job_exit_value' => 0,
-     'check_call_back' => 1,
-    },
-    {
-     #-ve test - no input file
-     'num' => 2,
-     'ignore' => 'wait for fix in hadoop 1.0.3',
-     'method' => 'POST',
-     'url' => ':TEMPLETON_URL:/templeton/v1/mapreduce/streaming',
-     'post_options' => ['user.name=:UNAME:','input=:INPDIR_HDFS:/nums.txt','output=:OUTDIR:/mycounts', 
-                        'mapper=/bin/ls no_such-file-12e3', 'reducer=/usr/bin/wc'],
+     'post_options' => ['user.name=:UNAME:','input=:INPDIR_HDFS:/nums.txt',
+                        'input=:INPDIR_HDFS:/nums.txt',
+                        'output=:OUTDIR:/mycounts', 
+                        'mapper=sleep 100', 'reducer=wc'],
      'json_field_substr_match' => { 'id' => '\d+'},
                                 #results
      'status_code' => 200,
      'check_job_created' => 1,
-     'check_job_complete' => 'FAILURE',
-     'check_call_back' => 1,
+     'check_job_complete' => 'KILLED',
+#     'check_call_back' => 1, #TODO - enable call back check after fix
+     'kill_job_timeout' => 10,
     },
-
    ]
   },
 ##=============================================================================================================
   {
-   'name' => 'TestKillJob',
+   'name' => 'TestMapReduce',
    'tests' => 
    [
     {
+         
      'num' => 1,
      'method' => 'POST',
-     'url' => ':TEMPLETON_URL:/templeton/v1/mapreduce/streaming',
-     'post_options' => ['user.name=:UNAME:','input=:INPDIR_HDFS:/nums.txt','output=:OUTDIR:/mycounts', 
-                        'mapper=/bin/sleep 100', 'reducer=/usr/bin/wc'],
+     'url' => ':TEMPLETON_URL:/templeton/v1/mapreduce/jar',
+     'post_options' => ['user.name=:UNAME:','arg=:INPDIR_HDFS:/nums.txt', 'arg= :OUTDIR:/wc.txt', 
+                        'jar=:INPDIR_HDFS:/hexamples.jar', 'class=wordcount', ],
      'json_field_substr_match' => { 'id' => '\d+'},
                                 #results
      'status_code' => 200,
      'check_job_created' => 1,
-     'check_job_complete' => 'KILLED',
-#     'check_call_back' => 1, #TODO - enable call back check after fix
-     'kill_job_timeout' => 10,
+     'check_job_complete' => 'SUCCESS',
+     'check_job_exit_value' => 0,
+     'check_call_back' => 1,
     },
-   ]
-  },
-##=============================================================================================================
-  {
-   'name' => 'TestMapReduce',
-   'tests' => 
-   [
     {
          
-     'num' => 1,
+     'num' => 2,
+     'method' => 'POST',
+     'url' => ':TEMPLETON_URL:/templeton/v1/mapreduce/jar',
+     'post_options' => ['user.name=:UNAME:','arg=-mt', 'arg=660000', 
+                        'jar=:INPDIR_HDFS:/hexamples.jar', 'class=sleep', ],
+     'json_field_substr_match' => { 'id' => '\d+'},
+                                #results
+     'status_code' => 200,
+     'check_job_created' => 1,
+     'check_job_complete' => 'SUCCESS',
+     'check_job_exit_value' => 0,
+     'check_call_back' => 1,
+     'timeout' => 840, #increase timeout as this test takes long
+    },
+    {
+     # with log enabled 
+     'num' => 3,
      'method' => 'POST',
      'url' => ':TEMPLETON_URL:/templeton/v1/mapreduce/jar',
      'post_options' => ['user.name=:UNAME:','arg=:INPDIR_HDFS:/nums.txt', 'arg= :OUTDIR:/wc.txt', 
-                        'jar=:INPDIR_HDFS:/hexamples.jar', 'class=wordcount', ],
+                        'jar=:INPDIR_HDFS:/hexamples.jar', 'class=wordcount', 'statusdir=:OUTDIR:/status', 'enablelog=true' ],
      'json_field_substr_match' => { 'id' => '\d+'},
                                 #results
      'status_code' => 200,
      'check_job_created' => 1,
-     'check_job_complete' => 'SUCCESS', 
+     'check_job_complete' => 'SUCCESS',
+     'check_logs' => { 'job_num' => '1' },
      'check_job_exit_value' => 0,
      'check_call_back' => 1,
     },
@@ -120,7 +117,6 @@ $cfg = 
    [
     {
                                 #test syntax error
-     'ignore' => 'fails in current version',
      'num' => 1,
      'method' => 'POST',
      'url' => ':TEMPLETON_URL:/templeton/v1/pig',
@@ -129,7 +125,6 @@ $cfg = 
                                 #results
      'status_code' => 200,
      'check_job_created' => 1,
-     'check_job_complete' => 'FAILURE', 
      'check_job_exit_value' => 8,
      'check_call_back' => 1,
     },
@@ -143,8 +138,7 @@ $cfg = 
                                 #results
      'status_code' => 200,
      'check_job_created' => 1,
-     'check_job_exit_value' => 0,
-     'check_job_complete' => 'SUCCESS', 
+     'check_job_complete' => 'SUCCESS',
      'check_job_exit_value' => 0,
      'check_call_back' => 1,
     },
@@ -158,7 +152,7 @@ $cfg = 
                                 #results
      'status_code' => 200,
      'check_job_created' => 1,
-     'check_job_complete' => 'SUCCESS', 
+     'check_job_complete' => 'SUCCESS',
      'check_job_exit_value' => 0,
      'check_call_back' => 1,
     },
@@ -228,21 +222,50 @@ $cfg = 
     {
                                 #no file to be copied, should result in launcher job error 
      'num' => 8,
-     ignore => 'check is disabled for now in templeton',
      'method' => 'POST',
      'url' => ':TEMPLETON_URL:/templeton/v1/pig',
-     'post_options' => ['user.name=:UNAME:', 'arg=-p', 'arg=INPDIR=:INPDIR_HDFS:','arg=-p', 'arg= OUTDIR=:OUTDIR:', 'file=:INPDIR_HDFS:/no_such_file.pig',
+     'post_options' => ['user.name=:UNAME:', 'arg=-p', 'arg=INPDIR=:INPDIR_HDFS:','arg=-p', 'arg=OUTDIR=:OUTDIR:', 'file=:INPDIR_HDFS:/no_such_file.pig',
                         'files=:INPDIR_HDFS:/rowcountmacro.pig' ],
      'json_field_substr_match' => { 'error' => 'does not exist'},
                                 #results
-     'status_code' => 200,
-     'check_job_complete' => 'FAILURE', 
+     'status_code' => 400,
 
     },
 	
+	{
+                                #Auto add quote around args
+     'ignore' => 'MS9 feature, will reenable later',
+     'num' => 9,
+       'method' => 'POST',
+     'url' => ':TEMPLETON_URL:/templeton/v1/pig',
+     'post_options' => ['user.name=:UNAME:','arg=-check', 'file=:INPDIR_HDFS:/loadstore.pig', 'arg=-p', 'arg=INPDIR=:INPDIR_HDFS:','arg=-p', 'arg=OUTDIR=:OUTDIR:', ],
+     'json_field_substr_match' => { 'id' => '\d+'},
+                                #results
+     'status_code' => 200,
+     'check_job_created' => 1,
+     'check_job_complete' => 'SUCCESS',
+     'check_job_exit_value' => 0,
+     'check_call_back' => 1,
+    },
 
+    {
+                                #a simple load store script with log enabled
+     'num' => 9,
+     'method' => 'POST',
+     'url' => ':TEMPLETON_URL:/templeton/v1/pig',
+     'post_options' => ['user.name=:UNAME:', 'arg=-p', 'arg=INPDIR=:INPDIR_HDFS:','arg=-p', 'arg=OUTDIR=:OUTDIR:', 'file=:INPDIR_HDFS:/loadstore.pig',
+                    'statusdir=:OUTDIR:/status', 'enablelog=true'],
+     'json_field_substr_match' => { 'id' => '\d+'},
+                                #results
+     'status_code' => 200,
+     'check_job_created' => 1,
+     'check_job_complete' => 'SUCCESS',
+     'check_logs' => { 'job_num' => '1' },
+     'check_job_exit_value' => 0,
+     'check_call_back' => 1,
+    },
 
-    #test 9
+    #test 10
     #TODO jython test
 
 
@@ -257,7 +280,6 @@ $cfg = 
    [
     {
                                 #test syntax error
-     'ignore' => 'fails in current version',
      'num' => 1,
      'method' => 'POST',
      'url' => ':TEMPLETON_URL:/templeton/v1/hive',
@@ -266,8 +288,7 @@ $cfg = 
                                 #results
      'status_code' => 200,
      'check_job_created' => 1,
-     'check_job_complete' => 'FAILURE', 
-     'check_job_exit_value' => 11,
+     'check_job_exit_value' => 64,
 
     },
  
@@ -305,7 +326,7 @@ $cfg = 
      'num' => 4,
      'method' => 'POST',
      'url' => ':TEMPLETON_URL:/templeton/v1/hive',
-     'post_options' => ['user.name=:UNAME:','execute=create external table mynums(a int, b int) location ":INPDIR_HDFS:/numstable/";', ],
+     'post_options' => ['user.name=:UNAME:','execute=create external table mynums(a int, b int) location \':INPDIR_HDFS:/numstable/\';', ],
      'json_field_substr_match' => { 'id' => '\d+'},
                                 #results
      'status_code' => 200,
@@ -412,7 +433,49 @@ $cfg = 
    ]
   },
 
-
+    {
+                                #test add jar
+     'num' => 9,
+     'method' => 'POST',
+     'url' => ':TEMPLETON_URL:/templeton/v1/hive',
+     'post_options' => ['user.name=:UNAME:','execute=add jar piggybank.jar', 'files=:INPDIR_HDFS:/piggybank.jar',],
+     'json_field_substr_match' => { 'id' => '\d+'},
+                                #results
+     'status_code' => 200,
+     'check_job_created' => 1,
+     'check_job_complete' => 'SUCCESS', 
+     'check_job_exit_value' => 0,
+     'check_call_back' => 1,
+    },
+    {
+                                #test add jar when the jar is not shipped
+     'num' => 10,
+     'method' => 'POST',
+     'url' => ':TEMPLETON_URL:/templeton/v1/hive',
+     'post_options' => ['user.name=:UNAME:','execute=add jar piggybank.jar',],
+     'json_field_substr_match' => { 'id' => '\d+'},
+                                #results
+     'status_code' => 200,
+     'check_job_created' => 1,
+     'check_job_complete' => 'SUCCESS', 
+     'check_job_exit_value' => 1,
+     'check_call_back' => 1,
+    }, 
+    {
+                                #enable logs
+     'num' => 11,
+     'method' => 'POST',
+     'url' => ':TEMPLETON_URL:/templeton/v1/hive',	
+     'post_options' => ['user.name=:UNAME:','execute=select a,b from mynums', 'statusdir=:OUTDIR:/status', 'enablelog=true'],
+     'json_field_substr_match' => { 'id' => '\d+'},
+                                #results
+     'status_code' => 200,
+     'check_job_created' => 1,
+     'check_job_complete' => 'SUCCESS',
+     'check_logs' => { 'job_num' => '1' },
+     'check_job_exit_value' => 0,
+     'check_call_back' => 1,
+    },
 
 
 

Modified: hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBaseOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBaseOutputFormat.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBaseOutputFormat.java (original)
+++ hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBaseOutputFormat.java Mon Sep 30 21:58:29 2013
@@ -25,6 +25,7 @@ import java.util.Properties;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
@@ -40,7 +41,7 @@ public class HBaseBaseOutputFormat imple
   HiveOutputFormat<WritableComparable<?>, Put> {
 
   @Override
-  public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(
+  public FSRecordWriter getHiveRecordWriter(
     JobConf jc, Path finalOutPath,
     Class<? extends Writable> valueClass, boolean isCompressed,
     Properties tableProperties, Progressable progress)

Modified: hive/branches/vectorization/metastore/scripts/upgrade/postgres/014-HIVE-3764.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/scripts/upgrade/postgres/014-HIVE-3764.postgres.sql?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/scripts/upgrade/postgres/014-HIVE-3764.postgres.sql (original)
+++ hive/branches/vectorization/metastore/scripts/upgrade/postgres/014-HIVE-3764.postgres.sql Mon Sep 30 21:58:29 2013
@@ -4,9 +4,8 @@
 CREATE TABLE "VERSION" (
   "VER_ID" bigint,
   "SCHEMA_VERSION" character varying(127) NOT NULL,
-  "VERSION_COMMENT" character varying(255) NOT NULL,
-  PRIMARY KEY ("VER_ID")
+  "VERSION_COMMENT" character varying(255) NOT NULL
 );
 ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
 
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '', 'Initial value');
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '', 'Initial value');

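One note on why the INSERT is rewritten: the VERSION table is created with quoted, upper-case identifiers, and PostgreSQL folds unquoted names to lower case, so an unquoted INSERT INTO VERSION (...) would look for a table named version and never match "VERSION". Quoting the table and column names, as the new INSERT and the UPDATE statements in the upgrade scripts further down do, keeps the statements pointed at the schema actually created here.
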
Modified: hive/branches/vectorization/metastore/scripts/upgrade/postgres/hive-schema-0.12.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/scripts/upgrade/postgres/hive-schema-0.12.0.postgres.sql?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/scripts/upgrade/postgres/hive-schema-0.12.0.postgres.sql (original)
+++ hive/branches/vectorization/metastore/scripts/upgrade/postgres/hive-schema-0.12.0.postgres.sql Mon Sep 30 21:58:29 2013
@@ -522,7 +522,6 @@ CREATE TABLE "VERSION" (
   "VER_ID" bigint,
   "SCHEMA_VERSION" character varying(127) NOT NULL,
   "VERSION_COMMENT" character varying(255) NOT NULL,
-  PRIMARY KEY ("VER_ID")
 );
 
 --
@@ -1400,7 +1399,7 @@ REVOKE ALL ON SCHEMA public FROM PUBLIC;
 GRANT ALL ON SCHEMA public TO PUBLIC;
 
 
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
 --
 -- PostgreSQL database dump complete
 --

Modified: hive/branches/vectorization/metastore/scripts/upgrade/postgres/hive-schema-0.13.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/scripts/upgrade/postgres/hive-schema-0.13.0.postgres.sql?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/scripts/upgrade/postgres/hive-schema-0.13.0.postgres.sql (original)
+++ hive/branches/vectorization/metastore/scripts/upgrade/postgres/hive-schema-0.13.0.postgres.sql Mon Sep 30 21:58:29 2013
@@ -521,8 +521,7 @@ CREATE TABLE "TAB_COL_STATS" (
 CREATE TABLE "VERSION" (
   "VER_ID" bigint,
   "SCHEMA_VERSION" character varying(127) NOT NULL,
-  "COMMENT" character varying(255) NOT NULL,
-  PRIMARY KEY ("VER_ID")
+  "VERSION_COMMENT" character varying(255) NOT NULL
 );
 
 --
@@ -1400,7 +1399,7 @@ REVOKE ALL ON SCHEMA public FROM PUBLIC;
 GRANT ALL ON SCHEMA public TO PUBLIC;
 
 
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.13.0', 'Hive release version 0.13.0');
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.13.0', 'Hive release version 0.13.0');
 --
 -- PostgreSQL database dump complete
 --

Modified: hive/branches/vectorization/metastore/scripts/upgrade/postgres/upgrade-0.11.0-to-0.12.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/scripts/upgrade/postgres/upgrade-0.11.0-to-0.12.0.postgres.sql?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/scripts/upgrade/postgres/upgrade-0.11.0-to-0.12.0.postgres.sql (original)
+++ hive/branches/vectorization/metastore/scripts/upgrade/postgres/upgrade-0.11.0-to-0.12.0.postgres.sql Mon Sep 30 21:58:29 2013
@@ -1,5 +1,5 @@
 SELECT 'Upgrading MetaStore schema from 0.11.0 to 0.12.0';
 \i 013-HIVE-3255.postgres.sql;
 \i 014-HIVE-3764.postgres.sql;
-UPDATE VERSION SET SCHEMA_VERSION='0.12.0', VERSION_COMMENT='Hive release version 0.12.0' where VER_ID=1;
+UPDATE "VERSION" SET "SCHEMA_VERSION"='0.12.0', "VERSION_COMMENT"='Hive release version 0.12.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 0.11.0 to 0.12.0';

Modified: hive/branches/vectorization/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql (original)
+++ hive/branches/vectorization/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql Mon Sep 30 21:58:29 2013
@@ -1,3 +1,3 @@
 SELECT 'Upgrading MetaStore schema from 0.11.0 to 0.12.0';
-UPDATE VERSION SET SCHEMA_VERSION='0.13.0', VERSION_COMMENT='Hive release version 0.13.0' where VER_ID=1;
+UPDATE "VERSION" SET "SCHEMA_VERSION"='0.13.0', "VERSION_COMMENT"='Hive release version 0.13.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 0.11.0 to 0.12.0';

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Mon Sep 30 21:58:29 2013
@@ -362,6 +362,7 @@ public enum ErrorMsg {
   UNSUPPORTED_ALTER_TBL_OP(10245, "{0} alter table options is not supported"),
   INVALID_BIGTABLE_MAPJOIN(10246, "{0} table chosen for streaming is not valid", true),
   MISSING_OVER_CLAUSE(10247, "Missing over clause for function : "),
+  PARTITION_SPEC_TYPE_MISMATCH(10248, "Cannot add partition column {0} of type {1} as it cannot be converted to type {2}", true),
 
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
   SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. "

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java Mon Sep 30 21:58:29 2013
@@ -35,11 +35,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.io.FSRecordWriter;
+import org.apache.hadoop.hive.ql.io.FSRecordWriter.StatsProvidingRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
-import org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat;
 import org.apache.hadoop.hive.ql.io.HivePartitioner;
+import org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -84,11 +86,13 @@ public class FileSinkOperator extends Te
   protected transient int dpStartCol; // start column # for DP columns
   protected transient List<String> dpVals; // array of values corresponding to DP columns
   protected transient List<Object> dpWritables;
-  protected transient RecordWriter[] rowOutWriters; // row specific RecordWriters
+  protected transient FSRecordWriter[] rowOutWriters; // row specific RecordWriters
   protected transient int maxPartitions;
   protected transient ListBucketingCtx lbCtx;
   protected transient boolean isSkewedStoredAsSubDirectories;
   protected transient boolean statsCollectRawDataSize;
+  private transient boolean[] statsFromRecordWriter;
+  private transient boolean isCollectRWStats;
 
 
   private static final transient String[] FATAL_ERR_MSG = {
@@ -96,22 +100,12 @@ public class FileSinkOperator extends Te
       "Number of dynamic partitions exceeded hive.exec.max.dynamic.partitions.pernode."
   };
 
-  /**
-   * RecordWriter.
-   *
-   */
-  public static interface RecordWriter {
-    void write(Writable w) throws IOException;
-
-    void close(boolean abort) throws IOException;
-  }
-
   public class FSPaths implements Cloneable {
     Path tmpPath;
     Path taskOutputTempPath;
     Path[] outPaths;
     Path[] finalPaths;
-    RecordWriter[] outWriters;
+    FSRecordWriter[] outWriters;
     Stat stat;
 
     public FSPaths() {
@@ -122,7 +116,7 @@ public class FileSinkOperator extends Te
       taskOutputTempPath = Utilities.toTaskTempPath(specPath);
       outPaths = new Path[numFiles];
       finalPaths = new Path[numFiles];
-      outWriters = new RecordWriter[numFiles];
+      outWriters = new FSRecordWriter[numFiles];
       stat = new Stat();
     }
 
@@ -166,11 +160,11 @@ public class FileSinkOperator extends Te
       }
     }
 
-    public void setOutWriters(RecordWriter[] out) {
+    public void setOutWriters(FSRecordWriter[] out) {
       outWriters = out;
     }
 
-    public RecordWriter[] getOutWriters() {
+    public FSRecordWriter[] getOutWriters() {
       return outWriters;
     }
 
@@ -328,6 +322,7 @@ public class FileSinkOperator extends Te
       isCompressed = conf.getCompressed();
       parent = Utilities.toTempPath(conf.getDirName());
       statsCollectRawDataSize = conf.isStatsCollectRawDataSize();
+      statsFromRecordWriter = new boolean[numFiles];
 
       serializer = (Serializer) conf.getTableInfo().getDeserializerClass().newInstance();
       serializer.initialize(null, conf.getTableInfo().getProperties());
@@ -520,6 +515,8 @@ public class FileSinkOperator extends Te
         fsp.outWriters[filesIdx] = HiveFileFormatUtils.getHiveRecordWriter(
             jc, conf.getTableInfo(), outputClass, conf, fsp.outPaths[filesIdx],
             reporter);
+        // If the record writer provides stats, get it from there instead of the serde
+        statsFromRecordWriter[filesIdx] = fsp.outWriters[filesIdx] instanceof StatsProvidingRecordWriter;
         // increment the CREATED_FILES counter
         if (reporter != null) {
           reporter.incrCounter(ProgressCounter.CREATED_FILES, 1);
@@ -623,7 +620,11 @@ public class FileSinkOperator extends Te
       }
 
       rowOutWriters = fpaths.outWriters;
-      if (conf.isGatherStats()) {
+      // check if all record writers implement statistics. if atleast one RW
+      // doesn't implement stats interface we will fallback to conventional way
+      // of gathering stats
+      isCollectRWStats = areAllTrue(statsFromRecordWriter);
+      if (conf.isGatherStats() && !isCollectRWStats) {
         if (statsCollectRawDataSize) {
           SerDeStats stats = serializer.getSerDeStats();
           if (stats != null) {
@@ -634,12 +635,14 @@ public class FileSinkOperator extends Te
       }
 
 
+      FSRecordWriter rowOutWriter = null;
+
       if (row_count != null) {
         row_count.set(row_count.get() + 1);
       }
 
       if (!multiFileSpray) {
-        rowOutWriters[0].write(recordValue);
+        rowOutWriter = rowOutWriters[0];
       } else {
         int keyHashCode = 0;
         for (int i = 0; i < partitionEval.length; i++) {
@@ -650,8 +653,9 @@ public class FileSinkOperator extends Te
         key.setHashCode(keyHashCode);
         int bucketNum = prtner.getBucket(key, null, totalFiles);
         int idx = bucketMap.get(bucketNum);
-        rowOutWriters[idx].write(recordValue);
+        rowOutWriter = rowOutWriters[idx];
       }
+      rowOutWriter.write(recordValue);
     } catch (IOException e) {
       throw new HiveException(e);
     } catch (SerDeException e) {
@@ -659,6 +663,15 @@ public class FileSinkOperator extends Te
     }
   }
 
+  private boolean areAllTrue(boolean[] statsFromRW) {
+    for(boolean b : statsFromRW) {
+      if (!b) {
+        return false;
+      }
+    }
+    return true;
+  }
+
   /**
    * Lookup list bucketing path.
    * @param lbDirName
@@ -868,6 +881,27 @@ public class FileSinkOperator extends Te
     if (!abort) {
       for (FSPaths fsp : valToPaths.values()) {
         fsp.closeWriters(abort);
+
+        // before closing the operator check if statistics gathering is requested
+        // and is provided by record writer. this is different from the statistics
+        // gathering done in processOp(). In processOp(), for each row added
+        // serde statistics about the row is gathered and accumulated in hashmap.
+        // this adds more overhead to the actual processing of row. But if the
+        // record writer already gathers the statistics, it can simply return the
+        // accumulated statistics which will be aggregated in case of spray writers
+        if (conf.isGatherStats() && isCollectRWStats) {
+          for (int idx = 0; idx < fsp.outWriters.length; idx++) {
+            FSRecordWriter outWriter = fsp.outWriters[idx];
+            if (outWriter != null) {
+              SerDeStats stats = ((StatsProvidingRecordWriter) outWriter).getStats();
+              if (stats != null) {
+                fsp.stat.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize());
+                fsp.stat.addToStat(StatsSetupConst.ROW_COUNT, stats.getRowCount());
+              }
+            }
+          }
+        }
+
         if (isNativeTable) {
           fsp.commit(fs);
         }
@@ -938,7 +972,7 @@ public class FileSinkOperator extends Te
                  hiveOutputFormat = ReflectionUtils.newInstance(conf.getTableInfo().getOutputFileFormatClass(),job);
            }
           else {
-                 hiveOutputFormat = conf.getTableInfo().getOutputFileFormatClass().newInstance(); 
+                 hiveOutputFormat = conf.getTableInfo().getOutputFileFormatClass().newInstance();
           }
         }
         else {

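FSRecordWriter itself is added elsewhere in this commit and is not shown in this diff; the hunks above only reveal that it carries a nested StatsProvidingRecordWriter interface with a getStats() method and that it replaces the removed FileSinkOperator.RecordWriter, whose write(Writable)/close(boolean) pair the existing anonymous writers keep implementing. Under those assumptions, a minimal sketch (not part of this commit) of a stats-providing writer that FileSinkOperator could harvest at close time might look like this; the delegate wrapping and the byte-size estimate are purely illustrative:

    import java.io.IOException;

    import org.apache.hadoop.hive.ql.io.FSRecordWriter;
    import org.apache.hadoop.hive.ql.io.FSRecordWriter.StatsProvidingRecordWriter;
    import org.apache.hadoop.hive.serde2.SerDeStats;
    import org.apache.hadoop.io.Writable;

    // Assumes StatsProvidingRecordWriter extends FSRecordWriter, as the
    // instanceof check and the cast in the close path above suggest.
    public class CountingRecordWriter implements StatsProvidingRecordWriter {

      private final FSRecordWriter delegate;   // the writer that actually produces the file
      private long rowCount;
      private long rawDataSize;

      public CountingRecordWriter(FSRecordWriter delegate) {
        this.delegate = delegate;
      }

      public void write(Writable w) throws IOException {
        delegate.write(w);
        rowCount++;
        rawDataSize += w.toString().length(); // crude size estimate, for illustration only
      }

      public void close(boolean abort) throws IOException {
        delegate.close(abort);
      }

      public SerDeStats getStats() {
        SerDeStats stats = new SerDeStats();
        stats.setRowCount(rowCount);
        stats.setRawDataSize(rawDataSize);
        return stats;
      }
    }

With writers of this shape behind every output file, the isCollectRWStats path above can skip the per-row serializer.getSerDeStats() call and simply aggregate the writers' totals in the close path.
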
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java Mon Sep 30 21:58:29 2013
@@ -80,15 +80,20 @@ public class UnionOperator extends Opera
     for (int p = 0; p < parents; p++) {
       assert (parentFields[p].size() == columns);
       for (int c = 0; c < columns; c++) {
-        columnTypeResolvers[c].update(parentFields[p].get(c)
-            .getFieldObjectInspector());
+        if (!columnTypeResolvers[c].update(parentFields[p].get(c)
+            .getFieldObjectInspector())) {
+          // checked in SemanticAnalyzer. Should not happen
+          throw new HiveException("Incompatible types for union operator");
+        }
       }
     }
 
     ArrayList<ObjectInspector> outputFieldOIs = new ArrayList<ObjectInspector>(
         columns);
     for (int c = 0; c < columns; c++) {
-      outputFieldOIs.add(columnTypeResolvers[c].get());
+      // can be null for void type
+      ObjectInspector oi = columnTypeResolvers[c].get();
+      outputFieldOIs.add(oi == null ? parentFields[0].get(c).getFieldObjectInspector() : oi);
     }
 
     // create output row ObjectInspector

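The two UnionOperator changes above are defensive versions of the same type resolution: update() now reports incompatible column types (already rejected in SemanticAnalyzer, hence the "should not happen" comment), and get() may legitimately return null when a column resolves to the void type, presumably the case where a branch contributes only an untyped NULL literal; in that case the first parent's object inspector is used as the output inspector instead.
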
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Mon Sep 30 21:58:29 2013
@@ -102,12 +102,12 @@ import org.apache.hadoop.hive.ql.Context
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryPlan;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapper;
 import org.apache.hadoop.hive.ql.exec.mr.ExecReducer;
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
 import org.apache.hadoop.hive.ql.io.ContentSummaryInputFormat;
+import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
@@ -1694,7 +1694,7 @@ public final class Utilities {
 
     for (String p : paths) {
       Path path = new Path(p);
-      RecordWriter writer = HiveFileFormatUtils.getRecordWriter(
+      FSRecordWriter writer = HiveFileFormatUtils.getRecordWriter(
           jc, hiveOutputFormat, outputClass, isCompressed,
           tableInfo.getProperties(), path, reporter);
       writer.close(false);
@@ -2853,7 +2853,7 @@ public final class Utilities {
     Path newFilePath = new Path(newFile);
 
     String onefile = newPath.toString();
-    RecordWriter recWriter = outFileFormat.newInstance().getHiveRecordWriter(job, newFilePath,
+    FSRecordWriter recWriter = outFileFormat.newInstance().getHiveRecordWriter(job, newFilePath,
         Text.class, false, props, null);
     if (dummyRow) {
       // empty files are omitted at CombineHiveInputFormat.

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java Mon Sep 30 21:58:29 2013
@@ -28,8 +28,8 @@ import java.util.Properties;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.PTFDeserializer;
@@ -240,7 +240,7 @@ public class PTFRowContainer<Row extends
   }
 
 
-  private static class PTFRecordWriter implements RecordWriter {
+  private static class PTFRecordWriter implements FSRecordWriter {
     BytesWritable EMPTY_KEY = new BytesWritable();
 
     SequenceFile.Writer outStream;
@@ -262,7 +262,7 @@ public class PTFRowContainer<Row extends
     extends HiveSequenceFileOutputFormat<K,V> {
 
     @Override
-    public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
+    public FSRecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
         Class<? extends Writable> valueClass, boolean isCompressed,
         Properties tableProperties, Progressable progress) throws IOException {
 

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java Mon Sep 30 21:58:29 2013
@@ -30,8 +30,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -105,7 +105,7 @@ public class RowContainer<ROW extends Li
   int acutalSplitNum = 0;
   int currentSplitPointer = 0;
   org.apache.hadoop.mapred.RecordReader rr = null; // record reader
-  RecordWriter rw = null;
+  FSRecordWriter rw = null;
   InputFormat<WritableComparable, Writable> inputFormat = null;
   InputSplit[] inputSplits = null;
   private ROW dummyRow = null;
@@ -531,7 +531,7 @@ public class RowContainer<ROW extends Li
 
   }
 
-  protected RecordWriter getRecordWriter() {
+  protected FSRecordWriter getRecordWriter() {
     return rw;
   }
 

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java Mon Sep 30 21:58:29 2013
@@ -24,7 +24,6 @@ import java.util.Properties;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
@@ -43,7 +42,7 @@ public class HiveBinaryOutputFormat<K ex
   /**
    * create the final out file, and output row by row. After one row is
    * appended, a configured row separator is appended
-   * 
+   *
    * @param jc
    *          the job configuration file
    * @param outPath
@@ -59,14 +58,14 @@ public class HiveBinaryOutputFormat<K ex
    * @return the RecordWriter
    */
   @Override
-  public RecordWriter getHiveRecordWriter(JobConf jc, Path outPath,
+  public FSRecordWriter getHiveRecordWriter(JobConf jc, Path outPath,
       Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException {
 
     FileSystem fs = outPath.getFileSystem(jc);
     final OutputStream outStream = fs.create(outPath);
 
-    return new RecordWriter() {
+    return new FSRecordWriter() {
       public void write(Writable r) throws IOException {
         if (r instanceof Text) {
           Text tr = (Text) r;

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java Mon Sep 30 21:58:29 2013
@@ -32,7 +32,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat;
@@ -246,7 +245,7 @@ public final class HiveFileFormatUtils {
     return true;
   }
 
-  public static RecordWriter getHiveRecordWriter(JobConf jc,
+  public static FSRecordWriter getHiveRecordWriter(JobConf jc,
       TableDesc tableInfo, Class<? extends Writable> outputClass,
       FileSinkDesc conf, Path outPath, Reporter reporter) throws HiveException {
     boolean storagehandlerofhivepassthru = false;
@@ -287,7 +286,7 @@ public final class HiveFileFormatUtils {
     }
   }
 
-  public static RecordWriter getRecordWriter(JobConf jc,
+  public static FSRecordWriter getRecordWriter(JobConf jc,
       HiveOutputFormat<?, ?> hiveOutputFormat,
       final Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProp, Path outPath, Reporter reporter

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java Mon Sep 30 21:58:29 2013
@@ -25,7 +25,6 @@ import java.util.Properties;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
@@ -39,7 +38,7 @@ import org.apache.hadoop.util.Progressab
 /**
  * HiveIgnoreKeyTextOutputFormat replaces key with null before feeding the <key,
  * value> to TextOutputFormat.RecordWriter.
- * 
+ *
  */
 public class HiveIgnoreKeyTextOutputFormat<K extends WritableComparable, V extends Writable>
     extends TextOutputFormat<K, V> implements HiveOutputFormat<K, V> {
@@ -47,7 +46,7 @@ public class HiveIgnoreKeyTextOutputForm
   /**
    * create the final out file, and output row by row. After one row is
    * appended, a configured row separator is appended
-   * 
+   *
    * @param jc
    *          the job configuration file
    * @param outPath
@@ -63,7 +62,7 @@ public class HiveIgnoreKeyTextOutputForm
    * @return the RecordWriter
    */
   @Override
-  public RecordWriter getHiveRecordWriter(JobConf jc, Path outPath,
+  public FSRecordWriter getHiveRecordWriter(JobConf jc, Path outPath,
       Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException {
     int rowSeparator = 0;
@@ -79,7 +78,7 @@ public class HiveIgnoreKeyTextOutputForm
     FileSystem fs = outPath.getFileSystem(jc);
     final OutputStream outStream = Utilities.createCompressedStream(jc, fs
         .create(outPath), isCompressed);
-    return new RecordWriter() {
+    return new FSRecordWriter() {
       public void write(Writable r) throws IOException {
         if (r instanceof Text) {
           Text tr = (Text) r;

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java Mon Sep 30 21:58:29 2013
@@ -23,7 +23,6 @@ import java.util.Properties;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.NullWritable;
@@ -48,7 +47,7 @@ public class HiveNullValueSequenceFileOu
   private boolean keyIsText;
 
   @Override
-  public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
+  public FSRecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
       Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException {
 
@@ -58,7 +57,7 @@ public class HiveNullValueSequenceFileOu
 
     keyWritable = new HiveKey();
     keyIsText = valueClass.equals(Text.class);
-    return new RecordWriter() {
+    return new FSRecordWriter() {
       public void write(Writable r) throws IOException {
         if (keyIsText) {
           Text text = (Text) r;

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java Mon Sep 30 21:58:29 2013
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.util.Properties;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.OutputFormat;
@@ -58,7 +57,7 @@ public interface HiveOutputFormat<K, V> 
    *          progress used for status report
    * @return the RecordWriter for the output file
    */
-  RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
+  FSRecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
       final Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException;
 

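Taken together with the anonymous writers in HiveBinaryOutputFormat and HiveIgnoreKeyTextOutputFormat above, this interface change means custom output formats now hand back an FSRecordWriter rather than the removed FileSinkOperator.RecordWriter. A minimal, hypothetical helper in that style, assuming FSRecordWriter declares the same write(Writable)/close(boolean) pair as the interface it replaces, could be:

    import java.io.IOException;
    import java.io.OutputStream;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.FSRecordWriter;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.mapred.JobConf;

    public final class PlainTextWriters {

      // Hypothetical helper, not part of this commit: builds a newline-delimited
      // text writer in the same shape as the anonymous FSRecordWriter instances above.
      public static FSRecordWriter create(JobConf jc, Path outPath) throws IOException {
        FileSystem fs = outPath.getFileSystem(jc);
        final OutputStream outStream = fs.create(outPath);
        return new FSRecordWriter() {
          public void write(Writable r) throws IOException {
            Text text = (Text) r;                    // assumes Text rows, as in the formats above
            outStream.write(text.getBytes(), 0, text.getLength());
            outStream.write('\n');
          }

          public void close(boolean abort) throws IOException {
            outStream.close();
          }
        };
      }

      private PlainTextWriters() {
      }
    }
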
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughOutputFormat.java?rev=1527793&r1=1527792&r2=1527793&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughOutputFormat.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughOutputFormat.java Mon Sep 30 21:58:29 2013
@@ -49,7 +49,7 @@ public class HivePassThroughOutputFormat
                                   "org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat";
 
   public static final String HIVE_PASSTHROUGH_STORAGEHANDLER_OF_JOBCONFKEY =
-                                 "hive.passthrough.storagehandler.of"; 
+                                 "hive.passthrough.storagehandler.of";
 
   public HivePassThroughOutputFormat() {
     //construct this class through ReflectionUtils from FileSinkOperator
@@ -99,7 +99,7 @@ public class HivePassThroughOutputFormat
   }
 
   @Override
-  public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(
+  public FSRecordWriter getHiveRecordWriter(
       JobConf jc, Path finalOutPath, Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException {
     if (this.initialized == false) {


