hadoop-hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From na...@apache.org
Subject svn commit: r883983 - in /hadoop/hive/trunk: CHANGES.txt ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java ql/src/test/queries/clientnegative/script_broken_pipe3.q ql/src/test/results/clientpositive/partition_wise_fileformat.q.out
Date Wed, 25 Nov 2009 05:45:39 GMT
Author: namit
Date: Wed Nov 25 05:45:39 2009
New Revision: 883983

URL: http://svn.apache.org/viewvc?rev=883983&view=rev
Log:
HIVE-947. runlength encoding for RCFILE (He Yongqiang via namit)


Modified:
    hadoop/hive/trunk/CHANGES.txt
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
    hadoop/hive/trunk/ql/src/test/queries/clientnegative/script_broken_pipe3.q
    hadoop/hive/trunk/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out

Modified: hadoop/hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/CHANGES.txt?rev=883983&r1=883982&r2=883983&view=diff
==============================================================================
--- hadoop/hive/trunk/CHANGES.txt (original)
+++ hadoop/hive/trunk/CHANGES.txt Wed Nov 25 05:45:39 2009
@@ -118,9 +118,11 @@
     (Paul Yang via zshao)
 
     HIVE-914. Optimize UDFJson (Paul Yang via namit)
-    
+
     HIVE-945. custom mappers/reducers should not be initialized at compile time (namit via
He Yongqiang)
 
+    HIVE-947. runlength encoding for RCFILE (He Yongqiang via namit)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java?rev=883983&r1=883982&r2=883983&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java Wed Nov 25 05:45:39
2009
@@ -519,6 +519,16 @@
       // used to store each value's length
       NonSyncDataOutputBuffer valLenBuffer;
 
+      /*
+       * use a run-length encoding. We only record a run length if the same
+       * 'prevValueLen' occurs more than once. And we negate the run
+       * length to distinguish a runLength from a normal value length. For
+       * example, if the values' lengths are 1,1,1,2, we record 1, ~2,2. And for
+       * value lengths 1,2,3 we record 1,2,3.
+       */
+      int runLength = 0;
+      int prevValueLength = -1;
+      
       ColumnBuffer() throws IOException {
         columnValBuffer = new NonSyncDataOutputBuffer();
         valLenBuffer = new NonSyncDataOutputBuffer();
@@ -526,12 +536,42 @@
 
       public void append(BytesRefWritable data) throws IOException {
         data.writeDataTo(columnValBuffer);
-        WritableUtils.writeVLong(valLenBuffer, data.getLength());
+        int currentLen = data.getLength();
+        
+        if( prevValueLength < 0) {
+          startNewGroup(currentLen);
+          return;
+        }
+        
+        if(currentLen != prevValueLength) {
+          flushGroup();
+          startNewGroup(currentLen);
+        } else {
+          runLength ++;
+        }
+      }
+
+      private void startNewGroup(int currentLen) {
+        prevValueLength = currentLen;
+        runLength = 0;
+        return;
       }
 
       public void clear() throws IOException {
         valLenBuffer.reset();
         columnValBuffer.reset();
+        prevValueLength = -1;
+        runLength = 0;
+      }
+      
+      public void flushGroup() throws IOException {
+        if (prevValueLength >= 0) {
+          WritableUtils.writeVLong(valLenBuffer, prevValueLength);
+          if (runLength > 0)
+            WritableUtils.writeVLong(valLenBuffer, ~runLength);
+          runLength = -1;
+          prevValueLength = -1;
+        }
       }
     }
 
@@ -761,7 +801,8 @@
       int valueLength = 0;
       for (int columnIndex = 0; columnIndex < columnNumber; columnIndex++) {
         ColumnBuffer currentBuf = columnBuffers[columnIndex];
-
+        currentBuf.flushGroup();
+        
         NonSyncDataOutputBuffer columnValue = currentBuf.columnValBuffer;
 
         if (isCompressed()) {
@@ -891,6 +932,8 @@
 
     private int[] columnRowReadIndex = null;
     private NonSyncDataInputBuffer[] colValLenBufferReadIn;
+    private int[] columnRunLength;
+    private int[] columnPrvLength;
     private boolean decompress = false;
 
     private Decompressor keyDecompressor;
@@ -959,11 +1002,15 @@
       }
 
       colValLenBufferReadIn = new NonSyncDataInputBuffer[columnNumber];
+      columnRunLength = new int[columnNumber];
+      columnPrvLength = new int[columnNumber];
       columnRowReadIndex = new int[columnNumber];
       for (int i = 0; i < columnNumber; i++) {
         columnRowReadIndex[i] = 0;
         if (!skippedColIDs[i])
           colValLenBufferReadIn[i] = new NonSyncDataInputBuffer();
+        columnRunLength[i] = 0;
+        columnPrvLength[i] = -1;
       }
 
       currentKey = createKeyBuffer();
@@ -1178,7 +1225,10 @@
         colValLenBufferReadIn[i].reset(currentKey.allCellValLenBuffer[i]
             .getData(), currentKey.allCellValLenBuffer[i].getLength());
         columnRowReadIndex[i] = 0;
+        columnRunLength[i] = 0;
+        columnPrvLength[i] = -1;
       }
+      
       return currentKeyLength;
     }
 
@@ -1228,7 +1278,7 @@
       fetchColumnTempBuf.reset(currentKey.allCellValLenBuffer[columnID]
           .getData(), currentKey.allCellValLenBuffer[columnID].getLength());
       for (int i = 0; i < recordsNumInValBuffer; i++) {
-        int length = WritableUtils.readVInt(fetchColumnTempBuf);
+        int length = getColumnNextValueLength(columnID);
 
         BytesRefWritable currentCell = rest.get(i);
         if (currentValue.decompressedFlag[columnID])
@@ -1318,7 +1368,7 @@
         BytesRefWritable ref = ret.unCheckedGet(i);
 
         int columnCurrentRowStart = (int) columnRowReadIndex[i];
-        int length = (int) WritableUtils.readVLong(colValLenBufferReadIn[i]);
+        int length = getColumnNextValueLength(i);
         columnRowReadIndex[i] = columnCurrentRowStart + length;
 
         if (currentValue.decompressedFlag[j])
@@ -1329,6 +1379,26 @@
       rowFetched = true;
     }
 
+    private int getColumnNextValueLength(int i) throws IOException {
+      if (columnRunLength[i] > 0) {
+        --columnRunLength[i];
+        return columnPrvLength[i];
+      } else {
+        int length = (int) WritableUtils.readVLong(colValLenBufferReadIn[i]);
+        if (length < 0) {
+          // we reach a runlength here, use the previous length and reset
+          // runlength
+          columnRunLength[i] = ~length;
+          columnRunLength[i]--;
+          length = columnPrvLength[i];
+        } else {
+          columnPrvLength[i] = length;
+          columnRunLength[i] = 0;
+        }
+        return length;
+      }
+    }
+
     /** Returns true iff the previous call to next passed a sync mark. */
     public boolean syncSeen() {
       return syncSeen;

Modified: hadoop/hive/trunk/ql/src/test/queries/clientnegative/script_broken_pipe3.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientnegative/script_broken_pipe3.q?rev=883983&r1=883982&r2=883983&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientnegative/script_broken_pipe3.q (original)
+++ hadoop/hive/trunk/ql/src/test/queries/clientnegative/script_broken_pipe3.q Wed Nov 25
05:45:39 2009
@@ -1,3 +1,2 @@
-set hive.exec.script.allow.partial.consumption = true;
 -- Test to ensure that a script with a bad error code still fails even with partial consumption
 SELECT TRANSFORM(*) USING 'false' AS a, b, c FROM (SELECT * FROM src LIMIT 1) tmp;

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out?rev=883983&r1=883982&r2=883983&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out Wed
Nov 25 05:45:39 2009
@@ -16,8 +16,8 @@
 POSTHOOK: query: show table extended like partition_test_partitioned
 POSTHOOK: type: SHOW_TABLESTATUS
 tableName:partition_test_partitioned
-owner:heyongqiang
-location:file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/partition_test_partitioned
+owner:njain
+location:file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/partition_test_partitioned
 inputformat:org.apache.hadoop.mapred.TextInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 columns:struct columns { string key, string value}
@@ -28,15 +28,15 @@
 maxFileSize:216
 minFileSize:216
 lastAccessTime:0
-lastUpdateTime:1257964954000
+lastUpdateTime:1259087630000
 
 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
 PREHOOK: type: SHOW_TABLESTATUS
 POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
 POSTHOOK: type: SHOW_TABLESTATUS
 tableName:partition_test_partitioned
-owner:heyongqiang
-location:file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
+owner:njain
+location:file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
 inputformat:org.apache.hadoop.mapred.TextInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 columns:struct columns { string key, string value}
@@ -47,16 +47,16 @@
 maxFileSize:216
 minFileSize:216
 lastAccessTime:0
-lastUpdateTime:1257964954000
+lastUpdateTime:1259087630000
 
 PREHOOK: query: select key from partition_test_partitioned where dt=100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1912311519/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1314763739/10000
 POSTHOOK: query: select key from partition_test_partitioned where dt=100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_test_partitioned@dt=100
-POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1912311519/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1314763739/10000
 238
 
 311
@@ -85,11 +85,11 @@
 PREHOOK: query: select key from partition_test_partitioned
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/534751822/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/710640588/10000
 POSTHOOK: query: select key from partition_test_partitioned
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_test_partitioned@dt=100
-POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/534751822/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/710640588/10000
 238
 
 311
@@ -134,27 +134,27 @@
 POSTHOOK: query: show table extended like partition_test_partitioned
 POSTHOOK: type: SHOW_TABLESTATUS
 tableName:partition_test_partitioned
-owner:heyongqiang
-location:file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/partition_test_partitioned
+owner:njain
+location:file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/partition_test_partitioned
 inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
 columns:struct columns { string key, string value}
 partitioned:true
 partitionColumns:struct partition_columns { string dt}
 totalNumberFiles:2
-totalFileSize:602
-maxFileSize:386
+totalFileSize:586
+maxFileSize:370
 minFileSize:216
 lastAccessTime:0
-lastUpdateTime:1257964968000
+lastUpdateTime:1259087640000
 
 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
 PREHOOK: type: SHOW_TABLESTATUS
 POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
 POSTHOOK: type: SHOW_TABLESTATUS
 tableName:partition_test_partitioned
-owner:heyongqiang
-location:file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
+owner:njain
+location:file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
 inputformat:org.apache.hadoop.mapred.TextInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 columns:struct columns { string key, string value}
@@ -165,35 +165,35 @@
 maxFileSize:216
 minFileSize:216
 lastAccessTime:0
-lastUpdateTime:1257964968000
+lastUpdateTime:1259087640000
 
 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=101)
 PREHOOK: type: SHOW_TABLESTATUS
 POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=101)
 POSTHOOK: type: SHOW_TABLESTATUS
 tableName:partition_test_partitioned
-owner:heyongqiang
-location:file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
+owner:njain
+location:file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
 inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
 columns:struct columns { string key, string value}
 partitioned:true
 partitionColumns:struct partition_columns { string dt}
 totalNumberFiles:1
-totalFileSize:386
-maxFileSize:386
-minFileSize:386
+totalFileSize:370
+maxFileSize:370
+minFileSize:370
 lastAccessTime:0
-lastUpdateTime:1257964968000
+lastUpdateTime:1259087640000
 
 PREHOOK: query: select key from partition_test_partitioned where dt=100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1625649982/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/646831114/10000
 POSTHOOK: query: select key from partition_test_partitioned where dt=100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_test_partitioned@dt=100
-POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1625649982/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/646831114/10000
 238
 
 311
@@ -222,11 +222,11 @@
 PREHOOK: query: select key from partition_test_partitioned where dt=101
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/588993976/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/852607729/10000
 POSTHOOK: query: select key from partition_test_partitioned where dt=101
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/588993976/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/852607729/10000
 238
 
 311
@@ -256,12 +256,12 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_test_partitioned@dt=100
 PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/446093126/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/933497662/10000
 POSTHOOK: query: select key from partition_test_partitioned
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_test_partitioned@dt=100
 POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/446093126/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/933497662/10000
 238
 
 311
@@ -331,27 +331,27 @@
 POSTHOOK: query: show table extended like partition_test_partitioned
 POSTHOOK: type: SHOW_TABLESTATUS
 tableName:partition_test_partitioned
-owner:heyongqiang
-location:file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/partition_test_partitioned
+owner:njain
+location:file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/partition_test_partitioned
 inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
 columns:struct columns { string key, string value}
 partitioned:true
 partitionColumns:struct partition_columns { string dt}
 totalNumberFiles:3
-totalFileSize:1490
+totalFileSize:1474
 maxFileSize:888
 minFileSize:216
 lastAccessTime:0
-lastUpdateTime:1257964987000
+lastUpdateTime:1259087654000
 
 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
 PREHOOK: type: SHOW_TABLESTATUS
 POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
 POSTHOOK: type: SHOW_TABLESTATUS
 tableName:partition_test_partitioned
-owner:heyongqiang
-location:file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
+owner:njain
+location:file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
 inputformat:org.apache.hadoop.mapred.TextInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 columns:struct columns { string key, string value}
@@ -362,34 +362,34 @@
 maxFileSize:216
 minFileSize:216
 lastAccessTime:0
-lastUpdateTime:1257964987000
+lastUpdateTime:1259087654000
 
 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=101)
 PREHOOK: type: SHOW_TABLESTATUS
 POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=101)
 POSTHOOK: type: SHOW_TABLESTATUS
 tableName:partition_test_partitioned
-owner:heyongqiang
-location:file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
+owner:njain
+location:file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
 inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
 columns:struct columns { string key, string value}
 partitioned:true
 partitionColumns:struct partition_columns { string dt}
 totalNumberFiles:1
-totalFileSize:386
-maxFileSize:386
-minFileSize:386
+totalFileSize:370
+maxFileSize:370
+minFileSize:370
 lastAccessTime:0
-lastUpdateTime:1257964987000
+lastUpdateTime:1259087654000
 
 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=102)
 PREHOOK: type: SHOW_TABLESTATUS
 POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=102)
 POSTHOOK: type: SHOW_TABLESTATUS
 tableName:partition_test_partitioned
-owner:heyongqiang
-location:file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/partition_test_partitioned/dt=102
+owner:njain
+location:file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/partition_test_partitioned/dt=102
 inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat
 outputformat:org.apache.hadoop.mapred.SequenceFileOutputFormat
 columns:struct columns { string key, string value}
@@ -400,16 +400,16 @@
 maxFileSize:888
 minFileSize:888
 lastAccessTime:0
-lastUpdateTime:1257964987000
+lastUpdateTime:1259087654000
 
 PREHOOK: query: select key from partition_test_partitioned where dt=100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/629170993/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/911681205/10000
 POSTHOOK: query: select key from partition_test_partitioned where dt=100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_test_partitioned@dt=100
-POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/629170993/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/911681205/10000
 238
 
 311
@@ -438,11 +438,11 @@
 PREHOOK: query: select key from partition_test_partitioned where dt=101
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/533727408/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1814559948/10000
 POSTHOOK: query: select key from partition_test_partitioned where dt=101
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/533727408/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1814559948/10000
 238
 
 311
@@ -471,11 +471,11 @@
 PREHOOK: query: select key from partition_test_partitioned where dt=102
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_test_partitioned@dt=102
-PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/2040962662/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/857836436/10000
 POSTHOOK: query: select key from partition_test_partitioned where dt=102
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_test_partitioned@dt=102
-POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/2040962662/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/857836436/10000
 238
 
 311
@@ -506,13 +506,13 @@
 PREHOOK: Input: default@partition_test_partitioned@dt=100
 PREHOOK: Input: default@partition_test_partitioned@dt=101
 PREHOOK: Input: default@partition_test_partitioned@dt=102
-PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1296016456/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1800799624/10000
 POSTHOOK: query: select key from partition_test_partitioned
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_test_partitioned@dt=100
 POSTHOOK: Input: default@partition_test_partitioned@dt=101
 POSTHOOK: Input: default@partition_test_partitioned@dt=102
-POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1296016456/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1800799624/10000
 238
 
 311
@@ -593,13 +593,13 @@
 PREHOOK: Input: default@partition_test_partitioned@dt=100
 PREHOOK: Input: default@partition_test_partitioned@dt=101
 PREHOOK: Input: default@partition_test_partitioned@dt=102
-PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/510641429/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1806258390/10000
 POSTHOOK: query: select key from partition_test_partitioned where dt >=100 and dt <=
102
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_test_partitioned@dt=100
 POSTHOOK: Input: default@partition_test_partitioned@dt=101
 POSTHOOK: Input: default@partition_test_partitioned@dt=102
-POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/510641429/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1806258390/10000
 238
 
 311



Mime
View raw message