hive-commits mailing list archives

From ser...@apache.org
Subject [69/70] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Sergey Shelukhin)
Date Tue, 07 Feb 2017 20:59:37 GMT
http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index f5b1c23,dceb4a5..f55bbd8
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@@ -6740,11 -6791,51 +6756,14 @@@ public class SemanticAnalyzer extends B
          // This is a non-native table.
          // We need to set stats as inaccurate.
          setStatsForNonNativeTable(dest_tab);
-         createInsertDesc(dest_tab, !qb.getParseInfo().isInsertIntoTable(dest_tab.getTableName()));
+         // true if it is insert overwrite.
+         boolean overwrite = !qb.getParseInfo().isInsertIntoTable(
+                 String.format("%s.%s", dest_tab.getDbName(), dest_tab.getTableName()));
+         createInsertDesc(dest_tab, overwrite);
        }
  
 -      WriteEntity output = null;
 -
 -      // Here only register the whole table for post-exec hook if no DP present
 -      // in the case of DP, we will register WriteEntity in MoveTask when the
 -      // list of dynamically created partitions are known.
 -      if ((dpCtx == null || dpCtx.getNumDPCols() == 0)) {
 -        output = new WriteEntity(dest_tab,  determineWriteType(ltd, isNonNativeTable, dest));
 -        if (!outputs.add(output)) {
 -          throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES
 -              .getMsg(dest_tab.getTableName()));
 -        }
 -      }
 -      if ((dpCtx != null) && (dpCtx.getNumDPCols() >= 0)) {
 -        // No static partition specified
 -        if (dpCtx.getNumSPCols() == 0) {
 -          output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable, dest), false);
 -          outputs.add(output);
 -          output.setDynamicPartitionWrite(true);
 -        }
 -        // part of the partition specified
 -        // Create a DummyPartition in this case. Since, the metastore does not store partial
 -        // partitions currently, we need to store dummy partitions
 -        else {
 -          try {
 -            String ppath = dpCtx.getSPPath();
 -            ppath = ppath.substring(0, ppath.length() - 1);
 -            DummyPartition p =
 -                new DummyPartition(dest_tab, dest_tab.getDbName()
 -                    + "@" + dest_tab.getTableName() + "@" + ppath,
 -                    partSpec);
 -            output = new WriteEntity(p, getWriteType(dest), false);
 -            output.setDynamicPartitionWrite(true);
 -            outputs.add(output);
 -          } catch (HiveException e) {
 -            throw new SemanticException(e.getMessage(), e);
 -          }
 -        }
 -      }
 -
 +      WriteEntity output = generateTableWriteEntity(
 +          dest, dest_tab, partSpec, ltd, dpCtx, isNonNativeTable);
        ctx.getLoadTableOutputMap().put(ltd, output);
        break;
      }
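As context for the SemanticAnalyzer hunk above: the new code keys the INSERT INTO check on the fully qualified "db.table" name and treats any destination that is not an INSERT INTO target as an overwrite. A minimal, self-contained sketch of that decision follows; the Set is a hypothetical stand-in for QBParseInfo's insert-into bookkeeping, not Hive's API.

import java.util.Set;

public class OverwriteCheckSketch {
  // Hypothetical stand-in for QBParseInfo.isInsertIntoTable: destinations named in
  // INSERT INTO clauses, keyed by their fully qualified "db.table" name.
  static boolean isInsertInto(Set<String> insertIntoTables, String dbName, String tableName) {
    return insertIntoTables.contains(String.format("%s.%s", dbName, tableName));
  }

  static boolean isOverwrite(Set<String> insertIntoTables, String dbName, String tableName) {
    // A destination that is not an INSERT INTO target is treated as INSERT OVERWRITE.
    return !isInsertInto(insertIntoTables, dbName, tableName);
  }

  public static void main(String[] args) {
    Set<String> insertIntoTables = Set.of("default.t1");
    System.out.println(isOverwrite(insertIntoTables, "default", "t1")); // false
    System.out.println(isOverwrite(insertIntoTables, "default", "t2")); // true
  }
}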

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index d42715a,5cc1c45..6e3ef05
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@@ -505,19 -488,19 +505,35 @@@ public class FileSinkDesc extends Abstr
      this.statsTmpDir = statsCollectionTempDir;
    }
  
 +  public void setMmWriteId(Long mmWriteId) {
 +    this.mmWriteId = mmWriteId;
 +  }
 +
 +  public void setIsMerge(boolean b) {
 +    this.isMerge = b;
 +  }
 +
 +  public boolean isMerge() {
 +    return isMerge;
 +  }
 +
 +  public boolean isMmCtas() {
 +    return isMmCtas;
 +  }
++
+   public class FileSinkOperatorExplainVectorization extends OperatorExplainVectorization {
+ 
+     public FileSinkOperatorExplainVectorization(VectorDesc vectorDesc) {
+       // Native vectorization not supported.
+       super(vectorDesc, false);
+     }
+   }
+ 
+   @Explain(vectorization = Vectorization.OPERATOR, displayName = "File Sink Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
+   public FileSinkOperatorExplainVectorization getFileSinkVectorization() {
+     if (vectorDesc == null) {
+       return null;
+     }
+     return new FileSinkOperatorExplainVectorization(vectorDesc);
+   }
  }
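The FileSinkDesc hunk above follows the convention that the @Explain-annotated getter returns null when no vector descriptor is attached, so the "File Sink Vectorization" section is simply omitted from EXPLAIN output. A rough sketch of that pattern, using simplified placeholder types rather than Hive's classes:

public class ExplainSectionSketch {
  // Simplified stand-in for a vectorization descriptor; null means the Vectorizer
  // never examined this operator.
  private Object vectorDesc;

  static class FileSinkVectorizationSummary {
    @Override
    public String toString() {
      // Mirrors the "native vectorization not supported" note in the hunk above.
      return "File Sink Vectorization { native: false }";
    }
  }

  // Returning null suppresses the section, as the annotated getter above does.
  FileSinkVectorizationSummary getFileSinkVectorization() {
    if (vectorDesc == null) {
      return null;
    }
    return new FileSinkVectorizationSummary();
  }

  public static void main(String[] args) {
    ExplainSectionSketch desc = new ExplainSectionSketch();
    System.out.println(desc.getFileSinkVectorization()); // null: no section emitted
    desc.vectorDesc = new Object();
    System.out.println(desc.getFileSinkVectorization()); // summary line emitted
  }
}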

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index 647ad3e,d4bdd96..2ab4fb3
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@@ -726,11 -733,85 +737,93 @@@ public class MapWork extends BaseWork 
      return vectorizedRowBatch;
    }
  
 +  public void setIsMergeFromResolver(boolean b) {
 +    this.isMergeFromResolver = b;
 +  }
 +
 +  public boolean isMergeFromResolver() {
 +    return this.isMergeFromResolver;
 +  }
++
+   /*
+    * Whether the HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT variable
+    * (hive.vectorized.use.vectorized.input.format) was true when the Vectorizer class evaluated
+    * vectorizing this node.
+    *
+    * When Vectorized Input File Format looks at this flag, it can determine whether it should
+    * operate vectorized or not.  In some modes, the node can be vectorized but use row
+    * serialization.
+    */
+   public void setUseVectorizedInputFileFormat(boolean useVectorizedInputFileFormat) {
+     this.useVectorizedInputFileFormat = useVectorizedInputFileFormat;
+   }
+ 
+   public boolean getUseVectorizedInputFileFormat() {
+     return useVectorizedInputFileFormat;
+   }
+ 
+   public void setNotEnabledInputFileFormatReason(VectorizerReason notEnabledInputFileFormatReason) {
+     this.notEnabledInputFileFormatReason = notEnabledInputFileFormatReason;
+   }
+ 
+   public VectorizerReason getNotEnabledInputFileFormatReason() {
+     return notEnabledInputFileFormatReason;
+   }
+ 
+   public void setVectorizationInputFileFormatClassNameSet(Set<String> vectorizationInputFileFormatClassNameSet) {
+     this.vectorizationInputFileFormatClassNameSet = vectorizationInputFileFormatClassNameSet;
+   }
+ 
+   public Set<String> getVectorizationInputFileFormatClassNameSet() {
+     return vectorizationInputFileFormatClassNameSet;
+   }
+ 
+   public void setVectorizationEnabledConditionsMet(ArrayList<String> vectorizationEnabledConditionsMet) {
+     this.vectorizationEnabledConditionsMet = VectorizationCondition.addBooleans(vectorizationEnabledConditionsMet, true);
+   }
+ 
+   public List<String> getVectorizationEnabledConditionsMet() {
+     return vectorizationEnabledConditionsMet;
+   }
+ 
+   public void setVectorizationEnabledConditionsNotMet(List<String> vectorizationEnabledConditionsNotMet) {
+     this.vectorizationEnabledConditionsNotMet = VectorizationCondition.addBooleans(vectorizationEnabledConditionsNotMet, false);
+   }
+ 
+   public List<String> getVectorizationEnabledConditionsNotMet() {
+     return vectorizationEnabledConditionsNotMet;
+   }
+ 
+   public class MapExplainVectorization extends BaseExplainVectorization {
+ 
+     private final MapWork mapWork;
+ 
+     public MapExplainVectorization(MapWork mapWork) {
+       super(mapWork);
+       this.mapWork = mapWork;
+     }
+ 
+     @Explain(vectorization = Vectorization.SUMMARY, displayName = "inputFileFormats", explainLevels = { Level.DEFAULT, Level.EXTENDED })
+     public Set<String> inputFileFormats() {
+       return mapWork.getVectorizationInputFileFormatClassNameSet();
+     }
+ 
+     @Explain(vectorization = Vectorization.SUMMARY, displayName = "enabledConditionsMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
+     public List<String> enabledConditionsMet() {
+       return mapWork.getVectorizationEnabledConditionsMet();
+     }
+ 
+     @Explain(vectorization = Vectorization.SUMMARY, displayName = "enabledConditionsNotMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
+     public List<String> enabledConditionsNotMet() {
+       return mapWork.getVectorizationEnabledConditionsNotMet();
+     }
+   }
+ 
+   @Explain(vectorization = Vectorization.SUMMARY, displayName = "Map Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
+   public MapExplainVectorization getMapExplainVectorization() {
+     if (!getVectorizationExamined()) {
+       return null;
+     }
+     return new MapExplainVectorization(this);
+   }
  }
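The MapWork hunk records which vectorization preconditions were met or missed so EXPLAIN can report them per map task. The sketch below only approximates that bookkeeping; the annotate helper is hypothetical and does not reproduce VectorizationCondition.addBooleans, and the condition strings are examples.

import java.util.ArrayList;
import java.util.List;

public class VectorizationConditionsSketch {
  // Hypothetical helper: tags each condition string with its boolean outcome, roughly
  // in the spirit of VectorizationCondition.addBooleans (not Hive's implementation).
  static List<String> annotate(List<String> conditions, boolean met) {
    List<String> out = new ArrayList<>();
    for (String c : conditions) {
      out.add(c + " IS " + met);
    }
    return out;
  }

  public static void main(String[] args) {
    List<String> met = List.of("hive.vectorized.use.vectorized.input.format");
    List<String> notMet = List.of("example.other.vectorization.condition");
    System.out.println("enabledConditionsMet: " + annotate(met, true));
    System.out.println("enabledConditionsNotMet: " + annotate(notMet, false));
  }
}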

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b7670c47/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --cc ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index 3482dba,4fa0651..694ba7f
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@@ -108,14 -120,10 +108,15 @@@ import org.apache.hadoop.mapred.OutputF
  import org.apache.hadoop.mapred.RecordWriter;
  import org.apache.hadoop.mapred.Reporter;
  import org.apache.hadoop.security.UserGroupInformation;
 +import org.apache.hive.common.util.MockFileSystem;
 +import org.apache.hive.common.util.MockFileSystem.MockBlock;
 +import org.apache.hive.common.util.MockFileSystem.MockFile;
 +import org.apache.hive.common.util.MockFileSystem.MockOutputStream;
 +import org.apache.hive.common.util.MockFileSystem.MockPath;
  import org.apache.hadoop.util.Progressable;
- import org.apache.orc.*;
- import org.apache.orc.impl.PhysicalFsWriter;
+ import org.apache.orc.OrcConf;
+ import org.apache.orc.OrcProto;
+ import org.apache.orc.TypeDescription;
  import org.junit.Before;
  import org.junit.Rule;
  import org.junit.Test;

