hive-commits mailing list archives

From: gunt...@apache.org
Subject: svn commit: r1572806 [1/2] - in /hive/branches/tez: ./ common/src/java/org/apache/hadoop/hive/conf/ itests/hive-unit/src/test/java/org/apache/hive/jdbc/ metastore/scripts/upgrade/derby/ metastore/scripts/upgrade/mysql/ metastore/scripts/upgrade/oracle/...
Date: Fri, 28 Feb 2014 02:13:46 GMT
Author: gunther
Date: Fri Feb 28 02:13:45 2014
New Revision: 1572806

URL: http://svn.apache.org/r1572806
Log:
Merge latest trunk into branch. (Gunther Hagleitner)

Added:
    hive/branches/tez/metastore/scripts/upgrade/derby/017-HIVE-6458.derby.sql
      - copied unchanged from r1572804, hive/trunk/metastore/scripts/upgrade/derby/017-HIVE-6458.derby.sql
    hive/branches/tez/metastore/scripts/upgrade/mysql/017-HIVE-6458.mysql.sql
      - copied unchanged from r1572804, hive/trunk/metastore/scripts/upgrade/mysql/017-HIVE-6458.mysql.sql
    hive/branches/tez/metastore/scripts/upgrade/oracle/017-HIVE-6458.oracle.sql
      - copied unchanged from r1572804, hive/trunk/metastore/scripts/upgrade/oracle/017-HIVE-6458.oracle.sql
    hive/branches/tez/metastore/scripts/upgrade/postgres/017-HIVE-6458.postgres.sql
      - copied unchanged from r1572804, hive/trunk/metastore/scripts/upgrade/postgres/017-HIVE-6458.postgres.sql
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/LazyFlatRowContainer.java
      - copied unchanged from r1572804, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/LazyFlatRowContainer.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinEagerRowContainer.java
      - copied unchanged from r1572804, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinEagerRowContainer.java
    hive/branches/tez/service/src/java/org/apache/hive/service/auth/PamAuthenticationProviderImpl.java
      - copied unchanged from r1572804, hive/trunk/service/src/java/org/apache/hive/service/auth/PamAuthenticationProviderImpl.java
Modified:
    hive/branches/tez/   (props changed)
    hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/tez/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
    hive/branches/tez/metastore/scripts/upgrade/derby/hive-schema-0.13.0.derby.sql
    hive/branches/tez/metastore/scripts/upgrade/derby/upgrade-0.12.0-to-0.13.0.derby.sql
    hive/branches/tez/metastore/scripts/upgrade/mysql/hive-schema-0.13.0.mysql.sql
    hive/branches/tez/metastore/scripts/upgrade/mysql/upgrade-0.12.0-to-0.13.0.mysql.sql
    hive/branches/tez/metastore/scripts/upgrade/oracle/hive-schema-0.13.0.oracle.sql
    hive/branches/tez/metastore/scripts/upgrade/oracle/upgrade-0.12.0-to-0.13.0.oracle.sql
    hive/branches/tez/metastore/scripts/upgrade/postgres/hive-schema-0.13.0.postgres.sql
    hive/branches/tez/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql
    hive/branches/tez/pom.xml
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/AbstractRowContainer.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinRowContainer.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinEqualityTableContainer.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinRowContainer.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinTableContainer.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestPTFRowContainer.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/Utilities.java
    hive/branches/tez/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
    hive/branches/tez/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
    hive/branches/tez/service/pom.xml
    hive/branches/tez/service/src/java/org/apache/hive/service/auth/AuthenticationProviderFactory.java
    hive/branches/tez/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java

Propchange: hive/branches/tez/
------------------------------------------------------------------------------
  Merged /hive/trunk:r1571590-1572804

Modified: hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Fri Feb 28 02:13:45 2014
@@ -431,6 +431,7 @@ public class HiveConf extends Configurat
     // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row,
     // need to remove by hive .13. Also, do not change default (see SMB operator)
     HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100),
+    HIVEMAPJOINLAZYHASHTABLE("hive.mapjoin.lazy.hashtable", true),
 
     HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000),
     HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000),
@@ -829,13 +830,16 @@ public class HiveConf extends Configurat
 
     // HiveServer2 auth configuration
     HIVE_SERVER2_AUTHENTICATION("hive.server2.authentication", "NONE",
-        new StringsValidator("NOSASL", "NONE", "LDAP", "KERBEROS", "CUSTOM")),
+        new StringsValidator("NOSASL", "NONE", "LDAP", "KERBEROS", "PAM", "CUSTOM")),
     HIVE_SERVER2_KERBEROS_KEYTAB("hive.server2.authentication.kerberos.keytab", ""),
     HIVE_SERVER2_KERBEROS_PRINCIPAL("hive.server2.authentication.kerberos.principal", ""),
     HIVE_SERVER2_PLAIN_LDAP_URL("hive.server2.authentication.ldap.url", null),
     HIVE_SERVER2_PLAIN_LDAP_BASEDN("hive.server2.authentication.ldap.baseDN", null),
     HIVE_SERVER2_PLAIN_LDAP_DOMAIN("hive.server2.authentication.ldap.Domain", null),
     HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS("hive.server2.custom.authentication.class", null),
+    // List of the underlying pam services that should be used when auth type is PAM
+    // A file with the same name must exist in /etc/pam.d
+    HIVE_SERVER2_PAM_SERVICES("hive.server2.authentication.pam.services", null),
     HIVE_SERVER2_ENABLE_DOAS("hive.server2.enable.doAs", true),
     HIVE_SERVER2_TABLE_TYPE_MAPPING("hive.server2.table.type.mapping", "CLASSIC",
         new StringsValidator("CLASSIC", "HIVE")),

Modified: hive/branches/tez/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java (original)
+++ hive/branches/tez/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java Fri Feb 28 02:13:45 2014
@@ -1071,7 +1071,7 @@ public class TestJdbcDriver2 {
     tests.put("", new Object[]{});
 
     for (String checkPattern: tests.keySet()) {
-      ResultSet rs = (ResultSet)con.getMetaData().getTables("default", null, checkPattern, null);
+      ResultSet rs = con.getMetaData().getTables("default", null, checkPattern, null);
       ResultSetMetaData resMeta = rs.getMetaData();
       assertEquals(5, resMeta.getColumnCount());
       assertEquals("TABLE_CAT", resMeta.getColumnName(1));
@@ -1100,7 +1100,7 @@ public class TestJdbcDriver2 {
     }
 
     // only ask for the views.
-    ResultSet rs = (ResultSet)con.getMetaData().getTables("default", null, null
+    ResultSet rs = con.getMetaData().getTables("default", null, null
         , new String[]{viewTypeName});
     int cnt=0;
     while (rs.next()) {
@@ -1112,7 +1112,7 @@ public class TestJdbcDriver2 {
 
   @Test
   public void testMetaDataGetCatalogs() throws SQLException {
-    ResultSet rs = (ResultSet)con.getMetaData().getCatalogs();
+    ResultSet rs = con.getMetaData().getCatalogs();
     ResultSetMetaData resMeta = rs.getMetaData();
     assertEquals(1, resMeta.getColumnCount());
     assertEquals("TABLE_CAT", resMeta.getColumnName(1));
@@ -1122,7 +1122,7 @@ public class TestJdbcDriver2 {
 
   @Test
   public void testMetaDataGetSchemas() throws SQLException {
-    ResultSet rs = (ResultSet)con.getMetaData().getSchemas();
+    ResultSet rs = con.getMetaData().getSchemas();
     ResultSetMetaData resMeta = rs.getMetaData();
     assertEquals(2, resMeta.getColumnCount());
     assertEquals("TABLE_SCHEM", resMeta.getColumnName(1));
@@ -1172,7 +1172,7 @@ public class TestJdbcDriver2 {
    */
   private void metaDataGetTableTypeTest(Set<String> tabletypes)
       throws SQLException {
-    ResultSet rs = (ResultSet)con.getMetaData().getTableTypes();
+    ResultSet rs = con.getMetaData().getTableTypes();
 
     int cnt = 0;
     while (rs.next()) {
@@ -1237,7 +1237,7 @@ public class TestJdbcDriver2 {
    */
   @Test
   public void testMetaDataGetColumnsMetaData() throws SQLException {
-    ResultSet rs = (ResultSet)con.getMetaData().getColumns(null, null
+    ResultSet rs = con.getMetaData().getColumns(null, null
         , "testhivejdbcdriver\\_table", null);
 
     ResultSetMetaData rsmd = rs.getMetaData();
@@ -1301,18 +1301,34 @@ public class TestJdbcDriver2 {
     ResultSet res = stmt.executeQuery("describe " + tableName);
 
     res.next();
-    assertEquals("Column name 'under_col' not found", "under_col", res.getString(1).trim());
+    assertEquals("Column name 'under_col' not found", "under_col", res.getString(1));
     assertEquals("Column type 'under_col' for column under_col not found", "int", res
-        .getString(2).trim());
+        .getString(2));
     res.next();
-    assertEquals("Column name 'value' not found", "value", res.getString(1).trim());
+    assertEquals("Column name 'value' not found", "value", res.getString(1));
     assertEquals("Column type 'string' for column key not found", "string", res
-        .getString(2).trim());
+        .getString(2));
 
     assertFalse("More results found than expected", res.next());
   }
 
   @Test
+  public void testShowColumns() throws SQLException {
+    Statement stmt = con.createStatement();
+    assertNotNull("Statement is null", stmt);
+
+    ResultSet res = stmt.executeQuery("show columns in " + tableName);
+    res.next();
+    assertEquals("Column name 'under_col' not found",
+        "under_col", res.getString(1));
+
+    res.next();
+    assertEquals("Column name 'value' not found",
+        "value", res.getString(1));
+    assertFalse("More results found than expected", res.next());
+  }
+
+  @Test
   public void testDatabaseMetaData() throws SQLException {
     DatabaseMetaData meta = con.getMetaData();
 
@@ -1881,7 +1897,7 @@ public class TestJdbcDriver2 {
   public void testUnsupportedFetchTypes() throws Exception {
     try {
       con.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE,
-        ResultSet.CONCUR_READ_ONLY);
+          ResultSet.CONCUR_READ_ONLY);
       fail("createStatement with TYPE_SCROLL_SENSITIVE should fail");
     } catch(SQLException e) {
       assertEquals("HYC00", e.getSQLState().trim());
@@ -1889,7 +1905,7 @@ public class TestJdbcDriver2 {
 
     try {
       con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-        ResultSet.CONCUR_UPDATABLE);
+          ResultSet.CONCUR_UPDATABLE);
       fail("createStatement with CONCUR_UPDATABLE should fail");
     } catch(SQLException e) {
       assertEquals("HYC00", e.getSQLState().trim());
@@ -1924,7 +1940,7 @@ public class TestJdbcDriver2 {
   private void execFetchFirst(String sqlStmt, String colName, boolean oneRowOnly)
       throws Exception {
     Statement stmt = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-          ResultSet.CONCUR_READ_ONLY);
+        ResultSet.CONCUR_READ_ONLY);
     ResultSet res = stmt.executeQuery(sqlStmt);
 
     List<String> results = new ArrayList<String> ();

Modified: hive/branches/tez/metastore/scripts/upgrade/derby/hive-schema-0.13.0.derby.sql
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/scripts/upgrade/derby/hive-schema-0.13.0.derby.sql?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/metastore/scripts/upgrade/derby/hive-schema-0.13.0.derby.sql (original)
+++ hive/branches/tez/metastore/scripts/upgrade/derby/hive-schema-0.13.0.derby.sql Fri Feb 28 02:13:45 2014
@@ -100,6 +100,10 @@ CREATE TABLE "APP"."PART_COL_STATS"("DB_
 
 CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
 
+CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
+
+CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
+
 -- ----------------------------------------------
 -- DDL Statements for indexes
 -- ----------------------------------------------
@@ -130,6 +134,12 @@ CREATE INDEX "APP"."PARTITIONCOLUMNPRIVI
 
 CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID");
 
+CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID");
+
+CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID");
+
+CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID");
+
 -- ----------------------------------------------
 -- DDL Statements for keys
 -- ----------------------------------------------
@@ -213,6 +223,10 @@ ALTER TABLE "APP"."TAB_COL_STATS" ADD CO
 
 ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID");
 
+ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID");
+
+ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX");
+
 -- foreign
 ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
 
@@ -288,6 +302,10 @@ ALTER TABLE "APP"."PART_COL_STATS" ADD C
 
 ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID");
 
+ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
 -- ----------------------------------------------
 -- DDL Statements for checks
 -- ----------------------------------------------

Modified: hive/branches/tez/metastore/scripts/upgrade/derby/upgrade-0.12.0-to-0.13.0.derby.sql
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/scripts/upgrade/derby/upgrade-0.12.0-to-0.13.0.derby.sql?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/metastore/scripts/upgrade/derby/upgrade-0.12.0-to-0.13.0.derby.sql (original)
+++ hive/branches/tez/metastore/scripts/upgrade/derby/upgrade-0.12.0-to-0.13.0.derby.sql Fri Feb 28 02:13:45 2014
@@ -1,2 +1,5 @@
--- Upgrade MetaStore schema from 0.11.0 to 0.12.0
+-- Upgrade MetaStore schema from 0.12.0 to 0.13.0
+RUN '016-HIVE-6386.derby.sql';
+RUN '017-HIVE-6458.derby.sql';
+
 UPDATE "APP".VERSION SET SCHEMA_VERSION='0.13.0', VERSION_COMMENT='Hive release version 0.13.0' where VER_ID=1;

Modified: hive/branches/tez/metastore/scripts/upgrade/mysql/hive-schema-0.13.0.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/scripts/upgrade/mysql/hive-schema-0.13.0.mysql.sql?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/metastore/scripts/upgrade/mysql/hive-schema-0.13.0.mysql.sql (original)
+++ hive/branches/tez/metastore/scripts/upgrade/mysql/hive-schema-0.13.0.mysql.sql Fri Feb 28 02:13:45 2014
@@ -761,6 +761,37 @@ CREATE TABLE IF NOT EXISTS `VERSION` (
   PRIMARY KEY (`VER_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
+--
+-- Table structure for table FUNCS
+--
+CREATE TABLE IF NOT EXISTS `FUNCS` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `CREATE_TIME` INT(11) NOT NULL,
+  `DB_ID` BIGINT(20),
+  `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `FUNC_TYPE` INT(11) NOT NULL,
+  `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
+  PRIMARY KEY (`FUNC_ID`),
+  UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
+  KEY `FUNCS_N49` (`DB_ID`),
+  CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNC_RU
+--
+CREATE TABLE IF NOT EXISTS `FUNC_RU` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `RESOURCE_TYPE` INT(11) NOT NULL,
+  `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `INTEGER_IDX` INT(11) NOT NULL,
+  PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
+  CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+
 INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.13.0', 'Hive release version 0.13.0');
 
 /*!40101 SET character_set_client = @saved_cs_client */;

Modified: hive/branches/tez/metastore/scripts/upgrade/mysql/upgrade-0.12.0-to-0.13.0.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/scripts/upgrade/mysql/upgrade-0.12.0-to-0.13.0.mysql.sql?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/metastore/scripts/upgrade/mysql/upgrade-0.12.0-to-0.13.0.mysql.sql (original)
+++ hive/branches/tez/metastore/scripts/upgrade/mysql/upgrade-0.12.0-to-0.13.0.mysql.sql Fri Feb 28 02:13:45 2014
@@ -1,6 +1,8 @@
 SELECT 'Upgrading MetaStore schema from 0.12.0 to 0.13.0' AS ' ';
 
 SOURCE 015-HIVE-5700.mysql.sql;
+SOURCE 016-HIVE-6386.mysql.sql;
+SOURCE 017-HIVE-6458.mysql.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='0.13.0', VERSION_COMMENT='Hive release version 0.13.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 0.12.0 to 0.13.0' AS ' ';

Modified: hive/branches/tez/metastore/scripts/upgrade/oracle/hive-schema-0.13.0.oracle.sql
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/scripts/upgrade/oracle/hive-schema-0.13.0.oracle.sql?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/metastore/scripts/upgrade/oracle/hive-schema-0.13.0.oracle.sql (original)
+++ hive/branches/tez/metastore/scripts/upgrade/oracle/hive-schema-0.13.0.oracle.sql Fri Feb 28 02:13:45 2014
@@ -525,6 +525,29 @@ ALTER TABLE PART_COL_STATS ADD CONSTRAIN
 
 CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
 
+CREATE TABLE FUNCS (
+  FUNC_ID NUMBER NOT NULL,
+  CLASS_NAME VARCHAR2(4000),
+  CREATE_TIME NUMBER(10) NOT NULL,
+  DB_ID NUMBER,
+  FUNC_NAME VARCHAR2(128),
+  FUNC_TYPE NUMBER(10) NOT NULL,
+  OWNER_NAME VARCHAR2(128),
+  OWNER_TYPE VARCHAR2(10)
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+
+CREATE TABLE FUNC_RU (
+  FUNC_ID NUMBER NOT NULL,
+  RESOURCE_TYPE NUMBER(10) NOT NULL,
+  RESOURCE_URI VARCHAR2(4000),
+  INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+
+
 -- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
 ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
 
@@ -714,5 +737,20 @@ CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
 -- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
 CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
 
+
+-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+
+-- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+
 INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.13.0', 'Hive release version 0.13.0');
 

Modified: hive/branches/tez/metastore/scripts/upgrade/oracle/upgrade-0.12.0-to-0.13.0.oracle.sql
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/scripts/upgrade/oracle/upgrade-0.12.0-to-0.13.0.oracle.sql?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/metastore/scripts/upgrade/oracle/upgrade-0.12.0-to-0.13.0.oracle.sql (original)
+++ hive/branches/tez/metastore/scripts/upgrade/oracle/upgrade-0.12.0-to-0.13.0.oracle.sql Fri Feb 28 02:13:45 2014
@@ -1,6 +1,8 @@
 SELECT 'Upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;
 
 @015-HIVE-5700.oracle.sql;
+@016-HIVE-6386.oracle.sql;
+@017-HIVE-6458.oracle.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='0.13.0', VERSION_COMMENT='Hive release version 0.13.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;

Modified: hive/branches/tez/metastore/scripts/upgrade/postgres/hive-schema-0.13.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/scripts/upgrade/postgres/hive-schema-0.13.0.postgres.sql?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/metastore/scripts/upgrade/postgres/hive-schema-0.13.0.postgres.sql (original)
+++ hive/branches/tez/metastore/scripts/upgrade/postgres/hive-schema-0.13.0.postgres.sql Fri Feb 28 02:13:45 2014
@@ -552,6 +552,32 @@ CREATE TABLE "PART_COL_STATS" (
 );
 
 --
+-- Table structure for FUNCS
+--
+CREATE TABLE "FUNCS" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "CLASS_NAME" VARCHAR(4000),
+  "CREATE_TIME" INTEGER NOT NULL,
+  "DB_ID" BIGINT,
+  "FUNC_NAME" VARCHAR(128),
+  "FUNC_TYPE" INTEGER NOT NULL,
+  "OWNER_NAME" VARCHAR(128),
+  "OWNER_TYPE" VARCHAR(10),
+  PRIMARY KEY ("FUNC_ID")
+);
+
+--
+-- Table structure for FUNC_RU
+--
+CREATE TABLE "FUNC_RU" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "RESOURCE_TYPE" INTEGER NOT NULL,
+  "RESOURCE_URI" VARCHAR(4000),
+  "INTEGER_IDX" INTEGER NOT NULL,
+  PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
+);
+
+--
 -- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
 --
 
@@ -1132,6 +1158,24 @@ CREATE INDEX "TAB_COL_STATS_N49" ON "TAB
 
 CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
 
+--
+-- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
+
+--
+-- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
+
+--
+-- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
+
 
 ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
     ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
@@ -1391,6 +1435,14 @@ ALTER TABLE ONLY "PART_COL_STATS" ADD CO
 
 ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
 
+-- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNCS"
+    ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
+
+-- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNC_RU"
+    ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
+
 --
 -- Name: public; Type: ACL; Schema: -; Owner: hiveuser
 --

Modified: hive/branches/tez/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql (original)
+++ hive/branches/tez/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql Fri Feb 28 02:13:45 2014
@@ -1,6 +1,8 @@
 SELECT 'Upgrading MetaStore schema from 0.12.0 to 0.13.0';
 
 \i 015-HIVE-5700.postgres.sql;
+\i 016-HIVE-6386.postgres.sql;
+\i 017-HIVE-6458.postgres.sql;
 
 UPDATE "VERSION" SET "SCHEMA_VERSION"='0.13.0', "VERSION_COMMENT"='Hive release version 0.13.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 0.12.0 to 0.13.0';

Modified: hive/branches/tez/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/pom.xml?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/pom.xml (original)
+++ hive/branches/tez/pom.xml Fri Feb 28 02:13:45 2014
@@ -141,6 +141,7 @@
     <velocity.version>1.5</velocity.version>
     <xerces.version>2.9.1</xerces.version>
     <zookeeper.version>3.4.5</zookeeper.version>
+    <jpam.version>1.1</jpam.version>
   </properties>
 
   <repositories>

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java Fri Feb 28 02:13:45 2014
@@ -323,7 +323,7 @@ public abstract class CommonJoinOperator
     LOG.trace("Join: Starting new group");
     newGroupStarted = true;
     for (AbstractRowContainer<List<Object>> alw : storage) {
-      alw.clear();
+      alw.clearRows();
     }
     super.startGroup();
   }
@@ -437,8 +437,8 @@ public abstract class CommonJoinOperator
   private void genJoinObject() throws HiveException {
     boolean rightFirst = true;
     boolean hasFilter = hasFilter(order[0]);
-    AbstractRowContainer<List<Object>> aliasRes = storage[order[0]];
-    for (List<Object> rightObj = aliasRes.first(); rightObj != null; rightObj = aliasRes.next()) {
+    AbstractRowContainer.RowIterator<List<Object>> iter = storage[order[0]].rowIter();
+    for (List<Object> rightObj = iter.first(); rightObj != null; rightObj = iter.next()) {
       boolean rightNull = rightObj == dummyObj[0];
       if (hasFilter) {
         filterTags[0] = getFilterTag(rightObj);
@@ -472,8 +472,9 @@ public abstract class CommonJoinOperator
       boolean tryLOForFO = type == JoinDesc.FULL_OUTER_JOIN;
 
       boolean rightFirst = true;
-      for (List<Object> rightObj = aliasRes.first(); !done && rightObj != null;
-           rightObj = loopAgain ? rightObj : aliasRes.next(), rightFirst = loopAgain = false) {
+      AbstractRowContainer.RowIterator<List<Object>> iter = aliasRes.rowIter();
+      for (List<Object> rightObj = iter.first(); !done && rightObj != null;
+           rightObj = loopAgain ? rightObj : iter.next(), rightFirst = loopAgain = false) {
         System.arraycopy(prevSkip, 0, skip, 0, prevSkip.length);
 
         boolean rightNull = rightObj == dummyObj[aliasNum];
@@ -639,8 +640,8 @@ public abstract class CommonJoinOperator
 
   private void genUniqueJoinObject(int aliasNum, int forwardCachePos)
       throws HiveException {
-    AbstractRowContainer<List<Object>> alias = storage[order[aliasNum]];
-    for (List<Object> row = alias.first(); row != null; row = alias.next()) {
+    AbstractRowContainer.RowIterator<List<Object>> iter = storage[order[aliasNum]].rowIter();
+    for (List<Object> row = iter.first(); row != null; row = iter.next()) {
       int sz = joinValues[order[aliasNum]].size();
       int p = forwardCachePos;
       for (int j = 0; j < sz; j++) {
@@ -660,7 +661,7 @@ public abstract class CommonJoinOperator
     int p = 0;
     for (int i = 0; i < numAliases; i++) {
       int sz = joinValues[order[i]].size();
-      List<Object> obj = storage[order[i]].first();
+      List<Object> obj = storage[order[i]].rowIter().first();
       for (int j = 0; j < sz; j++) {
         forwardCache[p++] = obj.get(j);
       }
@@ -684,11 +685,11 @@ public abstract class CommonJoinOperator
         Byte alias = order[i];
         AbstractRowContainer<List<Object>> alw = storage[alias];
 
-        if (alw.size() != 1) {
+        if (alw.rowCount() != 1) {
           allOne = false;
         }
 
-        if (alw.size() == 0) {
+        if (alw.rowCount() == 0) {
           alw.add(dummyObj[i]);
           hasNulls = true;
         } else if (condn[i].getPreserved()) {
@@ -718,24 +719,25 @@ public abstract class CommonJoinOperator
         AbstractRowContainer<List<Object>> alw = storage[alias];
 
         if (noOuterJoin) {
-          if (alw.size() == 0) {
+          if (alw.rowCount() == 0) {
             LOG.trace("No data for alias=" + i);
             return;
-          } else if (alw.size() > 1) {
+          } else if (alw.rowCount() > 1) {
             mayHasMoreThanOne = true;
           }
         } else {
-          if (alw.size() == 0) {
+          if (alw.rowCount() == 0) {
             hasEmpty = true;
             alw.add(dummyObj[i]);
-          } else if (!hasEmpty && alw.size() == 1) {
-            if (hasAnyFiltered(alias, alw.first())) {
+          } else if (!hasEmpty && alw.rowCount() == 1) {
+            if (hasAnyFiltered(alias, alw.rowIter().first())) {
               hasEmpty = true;
             }
           } else {
             mayHasMoreThanOne = true;
             if (!hasEmpty) {
-              for (List<Object> row = alw.first(); row != null; row = alw.next()) {
+              AbstractRowContainer.RowIterator<List<Object>> iter = alw.rowIter();
+              for (List<Object> row = iter.first(); row != null; row = iter.next()) {
                 reportProgress();
                 if (hasAnyFiltered(alias, row)) {
                   hasEmpty = true;
@@ -784,7 +786,7 @@ public abstract class CommonJoinOperator
     LOG.trace("Join Op close");
     for (AbstractRowContainer<List<Object>> alw : storage) {
       if (alw != null) {
-        alw.clear(); // clean up the temp files
+        alw.clearRows(); // clean up the temp files
       }
     }
     Arrays.fill(storage, null);
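
The hunks above replace the container's built-in cursor (first()/next()/size()/clear()) with an explicit row iterator. A minimal sketch of the new idiom, with the container passed in as a placeholder:

import java.util.List;
import org.apache.hadoop.hive.ql.exec.persistence.AbstractRowContainer;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class RowIterSketch {
  static void dumpAndClear(AbstractRowContainer<List<Object>> rows) throws HiveException {
    if (rows.rowCount() == 0) {   // formerly size()
      return;
    }
    // Rows are now read through an explicit iterator rather than the container itself.
    AbstractRowContainer.RowIterator<List<Object>> iter = rows.rowIter();
    for (List<Object> row = iter.first(); row != null; row = iter.next()) {
      System.out.println(row);
    }
    rows.clearRows();             // formerly clear(); also cleans up temp files
  }
}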

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Fri Feb 28 02:13:45 2014
@@ -216,11 +216,11 @@ public class DDLTask extends Task<DDLWor
     // normal human readable output or a json object.
     formatter = MetaDataFormatUtils.getFormatter(conf);
     INTERMEDIATE_ARCHIVED_DIR_SUFFIX =
-      HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED);
+        HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED);
     INTERMEDIATE_ORIGINAL_DIR_SUFFIX =
-      HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL);
+        HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL);
     INTERMEDIATE_EXTRACTED_DIR_SUFFIX =
-      HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED);
+        HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED);
   }
 
   @Override
@@ -447,7 +447,7 @@ public class DDLTask extends Task<DDLWor
       }
 
       AlterTableExchangePartition alterTableExchangePartition =
-        work.getAlterTableExchangePartition();
+          work.getAlterTableExchangePartition();
       if (alterTableExchangePartition != null) {
         return exchangeTablePartition(db, alterTableExchangePartition);
       }
@@ -508,7 +508,7 @@ public class DDLTask extends Task<DDLWor
           if (grantRole) {
             db.grantRole(roleName, userName, principal.getType(),
                 grantOrRevokeRoleDDL.getGrantor(), grantOrRevokeRoleDDL
-                    .getGrantorType(), grantOrRevokeRoleDDL.isGrantOption());
+                .getGrantorType(), grantOrRevokeRoleDDL.isGrantOption());
           } else {
             db.revokeRole(roleName, userName, principal.getType());
           }
@@ -556,7 +556,7 @@ public class DDLTask extends Task<DDLWor
     try {
       if (hiveObjectDesc == null) {
         privs.addAll(db.showPrivilegeGrant(HiveObjectType.GLOBAL, principalName, type,
-          null, null, null, null));
+            null, null, null, null));
       } else if (hiveObjectDesc != null && hiveObjectDesc.getObject() == null) {
         privs.addAll(db.showPrivilegeGrant(null, principalName, type, null, null, null, null));
       } else {
@@ -656,9 +656,9 @@ public class DDLTask extends Task<DDLWor
 
         //only grantInfo is used
         HiveObjectPrivilege thriftObjectPriv = new HiveObjectPrivilege(new HiveObjectRef(
-          AuthorizationUtils.getThriftHiveObjType(privObj.getType()),privObj.getDbname(),
-          privObj.getTableViewURI(),null,null), principal.getName(),
-          AuthorizationUtils.getThriftPrincipalType(principal.getType()), grantInfo);
+            AuthorizationUtils.getThriftHiveObjType(privObj.getType()),privObj.getDbname(),
+            privObj.getTableViewURI(),null,null), principal.getName(),
+            AuthorizationUtils.getThriftPrincipalType(principal.getType()), grantInfo);
         privList.add(thriftObjectPriv);
       }
       boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST);
@@ -792,9 +792,9 @@ public class DDLTask extends Task<DDLWor
                         tableName, partValues, null), null, null,  new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
               } else {
                 privBag
-                    .addToPrivileges(new HiveObjectPrivilege(
-                        new HiveObjectRef(HiveObjectType.TABLE, dbName,
-                            tableName, null, null), null, null, new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
+                .addToPrivileges(new HiveObjectPrivilege(
+                    new HiveObjectRef(HiveObjectType.TABLE, dbName,
+                        tableName, null, null), null, null, new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
               }
             } else {
               privBag.addToPrivileges(new HiveObjectPrivilege(
@@ -1040,7 +1040,7 @@ public class DDLTask extends Task<DDLWor
     }
 
     db
-        .createIndex(
+    .createIndex(
         crtIndex.getTableName(), crtIndex.getIndexName(), crtIndex.getIndexTypeHandlerClass(),
         crtIndex.getIndexedCols(), crtIndex.getIndexTableName(), crtIndex.getDeferredRebuild(),
         crtIndex.getInputFormat(), crtIndex.getOutputFormat(), crtIndex.getSerde(),
@@ -1049,12 +1049,12 @@ public class DDLTask extends Task<DDLWor
         crtIndex.getLineDelim(), crtIndex.getMapKeyDelim(), crtIndex.getIndexComment()
         );
     if (HiveUtils.getIndexHandler(conf, crtIndex.getIndexTypeHandlerClass()).usesIndexTable()) {
-        String indexTableName =
-            crtIndex.getIndexTableName() != null ? crtIndex.getIndexTableName() :
+      String indexTableName =
+          crtIndex.getIndexTableName() != null ? crtIndex.getIndexTableName() :
             MetaStoreUtils.getIndexTableName(SessionState.get().getCurrentDatabase(),
-            crtIndex.getTableName(), crtIndex.getIndexName());
-        Table indexTable = db.getTable(indexTableName);
-        work.getOutputs().add(new WriteEntity(indexTable));
+                crtIndex.getTableName(), crtIndex.getIndexName());
+          Table indexTable = db.getTable(indexTableName);
+          work.getOutputs().add(new WriteEntity(indexTable));
     }
     return 0;
   }
@@ -1066,54 +1066,54 @@ public class DDLTask extends Task<DDLWor
     Index idx = db.getIndex(dbName, baseTableName, indexName);
 
     switch(alterIndex.getOp()) {
-      case ADDPROPS:
-        idx.getParameters().putAll(alterIndex.getProps());
-        break;
-      case UPDATETIMESTAMP:
-        try {
-          Map<String, String> props = new HashMap<String, String>();
-          Map<Map<String, String>, Long> basePartTs = new HashMap<Map<String, String>, Long>();
+    case ADDPROPS:
+      idx.getParameters().putAll(alterIndex.getProps());
+      break;
+    case UPDATETIMESTAMP:
+      try {
+        Map<String, String> props = new HashMap<String, String>();
+        Map<Map<String, String>, Long> basePartTs = new HashMap<Map<String, String>, Long>();
 
-          Table baseTbl = db.getTable(SessionState.get().getCurrentDatabase(),
-              baseTableName);
+        Table baseTbl = db.getTable(SessionState.get().getCurrentDatabase(),
+            baseTableName);
 
-          if (baseTbl.isPartitioned()) {
-            List<Partition> baseParts;
-            if (alterIndex.getSpec() != null) {
-              baseParts = db.getPartitions(baseTbl, alterIndex.getSpec());
-            } else {
-              baseParts = db.getPartitions(baseTbl);
-            }
-            if (baseParts != null) {
-              for (Partition p : baseParts) {
-                FileSystem fs = p.getDataLocation().getFileSystem(db.getConf());
-                FileStatus fss = fs.getFileStatus(p.getDataLocation());
-                basePartTs.put(p.getSpec(), fss.getModificationTime());
-              }
-            }
+        if (baseTbl.isPartitioned()) {
+          List<Partition> baseParts;
+          if (alterIndex.getSpec() != null) {
+            baseParts = db.getPartitions(baseTbl, alterIndex.getSpec());
           } else {
-            FileSystem fs = baseTbl.getPath().getFileSystem(db.getConf());
-            FileStatus fss = fs.getFileStatus(baseTbl.getPath());
-            basePartTs.put(null, fss.getModificationTime());
-          }
-          for (Map<String, String> spec : basePartTs.keySet()) {
-            if (spec != null) {
-              props.put(spec.toString(), basePartTs.get(spec).toString());
-            } else {
-              props.put("base_timestamp", basePartTs.get(null).toString());
+            baseParts = db.getPartitions(baseTbl);
+          }
+          if (baseParts != null) {
+            for (Partition p : baseParts) {
+              FileSystem fs = p.getDataLocation().getFileSystem(db.getConf());
+              FileStatus fss = fs.getFileStatus(p.getDataLocation());
+              basePartTs.put(p.getSpec(), fss.getModificationTime());
             }
           }
-          idx.getParameters().putAll(props);
-        } catch (HiveException e) {
-          throw new HiveException("ERROR: Failed to update index timestamps");
-        } catch (IOException e) {
-          throw new HiveException("ERROR: Failed to look up timestamps on filesystem");
+        } else {
+          FileSystem fs = baseTbl.getPath().getFileSystem(db.getConf());
+          FileStatus fss = fs.getFileStatus(baseTbl.getPath());
+          basePartTs.put(null, fss.getModificationTime());
+        }
+        for (Map<String, String> spec : basePartTs.keySet()) {
+          if (spec != null) {
+            props.put(spec.toString(), basePartTs.get(spec).toString());
+          } else {
+            props.put("base_timestamp", basePartTs.get(null).toString());
+          }
         }
+        idx.getParameters().putAll(props);
+      } catch (HiveException e) {
+        throw new HiveException("ERROR: Failed to update index timestamps");
+      } catch (IOException e) {
+        throw new HiveException("ERROR: Failed to look up timestamps on filesystem");
+      }
 
-        break;
-      default:
-        console.printError("Unsupported Alter commnad");
-        return 1;
+      break;
+    default:
+      console.printError("Unsupported Alter commnad");
+      return 1;
     }
 
     // set last modified by properties
@@ -1178,17 +1178,17 @@ public class DDLTask extends Task<DDLWor
   }
 
   /**
-  * Alter partition column type in a table
-  *
-  * @param db
-  *          Database to rename the partition.
-  * @param alterPartitionDesc
-  *          change partition column type.
-  * @return Returns 0 when execution succeeds and above 0 if it fails.
-  * @throws HiveException
-  */
+   * Alter partition column type in a table
+   *
+   * @param db
+   *          Database to rename the partition.
+   * @param alterPartitionDesc
+   *          change partition column type.
+   * @return Returns 0 when execution succeeds and above 0 if it fails.
+   * @throws HiveException
+   */
   private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionDesc)
-    throws HiveException {
+      throws HiveException {
 
     Table tbl = db.getTable(alterPartitionDesc.getDbName(), alterPartitionDesc.getTableName());
     String tabName = alterPartitionDesc.getTableName();
@@ -1389,7 +1389,7 @@ public class DDLTask extends Task<DDLWor
 
   private int archive(Hive db, AlterTableSimpleDesc simpleDesc,
       DriverContext driverContext)
-      throws HiveException {
+          throws HiveException {
     String dbName = simpleDesc.getDbName();
     String tblName = simpleDesc.getTableName();
 
@@ -1417,7 +1417,7 @@ public class DDLTask extends Task<DDLWor
       for(Partition p: partitions){
         if(partitionInCustomLocation(tbl, p)) {
           String message = String.format("ARCHIVE cannot run for partition " +
-                      "groups with custom locations like %s", p.getLocation());
+              "groups with custom locations like %s", p.getLocation());
           throw new HiveException(message);
         }
       }
@@ -1505,10 +1505,10 @@ public class DDLTask extends Task<DDLWor
       // First create the archive in a tmp dir so that if the job fails, the
       // bad files don't pollute the filesystem
       Path tmpPath = new Path(driverContext.getCtx()
-                    .getExternalTmpPath(originalDir.toUri()), "partlevel");
+          .getExternalTmpPath(originalDir.toUri()), "partlevel");
 
       console.printInfo("Creating " + archiveName +
-                        " for " + originalDir.toString());
+          " for " + originalDir.toString());
       console.printInfo("in " + tmpPath);
       console.printInfo("Please wait... (this may take a while)");
 
@@ -1517,7 +1517,7 @@ public class DDLTask extends Task<DDLWor
       try {
         int maxJobNameLen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
         String jobname = String.format("Archiving %s@%s",
-          tbl.getTableName(), partSpecInfo.getName());
+            tbl.getTableName(), partSpecInfo.getName());
         jobname = Utilities.abbreviate(jobname, maxJobNameLen - 6);
         conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, jobname);
         ret = shim.createHadoopArchive(conf, originalDir, tmpPath, archiveName);
@@ -1542,7 +1542,7 @@ public class DDLTask extends Task<DDLWor
     } else {
       if (pathExists(intermediateArchivedDir)) {
         console.printInfo("Intermediate archive directory " + intermediateArchivedDir +
-        " already exists. Assuming it contains an archived version of the partition");
+            " already exists. Assuming it contains an archived version of the partition");
       }
     }
 
@@ -1650,7 +1650,7 @@ public class DDLTask extends Task<DDLWor
       for(Partition p: partitions){
         if(partitionInCustomLocation(tbl, p)) {
           String message = String.format("UNARCHIVE cannot run for partition " +
-                      "groups with custom locations like %s", p.getLocation());
+              "groups with custom locations like %s", p.getLocation());
           throw new HiveException(message);
         }
       }
@@ -1780,7 +1780,7 @@ public class DDLTask extends Task<DDLWor
       }
     } else {
       console.printInfo(intermediateArchivedDir + " already exists. " +
-      "Assuming it contains the archived version of the partition");
+          "Assuming it contains the archived version of the partition");
     }
 
     // If there is a failure from here to until when the metadata is changed,
@@ -1798,7 +1798,7 @@ public class DDLTask extends Task<DDLWor
       }
     } else {
       console.printInfo(originalDir + " already exists. " +
-      "Assuming it contains the extracted files in the partition");
+          "Assuming it contains the extracted files in the partition");
     }
 
     for(Partition p: partitions) {
@@ -2164,27 +2164,27 @@ public class DDLTask extends Task<DDLWor
           if (delims.containsKey(serdeConstants.FIELD_DELIM)) {
             tbl_row_format += "  FIELDS TERMINATED BY '" +
                 escapeHiveCommand(StringEscapeUtils.escapeJava(delims.get(
-                serdeConstants.FIELD_DELIM))) + "' \n";
+                    serdeConstants.FIELD_DELIM))) + "' \n";
           }
           if (delims.containsKey(serdeConstants.COLLECTION_DELIM)) {
             tbl_row_format += "  COLLECTION ITEMS TERMINATED BY '" +
                 escapeHiveCommand(StringEscapeUtils.escapeJava(delims.get(
-                serdeConstants.COLLECTION_DELIM))) + "' \n";
+                    serdeConstants.COLLECTION_DELIM))) + "' \n";
           }
           if (delims.containsKey(serdeConstants.MAPKEY_DELIM)) {
             tbl_row_format += "  MAP KEYS TERMINATED BY '" +
                 escapeHiveCommand(StringEscapeUtils.escapeJava(delims.get(
-                serdeConstants.MAPKEY_DELIM))) + "' \n";
+                    serdeConstants.MAPKEY_DELIM))) + "' \n";
           }
           if (delims.containsKey(serdeConstants.LINE_DELIM)) {
             tbl_row_format += "  LINES TERMINATED BY '" +
                 escapeHiveCommand(StringEscapeUtils.escapeJava(delims.get(
-                serdeConstants.LINE_DELIM))) + "' \n";
+                    serdeConstants.LINE_DELIM))) + "' \n";
           }
           if (delims.containsKey(serdeConstants.SERIALIZATION_NULL_FORMAT)) {
             tbl_row_format += "  NULL DEFINED AS '" +
                 escapeHiveCommand(StringEscapeUtils.escapeJava(delims.get(
-                serdeConstants.SERIALIZATION_NULL_FORMAT))) + "' \n";
+                    serdeConstants.SERIALIZATION_NULL_FORMAT))) + "' \n";
           }
         }
         else {
@@ -2400,7 +2400,7 @@ public class DDLTask extends Task<DDLWor
   }
 
   public int showColumns(Hive db, ShowColumnsDesc showCols)
-                         throws HiveException {
+      throws HiveException {
 
     String dbName = showCols.getDbName();
     String tableName = showCols.getTableName();
@@ -2421,8 +2421,11 @@ public class DDLTask extends Task<DDLWor
 
       List<FieldSchema> cols = table.getCols();
       cols.addAll(table.getPartCols());
-      outStream.writeBytes(
-          MetaDataFormatUtils.getAllColumnsInformation(cols, false));
+      // In case the query is served by HiveServer2, don't pad it with spaces,
+      // as HiveServer2 output is consumed by JDBC/ODBC clients.
+      boolean isOutputPadded = !SessionState.get().isHiveServerQuery();
+      outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(
+          cols, false, isOutputPadded));
       outStream.close();
       outStream = null;
     } catch (IOException e) {
@@ -2515,8 +2518,8 @@ public class DDLTask extends Task<DDLWor
       }
       else {
         locks = lockMgr.getLocks(getHiveObject(showLocks.getTableName(),
-                                               showLocks.getPartSpec()),
-                                 true, isExt);
+            showLocks.getPartSpec()),
+            true, isExt);
       }
 
       Collections.sort(locks, new Comparator<HiveLock>() {
@@ -2602,10 +2605,10 @@ public class DDLTask extends Task<DDLWor
 
     Map<String, String> partSpec = lockTbl.getPartSpec();
     HiveLockObjectData lockData =
-      new HiveLockObjectData(lockTbl.getQueryId(),
-                             String.valueOf(System.currentTimeMillis()),
-                             "EXPLICIT",
-                             lockTbl.getQueryStr());
+        new HiveLockObjectData(lockTbl.getQueryId(),
+            String.valueOf(System.currentTimeMillis()),
+            "EXPLICIT",
+            lockTbl.getQueryStr());
 
     if (partSpec == null) {
       HiveLock lck = lockMgr.lock(new HiveLockObject(tbl, lockData), mode, true);
@@ -2651,9 +2654,9 @@ public class DDLTask extends Task<DDLWor
     }
 
     HiveLockObjectData lockData =
-      new HiveLockObjectData(lockDb.getQueryId(),
-                             String.valueOf(System.currentTimeMillis()),
-                             "EXPLICIT", lockDb.getQueryStr());
+        new HiveLockObjectData(lockDb.getQueryId(),
+            String.valueOf(System.currentTimeMillis()),
+            "EXPLICIT", lockDb.getQueryStr());
 
     HiveLock lck = lockMgr.lock(new HiveLockObject(dbObj.getName(), lockData), mode, true);
     if (lck == null) {
@@ -2699,7 +2702,7 @@ public class DDLTask extends Task<DDLWor
   }
 
   private HiveLockObject getHiveObject(String tabName,
-                                       Map<String, String> partSpec) throws HiveException {
+      Map<String, String> partSpec) throws HiveException {
     Table  tbl = db.getTable(tabName);
     if (tbl == null) {
       throw new HiveException("Table " + tabName + " does not exist ");
@@ -2830,12 +2833,12 @@ public class DDLTask extends Task<DDLWor
       if (database == null) {
         throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, descDatabase.getDatabaseName());
       } else {
-          Map<String, String> params = null;
-          if(descDatabase.isExt()) {
-            params = database.getParameters();
-          }
-          PrincipalType ownerType = database.getOwnerType();
-          formatter.showDatabaseDescription(outStream, database.getName(),
+        Map<String, String> params = null;
+        if(descDatabase.isExt()) {
+          params = database.getParameters();
+        }
+        PrincipalType ownerType = database.getOwnerType();
+        formatter.showDatabaseDescription(outStream, database.getName(),
             database.getDescription(), database.getLocationUri(),
             database.getOwnerName(), (null == ownerType) ? null : ownerType.name(), params);
       }
@@ -3018,7 +3021,7 @@ public class DDLTask extends Task<DDLWor
           outStream.close();
           outStream = null;
           throw new HiveException(ErrorMsg.INVALID_PARTITION,
-                  StringUtils.join(descTbl.getPartSpec().keySet(), ','), tableName);
+              StringUtils.join(descTbl.getPartSpec().keySet(), ','), tableName);
         }
         tbl = part.getTable();
       }
@@ -3039,19 +3042,22 @@ public class DDLTask extends Task<DDLWor
         cols = (part == null || tbl.getTableType() == TableType.VIRTUAL_VIEW) ?
             tbl.getCols() : part.getCols();
 
-        if (!descTbl.isFormatted()) {
-          if (tableName.equals(colPath)) {
-            cols.addAll(tbl.getPartCols());
-          }
-        }
+            if (!descTbl.isFormatted()) {
+              if (tableName.equals(colPath)) {
+                cols.addAll(tbl.getPartCols());
+              }
+            }
       } else {
         cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
       }
 
       fixDecimalColumnTypeName(cols);
-
-      formatter.describeTable(outStream, colPath, tableName, tbl, part, cols,
-                              descTbl.isFormatted(), descTbl.isExt(), descTbl.isPretty());
+      // In case the query is served by HiveServer2, don't pad it with spaces,
+      // as HiveServer2 output is consumed by JDBC/ODBC clients.
+      boolean isOutputPadded = !SessionState.get().isHiveServerQuery();
+      formatter.describeTable(outStream, colPath, tableName, tbl, part,
+          cols, descTbl.isFormatted(), descTbl.isExt(),
+          descTbl.isPretty(), isOutputPadded);
 
       LOG.info("DDLTask: written data for " + tbl.getTableName());
       outStream.close();
@@ -3195,7 +3201,7 @@ public class DDLTask extends Task<DDLWor
         part = db.getPartition(tbl, alterTbl.getPartSpec(), false);
         if (part == null) {
           throw new HiveException(ErrorMsg.INVALID_PARTITION,
-                  StringUtils.join(alterTbl.getPartSpec().keySet(), ',') + " for table " + alterTbl.getOldName());
+              StringUtils.join(alterTbl.getPartSpec().keySet(), ',') + " for table " + alterTbl.getOldName());
         }
       }
       else {
@@ -3213,7 +3219,7 @@ public class DDLTask extends Task<DDLWor
       if (tbl.getSerializationLib().equals(
           "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
         console
-            .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
+        .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
         tbl.setSerializationLib(LazySimpleSerDe.class.getName());
         tbl.getTTable().getSd().setCols(newCols);
       } else {
@@ -3302,7 +3308,7 @@ public class DDLTask extends Task<DDLWor
       if (tbl.getSerializationLib().equals(
           "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
         console
-            .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
+        .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
         tbl.setSerializationLib(LazySimpleSerDe.class.getName());
       } else if (!tbl.getSerializationLib().equals(
           MetadataTypedColumnsetSerDe.class.getName())
@@ -3343,7 +3349,7 @@ public class DDLTask extends Task<DDLWor
               alterTbl.getProps());
         }
         tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl.
-              getDeserializer()));
+            getDeserializer()));
       }
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) {
       if(part != null) {
@@ -3580,7 +3586,7 @@ public class DDLTask extends Task<DDLWor
             return;
           }
           throw new HiveException(
-            "Cannot drop a base table with DROP VIEW");
+              "Cannot drop a base table with DROP VIEW");
         }
       }
     }
@@ -3591,7 +3597,7 @@ public class DDLTask extends Task<DDLWor
     }
 
     int partitionBatchSize = HiveConf.getIntVar(conf,
-      ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX);
+        ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX);
 
     // We should check that all the partitions of the table can be dropped
     if (tbl != null && tbl.isPartitioned()) {
@@ -3599,13 +3605,13 @@ public class DDLTask extends Task<DDLWor
 
       for(int i=0; i < partitionNames.size(); i+= partitionBatchSize) {
         List<String> partNames = partitionNames.subList(i, Math.min(i+partitionBatchSize,
-          partitionNames.size()));
+            partitionNames.size()));
         List<Partition> listPartitions = db.getPartitionsByNames(tbl, partNames);
         for (Partition p: listPartitions) {
           if (!p.canDrop()) {
             throw new HiveException("Table " + tbl.getTableName() +
-              " Partition" + p.getName() +
-              " is protected from being dropped");
+                " Partition" + p.getName() +
+                " is protected from being dropped");
           }
         }
       }
@@ -3641,7 +3647,7 @@ public class DDLTask extends Task<DDLWor
     try {
 
       Deserializer d = ReflectionUtils.newInstance(conf.getClassByName(serdeName).
-        asSubclass(Deserializer.class), conf);
+          asSubclass(Deserializer.class), conf);
       if (d != null) {
         LOG.debug("Found class for " + serdeName);
       }
@@ -3755,8 +3761,8 @@ public class DDLTask extends Task<DDLWor
 
     if (crtTbl.getStorageHandler() != null) {
       tbl.setProperty(
-        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
-        crtTbl.getStorageHandler());
+          org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
+          crtTbl.getStorageHandler());
     }
     HiveStorageHandler storageHandler = tbl.getStorageHandler();
 
@@ -3773,7 +3779,7 @@ public class DDLTask extends Task<DDLWor
       } else {
         String serDeClassName = storageHandler.getSerDeClass().getName();
         LOG.info("Use StorageHandler-supplied " + serDeClassName
-          + " for table " + crtTbl.getTableName());
+            + " for table " + crtTbl.getTableName());
         tbl.setSerializationLib(serDeClassName);
       }
     } else {
@@ -3804,7 +3810,7 @@ public class DDLTask extends Task<DDLWor
     }
     if (crtTbl.getSerdeProps() != null) {
       Iterator<Entry<String, String>> iter = crtTbl.getSerdeProps().entrySet()
-        .iterator();
+          .iterator();
       while (iter.hasNext()) {
         Entry<String, String> m = iter.next();
         tbl.setSerdeParam(m.getKey(), m.getValue());
@@ -3840,9 +3846,9 @@ public class DDLTask extends Task<DDLWor
     tbl.setOutputFormatClass(crtTbl.getOutputFormat());
 
     tbl.getTTable().getSd().setInputFormat(
-      tbl.getInputFormatClass().getName());
+        tbl.getInputFormatClass().getName());
     tbl.getTTable().getSd().setOutputFormat(
-      tbl.getOutputFormatClass().getName());
+        tbl.getOutputFormatClass().getName());
 
     if (crtTbl.isExternal()) {
       tbl.setProperty("EXTERNAL", "TRUE");
@@ -3937,7 +3943,7 @@ public class DDLTask extends Task<DDLWor
 
       if (crtTbl.getDefaultSerdeProps() != null) {
         Iterator<Entry<String, String>> iter = crtTbl.getDefaultSerdeProps().entrySet()
-          .iterator();
+            .iterator();
         while (iter.hasNext()) {
           Entry<String, String> m = iter.next();
           tbl.setSerdeParam(m.getKey(), m.getValue());

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java Fri Feb 28 02:13:45 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.ql.exec.ma
 import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectSerDeContext;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinEagerRowContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinRowContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;
@@ -51,7 +52,6 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 
-
 public class HashTableSinkOperator extends TerminalOperator<HashTableSinkDesc> implements
     Serializable {
   private static final long serialVersionUID = 1L;
@@ -96,7 +96,7 @@ public class HashTableSinkOperator exten
   private transient MapJoinTableContainerSerDe[] mapJoinTableSerdes;  
 
   private static final Object[] EMPTY_OBJECT_ARRAY = new Object[0];
-  private static final MapJoinRowContainer EMPTY_ROW_CONTAINER = new MapJoinRowContainer();
+  private static final MapJoinEagerRowContainer EMPTY_ROW_CONTAINER = new MapJoinEagerRowContainer();
   static {
     EMPTY_ROW_CONTAINER.add(EMPTY_OBJECT_ARRAY);
   }
@@ -186,7 +186,7 @@ public class HashTableSinkOperator exten
         if (pos == posBigTableAlias) {
           continue;
         }
-        mapJoinTables[pos] = new HashMapWrapper(hashTableThreshold, hashTableLoadFactor);        
+        mapJoinTables[pos] = new HashMapWrapper(hashTableThreshold, hashTableLoadFactor);
         TableDesc valueTableDesc = conf.getValueTblFilteredDescs().get(pos);
         SerDe valueSerDe = (SerDe) ReflectionUtils.newInstance(valueTableDesc.getDeserializerClass(), null);
         valueSerDe.initialize(null, valueTableDesc.getProperties());
@@ -241,7 +241,7 @@ public class HashTableSinkOperator exten
     MapJoinRowContainer rowContainer = tableContainer.get(key);
     if (rowContainer == null) {
       if(value.length != 0) {
-        rowContainer = new MapJoinRowContainer();
+        rowContainer = new MapJoinEagerRowContainer();
         rowContainer.add(value);
       } else {
         rowContainer = EMPTY_ROW_CONTAINER;

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java Fri Feb 28 02:13:45 2014
@@ -85,7 +85,7 @@ public class JoinOperator extends Common
       }
 
       // number of rows for the key in the given table
-      long sz = storage[alias].size();
+      long sz = storage[alias].rowCount();
       StructObjectInspector soi = (StructObjectInspector) inputObjInspectors[tag];
       StructField sf = soi.getStructFieldRef(Utilities.ReduceField.KEY
           .toString());
@@ -101,7 +101,7 @@ public class JoinOperator extends Common
           // storage,
           // to preserve the correctness for outer joins.
           checkAndGenObject();
-          storage[alias].clear();
+          storage[alias].clearRows();
         }
       } else {
         if (sz == nextSz) {

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java Fri Feb 28 02:13:45 2014
@@ -198,7 +198,7 @@ public class MapJoinOperator extends Abs
             }
           } else {
             joinNeeded = true;
-            storage[pos] = rowContainer.copy();
+            storage[pos] = rowContainer.copy(); // TODO: why copy?
             aliasFilterTags[pos] = rowContainer.getAliasFilter();
           }
         }
@@ -211,7 +211,7 @@ public class MapJoinOperator extends Abs
         checkAndGenObject();
       }
       // done with the row
-      storage[tag].clear();
+      storage[tag].clearRows();
       for (byte pos = 0; pos < order.length; pos++) {
         if (pos != tag) {
           storage[pos] = null;

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java Fri Feb 28 02:13:45 2014
@@ -60,7 +60,7 @@ public class PTFPartition {
   }
 
   public void reset() throws HiveException {
-    elems.clear();
+    elems.clearRows();
   }
 
   public SerDe getSerDe() {
@@ -82,7 +82,7 @@ public class PTFPartition {
 
   public void append(Object o) throws HiveException {
 
-    if ( elems.size() == Integer.MAX_VALUE ) {
+    if ( elems.rowCount() == Integer.MAX_VALUE ) {
       throw new HiveException(String.format("Cannot add more than %d elements to a PTFPartition",
           Integer.MAX_VALUE));
     }
@@ -94,7 +94,7 @@ public class PTFPartition {
   }
 
   public int size() {
-    return (int) elems.size();
+    return (int) elems.rowCount();
   }
 
   public PTFPartitionIterator<Object> iterator() throws HiveException {

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java Fri Feb 28 02:13:45 2014
@@ -313,7 +313,7 @@ public class SMBMapJoinOperator extends 
     boolean allFetchDone = allFetchDone();
     // if all left data in small tables are less than and equal to the left data
     // in big table, let's them catch up
-    while (bigTblRowContainer != null && bigTblRowContainer.size() > 0
+    while (bigTblRowContainer != null && bigTblRowContainer.rowCount() > 0
         && !allFetchDone) {
       joinOneGroup();
       bigTblRowContainer = this.candidateStorage[this.posBigTable];
@@ -341,7 +341,7 @@ public class SMBMapJoinOperator extends 
       joinOneGroup();
       dataInCache = false;
       for (byte pos = 0; pos < order.length; pos++) {
-        if (this.candidateStorage[pos].size() > 0) {
+        if (this.candidateStorage[pos].rowCount() > 0) {
           dataInCache = true;
           break;
         }
@@ -397,7 +397,7 @@ public class SMBMapJoinOperator extends 
     }
     checkAndGenObject();
     for (Byte pos : needFetchList) {
-      this.candidateStorage[pos].clear();
+      this.candidateStorage[pos].clearRows();
       this.keyWritables[pos] = null;
     }
     return needFetchList;
@@ -437,7 +437,7 @@ public class SMBMapJoinOperator extends 
     this.keyWritables[t] = this.nextKeyWritables[t];
     this.nextKeyWritables[t] = null;
     RowContainer<List<Object>> oldRowContainer = this.candidateStorage[t];
-    oldRowContainer.clear();
+    oldRowContainer.clearRows();
     this.candidateStorage[t] = this.nextGroupStorage[t];
     this.nextGroupStorage[t] = oldRowContainer;
   }

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/AbstractRowContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/AbstractRowContainer.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/AbstractRowContainer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/AbstractRowContainer.java Fri Feb 28 02:13:45 2014
@@ -20,29 +20,24 @@ package org.apache.hadoop.hive.ql.exec.p
 
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 
-public abstract class AbstractRowContainer<ROW> {
-
-  public AbstractRowContainer() {
+public interface AbstractRowContainer<ROW> {
 
+  public interface RowIterator<ROW> {
+    public ROW first() throws HiveException;
+    public ROW next() throws HiveException;
   }
 
-  public abstract void add(ROW t) throws HiveException;
-
-  public abstract ROW first() throws HiveException;
+  public RowIterator<ROW> rowIter() throws HiveException;
 
-  public abstract ROW next() throws HiveException;
+  public void add(ROW t) throws HiveException;
 
   /**
-   * Get the number of elements in the RowContainer.
-   *
    * @return number of elements in the RowContainer
    */
-
-  public abstract long size();
+  public int rowCount() throws HiveException;
 
   /**
    * Remove all elements in the RowContainer.
    */
-
-  public abstract void clear() throws HiveException;
+  public void clearRows() throws HiveException;
 }
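
[A minimal sketch of how callers walk rows under the refactored interface above: first()/next() now live on a RowIterator obtained from rowIter() rather than on the container itself. Only the AbstractRowContainer API comes from this commit; the helper class and method names are hypothetical.]

import java.util.List;

import org.apache.hadoop.hive.ql.exec.persistence.AbstractRowContainer;
import org.apache.hadoop.hive.ql.metadata.HiveException;

final class RowIterExample {
  static long countRows(AbstractRowContainer<List<Object>> container) throws HiveException {
    long n = 0;
    // obtain the iterator once, then walk rows until next() returns null
    AbstractRowContainer.RowIterator<List<Object>> iter = container.rowIter();
    for (List<Object> row = iter.first(); row != null; row = iter.next()) {
      n++;  // a real caller would hand the row to the join/PTF logic here
    }
    return n;
  }
}
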

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinRowContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinRowContainer.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinRowContainer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinRowContainer.java Fri Feb 28 02:13:45 2014
@@ -19,161 +19,20 @@
 package org.apache.hadoop.hive.ql.exec.persistence;
 
 import java.io.IOException;
-import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
-import java.util.AbstractList;
-import java.util.ArrayList;
-import java.util.ConcurrentModificationException;
 import java.util.List;
 
-import org.apache.hadoop.hive.serde2.SerDe;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.io.ShortWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
-import org.apache.hadoop.io.Writable;
-
-@SuppressWarnings("deprecation")
-public class MapJoinRowContainer extends AbstractRowContainer<List<Object>> {
-  private static final Object[] EMPTY_OBJECT_ARRAY = new Object[0];
-  
-  private final List<List<Object>> list;
-  private int index;
-  private byte aliasFilter = (byte) 0xff;
-
-  public MapJoinRowContainer() {
-    index = 0;
-    list = new ArrayList<List<Object>>(1);
-  } 
-
-  @Override
-  public void add(List<Object> t) {
-    list.add(t);
-  }
-
-  public void add(Object[] t) {
-    add(toList(t));
-  }
-
-  @Override
-  public List<Object> first() {
-    index = 0;
-    if (index < list.size()) {
-      return list.get(index);
-    }
-    return null;
-  }
-
-  @Override
-  public List<Object> next() {
-    index++;
-    if (index < list.size()) {
-      return list.get(index);
-    }
-    return null;
-  }
-
-  /**
-   * Get the number of elements in the RowContainer.
-   *
-   * @return number of elements in the RowContainer
-   */
-  @Override
-  public long size() {
-    return list.size();
-  }
-
-  /**
-   * Remove all elements in the RowContainer.
-   */
-  @Override
-  public void clear() {
-    list.clear();
-    index = 0;
-  }
-  
-  public byte getAliasFilter() {
-    return aliasFilter;
-  }
-  
-  public MapJoinRowContainer copy() {
-    MapJoinRowContainer result = new MapJoinRowContainer();
-    for(List<Object> item : list) {
-      result.add(item);
-    }
-    return result;
-  }
-
-  public void read(MapJoinObjectSerDeContext context, ObjectInputStream in, Writable container)
-  throws IOException, SerDeException {
-    clear();
-    long numRows = in.readLong();
-    for (long rowIndex = 0L; rowIndex < numRows; rowIndex++) {
-      container.readFields(in);
-      read(context, container);
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  public void read(MapJoinObjectSerDeContext context, Writable currentValue) throws SerDeException {
-    SerDe serde = context.getSerDe();
-    List<Object> value = (List<Object>)ObjectInspectorUtils.copyToStandardObject(serde.deserialize(currentValue),
-        serde.getObjectInspector(), ObjectInspectorCopyOption.WRITABLE);
-    if(value == null) {
-      add(toList(EMPTY_OBJECT_ARRAY));
-    } else {
-      Object[] valuesArray = value.toArray();
-      if (context.hasFilterTag()) {
-        aliasFilter &= ((ShortWritable)valuesArray[valuesArray.length - 1]).get();
-      }
-      add(toList(valuesArray));
-    }
-  }
-
-  public void write(MapJoinObjectSerDeContext context, ObjectOutputStream out)
-  throws IOException, SerDeException {
-    SerDe serde = context.getSerDe();
-    ObjectInspector valueObjectInspector = context.getStandardOI();
-    long numRows = size();
-    long numRowsWritten = 0L;
-    out.writeLong(numRows);
-    for (List<Object> row = first(); row != null; row = next()) {
-      serde.serialize(row.toArray(), valueObjectInspector).write(out);
-      ++numRowsWritten;      
-    }
-    if(numRows != size()) {
-      throw new ConcurrentModificationException("Values was modifified while persisting");
-    }
-    if(numRowsWritten != numRows) {
-      throw new IllegalStateException("Expected to write " + numRows + " but wrote " + numRowsWritten);
-    }
-  }
-  
-  private List<Object> toList(Object[] array) {
-    return new NoCopyingArrayList(array);
-  }
-  /**
-   * In this use case our objects will not be modified
-   * so we don't care about copying in and out.
-   */
-  private static class NoCopyingArrayList extends AbstractList<Object> {
-    private Object[] array;
-    public NoCopyingArrayList(Object[] array) {
-      this.array = array;
-    }
-    @Override
-    public Object get(int index) {
-      return array[index];
-    }
-
-    @Override
-    public int size() {
-      return array.length;
-    }
-    
-    public Object[] toArray() {
-      return array;
-    }    
-  }
-}
+
+public interface MapJoinRowContainer extends AbstractRowContainer<List<Object>> {
+
+  public byte getAliasFilter() throws HiveException;
+
+  public MapJoinRowContainer copy() throws HiveException;
+
+  public void add(Object[] value) throws HiveException;
+
+  public void write(MapJoinObjectSerDeContext valueContext, ObjectOutputStream out)
+      throws IOException, SerDeException;
+}
\ No newline at end of file

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java Fri Feb 28 02:13:45 2014
@@ -21,18 +21,18 @@ package org.apache.hadoop.hive.ql.exec.p
 import java.util.Map;
 import java.util.Set;
 
-public interface MapJoinTableContainer {  
-  
+public interface MapJoinTableContainer {
+
   public int size();
-  
+
   public MapJoinRowContainer get(MapJoinKey key);
-  
+
   public void put(MapJoinKey key, MapJoinRowContainer value);
-  
+
   public Set<Map.Entry<MapJoinKey, MapJoinRowContainer>> entrySet();
-  
+
   public Map<String, String> getMetaData();
-  
+
   public void clear();
 
 }

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java Fri Feb 28 02:13:45 2014
@@ -57,7 +57,7 @@ public class MapJoinTableContainerSerDe 
     try {
       String name = in.readUTF();
       Map<String, String> metaData = (Map<String, String>) in.readObject();
-      tableContainer = create(name, metaData);      
+      tableContainer = create(name, metaData);
     } catch (IOException e) {
       throw new HiveException("IO error while trying to create table container", e);
     } catch (ClassNotFoundException e) {
@@ -70,7 +70,7 @@ public class MapJoinTableContainerSerDe 
       for (int keyIndex = 0; keyIndex < numKeys; keyIndex++) {
         MapJoinKey key = new MapJoinKey();
         key.read(keyContext, in, keyContainer);
-        MapJoinRowContainer values = new MapJoinRowContainer();
+        MapJoinEagerRowContainer values = new MapJoinEagerRowContainer();
         values.read(valueContext, in, valueContainer);
         tableContainer.put(key, values);
       }

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java Fri Feb 28 02:13:45 2014
@@ -100,7 +100,7 @@ public class PTFRowContainer<Row extends
         blkInfo.startOffset = rw.outStream.getLength();
         blockInfos.add(blkInfo);
       } catch(IOException e) {
-        clear();
+        clearRows();
         LOG.error(e.toString(), e);
         throw new HiveException(e);
       }
@@ -149,8 +149,8 @@ public class PTFRowContainer<Row extends
   }
 
   @Override
-  public void clear() throws HiveException {
-    super.clear();
+  public void clearRows() throws HiveException {
+    super.clearRows();
     resetReadBlocks();
     blockInfos = new ArrayList<PTFRowContainer.BlockInfo>();
   }
@@ -207,7 +207,7 @@ public class PTFRowContainer<Row extends
       }
 
     } catch(Exception e) {
-      clear();
+      clearRows();
       LOG.error(e.toString(), e);
       if ( e instanceof HiveException ) {
         throw (HiveException) e;

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java Fri Feb 28 02:13:45 2014
@@ -71,7 +71,8 @@ import org.apache.hadoop.util.Reflection
  * reading.
  *
  */
-public class RowContainer<ROW extends List<Object>> extends AbstractRowContainer<ROW> {
+public class RowContainer<ROW extends List<Object>>
+  implements AbstractRowContainer<ROW>, AbstractRowContainer.RowIterator<ROW> {
 
   protected static Log LOG = LogFactory.getLog(RowContainer.class);
 
@@ -178,6 +179,11 @@ public class RowContainer<ROW extends Li
   }
 
   @Override
+  public AbstractRowContainer.RowIterator<ROW> rowIter() {
+    return this;
+  }
+
+  @Override
   public ROW first() throws HiveException {
     if (size == 0) {
       return null;
@@ -316,7 +322,7 @@ public class RowContainer<ROW extends Li
 
       this.numFlushedBlocks++;
     } catch (Exception e) {
-      clear();
+      clearRows();
       LOG.error(e.toString(), e);
       if ( e instanceof HiveException ) {
         throw (HiveException) e;
@@ -331,8 +337,8 @@ public class RowContainer<ROW extends Li
    * @return number of elements in the RowContainer
    */
   @Override
-  public long size() {
-    return size;
+  public int rowCount() {
+    return (int)size;
   }
 
   protected boolean nextBlock(int readIntoOffset) throws HiveException {
@@ -372,7 +378,7 @@ public class RowContainer<ROW extends Li
     } catch (Exception e) {
       LOG.error(e.getMessage(), e);
       try {
-        this.clear();
+        this.clearRows();
       } catch (HiveException e1) {
         LOG.error(e.getMessage(), e);
       }
@@ -392,14 +398,14 @@ public class RowContainer<ROW extends Li
         + destPath.toString());
     destFs
         .copyFromLocalFile(true, tempOutPath, new Path(destPath, new Path(tempOutPath.getName())));
-    clear();
+    clearRows();
   }
 
   /**
    * Remove all elements in the RowContainer.
    */
   @Override
-  public void clear() throws HiveException {
+  public void clearRows() throws HiveException {
     itrCursor = 0;
     addCursor = 0;
     numFlushedBlocks = 0;
@@ -524,7 +530,7 @@ public class RowContainer<ROW extends Li
           hiveOutputFormat, serde.getSerializedClass(), false,
           tblDesc.getProperties(), tempOutPath, reporter);
     } catch (Exception e) {
-      clear();
+      clearRows();
       LOG.error(e.toString(), e);
       throw new HiveException(e);
     }
@@ -586,8 +592,7 @@ public class RowContainer<ROW extends Li
   }
 
   protected void close() throws HiveException {
-    clear();
+    clearRows();
     currentReadBlock = firstReadBlockPointer = currentWriteBlock = null;
   }
-
 }

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java Fri Feb 28 02:13:45 2014
@@ -28,13 +28,14 @@ import org.apache.hadoop.hive.ql.exec.Ma
 import org.apache.hadoop.hive.ql.exec.MapredContext;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
 import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper;
+import org.apache.hadoop.hive.ql.exec.persistence.LazyFlatRowContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
-import org.apache.hadoop.hive.ql.exec.persistence.MapJoinRowContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.tez.runtime.api.LogicalInput;
 import org.apache.tez.runtime.library.api.KeyValueReader;
@@ -68,6 +69,7 @@ public class HashTableLoader implements 
     int hashTableThreshold = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD);
     float hashTableLoadFactor = HiveConf.getFloatVar(hconf,
         HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR);
+    boolean useLazyRows = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEMAPJOINLAZYHASHTABLE);
 
     for (int pos = 0; pos < mapJoinTables.length; pos++) {
       if (pos == desc.getPosBigTable()) {
@@ -87,12 +89,13 @@ public class HashTableLoader implements 
           MapJoinKey key = new MapJoinKey();
           key.read(mapJoinTableSerdes[pos].getKeyContext(), (Writable)kvReader.getCurrentKey());
 
-          MapJoinRowContainer values = tableContainer.get(key);
-          if(values == null){
-        	  values = new MapJoinRowContainer();
-        	  tableContainer.put(key, values);
+          LazyFlatRowContainer values = (LazyFlatRowContainer)tableContainer.get(key);
+          if (values == null) {
+            values = new LazyFlatRowContainer();
+            tableContainer.put(key, values);
           }
-          values.read(mapJoinTableSerdes[pos].getValueContext(), (Writable)kvReader.getCurrentValue());
+          values.add(mapJoinTableSerdes[pos].getValueContext(),
+              (BytesWritable)kvReader.getCurrentValue(), useLazyRows);
         }
 
         mapJoinTables[pos] = tableContainer;
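
[Sketch of toggling the lazy map-join hash table behavior consumed by the loader above. Only the ConfVars constant comes from this commit; the surrounding setup is illustrative.]

import org.apache.hadoop.hive.conf.HiveConf;

final class LazyHashTableConfigExample {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // enable lazily-deserialized rows in the map-join hash table
    conf.setBoolVar(HiveConf.ConfVars.HIVEMAPJOINLAZYHASHTABLE, true);
    boolean useLazyRows = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEMAPJOINLAZYHASHTABLE);
    System.out.println("useLazyRows = " + useLazyRows);
  }
}
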

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java Fri Feb 28 02:13:45 2014
@@ -80,6 +80,9 @@ public class DataWritableReadSupport ext
         // listColumns contains partition columns which are metadata only
         if (fileSchema.containsField(col)) {
           typeListTable.add(fileSchema.getType(col));
+        } else {
+          // below allows schema evolution
+          typeListTable.add(new PrimitiveType(Repetition.OPTIONAL, PrimitiveTypeName.BINARY, col));
         }
       }
       MessageType tableSchema = new MessageType(TABLE_SCHEMA, typeListTable);
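
[Sketch of the schema-evolution idea in the hunk above: table columns absent from a Parquet file's schema are requested as OPTIONAL BINARY, so reads of old files yield null for newly added columns instead of failing. Package names assume the pre-org.apache parquet-mr artifacts Hive used at the time; the class, method, and schema names are hypothetical.]

import java.util.ArrayList;
import java.util.List;

import parquet.schema.MessageType;
import parquet.schema.PrimitiveType;
import parquet.schema.PrimitiveType.PrimitiveTypeName;
import parquet.schema.Type;
import parquet.schema.Type.Repetition;

final class SchemaEvolutionSketch {
  static MessageType requestedSchema(MessageType fileSchema, List<String> tableColumns) {
    List<Type> fields = new ArrayList<Type>();
    for (String col : tableColumns) {
      if (fileSchema.containsField(col)) {
        fields.add(fileSchema.getType(col));  // column present in the file
      } else {
        // newly added table column: expose it as optional binary so the
        // reader returns null rather than erroring out
        fields.add(new PrimitiveType(Repetition.OPTIONAL, PrimitiveTypeName.BINARY, col));
      }
    }
    return new MessageType("table_schema", fields);
  }
}
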


