hive-commits mailing list archives

From: ga...@apache.org
Subject: svn commit: r1668753 [7/7] - in /hive/trunk: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/ metastore/if/ metastore/src/gen/thrift/gen-cpp/ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ metastore/s...
Date: Mon, 23 Mar 2015 22:22:07 GMT
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Mon Mar 23 22:22:06 2015
@@ -66,6 +66,7 @@ import org.apache.hadoop.hive.common.met
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
+import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
 import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
 import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
@@ -5566,6 +5567,12 @@ public class HiveMetaStore extends Thrif
     }
 
     @Override
+    public void add_dynamic_partitions(AddDynamicPartitions rqst)
+        throws NoSuchTxnException, TxnAbortedException, TException {
+      getTxnHandler().addDynamicPartitions(rqst);
+    }
+
+    @Override
     public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest request)
         throws MetaException, TException {
 

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Mon Mar 23 22:22:06 2015
@@ -54,6 +54,7 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.conf.HiveConfUtil;
 import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
+import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
 import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
 import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
@@ -1915,6 +1916,12 @@ public class HiveMetaStoreClient impleme
   }
 
   @Override
+  public void addDynamicPartitions(long txnId, String dbName, String tableName,
+                                   List<String> partNames) throws TException {
+    client.add_dynamic_partitions(new AddDynamicPartitions(txnId, dbName, tableName, partNames));
+  }
+
+  @Override
   public NotificationEventResponse getNextNotification(long lastEventId, int maxEvents,
                                                        NotificationFilter filter) throws TException {
     NotificationEventRequest rqst = new NotificationEventRequest(lastEventId);

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Mon Mar 23 22:22:06 2015
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.metastore
 
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
@@ -1351,6 +1352,18 @@ public interface IMetaStoreClient {
   ShowCompactResponse showCompactions() throws TException;
 
   /**
+   * Send a list of partitions to the metastore to indicate which partitions were loaded
+   * dynamically.
+   * @param txnId id of the transaction
+   * @param dbName database name
+   * @param tableName table name
+   * @param partNames partition name, as constructed by Warehouse.makePartName
+   * @throws TException
+   */
+  void addDynamicPartitions(long txnId, String dbName, String tableName, List<String> partNames)
+    throws TException;
+
+  /**
   * A filter provided by the client that determines if a given notification event should be
    * returned.
    */
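
The javadoc for the new addDynamicPartitions declaration above names Warehouse.makePartName as the way partition names are built. A minimal caller-side sketch, assuming an already-connected IMetaStoreClient ("client"), a partitioned thrift Table ("table"), and an open transaction id ("txnId"); none of these names come from this commit:

  // Hypothetical caller-side method; the single partition value "2015-03-23" is illustrative only.
  // Needs: java.util.{ArrayList, Arrays, List}, org.apache.hadoop.hive.metastore.{IMetaStoreClient, Warehouse},
  // org.apache.hadoop.hive.metastore.api.Table, org.apache.thrift.TException.
  void reportDynamicPartitions(IMetaStoreClient client, Table table, long txnId) throws TException {
    List<String> partNames = new ArrayList<String>();
    // Build names the same way the loader does, e.g. "ds=2015-03-23" for a table partitioned by ds.
    partNames.add(Warehouse.makePartName(table.getPartitionKeys(), Arrays.asList("2015-03-23")));
    client.addDynamicPartitions(txnId, table.getDbName(), table.getTableName(), partNames);
  }

Hive.loadDynamicPartitions, changed further down in this commit, does essentially this for every partition it creates during an ACID load.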

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java Mon Mar 23 22:22:06 2015
@@ -22,7 +22,7 @@ import org.apache.hadoop.hive.metastore.
 /**
  * Information on a possible or running compaction.
  */
-public class CompactionInfo {
+public class CompactionInfo implements Comparable<CompactionInfo> {
   public long id;
   public String dbname;
   public String tableName;
@@ -68,4 +68,9 @@ public class CompactionInfo {
   public boolean isMajorCompaction() {
     return CompactionType.MAJOR == type;
   }
+
+  @Override
+  public int compareTo(CompactionInfo o) {
+    return getFullPartitionName().compareTo(o.getFullPartitionName());
+  }
 }
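
Making CompactionInfo Comparable, ordered by full partition name, gives callers a deterministic iteration order over the unordered Set returned by CompactionTxnHandler.findPotentialCompactions; the new test further down relies on exactly this. A one-line sketch ("txnHandler" assumed to be a CompactionTxnHandler; needs java.util.SortedSet and java.util.TreeSet):

  SortedSet<CompactionInfo> sorted = new TreeSet<CompactionInfo>(txnHandler.findPotentialCompactions(1000));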

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java Mon Mar 23 22:22:06 2015
@@ -783,6 +783,48 @@ public class TxnHandler {
     }
   }
 
+  public void addDynamicPartitions(AddDynamicPartitions rqst)
+      throws NoSuchTxnException,  TxnAbortedException, MetaException {
+    Connection dbConn = null;
+    Statement stmt = null;
+    try {
+      try {
+        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+        stmt = dbConn.createStatement();
+        // Heartbeat this first to make sure the transaction is still valid.
+        heartbeatTxn(dbConn, rqst.getTxnid());
+        for (String partName : rqst.getPartitionnames()) {
+          StringBuilder buff = new StringBuilder();
+          buff.append("insert into TXN_COMPONENTS (tc_txnid, tc_database, tc_table, tc_partition) values (");
+          buff.append(rqst.getTxnid());
+          buff.append(", '");
+          buff.append(rqst.getDbname());
+          buff.append("', '");
+          buff.append(rqst.getTablename());
+          buff.append("', '");
+          buff.append(partName);
+          buff.append("')");
+          String s = buff.toString();
+          LOG.debug("Going to execute update <" + s + ">");
+          stmt.executeUpdate(s);
+        }
+        LOG.debug("Going to commit");
+        dbConn.commit();
+      } catch (SQLException e) {
+        LOG.debug("Going to rollback");
+        rollbackDBConn(dbConn);
+        checkRetryable(dbConn, e, "addDynamicPartitions");
+        throw new MetaException("Unable to insert into from transaction database " +
+          StringUtils.stringifyException(e));
+      } finally {
+        closeStmt(stmt);
+        closeDbConn(dbConn);
+      }
+    } catch (RetryException e) {
+      addDynamicPartitions(rqst);
+    }
+  }
+
   /**
    * For testing only, do not use.
    */
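
The loop above splices the transaction id and the caller-supplied database, table, and partition names directly into the SQL text. As a hedged alternative sketch only, not what this commit does, the same rows could be inserted through a JDBC PreparedStatement, which handles quoting of the string values; "dbConn" and "rqst" refer to the variables already in scope in addDynamicPartitions:

  // Sketch: equivalent inserts into TXN_COMPONENTS using bind variables (needs java.sql.PreparedStatement).
  PreparedStatement ps = dbConn.prepareStatement(
      "insert into TXN_COMPONENTS (tc_txnid, tc_database, tc_table, tc_partition) values (?, ?, ?, ?)");
  try {
    for (String partName : rqst.getPartitionnames()) {
      ps.setLong(1, rqst.getTxnid());
      ps.setString(2, rqst.getDbname());
      ps.setString(3, rqst.getTablename());
      ps.setString(4, partName);
      ps.executeUpdate();
    }
  } finally {
    ps.close();
  }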

Modified: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java (original)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java Mon Mar 23 22:22:06 2015
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hive.metastore.txn;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.log4j.Level;
@@ -26,8 +28,11 @@ import org.junit.Before;
 import org.junit.Test;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
 
 import static junit.framework.Assert.*;
 
@@ -38,6 +43,7 @@ public class TestCompactionTxnHandler {
 
   private HiveConf conf = new HiveConf();
   private CompactionTxnHandler txnHandler;
+  static final private Log LOG = LogFactory.getLog(TestCompactionTxnHandler.class);
 
   public TestCompactionTxnHandler() throws Exception {
     TxnDbUtil.setConfValues(conf);
@@ -417,6 +423,40 @@ public class TestCompactionTxnHandler {
     assertEquals(3, txnList.getOpen_txnsSize());
   }
 
+  @Test
+  public void addDynamicPartitions() throws Exception {
+    String dbName = "default";
+    String tableName = "adp_table";
+    OpenTxnsResponse openTxns = txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost"));
+    long txnId = openTxns.getTxn_ids().get(0);
+    // lock a table, as in dynamic partitions
+    LockComponent lc = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, dbName);
+    lc.setTablename(tableName);
+    LockRequest lr = new LockRequest(Arrays.asList(lc), "me", "localhost");
+    lr.setTxnid(txnId);
+    LockResponse lock = txnHandler.lock(new LockRequest(Arrays.asList(lc), "me", "localhost"));
+    assertEquals(LockState.ACQUIRED, lock.getState());
+
+    txnHandler.addDynamicPartitions(new AddDynamicPartitions(txnId, dbName, tableName,
+        Arrays.asList("ds=yesterday", "ds=today")));
+    txnHandler.commitTxn(new CommitTxnRequest(txnId));
+
+    Set<CompactionInfo> potentials = txnHandler.findPotentialCompactions(1000);
+    assertEquals(2, potentials.size());
+    SortedSet<CompactionInfo> sorted = new TreeSet<CompactionInfo>(potentials);
+
+    int i = 0;
+    for (CompactionInfo ci : sorted) {
+      assertEquals(dbName, ci.dbname);
+      assertEquals(tableName, ci.tableName);
+      switch (i++) {
+      case 0: assertEquals("ds=today", ci.partName); break;
+      case 1: assertEquals("ds=yesterday", ci.partName); break;
+      default: throw new RuntimeException("What?");
+      }
+    }
+  }
+
   @Before
   public void setUp() throws Exception {
     TxnDbUtil.prepDb();

Modified: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java (original)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java Mon Mar 23 22:22:06 2015
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.metastore
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreThread;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Mon Mar 23 22:22:06 2015
@@ -971,6 +971,7 @@ public class Driver implements CommandPr
         if (txnId == SessionState.NO_CURRENT_TXN) {
           txnId = txnMgr.openTxn(userFromUGI);
           ss.setCurrentTxn(txnId);
+          LOG.debug("Setting current transaction to " + txnId);
         }
         // Set the transaction id in all of the acid file sinks
         if (acidSinks != null) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Mon Mar 23 22:22:06 2015
@@ -379,7 +379,8 @@ public class MoveTask extends Task<MoveW
                 dpCtx.getNumDPCols(),
                 tbd.getHoldDDLTime(),
                 isSkewedStoredAsDirs(tbd),
-                work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID);
+                work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID,
+                SessionState.get().getCurrentTxn());
             console.printInfo("\t Time taken for load dynamic partitions : "  +
                 (System.currentTimeMillis() - startTime));
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Mon Mar 23 22:22:06 2015
@@ -1521,12 +1521,13 @@ private void constructOneLBLocationMap(F
    * @param holdDDLTime
    * @param listBucketingEnabled
    * @param isAcid true if this is an ACID operation
+   * @param txnId txnId, can be 0 unless isAcid == true
    * @return partition map details (PartitionSpec and Partition)
    * @throws HiveException
    */
   public Map<Map<String, String>, Partition> loadDynamicPartitions(Path loadPath,
       String tableName, Map<String, String> partSpec, boolean replace,
-      int numDP, boolean holdDDLTime, boolean listBucketingEnabled, boolean isAcid)
+      int numDP, boolean holdDDLTime, boolean listBucketingEnabled, boolean isAcid, long txnId)
       throws HiveException {
 
     Set<Path> validPartitions = new HashSet<Path>();
@@ -1584,9 +1585,18 @@ private void constructOneLBLocationMap(F
         partitionsMap.put(fullPartSpec, newPartition);
         LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);
       }
+      if (isAcid) {
+        List<String> partNames = new ArrayList<>(partitionsMap.size());
+        for (Partition p : partitionsMap.values()) {
+          partNames.add(p.getName());
+        }
+        metaStoreClient.addDynamicPartitions(txnId, tbl.getDbName(), tbl.getTableName(), partNames);
+      }
       return partitionsMap;
     } catch (IOException e) {
       throw new HiveException(e);
+    } catch (TException te) {
+      throw new HiveException(te);
     }
   }
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java Mon Mar 23 22:22:06 2015
@@ -28,6 +28,7 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.metastore.api.CompactionRequest;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
@@ -87,12 +88,28 @@ public class Initiator extends Compactor
             LOG.debug("Checking to see if we should compact " + ci.getFullPartitionName());
             try {
               Table t = resolveTable(ci);
+              if (t == null) {
+                // Most likely this means it's a temp table
+                LOG.debug("Can't find table " + ci.getFullTableName() + ", assuming it's a temp " +
+                    "table and moving on.");
+                continue;
+              }
+
               // check if no compaction set for this table
               if (noAutoCompactSet(t)) {
                 LOG.info("Table " + tableName(t) + " marked true so we will not compact it.");
                 continue;
               }
 
+              // Check to see if this is a table level request on a partitioned table.  If so,
+              // then it's a dynamic partitioning case and we shouldn't check the table itself.
+              if (t.getPartitionKeys() != null && t.getPartitionKeys().size() > 0 &&
+                  ci.partName  == null) {
+                LOG.debug("Skipping entry for " + ci.getFullTableName() + " as it is from dynamic" +
+                    " partitioning");
+                continue;
+              }
+
              // Check if we already have initiated or are working on a compaction for this partition
              // or table.  If so, skip it.  If we are just waiting on cleaning we can still check,
               // as it may be time to compact again even though we haven't cleaned.
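
The new skip above amounts to one predicate: a table-level entry against a partitioned table can only have come from a dynamic partition insert (which locks at the table level but writes to individual partitions), so there is nothing at the table level to compact. A hypothetical helper, not part of this commit, expressing the same check:

  // Same condition as the inline check above; Table is the metastore api Table, ci a CompactionInfo.
  private static boolean isDynamicPartitioningEntry(Table t, CompactionInfo ci) {
    return t.getPartitionKeys() != null && t.getPartitionKeys().size() > 0 && ci.partName == null;
  }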

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java Mon Mar 23 22:22:06 2015
@@ -99,17 +99,22 @@ public abstract class CompactorTest {
   }
 
  protected Table newTable(String dbName, String tableName, boolean partitioned) throws TException {
-    return newTable(dbName, tableName, partitioned, new HashMap<String, String>(), null);
+    return newTable(dbName, tableName, partitioned, new HashMap<String, String>(), null, false);
   }
 
   protected Table newTable(String dbName, String tableName, boolean partitioned,
                            Map<String, String> parameters)  throws TException {
-    return newTable(dbName, tableName, partitioned, parameters, null);
+    return newTable(dbName, tableName, partitioned, parameters, null, false);
 
   }
 
+  protected Table newTempTable(String tableName) throws TException {
+    return newTable("default", tableName, false, null, null, true);
+  }
+
   protected Table newTable(String dbName, String tableName, boolean partitioned,
-                           Map<String, String> parameters, List<Order> sortCols)
+                           Map<String, String> parameters, List<Order> sortCols,
+                           boolean  isTemporary)
       throws  TException {
     Table table = new Table();
     table.setTableName(tableName);
@@ -123,6 +128,7 @@ public abstract class CompactorTest {
     }
 
     table.setParameters(parameters);
+    if (isTemporary) table.setTemporary(true);
 
     // drop the table first, in case some previous test created it
     ms.dropTable(dbName, tableName);

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java Mon Mar 23 22:22:06 2015
@@ -625,5 +625,32 @@ public class TestInitiator extends Compa
     Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
   }
 
-  // TODO test compactions with legacy file types
+  @Test
+  public void noCompactTableDynamicPartitioning() throws Exception {
+    Table t = newTable("default", "nctdp", true);
+    Partition p = newPartition(t, "today");
+
+    addBaseFile(t, p, 20L, 20);
+    addDeltaFile(t, p, 21L, 22L, 2);
+    addDeltaFile(t, p, 23L, 24L, 2);
+
+    burnThroughTransactions(23);
+
+    long txnid = openTxn();
+    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
+    comp.setTablename("nctdp");
+    List<LockComponent> components = new ArrayList<LockComponent>(1);
+    components.add(comp);
+    LockRequest req = new LockRequest(components, "me", "localhost");
+    req.setTxnid(txnid);
+    LockResponse res = txnHandler.lock(req);
+    txnHandler.commitTxn(new CommitTxnRequest(txnid));
+
+    startInitiator();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(0, compacts.size());
+  }
+
 }

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java?rev=1668753&r1=1668752&r2=1668753&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java Mon Mar 23 22:22:06 2015
@@ -204,7 +204,7 @@ public class TestWorker extends Compacto
     List<Order> sortCols = new ArrayList<Order>(1);
     sortCols.add(new Order("b", 1));
 
-    Table t = newTable("default", "st", false, new HashMap<String, String>(), sortCols);
+    Table t = newTable("default", "st", false, new HashMap<String, String>(), sortCols, false);
 
     addBaseFile(t, null, 20L, 20);
     addDeltaFile(t, null, 21L, 22L, 2);
@@ -229,7 +229,7 @@ public class TestWorker extends Compacto
     List<Order> sortCols = new ArrayList<Order>(1);
     sortCols.add(new Order("b", 1));
 
-    Table t = newTable("default", "sp", true, new HashMap<String, String>(), sortCols);
+    Table t = newTable("default", "sp", true, new HashMap<String, String>(), sortCols, false);
     Partition p = newPartition(t, "today", sortCols);
 
     addBaseFile(t, p, 20L, 20);


