hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ga...@apache.org
Subject svn commit: r1622748 [13/13] - in /hive/trunk/metastore: if/ src/gen/thrift/gen-cpp/ src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ src/gen/thrift/gen-php/metastore/ src/gen/thrift/gen-py/hive_metastore/ src/gen/thrift/gen-rb/ src/ja...
Date Fri, 05 Sep 2014 17:52:34 GMT
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Fri Sep  5 17:52:32 2014
@@ -57,6 +57,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
 public interface RawStore extends Configurable {
@@ -130,6 +131,9 @@ public interface RawStore extends Config
   public abstract boolean addPartitions(String dbName, String tblName, List<Partition> parts)
       throws InvalidObjectException, MetaException;
 
+  public abstract boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists)
+      throws InvalidObjectException, MetaException;
+
   public abstract Partition getPartition(String dbName, String tableName,
       List<String> part_vals) throws MetaException, NoSuchObjectException;
 

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java Fri Sep  5 17:52:32 2014
@@ -507,8 +507,18 @@ public class Warehouse {
    */
   public FileStatus[] getFileStatusesForSD(StorageDescriptor desc)
       throws MetaException {
+    return getFileStatusesForLocation(desc.getLocation());
+  }
+
+  /**
+   * @param location the file-system location whose file statuses are sought
+   * @return array of FileStatus objects corresponding to the files
+   * under the passed location
+   */
+  public FileStatus[] getFileStatusesForLocation(String location)
+      throws MetaException {
     try {
-      Path path = new Path(desc.getLocation());
+      Path path = new Path(location);
       FileSystem fileSys = path.getFileSystem(conf);
       return HiveStatsUtils.getFileStatusRecurse(path, -1, fileSys);
     } catch (IOException ioe) {

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java Fri Sep  5 17:52:32 2014
@@ -21,19 +21,23 @@ package org.apache.hadoop.hive.metastore
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
 
 public class AddPartitionEvent extends ListenerEvent {
 
   private final Table table;
   private final List<Partition> partitions;
+  private PartitionSpecProxy partitionSpecProxy;
 
   public AddPartitionEvent(Table table, List<Partition> partitions, boolean status, HMSHandler handler) {
     super(status, handler);
     this.table = table;
     this.partitions = partitions;
+    this.partitionSpecProxy = null;
   }
 
   public AddPartitionEvent(Table table, Partition partition, boolean status, HMSHandler handler) {
@@ -41,6 +45,16 @@ public class AddPartitionEvent extends L
   }
 
   /**
+   * Alternative constructor to use PartitionSpec APIs.
+   */
+  public AddPartitionEvent(Table table, PartitionSpecProxy partitionSpec, boolean status, HMSHandler handler) {
+    super(status, handler);
+    this.table = table;
+    this.partitions = null;
+    this.partitionSpecProxy = partitionSpec;
+  }
+
+  /**
    * @return The table.
    */
   public Table getTable() {
@@ -54,4 +68,11 @@ public class AddPartitionEvent extends L
     return partitions;
   }
 
+  /**
+   * @return Iterator for partitions.
+   */
+  public Iterator<Partition> getPartitionIterator() {
+    return partitionSpecProxy == null ? null : partitionSpecProxy.getPartitionIterator();
+  }
+
 }

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreAddPartitionEvent.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreAddPartitionEvent.java?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreAddPartitionEvent.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreAddPartitionEvent.java Fri Sep  5 17:52:32 2014
@@ -21,19 +21,23 @@ package org.apache.hadoop.hive.metastore
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
 
 public class PreAddPartitionEvent extends PreEventContext {
 
   private final Table table;
   private final List<Partition> partitions;
+  private PartitionSpecProxy partitionSpecProxy;
 
   public PreAddPartitionEvent (Table table, List<Partition> partitions, HMSHandler handler) {
     super(PreEventType.ADD_PARTITION, handler);
     this.table = table;
     this.partitions = partitions;
+    this.partitionSpecProxy = null;
   }
 
   public PreAddPartitionEvent(Table table, Partition partition, HMSHandler handler) {
@@ -41,6 +45,14 @@ public class PreAddPartitionEvent extend
   }
 
   /**
+   * Alternative constructor, using PartitionSpecProxy.
+   */
+  public PreAddPartitionEvent(Table table, PartitionSpecProxy partitionSpecProxy, HMSHandler handler) {
+    this(table, (List<Partition>)null, handler);
+    this.partitionSpecProxy = partitionSpecProxy;
+  }
+
+  /**
    * @return the partitions
    */
   public List<Partition> getPartitions() {
@@ -53,4 +65,11 @@ public class PreAddPartitionEvent extend
   public Table getTable() {
     return table ;
   }
+
+  /**
+   * @return Iterator over the partitions (via the PartitionSpecProxy), or null if none was set.
+   */
+  public Iterator<Partition> getPartitionIterator() {
+    return partitionSpecProxy == null ? null : partitionSpecProxy.getPartitionIterator();
+  }
 }

Added: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java?rev=1622748&view=auto
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java (added)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java Fri Sep  5 17:52:32 2014
@@ -0,0 +1,210 @@
+package org.apache.hadoop.hive.metastore.partition.spec;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Implementation of PartitionSpecProxy that composes a list of PartitionSpecProxy.
+ */
+public class CompositePartitionSpecProxy extends PartitionSpecProxy {
+
+  private String dbName;
+  private String tableName;
+  private List<PartitionSpec> partitionSpecs;
+  private List<PartitionSpecProxy> partitionSpecProxies;
+  private int size = 0;
+
+  protected CompositePartitionSpecProxy(List<PartitionSpec> partitionSpecs) {
+    this.partitionSpecs = partitionSpecs;
+    if (partitionSpecs.isEmpty()) {
+      dbName = null;
+      tableName = null;
+    }
+    else {
+      dbName = partitionSpecs.get(0).getDbName();
+      tableName = partitionSpecs.get(0).getTableName();
+      this.partitionSpecProxies = new ArrayList<PartitionSpecProxy>(partitionSpecs.size());
+      for (PartitionSpec partitionSpec : partitionSpecs) {
+        PartitionSpecProxy partitionSpecProxy = Factory.get(partitionSpec);
+        this.partitionSpecProxies.add(partitionSpecProxy);
+        size += partitionSpecProxy.size();
+      }
+    }
+    // Assert class-invariant.
+    assert isValid() : "Invalid CompositePartitionSpecProxy!";
+  }
+
+  protected CompositePartitionSpecProxy(String dbName, String tableName, List<PartitionSpec> partitionSpecs) {
+    this.dbName = dbName;
+    this.tableName = tableName;
+    this.partitionSpecs = partitionSpecs;
+    this.partitionSpecProxies = new ArrayList<PartitionSpecProxy>(partitionSpecs.size());
+    for (PartitionSpec partitionSpec : partitionSpecs) {
+      this.partitionSpecProxies.add(PartitionSpecProxy.Factory.get(partitionSpec));
+    }
+    // Assert class-invariant.
+    assert isValid() : "Invalid CompositePartitionSpecProxy!";
+  }
+
+  private boolean isValid() {
+    for (PartitionSpecProxy partitionSpecProxy : partitionSpecProxies) {
+      if (partitionSpecProxy instanceof CompositePartitionSpecProxy) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  @Override
+  public int size() {
+    return size;
+  }
+
+  /**
+   * Iterator to iterate over all Partitions, across all PartitionSpecProxy instances within the Composite.
+   */
+  public static class Iterator implements PartitionIterator {
+
+    private CompositePartitionSpecProxy composite;
+    private List<PartitionSpecProxy> partitionSpecProxies;
+    private int index = -1; // Index into partitionSpecs.
+    private PartitionIterator iterator = null;
+
+    public Iterator(CompositePartitionSpecProxy composite) {
+      this.composite = composite;
+      this.partitionSpecProxies = composite.partitionSpecProxies;
+
+      if (this.partitionSpecProxies != null && !this.partitionSpecProxies.isEmpty()) {
+        this.index = 0;
+        this.iterator = this.partitionSpecProxies.get(this.index).getPartitionIterator();
+      }
+    }
+
+    @Override
+    public boolean hasNext() {
+
+      if (iterator == null) {
+        return false;
+      }
+
+      if (iterator.hasNext()) {
+        return true;
+      }
+
+      while ( ++index < partitionSpecProxies.size()
+          && !(iterator = partitionSpecProxies.get(index).getPartitionIterator()).hasNext());
+
+      return index < partitionSpecProxies.size() && iterator.hasNext();
+
+    }
+
+    @Override
+    public Partition next() {
+
+        if (iterator.hasNext())
+          return iterator.next();
+
+        while (++index < partitionSpecProxies.size()
+            && !(iterator = partitionSpecProxies.get(index).getPartitionIterator()).hasNext());
+
+        return index == partitionSpecProxies.size()? null : iterator.next();
+
+    }
+
+    @Override
+    public void remove() {
+      iterator.remove();
+    }
+
+    @Override
+    public Partition getCurrent() {
+      return iterator.getCurrent();
+    }
+
+    @Override
+    public String getDbName() {
+      return composite.dbName;
+    }
+
+    @Override
+    public String getTableName() {
+      return composite.tableName;
+    }
+
+    @Override
+    public Map<String, String> getParameters() {
+      return iterator.getParameters();
+    }
+
+    @Override
+    public void setParameters(Map<String, String> parameters) {
+      iterator.setParameters(parameters);
+    }
+
+    @Override
+    public String getLocation() {
+      return iterator.getLocation();
+    }
+
+    @Override
+    public void putToParameters(String key, String value) {
+      iterator.putToParameters(key, value);
+    }
+
+    @Override
+    public void setCreateTime(long time) {
+      iterator.setCreateTime(time);
+    }
+  }
+
+  @Override
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+    for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) {
+      partSpecProxy.setDbName(dbName);
+    }
+  }
+
+  @Override
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+    for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) {
+      partSpecProxy.setTableName(tableName);
+    }
+  }
+
+  @Override
+  public String getDbName() {
+    return dbName;
+  }
+
+  @Override
+  public String getTableName() {
+    return tableName;
+  }
+
+  @Override
+  public PartitionIterator getPartitionIterator() {
+    return new Iterator(this);
+  }
+
+  @Override
+  public List<PartitionSpec> toPartitionSpec() {
+    return partitionSpecs;
+  }
+
+  @Override
+  public void setRootLocation(String rootLocation) throws MetaException {
+    for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) {
+      partSpecProxy.setRootLocation(rootLocation);
+    }
+  }
+}

Added: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java?rev=1622748&view=auto
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java (added)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java Fri Sep  5 17:52:32 2014
@@ -0,0 +1,153 @@
+package org.apache.hadoop.hive.metastore.partition.spec;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * PartitionSpecProxy implementation that composes a List of Partitions.
+ */
+public class PartitionListComposingSpecProxy extends PartitionSpecProxy {
+
+  private PartitionSpec partitionSpec;
+
+  protected PartitionListComposingSpecProxy(PartitionSpec partitionSpec) {
+    assert partitionSpec.isSetPartitionList()
+        : "Partition-list should have been set.";
+    this.partitionSpec = partitionSpec;
+  }
+
+  @Override
+  public String getDbName() {
+    return partitionSpec.getDbName();
+  }
+
+  @Override
+  public String getTableName() {
+    return partitionSpec.getTableName();
+  }
+
+  @Override
+  public PartitionIterator getPartitionIterator() {
+    return new Iterator(this);
+  }
+
+  @Override
+  public List<PartitionSpec> toPartitionSpec() {
+    return Arrays.asList(partitionSpec);
+  }
+
+  @Override
+  public int size() {
+    return partitionSpec.getPartitionList().getPartitionsSize();
+  }
+
+  @Override
+  public void setDbName(String dbName) {
+    partitionSpec.setDbName(dbName);
+    for (Partition partition : partitionSpec.getPartitionList().getPartitions()) {
+      partition.setDbName(dbName);
+    }
+  }
+
+  @Override
+  public void setTableName(String tableName) {
+    partitionSpec.setTableName(tableName);
+    for (Partition partition : partitionSpec.getPartitionList().getPartitions()) {
+      partition.setTableName(tableName);
+    }
+  }
+
+  @Override
+  public void setRootLocation(String newRootPath) throws MetaException {
+
+    String oldRootPath = partitionSpec.getRootPath();
+
+    if (oldRootPath == null) {
+      throw new MetaException("No common root-path. Can't replace root-path!");
+    }
+
+    for (Partition partition : partitionSpec.getPartitionList().getPartitions()) {
+      String location = partition.getSd().getLocation();
+      if (location.startsWith(oldRootPath)) {
+        partition.getSd().setLocation(location.replace(oldRootPath, newRootPath));
+      }
+      else {
+        throw new MetaException("Common root-path not found. Can't replace root-path!");
+      }
+    }
+  }
+
+  public static class Iterator implements PartitionIterator {
+
+    PartitionListComposingSpecProxy partitionSpecProxy;
+    List<Partition> partitionList;
+    int index;
+
+    public Iterator(PartitionListComposingSpecProxy partitionSpecProxy) {
+      this.partitionSpecProxy = partitionSpecProxy;
+      this.partitionList = partitionSpecProxy.partitionSpec.getPartitionList().getPartitions();
+      this.index = 0;
+    }
+
+    @Override
+    public Partition getCurrent() {
+      return partitionList.get(index);
+    }
+
+    @Override
+    public String getDbName() {
+      return partitionSpecProxy.getDbName();
+    }
+
+    @Override
+    public String getTableName() {
+      return partitionSpecProxy.getTableName();
+    }
+
+    @Override
+    public Map<String, String> getParameters() {
+      return partitionList.get(index).getParameters();
+    }
+
+    @Override
+    public void setParameters(Map<String, String> parameters) {
+      partitionList.get(index).setParameters(parameters);
+    }
+
+    @Override
+    public String getLocation() {
+      return partitionList.get(index).getSd().getLocation();
+    }
+
+    @Override
+    public void putToParameters(String key, String value) {
+      partitionList.get(index).putToParameters(key, value);
+    }
+
+    @Override
+    public void setCreateTime(long time) {
+      partitionList.get(index).setCreateTime((int)time);
+    }
+
+    @Override
+    public boolean hasNext() {
+      return index < partitionList.size();
+    }
+
+    @Override
+    public Partition next() {
+      return partitionList.get(index++);
+    }
+
+    @Override
+    public void remove() {
+      partitionList.remove(index);
+    }
+  } // class Iterator;
+
+} // class PartitionListComposingSpecProxy;

Added: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java?rev=1622748&view=auto
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java (added)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java Fri Sep  5 17:52:32 2014
@@ -0,0 +1,181 @@
+package org.apache.hadoop.hive.metastore.partition.spec;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Polymorphic proxy class, equivalent to org.apache.hadoop.hive.metastore.api.PartitionSpec.
+ */
+public abstract class PartitionSpecProxy {
+
+  /**
+   * The number of Partition instances represented by the PartitionSpec.
+   * @return Number of partitions.
+   */
+  public abstract int size();
+
+  /**
+   * Setter for name of the DB.
+   * @param dbName The name of the DB.
+   */
+  public abstract void setDbName(String dbName);
+
+  /**
+   * Setter for name of the table.
+   * @param tableName The name of the table.
+   */
+  public abstract void setTableName(String tableName);
+
+  /**
+   * Getter for name of the DB.
+   * @return The name of the DB.
+   */
+  public abstract String getDbName();
+
+  /**
+   * Getter for name of the table.
+   * @return The name of the table.
+   */
+  public abstract String getTableName();
+
+  /**
+   * Iterator to the (virtual) sequence of Partitions represented by the PartitionSpec.
+   * @return A PartitionIterator to the beginning of the Partition sequence.
+   */
+  public abstract PartitionIterator getPartitionIterator();
+
+  /**
+   * Conversion to a org.apache.hadoop.hive.metastore.api.PartitionSpec sequence.
+   * @return A list of org.apache.hadoop.hive.metastore.api.PartitionSpec instances.
+   */
+  public abstract List<PartitionSpec> toPartitionSpec();
+
+  /**
+   * Setter for the common root-location for all partitions in the PartitionSpec.
+   * @param rootLocation The new common root-location.
+   * @throws MetaException
+   */
+  public abstract void setRootLocation(String rootLocation) throws MetaException;
+
+  /**
+   * Factory to construct PartitionSpecProxy instances, from PartitionSpecs.
+   */
+  public static class Factory {
+
+    /**
+     * Factory method. Construct PartitionSpecProxy from raw PartitionSpec.
+     * @param partSpec Raw PartitionSpec from the Thrift API.
+     * @return PartitionSpecProxy instance.
+     */
+    public static PartitionSpecProxy get(PartitionSpec partSpec) {
+
+      if (partSpec == null) {
+        return null;
+      }
+      else
+      if (partSpec.isSetPartitionList()) {
+        return new PartitionListComposingSpecProxy(partSpec);
+      }
+      else
+      if (partSpec.isSetSharedSDPartitionSpec()) {
+        return new PartitionSpecWithSharedSDProxy(partSpec);
+      }
+
+      assert false : "Unsupported type of PartitionSpec!";
+      return null;
+    }
+
+    /**
+     * Factory method to construct CompositePartitionSpecProxy.
+     * @param partitionSpecs List of raw PartitionSpecs.
+     * @return A CompositePartitionSpecProxy instance.
+     */
+    public static PartitionSpecProxy get(List<PartitionSpec> partitionSpecs) {
+      return new CompositePartitionSpecProxy(partitionSpecs);
+    }
+
+  } // class Factory;
+
+  /**
+   * Iterator to iterate over Partitions corresponding to a PartitionSpec.
+   */
+  public static interface PartitionIterator extends java.util.Iterator<Partition> {
+
+    /**
+     * Getter for the Partition "pointed to" by the iterator.
+     * Like next(), but without advancing the iterator.
+     * @return The "current" partition object.
+     */
+    public Partition getCurrent();
+
+    /**
+     * Getter for the name of the DB.
+     * @return Name of the DB.
+     */
+    public String getDbName();
+
+    /**
+     * Getter for the name of the table.
+     * @return Name of the table.
+     */
+    public String getTableName();
+
+    /**
+     * Getter for the Partition parameters.
+     * @return Key-value map for Partition-level parameters.
+     */
+    public Map<String, String> getParameters();
+
+    /**
+     * Setter for Partition parameters.
+     * @param parameters Key-value map for Partition-level parameters.
+     */
+    public void setParameters(Map<String, String> parameters);
+
+    /**
+     * Insert an individual parameter to a Partition's parameter-set.
+     * @param key
+     * @param value
+     */
+    public void putToParameters(String key, String value);
+
+    /**
+     * Getter for Partition-location.
+     * @return Partition's location.
+     */
+    public String getLocation();
+
+    /**
+     * Setter for creation-time of a Partition.
+     * @param time Timestamp indicating the time of creation of the Partition.
+     */
+    public void setCreateTime(long time);
+
+  } // class PartitionIterator;
+
+  /**
+   * Simple wrapper class for pre-constructed Partitions, to expose a PartitionIterator interface,
+   * where the iterator-sequence consists of just one Partition.
+   */
+  public static class SimplePartitionWrapperIterator implements PartitionIterator {
+    private Partition partition;
+    public SimplePartitionWrapperIterator(Partition partition) {this.partition = partition;}
+
+    @Override public Partition getCurrent() { return partition; }
+    @Override public String getDbName() { return partition.getDbName(); }
+    @Override public String getTableName() { return partition.getTableName(); }
+    @Override public Map<String, String> getParameters() { return partition.getParameters(); }
+    @Override public void setParameters(Map<String, String> parameters) { partition.setParameters(parameters); }
+    @Override public void putToParameters(String key, String value) { partition.putToParameters(key, value);}
+    @Override public String getLocation() { return partition.getSd().getLocation(); }
+    @Override public void setCreateTime(long time) { partition.setCreateTime((int)time);}
+    @Override public boolean hasNext() { return false; } // No next partition.
+    @Override public Partition next() { return null; } // No next partition.
+    @Override public void remove() {} // Do nothing.
+  } // class SimplePartitionWrapperIterator;
+
+} // class PartitionSpecProxy;

Added: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java?rev=1622748&view=auto
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java (added)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java Fri Sep  5 17:52:32 2014
@@ -0,0 +1,154 @@
+package org.apache.hadoop.hive.metastore.partition.spec;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD;
+import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Subclass of PartitionSpecProxy that pulls out commonality of
+ * StorageDescriptor properties within a Partition-list into a common
+ * StorageDescriptor instance.
+ */
+public class PartitionSpecWithSharedSDProxy extends PartitionSpecProxy {
+
+  private PartitionSpec partitionSpec;
+
+  public PartitionSpecWithSharedSDProxy(PartitionSpec partitionSpec) {
+    assert partitionSpec.isSetSharedSDPartitionSpec();
+    this.partitionSpec = partitionSpec;
+  }
+
+  @Override
+  public int size() {
+    return partitionSpec.getSharedSDPartitionSpec().getPartitionsSize();
+  }
+
+  @Override
+  public void setDbName(String dbName) {
+    partitionSpec.setDbName(dbName);
+  }
+
+  @Override
+  public void setTableName(String tableName) {
+    partitionSpec.setTableName(tableName);
+  }
+
+  @Override
+  public String getDbName() {
+    return partitionSpec.getDbName();
+  }
+
+  @Override
+  public String getTableName() {
+    return partitionSpec.getTableName();
+  }
+
+  public PartitionIterator getPartitionIterator() {
+    return new Iterator(this);
+  }
+
+  @Override
+  public List<PartitionSpec> toPartitionSpec() {
+    return Arrays.asList(partitionSpec);
+  }
+
+  @Override
+  public void setRootLocation(String rootLocation) throws MetaException {
+    partitionSpec.setRootPath(rootLocation);
+    partitionSpec.getSharedSDPartitionSpec().getSd().setLocation(rootLocation);
+  }
+
+  /**
+   * Iterator implementation to iterate over all Partitions within the PartitionSpecWithSharedSDProxy.
+   */
+  public static class Iterator implements PartitionIterator {
+
+    private PartitionSpecWithSharedSDProxy partitionSpecWithSharedSDProxy;
+    private PartitionSpecWithSharedSD pSpec;
+    private int index;
+
+    Iterator(PartitionSpecWithSharedSDProxy partitionSpecWithSharedSDProxy) {
+      this.partitionSpecWithSharedSDProxy = partitionSpecWithSharedSDProxy;
+      this.pSpec = this.partitionSpecWithSharedSDProxy.partitionSpec.getSharedSDPartitionSpec();
+      this.index = 0;
+    }
+
+    @Override
+    public boolean hasNext() {
+      return index < pSpec.getPartitions().size();
+    }
+
+    @Override
+    public Partition next() {
+      Partition partition = getCurrent();
+      ++index;
+      return partition;
+    }
+
+    @Override
+    public void remove() {
+      pSpec.getPartitions().remove(index);
+    }
+
+    @Override
+    public Partition getCurrent() {
+      PartitionWithoutSD partWithoutSD = pSpec.getPartitions().get(index);
+      StorageDescriptor partSD = new StorageDescriptor(pSpec.getSd());
+      partSD.setLocation(partSD.getLocation() + partWithoutSD.getRelativePath());
+
+      return new Partition(
+          partWithoutSD.getValues(),
+          partitionSpecWithSharedSDProxy.partitionSpec.getDbName(),
+          partitionSpecWithSharedSDProxy.partitionSpec.getTableName(),
+          partWithoutSD.getCreateTime(),
+          partWithoutSD.getLastAccessTime(),
+          partSD,
+          partWithoutSD.getParameters()
+      );
+    }
+
+    @Override
+    public String getDbName() {
+      return partitionSpecWithSharedSDProxy.partitionSpec.getDbName();
+    }
+
+    @Override
+    public String getTableName() {
+      return partitionSpecWithSharedSDProxy.partitionSpec.getTableName();
+    }
+
+    @Override
+    public Map<String, String> getParameters() {
+      return pSpec.getPartitions().get(index).getParameters();
+    }
+
+    @Override
+    public void setParameters(Map<String, String> parameters) {
+      pSpec.getPartitions().get(index).setParameters(parameters);
+    }
+
+    @Override
+    public String getLocation() {
+      return pSpec.getSd().getLocation() + pSpec.getPartitions().get(index).getRelativePath();
+    }
+
+    @Override
+    public void putToParameters(String key, String value) {
+      pSpec.getPartitions().get(index).putToParameters(key, value);
+    }
+
+    @Override
+    public void setCreateTime(long time) {
+      pSpec.getPartitions().get(index).setCreateTime((int)time);
+    }
+
+  } // static class Iterator;
+
+}

Modified: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java (original)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java Fri Sep  5 17:52:32 2014
@@ -56,6 +56,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
 /**
@@ -677,6 +678,11 @@ public class DummyRawStoreControlledComm
   }
 
   @Override
+  public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+    return false;
+  }
+
+  @Override
   public void dropPartitions(String dbName, String tblName, List<String> partNames)
       throws MetaException, NoSuchObjectException {
     objectStore.dropPartitions(dbName, tblName, partNames);

Modified: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java (original)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java Fri Sep  5 17:52:32 2014
@@ -57,6 +57,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
 /**
@@ -699,6 +700,11 @@ public class DummyRawStoreForJdoConnecti
   }
 
   @Override
+  public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+    return false;
+  }
+
+  @Override
   public void dropPartitions(String dbName, String tblName, List<String> partNames) {
   }
 

Added: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java?rev=1622748&view=auto
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java (added)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java Fri Sep  5 17:52:32 2014
@@ -0,0 +1,20 @@
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+
+import java.util.List;
+
+/**
+ * Test Mock-out for PartitionExpressionForMetastore.
+ */
+public class MockPartitionExpressionForMetastore implements PartitionExpressionProxy {
+  @Override
+  public String convertExprToFilter(byte[] expr) throws MetaException {
+    return null;
+  }
+
+  @Override
+  public boolean filterPartitionsByExpr(List<String> columnNames, byte[] expr, String defaultPartitionName, List<String> partitionNames) throws MetaException {
+    return false;
+  }
+}

Added: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java?rev=1622748&view=auto
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java (added)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java Fri Sep  5 17:52:32 2014
@@ -0,0 +1,399 @@
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
+import org.apache.hadoop.util.ExitUtil;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.security.Permission;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Test to check PartitionSpec support in HiveMetaStore.
+ */
+public class TestHiveMetaStorePartitionSpecs {
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStorePartitionSpecs.class);
+  private static final String msPort = "20102";
+  private static HiveConf hiveConf;
+  private static SecurityManager securityManager;
+
+  public static class NoExitSecurityManager extends SecurityManager {
+
+    @Override
+    public void checkPermission(Permission perm) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkPermission(Permission perm, Object context) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkExit(int status) {
+
+      super.checkExit(status);
+      throw new ExitUtil.ExitException(status, "System.exit() was called. Raising exception. ");
+    }
+  }
+
+  private static class RunMS implements Runnable {
+
+    @Override
+    public void run() {
+      try {
+        HiveMetaStore.main(new String[]{"-v", "-p", msPort, "--hiveconf",
+            "hive.metastore.expression.proxy=" + MockPartitionExpressionForMetastore.class.getCanonicalName()});
+      } catch (Throwable t) {
+        LOG.error("Exiting. Got exception from metastore: ", t);
+      }
+    }
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    LOG.info("Shutting down metastore.");
+    System.setSecurityManager(securityManager);
+  }
+
+  @BeforeClass
+  public static void startMetaStoreServer() throws Exception {
+
+    Thread t = new Thread(new RunMS());
+    t.start();
+    Thread.sleep(5000);
+
+    securityManager = System.getSecurityManager();
+    System.setSecurityManager(new NoExitSecurityManager());
+    hiveConf = new HiveConf(TestHiveMetaStorePartitionSpecs.class);
+    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
+        + msPort);
+    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
+    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname,
+        "false");
+    hiveConf.set(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.name(), MockPartitionExpressionForMetastore.class.getCanonicalName());
+    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
+    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
+  }
+
+  private static String dbName = "testpartitionspecs_db";
+  private static String tableName = "testpartitionspecs_table";
+  private static int nDates = 10;
+  private static String datePrefix = "2014010";
+
+  private static void createTable(HiveMetaStoreClient hmsc, boolean enablePartitionGrouping) throws Exception {
+
+
+    List<FieldSchema> columns = new ArrayList<FieldSchema>();
+    columns.add(new FieldSchema("foo", "string", ""));
+    columns.add(new FieldSchema("bar", "string", ""));
+
+    List<FieldSchema> partColumns = new ArrayList<FieldSchema>();
+    partColumns.add(new FieldSchema("dt", "string", ""));
+    partColumns.add(new FieldSchema("blurb", "string", ""));
+
+    SerDeInfo serdeInfo = new SerDeInfo("LBCSerDe", LazyBinaryColumnarSerDe.class.getCanonicalName(), new HashMap<String, String>());
+
+    StorageDescriptor storageDescriptor
+        = new StorageDescriptor(columns, null,
+        "org.apache.hadoop.hive.ql.io.RCFileInputFormat",
+        "org.apache.hadoop.hive.ql.io.RCFileOutputFormat",
+        false, 0, serdeInfo, null, null, null);
+
+    Map<String, String> tableParameters = new HashMap<String, String>();
+    tableParameters.put("hive.hcatalog.partition.spec.grouping.enabled", enablePartitionGrouping? "true":"false");
+    Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, "", "", "");
+
+    hmsc.createTable(table);
+
+  }
+
+  private static void clearAndRecreateDB(HiveMetaStoreClient hmsc) throws Exception {
+    hmsc.dropDatabase(dbName,
+                      true,   // Delete data.
+                      true,   // Ignore unknownDB.
+                      true    // Cascade.
+                      );
+
+    hmsc.createDatabase(new Database(dbName,
+                                     "",    // Description.
+                                     null,  // Location.
+                                     null   // Parameters.
+                       ));
+  }
+
+  // Get partition-path. For grid='XYZ', place the partition outside the table-path.
+  private static String getPartitionPath(Table table, List<String> partValues) {
+
+    return partValues.get(1).equalsIgnoreCase("isLocatedOutsideTablePath")? // i.e. Is the partition outside the table-dir?
+           table.getSd().getLocation().replace(table.getTableName(), "location_outside_" + table.getTableName())
+              + "_" + partValues.get(0) + "_" + partValues.get(1)
+        : null ; // Use defaults... Partitions are put in the table directory.
+
+  }
+
+  private static void populatePartitions(HiveMetaStoreClient hmsc, Table table, List<String> blurbs) throws Exception {
+    for (int i=0; i< nDates; ++i) {
+      for (String blurb : blurbs) {
+        StorageDescriptor sd = new StorageDescriptor(table.getSd());
+        // Add partitions located in the table-directory (i.e. default).
+        List<String> values = Arrays.asList(datePrefix + i, blurb);
+        sd.setLocation(getPartitionPath(table, values));
+        hmsc.add_partition(new Partition(values, dbName, tableName, 0, 0, sd, null));
+      }
+    }
+  }
+
+  private void testGetPartitionSpecs(boolean enablePartitionGrouping) {
+    try {
+      HiveMetaStoreClient hmsc = new HiveMetaStoreClient(hiveConf);
+      clearAndRecreateDB(hmsc);
+      createTable(hmsc, enablePartitionGrouping);
+      Table table = hmsc.getTable(dbName, tableName);
+      populatePartitions(hmsc, table, Arrays.asList("isLocatedInTablePath", "isLocatedOutsideTablePath"));
+
+      PartitionSpecProxy partitionSpecProxy = hmsc.listPartitionSpecs(dbName, tableName, -1);
+      Assert.assertEquals( "Unexpected number of partitions.", nDates * 2, partitionSpecProxy.size());
+
+      Map<String, List<String>> locationToDateMap = new HashMap<String, List<String>>();
+      locationToDateMap.put("isLocatedInTablePath",  new ArrayList<String>());
+      locationToDateMap.put("isLocatedOutsideTablePath", new ArrayList<String>());
+      PartitionSpecProxy.PartitionIterator iterator = partitionSpecProxy.getPartitionIterator();
+
+      while (iterator.hasNext()) {
+        Partition partition = iterator.next();
+        locationToDateMap.get(partition.getValues().get(1)).add(partition.getValues().get(0));
+      }
+
+      List<String> expectedDates = new ArrayList<String>(nDates);
+      for (int i=0; i<nDates; ++i) {
+        expectedDates.add(datePrefix + i);
+      }
+
+      Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedInTablePath").toArray());
+      Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedOutsideTablePath").toArray());
+
+      partitionSpecProxy = hmsc.listPartitionSpecsByFilter(dbName, tableName, "blurb = \"isLocatedOutsideTablePath\"", -1);
+      locationToDateMap.get("isLocatedInTablePath").clear();
+      locationToDateMap.get("isLocatedOutsideTablePath").clear();
+      iterator = partitionSpecProxy.getPartitionIterator();
+
+      while (iterator.hasNext()) {
+        Partition partition = iterator.next();
+        locationToDateMap.get(partition.getValues().get(1)).add(partition.getValues().get(0));
+      }
+
+      Assert.assertEquals("Unexpected date-values.", 0, locationToDateMap.get("isLocatedInTablePath").size());
+      Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedOutsideTablePath").toArray());
+
+
+    }
+    catch (Throwable t) {
+      LOG.error("Unexpected Exception!", t);
+      t.printStackTrace();
+      Assert.assertTrue("Unexpected Exception!", false);
+    }
+  }
+
+  /**
+   * Test for HiveMetaStoreClient.listPartitionSpecs() and HiveMetaStoreClient.listPartitionSpecsByFilter().
+   * Check behaviour with and without Partition-grouping enabled.
+   */
+  @Test
+  public void testGetPartitionSpecs_WithAndWithoutPartitionGrouping() {
+    testGetPartitionSpecs(true);
+    testGetPartitionSpecs(false);
+  }
+
+
+  /**
+   * Test to confirm that partitions can be added using PartitionSpecs.
+   */
+  @Test
+  public void testAddPartitions() {
+    try {
+      // Create source table.
+      HiveMetaStoreClient hmsc = new HiveMetaStoreClient(hiveConf);
+      clearAndRecreateDB(hmsc);
+      createTable(hmsc, true);
+      Table table = hmsc.getTable(dbName, tableName);
+      populatePartitions(hmsc, table, Arrays.asList("isLocatedInTablePath", "isLocatedOutsideTablePath"));
+
+      // Clone the table,
+      String targetTableName = "cloned_" + tableName;
+      Table targetTable = new Table(table);
+      targetTable.setTableName(targetTableName);
+      StorageDescriptor targetTableSd = new StorageDescriptor(targetTable.getSd());
+      targetTableSd.setLocation(
+          targetTableSd.getLocation().replace( tableName, targetTableName));
+      hmsc.createTable(targetTable);
+
+      // Get partition-list from source.
+      PartitionSpecProxy partitionsForAddition
+          = hmsc.listPartitionSpecsByFilter(dbName, tableName, "blurb = \"isLocatedInTablePath\"", -1);
+      partitionsForAddition.setTableName(targetTableName);
+      partitionsForAddition.setRootLocation(targetTableSd.getLocation());
+
+      Assert.assertEquals("Unexpected number of partitions added. ",
+          partitionsForAddition.size(), hmsc.add_partitions_pspec(partitionsForAddition));
+
+      // Check that the added partitions are as expected.
+      PartitionSpecProxy clonedPartitions = hmsc.listPartitionSpecs(dbName, targetTableName, -1);
+      Assert.assertEquals("Unexpected number of partitions returned. ",
+          partitionsForAddition.size(), clonedPartitions.size());
+
+      PartitionSpecProxy.PartitionIterator sourceIterator = partitionsForAddition.getPartitionIterator(),
+                                           targetIterator = clonedPartitions.getPartitionIterator();
+
+      while (targetIterator.hasNext()) {
+        Partition sourcePartition = sourceIterator.next(),
+                  targetPartition = targetIterator.next();
+        Assert.assertEquals("Mismatched values.",
+            sourcePartition.getValues(), targetPartition.getValues());
+        Assert.assertEquals("Mismatched locations.",
+            sourcePartition.getSd().getLocation(), targetPartition.getSd().getLocation());
+      }
+    }
+    catch (Throwable t) {
+      LOG.error("Unexpected Exception!", t);
+      t.printStackTrace();
+      Assert.assertTrue("Unexpected Exception!", false);
+    }
+  }
+
+  /**
+   * Test to confirm that Partition-grouping behaves correctly when Table-schemas evolve.
+   * Partitions must be grouped by location and schema.
+   */
+  @Test
+  public void testFetchingPartitionsWithDifferentSchemas() {
+    try {
+      // Create source table.
+      HiveMetaStoreClient hmsc = new HiveMetaStoreClient(hiveConf);
+      clearAndRecreateDB(hmsc);
+      createTable(hmsc, true);
+      Table table = hmsc.getTable(dbName, tableName);
+      populatePartitions(hmsc,
+                         table,
+                         Arrays.asList("isLocatedInTablePath", "isLocatedOutsideTablePath") // Blurb list.
+                        );
+
+      // Modify table schema. Add columns.
+      List<FieldSchema> fields = table.getSd().getCols();
+      fields.add(new FieldSchema("goo", "string", "Entirely new column. Doesn't apply to older partitions."));
+      table.getSd().setCols(fields);
+      hmsc.alter_table(dbName, tableName, table);
+      // Check that the change stuck.
+      table =  hmsc.getTable(dbName,tableName);
+      Assert.assertEquals("Unexpected number of table columns.",
+          3, table.getSd().getColsSize());
+
+      // Add partitions with new schema.
+      // Mark Partitions with new schema with different blurb.
+      populatePartitions(hmsc, table, Arrays.asList("hasNewColumn"));
+
+      // Retrieve *all* partitions from the table.
+      PartitionSpecProxy partitionSpecProxy = hmsc.listPartitionSpecs(dbName, tableName, -1);
+      Assert.assertEquals("Unexpected number of partitions.", nDates * 3, partitionSpecProxy.size());
+
+      // Confirm grouping.
+      Assert.assertTrue("Unexpected type of PartitionSpecProxy.", partitionSpecProxy instanceof CompositePartitionSpecProxy);
+      CompositePartitionSpecProxy compositePartitionSpecProxy = (CompositePartitionSpecProxy)partitionSpecProxy;
+      List<PartitionSpec> partitionSpecs = compositePartitionSpecProxy.toPartitionSpec();
+      Assert.assertTrue("PartitionSpec[0] should have been a SharedSDPartitionSpec.",
+          partitionSpecs.get(0).isSetSharedSDPartitionSpec());
+      Assert.assertEquals("PartitionSpec[0] should use the table-path as the common root location. ",
+          table.getSd().getLocation(), partitionSpecs.get(0).getRootPath());
+      Assert.assertTrue("PartitionSpec[1] should have been a SharedSDPartitionSpec.",
+          partitionSpecs.get(1).isSetSharedSDPartitionSpec());
+      Assert.assertEquals("PartitionSpec[1] should use the table-path as the common root location. ",
+          table.getSd().getLocation(), partitionSpecs.get(1).getRootPath());
+      Assert.assertTrue("PartitionSpec[2] should have been a ListComposingPartitionSpec.",
+          partitionSpecs.get(2).isSetPartitionList());
+
+      // Categorize the partitions returned, and confirm that all partitions are accounted for.
+      PartitionSpecProxy.PartitionIterator iterator = partitionSpecProxy.getPartitionIterator();
+      Map<String, List<Partition>> blurbToPartitionList = new HashMap<String, List<Partition>>(3);
+      while (iterator.hasNext()) {
+
+        Partition partition = iterator.next();
+        String blurb = partition.getValues().get(1);
+
+        if (!blurbToPartitionList.containsKey(blurb)) {
+          blurbToPartitionList.put(blurb, new ArrayList<Partition>(nDates));
+        }
+
+        blurbToPartitionList.get(blurb).add(partition);
+
+      } // </Classification>
+
+      // All partitions with blurb="isLocatedOutsideTablePath" should have 2 columns,
+      // and must have locations outside the table directory.
+      for (Partition partition : blurbToPartitionList.get("isLocatedOutsideTablePath")) {
+        Assert.assertEquals("Unexpected number of columns.", 2, partition.getSd().getCols().size());
+        Assert.assertEquals("Unexpected first column.", "foo", partition.getSd().getCols().get(0).getName());
+        Assert.assertEquals("Unexpected second column.", "bar", partition.getSd().getCols().get(1).getName());
+        String partitionLocation = partition.getSd().getLocation();
+        String tableLocation = table.getSd().getLocation();
+        Assert.assertTrue("Unexpected partition location: " + partitionLocation + ". " +
+            "Partition should have been outside table location: " + tableLocation,
+            !partitionLocation.startsWith(tableLocation));
+      }
+
+      // All partitions with blurb="isLocatedInTablePath" should have 2 columns,
+      // and must have locations within the table directory.
+      for (Partition partition : blurbToPartitionList.get("isLocatedInTablePath")) {
+        Assert.assertEquals("Unexpected number of columns.", 2, partition.getSd().getCols().size());
+        Assert.assertEquals("Unexpected first column.", "foo", partition.getSd().getCols().get(0).getName());
+        Assert.assertEquals("Unexpected second column.", "bar", partition.getSd().getCols().get(1).getName());
+        String partitionLocation = partition.getSd().getLocation();
+        String tableLocation = table.getSd().getLocation();
+        Assert.assertTrue("Unexpected partition location: " + partitionLocation + ". " +
+                "Partition should have been within table location: " + tableLocation,
+            partitionLocation.startsWith(tableLocation));
+      }
+
+      // All partitions with blurb="hasNewColumn" were added after the table schema changed,
+      // and must have 3 columns. Also, the partition locations must lie within the table directory.
+      for (Partition partition : blurbToPartitionList.get("hasNewColumn")) {
+        Assert.assertEquals("Unexpected number of columns.", 3, partition.getSd().getCols().size());
+        Assert.assertEquals("Unexpected first column.", "foo", partition.getSd().getCols().get(0).getName());
+        Assert.assertEquals("Unexpected second column.", "bar", partition.getSd().getCols().get(1).getName());
+        Assert.assertEquals("Unexpected third column.", "goo", partition.getSd().getCols().get(2).getName());
+        String partitionLocation = partition.getSd().getLocation();
+        String tableLocation = table.getSd().getLocation();
+        Assert.assertTrue("Unexpected partition location: " + partitionLocation + ". " +
+                "Partition should have been within table location: " + tableLocation,
+            partitionLocation.startsWith(tableLocation));
+      }
+
+    }
+    catch (Throwable t) {
+      LOG.error("Unexpected Exception!", t);
+      t.printStackTrace();
+      Assert.assertTrue("Unexpected Exception!", false);
+    }
+  }
+
+}



Mime
View raw message