hive-commits mailing list archives

From ser...@apache.org
Subject [18/31] hive git commit: HIVE-17488 Move first set of classes to standalone metastore. This closes #244. (Alan Gates, reviewed by Owen O'Malley)
Date Fri, 15 Sep 2017 19:52:19 GMT
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java
new file mode 100644
index 0000000..e34335d
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.partition.spec;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Implementation of PartitionSpecProxy that composes a list of PartitionSpecProxy.
+ */
+public class CompositePartitionSpecProxy extends PartitionSpecProxy {
+
+  private String dbName;
+  private String tableName;
+  private List<PartitionSpec> partitionSpecs;
+  private List<PartitionSpecProxy> partitionSpecProxies;
+  private int size = 0;
+
+  protected CompositePartitionSpecProxy(List<PartitionSpec> partitionSpecs) {
+    this.partitionSpecs = partitionSpecs;
+    if (partitionSpecs.isEmpty()) {
+      dbName = null;
+      tableName = null;
+      partitionSpecProxies = new ArrayList<>(); // keep non-null, so isValid() and the setters are safe
+    } else {
+      dbName = partitionSpecs.get(0).getDbName();
+      tableName = partitionSpecs.get(0).getTableName();
+      this.partitionSpecProxies = new ArrayList<>(partitionSpecs.size());
+      for (PartitionSpec partitionSpec : partitionSpecs) {
+        PartitionSpecProxy partitionSpecProxy = Factory.get(partitionSpec);
+        this.partitionSpecProxies.add(partitionSpecProxy);
+        size += partitionSpecProxy.size();
+      }
+    }
+    // Assert class-invariant.
+    assert isValid() : "Invalid CompositePartitionSpecProxy!";
+  }
+
+  protected CompositePartitionSpecProxy(String dbName, String tableName, List<PartitionSpec> partitionSpecs) {
+    this.dbName = dbName;
+    this.tableName = tableName;
+    this.partitionSpecs = partitionSpecs;
+    this.partitionSpecProxies = new ArrayList<>(partitionSpecs.size());
+    for (PartitionSpec partitionSpec : partitionSpecs) {
+      this.partitionSpecProxies.add(PartitionSpecProxy.Factory.get(partitionSpec));
+    }
+    // Assert class-invariant.
+    assert isValid() : "Invalid CompositePartitionSpecProxy!";
+  }
+
+  private boolean isValid() {
+    for (PartitionSpecProxy partitionSpecProxy : partitionSpecProxies) {
+      if (partitionSpecProxy instanceof CompositePartitionSpecProxy) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  @Override
+  public int size() {
+    return size;
+  }
+
+  /**
+   * Iterator to iterate over all Partitions, across all PartitionSpecProxy instances within the Composite.
+   */
+  public static class Iterator implements PartitionIterator {
+
+    private CompositePartitionSpecProxy composite;
+    private List<PartitionSpecProxy> partitionSpecProxies;
+    private int index = -1; // Index into partitionSpecProxies.
+    private PartitionIterator iterator = null;
+
+    public Iterator(CompositePartitionSpecProxy composite) {
+      this.composite = composite;
+      this.partitionSpecProxies = composite.partitionSpecProxies;
+
+      if (this.partitionSpecProxies != null && !this.partitionSpecProxies.isEmpty()) {
+        this.index = 0;
+        this.iterator = this.partitionSpecProxies.get(this.index).getPartitionIterator();
+      }
+    }
+
+    @Override
+    public boolean hasNext() {
+
+      if (iterator == null) {
+        return false;
+      }
+
+      if (iterator.hasNext()) {
+        return true;
+      }
+
+      while (++index < partitionSpecProxies.size()
+          && !(iterator = partitionSpecProxies.get(index).getPartitionIterator()).hasNext());
+
+      return index < partitionSpecProxies.size() && iterator.hasNext();
+
+    }
+
+    @Override
+    public Partition next() {
+
+      if (iterator.hasNext())
+        return iterator.next();
+
+      while (++index < partitionSpecProxies.size()
+          && !(iterator = partitionSpecProxies.get(index).getPartitionIterator()).hasNext());
+
+      return index >= partitionSpecProxies.size() ? null : iterator.next();
+
+    }
+
+    @Override
+    public void remove() {
+      iterator.remove();
+    }
+
+    @Override
+    public Partition getCurrent() {
+      return iterator.getCurrent();
+    }
+
+    @Override
+    public String getDbName() {
+      return composite.dbName;
+    }
+
+    @Override
+    public String getTableName() {
+      return composite.tableName;
+    }
+
+    @Override
+    public Map<String, String> getParameters() {
+      return iterator.getParameters();
+    }
+
+    @Override
+    public void setParameters(Map<String, String> parameters) {
+      iterator.setParameters(parameters);
+    }
+
+    @Override
+    public String getLocation() {
+      return iterator.getLocation();
+    }
+
+    @Override
+    public void putToParameters(String key, String value) {
+      iterator.putToParameters(key, value);
+    }
+
+    @Override
+    public void setCreateTime(long time) {
+      iterator.setCreateTime(time);
+    }
+  }
+
+  @Override
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+    for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) {
+      partSpecProxy.setDbName(dbName);
+    }
+  }
+
+  @Override
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+    for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) {
+      partSpecProxy.setTableName(tableName);
+    }
+  }
+
+  @Override
+  public String getDbName() {
+    return dbName;
+  }
+
+  @Override
+  public String getTableName() {
+    return tableName;
+  }
+
+  @Override
+  public PartitionIterator getPartitionIterator() {
+    return new Iterator(this);
+  }
+
+  @Override
+  public List<PartitionSpec> toPartitionSpec() {
+    return partitionSpecs;
+  }
+
+  @Override
+  public void setRootLocation(String rootLocation) throws MetaException {
+    for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) {
+      partSpecProxy.setRootLocation(rootLocation);
+    }
+  }
+}
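
A minimal usage sketch of the composite proxy, assuming only the Factory and
iterator APIs introduced in this commit: several raw PartitionSpecs are walked
as one flat Partition sequence.

    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.PartitionSpec;
    import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;

    import java.util.List;

    public class CompositeIterationSketch {
      // Treat several raw PartitionSpecs as a single flat sequence of Partitions.
      public static void printLocations(List<PartitionSpec> specs) {
        PartitionSpecProxy proxy = PartitionSpecProxy.Factory.get(specs);
        PartitionSpecProxy.PartitionIterator iterator = proxy.getPartitionIterator();
        while (iterator.hasNext()) {
          Partition partition = iterator.next();
          System.out.println(partition.getSd().getLocation());
        }
      }
    }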

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java
new file mode 100644
index 0000000..7b0550b
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.partition.spec;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * PartitionSpecProxy implementation that composes a List of Partitions.
+ */
+public class PartitionListComposingSpecProxy extends PartitionSpecProxy {
+
+  private PartitionSpec partitionSpec;
+
+  protected PartitionListComposingSpecProxy(PartitionSpec partitionSpec) {
+    assert partitionSpec.isSetPartitionList()
+        : "Partition-list should have been set.";
+    this.partitionSpec = partitionSpec;
+  }
+
+  @Override
+  public String getDbName() {
+    return partitionSpec.getDbName();
+  }
+
+  @Override
+  public String getTableName() {
+    return partitionSpec.getTableName();
+  }
+
+  @Override
+  public PartitionIterator getPartitionIterator() {
+    return new Iterator(this);
+  }
+
+  @Override
+  public List<PartitionSpec> toPartitionSpec() {
+    return Arrays.asList(partitionSpec);
+  }
+
+  @Override
+  public int size() {
+    return partitionSpec.getPartitionList().getPartitionsSize();
+  }
+
+  @Override
+  public void setDbName(String dbName) {
+    partitionSpec.setDbName(dbName);
+    for (Partition partition : partitionSpec.getPartitionList().getPartitions()) {
+      partition.setDbName(dbName);
+    }
+  }
+
+  @Override
+  public void setTableName(String tableName) {
+    partitionSpec.setTableName(tableName);
+    for (Partition partition : partitionSpec.getPartitionList().getPartitions()) {
+      partition.setTableName(tableName);
+    }
+  }
+
+  @Override
+  public void setRootLocation(String newRootPath) throws MetaException {
+
+    String oldRootPath = partitionSpec.getRootPath();
+
+    if (oldRootPath == null) {
+      throw new MetaException("No common root-path. Can't replace root-path!");
+    }
+
+    for (Partition partition : partitionSpec.getPartitionList().getPartitions()) {
+      String location = partition.getSd().getLocation();
+      if (location.startsWith(oldRootPath)) {
+        partition.getSd().setLocation(location.replace(oldRootPath, newRootPath));
+      }
+      else {
+        throw new MetaException("Common root-path not found. Can't replace root-path!");
+      }
+    }
+  }
+
+  public static class Iterator implements PartitionIterator {
+
+    private final PartitionListComposingSpecProxy partitionSpecProxy;
+    private final List<Partition> partitionList;
+    private int index;
+
+    public Iterator(PartitionListComposingSpecProxy partitionSpecProxy) {
+      this.partitionSpecProxy = partitionSpecProxy;
+      this.partitionList = partitionSpecProxy.partitionSpec.getPartitionList().getPartitions();
+      this.index = 0;
+    }
+
+    @Override
+    public Partition getCurrent() {
+      return partitionList.get(index);
+    }
+
+    @Override
+    public String getDbName() {
+      return partitionSpecProxy.getDbName();
+    }
+
+    @Override
+    public String getTableName() {
+      return partitionSpecProxy.getTableName();
+    }
+
+    @Override
+    public Map<String, String> getParameters() {
+      return partitionList.get(index).getParameters();
+    }
+
+    @Override
+    public void setParameters(Map<String, String> parameters) {
+      partitionList.get(index).setParameters(parameters);
+    }
+
+    @Override
+    public String getLocation() {
+      return partitionList.get(index).getSd().getLocation();
+    }
+
+    @Override
+    public void putToParameters(String key, String value) {
+      partitionList.get(index).putToParameters(key, value);
+    }
+
+    @Override
+    public void setCreateTime(long time) {
+      partitionList.get(index).setCreateTime((int)time);
+    }
+
+    @Override
+    public boolean hasNext() {
+      return index < partitionList.size();
+    }
+
+    @Override
+    public Partition next() {
+      return partitionList.get(index++);
+    }
+
+    @Override
+    public void remove() {
+      partitionList.remove(--index); // remove the element last returned by next(), per the Iterator contract
+    }
+  } // class Iterator;
+
+} // class PartitionListComposingSpecProxy;
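
A sketch of how a plain Partition list becomes a proxied PartitionSpec. The
PartitionListComposingSpec type and its all-args constructor are assumed from
the Thrift-generated metastore API; only the Factory call is from this commit.

    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec;
    import org.apache.hadoop.hive.metastore.api.PartitionSpec;
    import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;

    import java.util.List;

    public class ListComposingSketch {
      // Wrap a pre-built Partition list in a PartitionSpec and proxy it.
      public static PartitionSpecProxy wrap(String db, String table, List<Partition> parts) {
        PartitionSpec spec = new PartitionSpec();
        spec.setDbName(db);
        spec.setTableName(table);
        spec.setPartitionList(new PartitionListComposingSpec(parts));
        return PartitionSpecProxy.Factory.get(spec); // PartitionListComposingSpecProxy
      }
    }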

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java
new file mode 100644
index 0000000..2640a24
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.partition.spec;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Polymorphic proxy class, equivalent to org.apache.hadoop.hive.metastore.api.PartitionSpec.
+ */
+public abstract class PartitionSpecProxy {
+
+  /**
+   * The number of Partition instances represented by the PartitionSpec.
+   * @return Number of partitions.
+   */
+  public abstract int size();
+
+  /**
+   * Setter for name of the DB.
+   * @param dbName The name of the DB.
+   */
+  public abstract void setDbName(String dbName);
+
+  /**
+   * Setter for name of the table.
+   * @param tableName The name of the table.
+   */
+  public abstract void setTableName(String tableName);
+
+  /**
+   * Getter for name of the DB.
+   * @return The name of the DB.
+   */
+  public abstract String getDbName();
+
+  /**
+   * Getter for name of the table.
+   * @return The name of the table.
+   */
+  public abstract String getTableName();
+
+  /**
+   * Iterator to the (virtual) sequence of Partitions represented by the PartitionSpec.
+   * @return A PartitionIterator to the beginning of the Partition sequence.
+   */
+  public abstract PartitionIterator getPartitionIterator();
+
+  /**
+   * Conversion to a org.apache.hadoop.hive.metastore.api.PartitionSpec sequence.
+   * @return A list of org.apache.hadoop.hive.metastore.api.PartitionSpec instances.
+   */
+  public abstract List<PartitionSpec> toPartitionSpec();
+
+  /**
+   * Setter for the common root-location for all partitions in the PartitionSpec.
+   * @param rootLocation The new common root-location.
+   * @throws MetaException
+   */
+  public abstract void setRootLocation(String rootLocation) throws MetaException;
+
+  /**
+   * Factory to construct PartitionSpecProxy instances from PartitionSpecs.
+   */
+  public static class Factory {
+
+    /**
+     * Factory method. Construct PartitionSpecProxy from raw PartitionSpec.
+     * @param partSpec Raw PartitionSpec from the Thrift API.
+     * @return PartitionSpecProxy instance.
+     */
+    public static PartitionSpecProxy get(PartitionSpec partSpec) {
+
+      if (partSpec == null) {
+        return null;
+      } else if (partSpec.isSetPartitionList()) {
+        return new PartitionListComposingSpecProxy(partSpec);
+      } else if (partSpec.isSetSharedSDPartitionSpec()) {
+        return new PartitionSpecWithSharedSDProxy(partSpec);
+      }
+
+      assert false : "Unsupported type of PartitionSpec!";
+      return null;
+    }
+
+    /**
+     * Factory method to construct CompositePartitionSpecProxy.
+     * @param partitionSpecs List of raw PartitionSpecs.
+     * @return A CompositePartitionSpecProxy instance.
+     */
+    public static PartitionSpecProxy get(List<PartitionSpec> partitionSpecs) {
+      return new CompositePartitionSpecProxy(partitionSpecs);
+    }
+
+  } // class Factory;
+
+  /**
+   * Iterator to iterate over Partitions corresponding to a PartitionSpec.
+   */
+  public interface PartitionIterator extends java.util.Iterator<Partition> {
+
+    /**
+     * Getter for the Partition "pointed to" by the iterator.
+     * Like next(), but without advancing the iterator.
+     * @return The "current" partition object.
+     */
+    Partition getCurrent();
+
+    /**
+     * Getter for the name of the DB.
+     * @return Name of the DB.
+     */
+    String getDbName();
+
+    /**
+     * Getter for the name of the table.
+     * @return Name of the table.
+     */
+    String getTableName();
+
+    /**
+     * Getter for the Partition parameters.
+     * @return Key-value map for Partition-level parameters.
+     */
+    Map<String, String> getParameters();
+
+    /**
+     * Setter for Partition parameters.
+     * @param parameters Key-value map of Partition-level parameters.
+     */
+    void setParameters(Map<String, String> parameters);
+
+    /**
+     * Insert an individual parameter to a Partition's parameter-set.
+     * @param key parameter key
+     * @param value parameter value
+     */
+    void putToParameters(String key, String value);
+
+    /**
+     * Getter for Partition-location.
+     * @return Partition's location.
+     */
+    String getLocation();
+
+    /**
+     * Setter for creation-time of a Partition.
+     * @param time Timestamp indicating the time of creation of the Partition.
+     */
+    void setCreateTime(long time);
+
+  } // interface PartitionIterator;
+
+  /**
+   * Simple wrapper class for pre-constructed Partitions, to expose a PartitionIterator interface,
+   * where the iterator-sequence consists of just one Partition.
+   */
+  public static class SimplePartitionWrapperIterator implements PartitionIterator {
+    private Partition partition;
+    public SimplePartitionWrapperIterator(Partition partition) {this.partition = partition;}
+
+    @Override public Partition getCurrent() { return partition; }
+    @Override public String getDbName() { return partition.getDbName(); }
+    @Override public String getTableName() { return partition.getTableName(); }
+    @Override public Map<String, String> getParameters() { return partition.getParameters(); }
+    @Override public void setParameters(Map<String, String> parameters) { partition.setParameters(parameters); }
+    @Override public void putToParameters(String key, String value) { partition.putToParameters(key, value);}
+    @Override public String getLocation() { return partition.getSd().getLocation(); }
+    @Override public void setCreateTime(long time) { partition.setCreateTime((int)time);}
+    @Override public boolean hasNext() { return false; } // No next partition.
+    @Override public Partition next() { return null; } // No next partition.
+    @Override public void remove() {} // Do nothing.
+  } // class SimplePartitionWrapperIterator;
+
+} // class PartitionSpecProxy;
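
SimplePartitionWrapperIterator is deliberately degenerate: hasNext() is always
false, so the wrapped Partition is reached through getCurrent() rather than
next(). A minimal sketch:

    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;

    public class WrapperIteratorSketch {
      // Read a single pre-constructed Partition through the PartitionIterator interface.
      public static String locationOf(Partition partition) {
        PartitionSpecProxy.PartitionIterator it =
            new PartitionSpecProxy.SimplePartitionWrapperIterator(partition);
        return it.getLocation(); // delegates to partition.getSd().getLocation()
      }
    }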

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java
new file mode 100644
index 0000000..36b05f7
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.partition.spec;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD;
+import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Subclass of PartitionSpecProxy that pulls out commonality of
+ * StorageDescriptor properties within a Partition-list into a common
+ * StorageDescriptor instance.
+ */
+public class PartitionSpecWithSharedSDProxy extends PartitionSpecProxy {
+
+  private PartitionSpec partitionSpec;
+
+  public PartitionSpecWithSharedSDProxy(PartitionSpec partitionSpec) {
+    assert partitionSpec.isSetSharedSDPartitionSpec();
+    this.partitionSpec = partitionSpec;
+  }
+
+  @Override
+  public int size() {
+    return partitionSpec.getSharedSDPartitionSpec().getPartitionsSize();
+  }
+
+  @Override
+  public void setDbName(String dbName) {
+    partitionSpec.setDbName(dbName);
+  }
+
+  @Override
+  public void setTableName(String tableName) {
+    partitionSpec.setTableName(tableName);
+  }
+
+  @Override
+  public String getDbName() {
+    return partitionSpec.getDbName();
+  }
+
+  @Override
+  public String getTableName() {
+    return partitionSpec.getTableName();
+  }
+
+  @Override
+  public PartitionIterator getPartitionIterator() {
+    return new Iterator(this);
+  }
+
+  @Override
+  public List<PartitionSpec> toPartitionSpec() {
+    return Arrays.asList(partitionSpec);
+  }
+
+  @Override
+  public void setRootLocation(String rootLocation) throws MetaException {
+    partitionSpec.setRootPath(rootLocation);
+    partitionSpec.getSharedSDPartitionSpec().getSd().setLocation(rootLocation);
+  }
+
+  /**
+   * Iterator implementation to iterate over all Partitions within the PartitionSpecWithSharedSDProxy.
+   */
+  public static class Iterator implements PartitionIterator {
+
+    private PartitionSpecWithSharedSDProxy partitionSpecWithSharedSDProxy;
+    private PartitionSpecWithSharedSD pSpec;
+    private int index;
+
+    Iterator(PartitionSpecWithSharedSDProxy partitionSpecWithSharedSDProxy) {
+      this.partitionSpecWithSharedSDProxy = partitionSpecWithSharedSDProxy;
+      this.pSpec = this.partitionSpecWithSharedSDProxy.partitionSpec.getSharedSDPartitionSpec();
+      this.index = 0;
+    }
+
+    @Override
+    public boolean hasNext() {
+      return index < pSpec.getPartitions().size();
+    }
+
+    @Override
+    public Partition next() {
+      Partition partition = getCurrent();
+      ++index;
+      return partition;
+    }
+
+    @Override
+    public void remove() {
+      pSpec.getPartitions().remove(index);
+    }
+
+    @Override
+    public Partition getCurrent() {
+      PartitionWithoutSD partWithoutSD = pSpec.getPartitions().get(index);
+      StorageDescriptor partSD = new StorageDescriptor(pSpec.getSd());
+      partSD.setLocation(partSD.getLocation() + partWithoutSD.getRelativePath());
+
+      return new Partition(
+          partWithoutSD.getValues(),
+          partitionSpecWithSharedSDProxy.partitionSpec.getDbName(),
+          partitionSpecWithSharedSDProxy.partitionSpec.getTableName(),
+          partWithoutSD.getCreateTime(),
+          partWithoutSD.getLastAccessTime(),
+          partSD,
+          partWithoutSD.getParameters()
+      );
+    }
+
+    @Override
+    public String getDbName() {
+      return partitionSpecWithSharedSDProxy.partitionSpec.getDbName();
+    }
+
+    @Override
+    public String getTableName() {
+      return partitionSpecWithSharedSDProxy.partitionSpec.getTableName();
+    }
+
+    @Override
+    public Map<String, String> getParameters() {
+      return pSpec.getPartitions().get(index).getParameters();
+    }
+
+    @Override
+    public void setParameters(Map<String, String> parameters) {
+      pSpec.getPartitions().get(index).setParameters(parameters);
+    }
+
+    @Override
+    public String getLocation() {
+      return pSpec.getSd().getLocation() + pSpec.getPartitions().get(index).getRelativePath();
+    }
+
+    @Override
+    public void putToParameters(String key, String value) {
+      pSpec.getPartitions().get(index).putToParameters(key, value);
+    }
+
+    @Override
+    public void setCreateTime(long time) {
+      pSpec.getPartitions().get(index).setCreateTime((int)time);
+    }
+
+  } // static class Iterator;
+
+}
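
A sketch of building the shared-SD flavour, where each PartitionWithoutSD
carries only a path relative to one common StorageDescriptor. The Thrift-
generated setters (setSd, setPartitions, setSharedSDPartitionSpec) are assumed
from the metastore API.

    import org.apache.hadoop.hive.metastore.api.PartitionSpec;
    import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD;
    import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;

    import java.util.List;

    public class SharedSDSketch {
      // Partitions share one StorageDescriptor; the proxy materializes full
      // Partition objects on the fly in Iterator.getCurrent().
      public static PartitionSpecProxy wrap(String db, String table,
          StorageDescriptor sharedSd, List<PartitionWithoutSD> parts) {
        PartitionSpecWithSharedSD sharedSpec = new PartitionSpecWithSharedSD();
        sharedSpec.setSd(sharedSd);
        sharedSpec.setPartitions(parts);

        PartitionSpec spec = new PartitionSpec();
        spec.setDbName(db);
        spec.setTableName(table);
        spec.setSharedSDPartitionSpec(sharedSpec);
        return PartitionSpecProxy.Factory.get(spec); // PartitionSpecWithSharedSDProxy
      }
    }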

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
new file mode 100644
index 0000000..4ba11b8
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
@@ -0,0 +1,641 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.tools;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HiveMetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.IllegalFormatException;
+import java.util.List;
+
+public class HiveSchemaHelper {
+  public static final String DB_DERBY = "derby";
+  public static final String DB_HIVE = "hive";
+  public static final String DB_MSSQL = "mssql";
+  public static final String DB_MYSQL = "mysql";
+  public static final String DB_POSTGRACE = "postgres";
+  public static final String DB_ORACLE = "oracle";
+
+  /**
+   * Get a JDBC connection to the metastore db.
+   *
+   * @param userName metastore connection username
+   * @param password metastore connection password
+   * @param url metastore connection URL; read from the configuration when null
+   * @param driver JDBC driver class name; read from the configuration when null
+   * @param printInfo print connection parameters
+   * @param conf configuration object
+   * @return metastore connection object
+   * @throws org.apache.hadoop.hive.metastore.HiveMetaException
+   */
+  public static Connection getConnectionToMetastore(String userName,
+      String password, String url, String driver, boolean printInfo,
+      Configuration conf)
+      throws HiveMetaException {
+    try {
+      url = url == null ? getValidConfVar(
+        MetastoreConf.ConfVars.CONNECTURLKEY, conf) : url;
+      driver = driver == null ? getValidConfVar(
+        MetastoreConf.ConfVars.CONNECTION_DRIVER, conf) : driver;
+      if (printInfo) {
+        System.out.println("Metastore connection URL:\t " + url);
+        System.out.println("Metastore Connection Driver :\t " + driver);
+        System.out.println("Metastore connection User:\t " + userName);
+      }
+      if ((userName == null) || userName.isEmpty()) {
+        throw new HiveMetaException("UserName empty ");
+      }
+
+      // load required JDBC driver
+      Class.forName(driver);
+
+      // Connect using the JDBC URL and user/pass from conf
+      return DriverManager.getConnection(url, userName, password);
+    } catch (IOException e) {
+      throw new HiveMetaException("Failed to read connection configuration.", e);
+    } catch (SQLException e) {
+      throw new HiveMetaException("Failed to connect to the metastore database.", e);
+    } catch (ClassNotFoundException e) {
+      throw new HiveMetaException("Failed to load driver", e);
+    }
+  }
+
+  public static Connection getConnectionToMetastore(MetaStoreConnectionInfo info) throws HiveMetaException {
+    return getConnectionToMetastore(info.getUsername(), info.getPassword(), info.getUrl(),
+        info.getDriver(), info.getPrintInfo(), info.getConf());
+  }
+
+  public static String getValidConfVar(MetastoreConf.ConfVars confVar, Configuration conf)
+      throws IOException {
+    String confVarStr = conf.get(confVar.varname);
+    if (confVarStr == null || confVarStr.isEmpty()) {
+      throw new IOException("Empty " + confVar.varname);
+    }
+    return confVarStr.trim();
+  }
+
+  public interface NestedScriptParser {
+
+    enum CommandType {
+      PARTIAL_STATEMENT,
+      TERMINATED_STATEMENT,
+      COMMENT
+    }
+
+    String DEFAULT_DELIMITER = ";";
+    String DEFAULT_QUOTE = "\"";
+
+    /**
+     * Check whether the given command is a partial (unterminated) statement.
+     *
+     * @param dbCommand command from the script
+     * @return true if the statement is not yet terminated by the delimiter
+     */
+    boolean isPartialCommand(String dbCommand) throws IllegalArgumentException;
+
+    /**
+     * Parse the DB-specific nesting format and extract the inner script name, if any.
+     *
+     * @param dbCommand command from parent script
+     * @return name of the nested script
+     * @throws IllegalArgumentException if the command cannot be parsed
+     */
+    String getScriptName(String dbCommand) throws IllegalArgumentException;
+
+    /**
+     * Find if the given command is a nested script execution
+     *
+     * @param dbCommand command from the script
+     * @return true if the command runs a nested script
+     */
+    boolean isNestedScript(String dbCommand);
+
+    /**
+     * Find if the given command should not be passed to DB
+     *
+     * @param dbCommand command from the script
+     * @return true if the command should not be sent to the DB
+     */
+    boolean isNonExecCommand(String dbCommand);
+
+    /**
+     * Get the SQL statement delimiter
+     *
+     * @return the current statement delimiter
+     */
+    String getDelimiter();
+
+    /**
+     * Get the SQL identifier quotation character
+     *
+     * @return the identifier quote character
+     */
+    String getQuoteCharacter();
+
+    /**
+     * Strip any client-specific tags from the command.
+     *
+     * @return the cleansed command
+     */
+    String cleanseCommand(String dbCommand);
+
+    /**
+     * Does the DB require table/column names to be quoted?
+     *
+     * @return true if identifiers must be quoted
+     */
+    boolean needsQuotedIdentifier();
+
+    /**
+     * Flatten the nested upgrade script into a buffer
+     *
+     * @param scriptDir  upgrade script directory
+     * @param scriptFile upgrade script file
+     * @return string of sql commands
+     */
+    String buildCommand(String scriptDir, String scriptFile)
+        throws IllegalFormatException, IOException;
+
+    /**
+     * Flatten the nested upgrade script into a buffer
+     *
+     * @param scriptDir  upgrade script directory
+     * @param scriptFile upgrade script file
+     * @param fixQuotes whether to replace quote characters
+     * @return string of sql commands
+     */
+    String buildCommand(String scriptDir, String scriptFile, boolean fixQuotes)
+        throws IllegalFormatException, IOException;
+  }
+
+  /**
+   * Base implementation of NestedScriptParser, shared by the DB-specific command parsers.
+   */
+  private static abstract class AbstractCommandParser implements NestedScriptParser {
+    private List<String> dbOpts;
+    private String msUsername;
+    private String msPassword;
+    private Configuration conf;
+
+    public AbstractCommandParser(String dbOpts, String msUsername, String msPassword,
+        Configuration conf) {
+      setDbOpts(dbOpts);
+      this.msUsername = msUsername;
+      this.msPassword = msPassword;
+      this.conf = conf;
+    }
+
+    @Override
+    public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException {
+      if (dbCommand == null || dbCommand.isEmpty()) {
+        throw new IllegalArgumentException("invalid command line " + dbCommand);
+      }
+      dbCommand = dbCommand.trim();
+      if (dbCommand.endsWith(getDelimiter()) || isNonExecCommand(dbCommand)) {
+        return false;
+      } else {
+        return true;
+      }
+    }
+
+    @Override
+    public boolean isNonExecCommand(String dbCommand) {
+      return (dbCommand.startsWith("--") || dbCommand.startsWith("#"));
+    }
+
+    @Override
+    public String getDelimiter() {
+      return DEFAULT_DELIMITER;
+    }
+
+    @Override
+    public String getQuoteCharacter() {
+      return DEFAULT_QUOTE;
+    }
+
+
+    @Override
+    public String cleanseCommand(String dbCommand) {
+      // strip off the delimiter
+      if (dbCommand.endsWith(getDelimiter())) {
+        dbCommand = dbCommand.substring(0,
+            dbCommand.length() - getDelimiter().length());
+      }
+      return dbCommand;
+    }
+
+    @Override
+    public boolean needsQuotedIdentifier() {
+      return false;
+    }
+
+    @Override
+    public String buildCommand(
+      String scriptDir, String scriptFile) throws IllegalFormatException, IOException {
+      return buildCommand(scriptDir, scriptFile, false);
+    }
+
+    @Override
+    public String buildCommand(
+      String scriptDir, String scriptFile, boolean fixQuotes) throws IllegalFormatException, IOException {
+      BufferedReader bfReader =
+          new BufferedReader(new FileReader(scriptDir + File.separatorChar + scriptFile));
+      String currLine;
+      StringBuilder sb = new StringBuilder();
+      String currentCommand = null;
+      while ((currLine = bfReader.readLine()) != null) {
+        currLine = currLine.trim();
+
+        if (fixQuotes && !getQuoteCharacter().equals(DEFAULT_QUOTE)) {
+          currLine = currLine.replace("\\\"", getQuoteCharacter());
+        }
+
+        if (currLine.isEmpty()) {
+          continue; // skip empty lines
+        }
+
+        if (currentCommand == null) {
+          currentCommand = currLine;
+        } else {
+          currentCommand = currentCommand + " " + currLine;
+        }
+        if (isPartialCommand(currLine)) {
+          // if it's a partial statement, continue collecting the pieces
+          continue;
+        }
+
+        // if this is a valid executable command then add it to the buffer
+        if (!isNonExecCommand(currentCommand)) {
+          currentCommand = cleanseCommand(currentCommand);
+          if (isNestedScript(currentCommand)) {
+            // if this is a nested sql script then flatten it
+            String currScript = getScriptName(currentCommand);
+            sb.append(buildCommand(scriptDir, currScript));
+          } else {
+            // Now we have a complete statement, process it
+            // write the line to buffer
+            sb.append(currentCommand);
+            sb.append(System.getProperty("line.separator"));
+          }
+        }
+        currentCommand = null;
+      }
+      bfReader.close();
+      return sb.toString();
+    }
+
+    private void setDbOpts(String dbOpts) {
+      if (dbOpts != null) {
+        this.dbOpts = Lists.newArrayList(dbOpts.split(","));
+      } else {
+        this.dbOpts = Lists.newArrayList();
+      }
+    }
+
+    protected List<String> getDbOpts() {
+      return dbOpts;
+    }
+
+    protected String getMsUsername() {
+      return msUsername;
+    }
+
+    protected String getMsPassword() {
+      return msPassword;
+    }
+
+    protected Configuration getConf() {
+      return conf;
+    }
+  }
+
+  // Derby commandline parser
+  public static class DerbyCommandParser extends AbstractCommandParser {
+    private static final String DERBY_NESTING_TOKEN = "RUN";
+
+    public DerbyCommandParser(String dbOpts, String msUsername, String msPassword,
+        Configuration conf) {
+      super(dbOpts, msUsername, msPassword, conf);
+    }
+
+    @Override
+    public String getScriptName(String dbCommand) throws IllegalArgumentException {
+
+      if (!isNestedScript(dbCommand)) {
+        throw new IllegalArgumentException("Not a script format " + dbCommand);
+      }
+      String[] tokens = dbCommand.split(" ");
+      if (tokens.length != 2) {
+        throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
+      }
+      return tokens[1].replace(";", "").replaceAll("'", "");
+    }
+
+    @Override
+    public boolean isNestedScript(String dbCommand) {
+      // Derby script format is RUN '<file>'
+     return dbCommand.startsWith(DERBY_NESTING_TOKEN);
+    }
+  }
+
+  // Hive commandline parser
+  public static class HiveCommandParser extends AbstractCommandParser {
+    private static final String HIVE_NESTING_TOKEN = "SOURCE";
+    private final NestedScriptParser nestedDbCommandParser;
+
+    public HiveCommandParser(String dbOpts, String msUsername, String msPassword,
+        Configuration conf, String metaDbType) {
+      super(dbOpts, msUsername, msPassword, conf);
+      nestedDbCommandParser = getDbCommandParser(metaDbType);
+    }
+
+    @Override
+    public String getQuoteCharacter() {
+      return nestedDbCommandParser.getQuoteCharacter();
+    }
+
+    @Override
+    public String getScriptName(String dbCommand) throws IllegalArgumentException {
+
+      if (!isNestedScript(dbCommand)) {
+        throw new IllegalArgumentException("Not a script format " + dbCommand);
+      }
+      String[] tokens = dbCommand.split(" ");
+      if (tokens.length != 2) {
+        throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
+      }
+      return tokens[1].replace(";", "");
+    }
+
+    @Override
+    public boolean isNestedScript(String dbCommand) {
+     return dbCommand.startsWith(HIVE_NESTING_TOKEN);
+    }
+  }
+
+  // MySQL parser
+  public static class MySqlCommandParser extends AbstractCommandParser {
+    private static final String MYSQL_NESTING_TOKEN = "SOURCE";
+    private static final String DELIMITER_TOKEN = "DELIMITER";
+    private String delimiter = DEFAULT_DELIMITER;
+
+    public MySqlCommandParser(String dbOpts, String msUsername, String msPassword,
+        Configuration conf) {
+      super(dbOpts, msUsername, msPassword, conf);
+    }
+
+    @Override
+    public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException {
+      boolean isPartial = super.isPartialCommand(dbCommand);
+      // if this is a delimiter directive, reset our delimiter
+      if (dbCommand.startsWith(DELIMITER_TOKEN)) {
+        String[] tokens = dbCommand.split(" ");
+        if (tokens.length != 2) {
+          throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
+        }
+        delimiter = tokens[1];
+      }
+      return isPartial;
+    }
+
+    @Override
+    public String getScriptName(String dbCommand) throws IllegalArgumentException {
+      String[] tokens = dbCommand.split(" ");
+      if (tokens.length != 2) {
+        throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
+      }
+      // remove ending ';'
+      return tokens[1].replace(";", "");
+    }
+
+    @Override
+    public boolean isNestedScript(String dbCommand) {
+      return dbCommand.startsWith(MYSQL_NESTING_TOKEN);
+    }
+
+    @Override
+    public String getDelimiter() {
+      return delimiter;
+    }
+
+    @Override
+    public String getQuoteCharacter() {
+      return "`";
+    }
+
+    @Override
+    public boolean isNonExecCommand(String dbCommand) {
+      return super.isNonExecCommand(dbCommand) ||
+          (dbCommand.startsWith("/*") && dbCommand.endsWith("*/")) ||
+          dbCommand.startsWith(DELIMITER_TOKEN);
+    }
+
+    @Override
+    public String cleanseCommand(String dbCommand) {
+      return super.cleanseCommand(dbCommand).replaceAll("/\\*.*?\\*/[^;]", "");
+    }
+
+  }
+
+  // Postgres specific parser
+  public static class PostgresCommandParser extends AbstractCommandParser {
+    private static final String POSTGRES_NESTING_TOKEN = "\\i";
+    @VisibleForTesting
+    public static final String POSTGRES_STANDARD_STRINGS_OPT = "SET standard_conforming_strings";
+    @VisibleForTesting
+    public static final String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81";
+
+    public PostgresCommandParser(String dbOpts, String msUsername, String msPassword,
+        Configuration conf) {
+      super(dbOpts, msUsername, msPassword, conf);
+    }
+
+    @Override
+    public String getScriptName(String dbCommand) throws IllegalArgumentException {
+      String[] tokens = dbCommand.split(" ");
+      if (tokens.length != 2) {
+        throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
+      }
+      // remove ending ';'
+      return tokens[1].replace(";", "");
+    }
+
+    @Override
+    public boolean isNestedScript(String dbCommand) {
+      return dbCommand.startsWith(POSTGRES_NESTING_TOKEN);
+    }
+
+    @Override
+    public boolean needsQuotedIdentifier() {
+      return true;
+    }
+
+    @Override
+    public boolean isNonExecCommand(String dbCommand) {
+      // Skip "standard_conforming_strings" command which is read-only in older
+      // Postgres versions like 8.1
+      // See: http://www.postgresql.org/docs/8.2/static/release-8-1.html
+      if (getDbOpts().contains(POSTGRES_SKIP_STANDARD_STRINGS_DBOPT)) {
+        if (dbCommand.startsWith(POSTGRES_STANDARD_STRINGS_OPT)) {
+          return true;
+        }
+      }
+      return super.isNonExecCommand(dbCommand);
+    }
+  }
+
+  //Oracle specific parser
+  public static class OracleCommandParser extends AbstractCommandParser {
+    private static final String ORACLE_NESTING_TOKEN = "@";
+
+    public OracleCommandParser(String dbOpts, String msUsername, String msPassword,
+        Configuration conf) {
+      super(dbOpts, msUsername, msPassword, conf);
+    }
+
+    @Override
+    public String getScriptName(String dbCommand) throws IllegalArgumentException {
+      if (!isNestedScript(dbCommand)) {
+        throw new IllegalArgumentException("Not a nested script format " + dbCommand);
+      }
+      // remove ending ';' and starting '@'
+      return dbCommand.replace(";", "").replace(ORACLE_NESTING_TOKEN, "");
+    }
+
+    @Override
+    public boolean isNestedScript(String dbCommand) {
+      return dbCommand.startsWith(ORACLE_NESTING_TOKEN);
+    }
+  }
+
+  //MSSQL specific parser
+  public static class MSSQLCommandParser extends AbstractCommandParser {
+    private static final String MSSQL_NESTING_TOKEN = ":r";
+
+    public MSSQLCommandParser(String dbOpts, String msUsername, String msPassword,
+        Configuration conf) {
+      super(dbOpts, msUsername, msPassword, conf);
+    }
+
+    @Override
+    public String getScriptName(String dbCommand) throws IllegalArgumentException {
+      String[] tokens = dbCommand.split(" ");
+      if (tokens.length != 2) {
+        throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
+      }
+      return tokens[1];
+    }
+
+    @Override
+    public boolean isNestedScript(String dbCommand) {
+      return dbCommand.startsWith(MSSQL_NESTING_TOKEN);
+    }
+  }
+
+  public static NestedScriptParser getDbCommandParser(String dbName) {
+    return getDbCommandParser(dbName, null);
+  }
+
+  public static NestedScriptParser getDbCommandParser(String dbName, String metaDbType) {
+    return getDbCommandParser(dbName, null, null, null, null, metaDbType);
+  }
+
+  public static NestedScriptParser getDbCommandParser(String dbName,
+      String dbOpts, String msUsername, String msPassword,
+      Configuration conf, String metaDbType) {
+    if (dbName.equalsIgnoreCase(DB_DERBY)) {
+      return new DerbyCommandParser(dbOpts, msUsername, msPassword, conf);
+    } else if (dbName.equalsIgnoreCase(DB_HIVE)) {
+      return new HiveCommandParser(dbOpts, msUsername, msPassword, conf, metaDbType);
+    } else if (dbName.equalsIgnoreCase(DB_MSSQL)) {
+      return new MSSQLCommandParser(dbOpts, msUsername, msPassword, conf);
+    } else if (dbName.equalsIgnoreCase(DB_MYSQL)) {
+      return new MySqlCommandParser(dbOpts, msUsername, msPassword, conf);
+    } else if (dbName.equalsIgnoreCase(DB_POSTGRACE)) {
+      return new PostgresCommandParser(dbOpts, msUsername, msPassword, conf);
+    } else if (dbName.equalsIgnoreCase(DB_ORACLE)) {
+      return new OracleCommandParser(dbOpts, msUsername, msPassword, conf);
+    } else {
+      throw new IllegalArgumentException("Unknown dbType " + dbName);
+    }
+  }
+
+  public static class MetaStoreConnectionInfo {
+    private final String userName;
+    private final String password;
+    private final String url;
+    private final String driver;
+    private final boolean printInfo;
+    private final Configuration conf;
+    private final String dbType;
+
+    public MetaStoreConnectionInfo(String userName, String password, String url, String driver,
+                                   boolean printInfo, Configuration conf, String dbType) {
+      super();
+      this.userName = userName;
+      this.password = password;
+      this.url = url;
+      this.driver = driver;
+      this.printInfo = printInfo;
+      this.conf = conf;
+      this.dbType = dbType;
+    }
+
+    public String getPassword() {
+      return password;
+    }
+
+    public String getUrl() {
+      return url;
+    }
+
+    public String getDriver() {
+      return driver;
+    }
+
+    public boolean isPrintInfo() {
+      return printInfo;
+    }
+
+    public Configuration getConf() {
+      return conf;
+    }
+
+    public String getUsername() {
+      return userName;
+    }
+
+    public boolean getPrintInfo() {
+      return printInfo;
+    }
+
+    public String getDbType() {
+      return dbType;
+    }
+  }
+}
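
A usage sketch of the parser machinery: flatten a (possibly nested) Derby
upgrade script, where nesting lines look like RUN 'upgrade-x.y.z.derby.sql';,
into a single string of executable statements. The script paths here are
hypothetical.

    import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper;
    import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser;

    import java.io.IOException;

    public class SchemaScriptSketch {
      public static String flatten(String scriptDir, String scriptFile) throws IOException {
        NestedScriptParser parser = HiveSchemaHelper.getDbCommandParser(HiveSchemaHelper.DB_DERBY);
        // Recursively inlines nested scripts and drops comments and delimiters.
        return parser.buildCommand(scriptDir, scriptFile);
      }
    }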

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
new file mode 100644
index 0000000..2310df6
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+public class FileUtils {
+  private static final Logger LOG = LoggerFactory.getLogger(FileUtils.class);
+
+  /**
+   * Move a particular file or directory to the trash, or delete it outright when purging.
+   * @param fs FileSystem to use
+   * @param f path of file or directory to move to trash
+   * @param conf configuration, used to locate the trash directory
+   * @param purge if true, skip the trash and delete the path permanently
+   * @return true if the move (or forced delete) succeeded
+   * @throws IOException
+   */
+  public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf, boolean purge)
+      throws IOException {
+    LOG.debug("deleting  " + f);
+    boolean result = false;
+    try {
+      if(purge) {
+        LOG.debug("purge is set to true. Not moving to Trash " + f);
+      } else {
+        result = Trash.moveToAppropriateTrash(fs, f, conf);
+        if (result) {
+          LOG.trace("Moved to trash: " + f);
+          return true;
+        }
+      }
+    } catch (IOException ioe) {
+      // On any failure (e.g. the trash directory is in a different encryption zone),
+      // fall through and force-delete instead.
+      LOG.warn(ioe.getMessage() + "; forcing delete instead.");
+    }
+
+    result = fs.delete(f, true);
+    if (!result) {
+      LOG.error("Failed to delete " + f);
+    }
+    return result;
+  }
+}
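
A usage sketch for dropping a partition directory; the bare Configuration here
is a stand-in for the real metastore configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.metastore.utils.FileUtils;

    import java.io.IOException;

    public class TrashSketch {
      // Trash the path when possible; delete it outright when purge is set.
      public static boolean drop(Path dir, boolean purge) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = dir.getFileSystem(conf);
        return FileUtils.moveToTrash(fs, dir, conf, purge);
      }
    }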

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java
new file mode 100644
index 0000000..81f8a85
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+public class JavaUtils {
+  /**
+   * Standard way of getting classloader in Hive code (outside of Hadoop).
+   *
+   * Uses the context classloader to get access to auxiliary jars and jars
+   * added with the 'add jar' command. Falls back to the current classloader.
+   *
+   * In Hadoop-related code, we use Configuration.getClassLoader().
+   * @return the class loader
+   */
+  public static ClassLoader getClassLoader() {
+    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+    if (classLoader == null) {
+      classLoader = JavaUtils.class.getClassLoader();
+    }
+    return classLoader;
+  }
+}
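
A usage sketch: resolving a class (e.g. a SerDe added via 'add jar') through
the context classloader instead of the defining classloader of the caller.

    import org.apache.hadoop.hive.metastore.utils.JavaUtils;

    public class ClassLoadingSketch {
      public static Class<?> load(String className) throws ClassNotFoundException {
        return Class.forName(className, true, JavaUtils.getClassLoader());
      }
    }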

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
new file mode 100644
index 0000000..3ef7e51
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MetaStoreUtils {
+  private static final Logger LOG = LoggerFactory.getLogger(MetaStoreUtils.class);
+
+  /**
+   * Logs an exception that can't be handled locally and throws it on as a MetaException.
+   *
+   * @param e exception to wrap.
+   * @throws MetaException wrapper for the exception
+   */
+  public static void logAndThrowMetaException(Exception e) throws MetaException {
+    String exInfo = "Got exception: " + e.getClass().getName() + " "
+        + e.getMessage();
+    LOG.error(exInfo, e);
+    LOG.error("Converting exception to MetaException");
+    throw new MetaException(exInfo);
+  }
+
+}
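
A usage sketch; the IOException below is a hypothetical stand-in for real work
that can fail inside the metastore.

    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

    import java.io.IOException;

    public class WrapExceptionSketch {
      // Converts an arbitrary checked failure into the Thrift-friendly MetaException.
      public static void doWork() throws MetaException {
        try {
          throw new IOException("disk full"); // stand-in for real work
        } catch (IOException e) {
          MetaStoreUtils.logAndThrowMetaException(e); // logs, then throws MetaException
        }
      }
    }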

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetastoreVersionInfo.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetastoreVersionInfo.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetastoreVersionInfo.java
new file mode 100644
index 0000000..de54ff3
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetastoreVersionInfo.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreVersionAnnotation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Class that uses package information to determine which version of the metastore this program is running.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class MetastoreVersionInfo {
+  private static final Logger LOG = LoggerFactory.getLogger(MetastoreVersionInfo.class);
+
+  private static Package myPackage;
+  private static MetastoreVersionAnnotation version;
+
+  static {
+    myPackage = MetastoreVersionAnnotation.class.getPackage();
+    version = myPackage.getAnnotation(MetastoreVersionAnnotation.class);
+  }
+
+  /**
+   * Get the meta-data for the metastore package.
+   * @return the Package object carrying the version annotation
+   */
+  static Package getPackage() {
+    return myPackage;
+  }
+
+  /**
+   * Get the Hive version.
+   * @return the Hive version string, e.g. "0.6.3-dev"
+   */
+  public static String getVersion() {
+    return version != null ? version.version() : "Unknown";
+  }
+
+  /**
+   * Get the Hive short version, with major/minor/change version numbers.
+   * @return short version string, e.g. "0.6.3"
+   */
+  public static String getShortVersion() {
+    return version != null ? version.shortVersion() : "Unknown";
+  }
+
+  /**
+   * Get the git revision number for the root directory.
+   * @return the revision number, e.g. "451451"
+   */
+  public static String getRevision() {
+    return version != null ? version.revision() : "Unknown";
+  }
+
+  /**
+   * Get the branch on which this originated.
+   * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
+   */
+  public static String getBranch() {
+    return version != null ? version.branch() : "Unknown";
+  }
+
+  /**
+   * The date that Hive was compiled.
+   * @return the compilation date in unix date format
+   */
+  public static String getDate() {
+    return version != null ? version.date() : "Unknown";
+  }
+
+  /**
+   * The user that compiled Hive.
+   * @return the username of the user
+   */
+  public static String getUser() {
+    return version != null ? version.user() : "Unknown";
+  }
+
+  /**
+   * Get the git URL for the root Hive directory.
+   * @return the repository URL string
+   */
+  public static String getUrl() {
+    return version != null ? version.url() : "Unknown";
+  }
+
+  /**
+   * Get the checksum of the source files from which Hive was built.
+   * @return the checksum string
+   */
+  public static String getSrcChecksum() {
+    return version != null ? version.srcChecksum() : "Unknown";
+  }
+
+  /**
+   * Returns the build version, which includes the version, revision,
+   * user and source checksum.
+   */
+  public static String getBuildVersion() {
+    return MetastoreVersionInfo.getVersion() +
+        " from " + MetastoreVersionInfo.getRevision() +
+        " by " + MetastoreVersionInfo.getUser() +
+        " source checksum " + MetastoreVersionInfo.getSrcChecksum();
+  }
+
+  public static void main(String[] args) {
+    LOG.debug("version: " + version);
+    System.out.println("Hive " + getVersion());
+    System.out.println("Git " + getUrl() + " -r " + getRevision());
+    System.out.println("Compiled by " + getUser() + " on " + getDate());
+    System.out.println("From source with checksum " + getSrcChecksum());
+  }
+
+}
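
A caller might surface the build information at startup; a minimal sketch,
assuming only the methods defined above:

    String build = MetastoreVersionInfo.getBuildVersion();
    // e.g. "3.0.0-SNAPSHOT from <revision> by <user> source checksum <checksum>"
    LOG.info("Metastore build: {}", build);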

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/protobuf/org/apache/hadoop/hive/metastore/metastore.proto b/standalone-metastore/src/main/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
new file mode 100644
index 0000000..29b99b4
--- /dev/null
+++ b/standalone-metastore/src/main/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+
+message SplitInfo {
+  required int64 offset = 1;
+  required int64 length = 2;
+  required int32 index = 3;
+}
+
+message SplitInfos {
+  repeated SplitInfo infos = 1;
+}
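
With protoc's default naming (the file declares no java_package or
java_outer_classname options, so the outer class name is derived from the
file name metastore.proto), the generated Java API would be used roughly as
in this sketch:

    import org.apache.hadoop.hive.metastore.Metastore.SplitInfo;
    import org.apache.hadoop.hive.metastore.Metastore.SplitInfos;

    SplitInfos infos = SplitInfos.newBuilder()
        .addInfos(SplitInfo.newBuilder().setOffset(0L).setLength(1024L).setIndex(0))
        .build();
    byte[] serialized = infos.toByteArray();
    // parseFrom throws InvalidProtocolBufferException on malformed input
    SplitInfos roundTripped = SplitInfos.parseFrom(serialized);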

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/resources/saveVersion.sh
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/resources/saveVersion.sh b/standalone-metastore/src/main/resources/saveVersion.sh
new file mode 100755
index 0000000..c9bde68
--- /dev/null
+++ b/standalone-metastore/src/main/resources/saveVersion.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This file is used to generate the package-info.java class that
+# records the version, revision, branch, user, timestamp, and url
+unset LANG
+unset LC_CTYPE
+unset LC_TIME
+version=$1
+shortversion=$2
+src_dir=$3
+revision=$4
+branch=$5
+url=$6
+user=`whoami`
+date=`date`
+dir=`pwd`
+cwd=`dirname $dir`
+if [ "$revision" = "" ]; then
+    if git rev-parse HEAD 2>/dev/null > /dev/null ; then
+        revision=`git log -1 --pretty=format:"%H"`
+        hostname=`hostname`
+        branch=`git branch | sed -n -e 's/^* //p'`
+        url="git://${hostname}${cwd}"
+    elif [ -d .svn ]; then
+        revision=`svn info ../ | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p'`
+        url=`svn info ../ | sed -n -e 's/^URL: \(.*\)/\1/p'`
+        # Get canonical branch (branches/X, tags/X, or trunk)
+        branch=`echo $url | sed -n -e 's,.*\(branches/.*\)$,\1,p' \
+            -e 's,.*\(tags/.*\)$,\1,p' \
+            -e 's,.*trunk$,trunk,p'`
+    else
+        revision="Unknown"
+        branch="Unknown"
+        url="file://$cwd"
+    fi
+fi
+if [ "$branch" = "" ]; then
+    branch="Unknown"
+fi
+if [ "$url" = "" ]; then
+    url="file://$cwd"
+fi
+
+if [ -x /sbin/md5 ]; then
+  md5="/sbin/md5"
+else
+  md5="md5sum"
+fi
+
+srcChecksum=`find ../ -name '*.java' | grep -v generated-sources | LC_ALL=C sort | xargs $md5 | $md5 | cut -d ' ' -f 1`
+
+mkdir -p "$src_dir/gen/org/apache/hadoop/hive/metastore/annotation"
+
+# On Windows, each of the following strings may end with \r; strip it
+branch=`echo $branch | tr -d '\r'`
+user=`echo $user | tr -d '\r'`
+date=`echo $date | tr -d '\r'`
+url=`echo $url | tr -d '\r'`
+srcChecksum=`echo $srcChecksum | tr -d '\r'`
+
+cat << EOF | \
+  sed -e "s/VERSION/$version/" -e "s/SHORTVERSION/$shortversion/" \
+      -e "s/USER/$user/" -e "s/DATE/$date/" \
+      -e "s|URL|$url|" -e "s/REV/$revision/" \
+      -e "s|BRANCH|$branch|" -e "s/SRCCHECKSUM/$srcChecksum/" \
+      > "$src_dir/gen/org/apache/hadoop/hive/metastore/annotation/package-info.java"
+/*
+ * Generated by saveVersion.sh
+ */
+@MetastoreVersionAnnotation(version="VERSION", shortVersion="SHORTVERSION",
+                         revision="REV", branch="BRANCH",
+                         user="USER", date="DATE", url="URL",
+                         srcChecksum="SRCCHECKSUM")
+package org.apache.hadoop.hive.metastore.annotation;
+EOF
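
After the sed substitutions, the generated package-info.java looks like the
following (all values illustrative):

    /*
     * Generated by saveVersion.sh
     */
    @MetastoreVersionAnnotation(version="3.0.0-SNAPSHOT", shortVersion="3.0.0",
                             revision="b0b6db7...", branch="master",
                             user="builder", date="Fri Sep 15 19:52:19 UTC 2017",
                             url="git://buildhost/path/to/hive",
                             srcChecksum="d41d8cd98f00b204e9800998ecf8427e")
    package org.apache.hadoop.hive.metastore.annotation;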

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java
new file mode 100644
index 0000000..6e06026
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+import org.apache.hadoop.hive.metastore.AggregateStatsCache.AggrColStats;
+import org.apache.hadoop.hive.metastore.AggregateStatsCache.Key;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hive.common.util.BloomFilter;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestAggregateStatsCache {
+  static String DB_NAME = "db";
+  static String TAB_PREFIX = "tab";
+  static String PART_PREFIX = "part";
+  static String COL_PREFIX = "col";
+  static int NUM_TABS = 2;
+  static int NUM_PARTS = 20;
+  static int NUM_COLS = 5;
+  static int MAX_CACHE_NODES = 10;
+  static int MAX_PARTITIONS_PER_CACHE_NODE = 10;
+  static long TIME_TO_LIVE = 2;
+  static long MAX_WRITER_WAIT = 1;
+  static long MAX_READER_WAIT = 1;
+  static double FALSE_POSITIVE_PROBABILITY = 0.01;
+  static double MAX_VARIANCE = 0.5;
+  static AggregateStatsCache cache;
+  static List<String> tables = new ArrayList<>();
+  static List<String> tabParts = new ArrayList<>();
+  static List<String> tabCols = new ArrayList<>();
+
+  @BeforeClass
+  public static void beforeTest() {
+    // All data initializations
+    initializeTables();
+    initializePartitions();
+    initializeColumns();
+  }
+
+  // tab1, tab2
+  private static void initializeTables() {
+    for (int i = 1; i <= NUM_TABS; i++) {
+      tables.add(TAB_PREFIX + i);
+    }
+  }
+
+  // part1 ... part20
+  private static void initializePartitions() {
+    for (int i = 1; i <= NUM_PARTS; i++) {
+      tabParts.add(PART_PREFIX + i);
+    }
+  }
+
+  // col1 ... col5
+  private static void initializeColumns() {
+    for (int i = 1; i <= NUM_COLS; i++) {
+      tabCols.add(COL_PREFIX + i);
+    }
+  }
+
+  @AfterClass
+  public static void afterTest() {
+  }
+
+  @Before
+  public void setUp() {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setLongVar(conf, ConfVars.AGGREGATE_STATS_CACHE_SIZE, MAX_CACHE_NODES);
+    MetastoreConf.setLongVar(conf, ConfVars.AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
+        MAX_PARTITIONS_PER_CACHE_NODE);
+    MetastoreConf.setDoubleVar(conf, ConfVars.AGGREGATE_STATS_CACHE_FPP, FALSE_POSITIVE_PROBABILITY);
+    MetastoreConf.setDoubleVar(conf, ConfVars.AGGREGATE_STATS_CACHE_MAX_VARIANCE, MAX_VARIANCE);
+    MetastoreConf.setTimeVar(conf, ConfVars.AGGREGATE_STATS_CACHE_TTL, TIME_TO_LIVE, TimeUnit.SECONDS);
+    MetastoreConf.setTimeVar(conf, ConfVars.AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT,
+        MAX_WRITER_WAIT, TimeUnit.SECONDS);
+    MetastoreConf.setTimeVar(conf, ConfVars.AGGREGATE_STATS_CACHE_MAX_READER_WAIT,
+        MAX_READER_WAIT, TimeUnit.SECONDS);
+    cache = AggregateStatsCache.getInstance(conf);
+  }
+
+  @After
+  public void tearDown() {
+  }
+
+  @Test
+  public void testCacheKey() {
+    Key k1 = new Key("db", "tbl1", "col");
+    Key k2 = new Key("db", "tbl1", "col");
+    // k1 equals k2
+    Assert.assertEquals(k1, k2);
+    Key k3 = new Key("db", "tbl2", "col");
+    // k1 not equals k3
+    Assert.assertNotEquals(k1, k3);
+  }
+
+  @Test
+  public void testBasicAddAndGet() throws Exception {
+    // Partnames: [tab1part1...tab1part9]
+    List<String> partNames = preparePartNames(tables.get(0), 1, 9);
+    // Prepare the bloom filter
+    BloomFilter bloomFilter = prepareBloomFilter(partNames);
+    // Add a dummy aggregate stats object for the above parts (part1...part9) of tab1 for col1
+    String tblName = tables.get(0);
+    String colName = tabCols.get(0);
+    int highVal = 100, lowVal = 10, numDVs = 50, numNulls = 5;
+    // We'll treat this as the aggregate col stats for part1...part9 of tab1, col1
+    ColumnStatisticsObj aggrColStats =
+        getDummyLongColStat(colName, highVal, lowVal, numDVs, numNulls);
+    // Now add the dummy colstats to the cache, declared as aggregating 10 partitions
+    cache.add(DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter);
+    // Now get from cache
+    AggrColStats aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames);
+    Assert.assertNotNull(aggrStatsCached);
+
+    ColumnStatisticsObj aggrColStatsCached = aggrStatsCached.getColStats();
+    Assert.assertEquals(aggrColStats, aggrColStatsCached);
+
+    // Now get a non-existent entry
+    aggrStatsCached = cache.get("dbNotThere", tblName, colName, partNames);
+    Assert.assertNull(aggrStatsCached);
+  }
+
+  @Test
+  public void testAddGetWithVariance() throws Exception {
+    // Partnames: [tab1part1...tab1part9]
+    List<String> partNames = preparePartNames(tables.get(0), 1, 9);
+    // Prepare the bloom filter
+    BloomFilter bloomFilter = prepareBloomFilter(partNames);
+    // Add a dummy aggregate stats object for the above parts (part1...part9) of tab1 for col1
+    String tblName = tables.get(0);
+    String colName = tabCols.get(0);
+    int highVal = 100, lowVal = 10, numDVs = 50, numNulls = 5;
+    // We'll treat this as the aggregate col stats for part1...part9 of tab1, col1
+    ColumnStatisticsObj aggrColStats =
+        getDummyLongColStat(colName, highVal, lowVal, numDVs, numNulls);
+    // Now add to cache
+    cache.add(DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter);
+
+    // Now prepare partnames with only 5 partitions: [tab1part1...tab1part5]
+    partNames = preparePartNames(tables.get(0), 1, 5);
+    // This get should fail because its variance ((10-5)/5 = 1.0) exceeds MAX_VARIANCE (0.5)
+    AggrColStats aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames);
+    Assert.assertNull(aggrStatsCached);
+
+    // Now prepare partnames with 10 partitions: [tab1part11...tab1part20], but with no overlap
+    partNames = preparePartNames(tables.get(0), 11, 20);
+    // This get should fail because its variance ((10-0)/10 = 1.0) exceeds MAX_VARIANCE (0.5)
+    aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames);
+    Assert.assertNull(aggrStatsCached);
+
+    // Now prepare partNames with 8 partitions: [tab1part1...tab1part8], all of which are
+    // contained in the object that we added to the cache
+    partNames = preparePartNames(tables.get(0), 1, 8);
+    // This get should succeed because its variance ((10-8)/8 = 0.25) is within MAX_VARIANCE (0.5)
+    aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames);
+    Assert.assertNotNull(aggrStatsCached);
+    ColumnStatisticsObj aggrColStatsCached = aggrStatsCached.getColStats();
+    Assert.assertEquals(aggrColStats, aggrColStatsCached);
+  }
+
+  @Test
+  public void testTimeToLive() throws Exception {
+    // Add a dummy node to cache
+    // Partnames: [tab1part1...tab1part9]
+    List<String> partNames = preparePartNames(tables.get(0), 1, 9);
+    // Prepare the bloom filter
+    BloomFilter bloomFilter = prepareBloomFilter(partNames);
+    // Add a dummy aggregate stats object for the above parts (part1...part9) of tab1 for col1
+    String tblName = tables.get(0);
+    String colName = tabCols.get(0);
+    int highVal = 100, lowVal = 10, numDVs = 50, numNulls = 5;
+    // We'll treat this as the aggregate col stats for part1...part9 of tab1, col1
+    ColumnStatisticsObj aggrColStats =
+        getDummyLongColStat(colName, highVal, lowVal, numDVs, numNulls);
+    // Now add to cache
+    cache.add(DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter);
+
+    // Sleep for 3 seconds
+    Thread.sleep(3000);
+
+    // Get should fail now, since the TTL is 2s and we've slept for 3 seconds
+    AggrColStats aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames);
+    Assert.assertNull(aggrStatsCached);
+  }
+
+  /**
+   * Prepares a list of partition names, from minPart to maxPart inclusive,
+   * each prepended with the table name.
+   * Example: [tab1part1, tab1part2 ...]
+   *
+   * @param tabName table name to prepend to each partition name
+   * @param minPart lowest partition number, 1-based
+   * @param maxPart highest partition number, inclusive
+   * @return the list of prepared partition names
+   * @throws Exception if the range falls outside [1, NUM_PARTS]
+   */
+  private List<String> preparePartNames(String tabName, int minPart, int maxPart) throws Exception {
+    if ((minPart < 1) || (maxPart > NUM_PARTS)) {
+      throw new Exception("tabParts does not have these partition numbers");
+    }
+    List<String> partNames = new ArrayList<>();
+    for (int i = minPart; i <= maxPart; i++) {
+      String partName = tabParts.get(i-1);
+      partNames.add(tabName + partName);
+    }
+    return partNames;
+  }
+
+  /**
+   * Prepares a bloom filter from the list of partition names
+   * @param partNames partition names to add to the filter
+   * @return a bloom filter containing all the given partition names
+   */
+  private BloomFilter prepareBloomFilter(List<String> partNames) {
+    BloomFilter bloomFilter =
+        new BloomFilter(MAX_PARTITIONS_PER_CACHE_NODE, FALSE_POSITIVE_PROBABILITY);
+    for (String partName: partNames) {
+      bloomFilter.add(partName.getBytes());
+    }
+    return bloomFilter;
+  }
+
+  private ColumnStatisticsObj getDummyLongColStat(String colName, int highVal, int lowVal, int numDVs, int numNulls) {
+    ColumnStatisticsObj aggrColStats = new ColumnStatisticsObj();
+    aggrColStats.setColName(colName);
+    aggrColStats.setColType("long");
+    LongColumnStatsData longStatsData = new LongColumnStatsData();
+    longStatsData.setHighValue(highVal);
+    longStatsData.setLowValue(lowVal);
+    longStatsData.setNumDVs(numDVs);
+    longStatsData.setNumNulls(numNulls);
+    ColumnStatisticsData aggrColStatsData = new ColumnStatisticsData();
+    aggrColStatsData.setLongStats(longStatsData);
+    aggrColStats.setStatsData(aggrColStatsData);
+    return aggrColStats;
+  }
+}
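
The variance checks exercised above boil down to a simple ratio; a hedged
sketch of the rule the test comments describe, not necessarily the cache's
exact implementation:

    // An entry cached for `cached` partitions, of which `matched` overlap the
    // request, serves a request for `requested` partitions only if
    // |cached - matched| / requested <= maxVariance.
    static boolean withinVariance(int cached, int matched, int requested, double maxVariance) {
      return ((double) Math.abs(cached - matched)) / requested <= maxVariance;
    }
    // withinVariance(10, 9, 9, 0.5)   -> true   ((10-9)/9  = 0.11)
    // withinVariance(10, 5, 5, 0.5)   -> false  ((10-5)/5  = 1.0)
    // withinVariance(10, 0, 10, 0.5)  -> false  ((10-0)/10 = 1.0)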

