hive-commits mailing list archives

From ga...@apache.org
Subject svn commit: r1657394 [1/5] - in /hive/branches/hbase-metastore: bin/ext/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ itests/hive-unit/ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase...
Date Wed, 04 Feb 2015 20:00:51 GMT
Author: gates
Date: Wed Feb  4 20:00:49 2015
New Revision: 1657394

URL: http://svn.apache.org/r1657394
Log:
HIVE-9453 Initial patch [hbase-metastore branch] (Alan Gates, reviewed by Thejas Nair)

Added:
    hive/branches/hbase-metastore/bin/ext/hbaseschematool.sh
    hive/branches/hbase-metastore/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/
    hive/branches/hbase-metastore/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/Counter.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/DatabaseWritable.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseSchemaTool.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/ObjectCache.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionCache.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionWritable.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/RoleWritable.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedStorageDescriptor.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StorageDescriptorWritable.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TableWritable.java
    hive/branches/hbase-metastore/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/
    hive/branches/hbase-metastore/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java
    hive/branches/hbase-metastore/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
    hive/branches/hbase-metastore/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java
    hive/branches/hbase-metastore/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestStatsCache.java
Modified:
    hive/branches/hbase-metastore/common/src/java/org/apache/hadoop/hive/common/ObjectPair.java
    hive/branches/hbase-metastore/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/hbase-metastore/itests/hive-unit/pom.xml
    hive/branches/hbase-metastore/metastore/if/hive_metastore.thrift
    hive/branches/hbase-metastore/metastore/pom.xml
    hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
    hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
    hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
    hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
    hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
    hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
    hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
    hive/branches/hbase-metastore/metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java
    hive/branches/hbase-metastore/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
    hive/branches/hbase-metastore/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
    hive/branches/hbase-metastore/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    hive/branches/hbase-metastore/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    hive/branches/hbase-metastore/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java

Added: hive/branches/hbase-metastore/bin/ext/hbaseschematool.sh
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/bin/ext/hbaseschematool.sh?rev=1657394&view=auto
==============================================================================
--- hive/branches/hbase-metastore/bin/ext/hbaseschematool.sh (added)
+++ hive/branches/hbase-metastore/bin/ext/hbaseschematool.sh Wed Feb  4 20:00:49 2015
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+THISSERVICE=hbaseschematool
+export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} "
+
+hbaseschematool () {
+  CLASS=org.apache.hadoop.hive.metastore.hbase.HBaseSchemaTool
+  HIVE_OPTS=''
+  execHiveCmd $CLASS "$@"
+}
+
+hbaseschematool_help () {
+  echo "usage ./hive hbaseschematool [-d <dbname>] <cmd>"
+} 

Modified: hive/branches/hbase-metastore/common/src/java/org/apache/hadoop/hive/common/ObjectPair.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/common/src/java/org/apache/hadoop/hive/common/ObjectPair.java?rev=1657394&r1=1657393&r2=1657394&view=diff
==============================================================================
--- hive/branches/hbase-metastore/common/src/java/org/apache/hadoop/hive/common/ObjectPair.java (original)
+++ hive/branches/hbase-metastore/common/src/java/org/apache/hadoop/hive/common/ObjectPair.java Wed Feb  4 20:00:49 2015
@@ -75,6 +75,11 @@ public class ObjectPair<F, S> {
         this.getSecond().equals(that.getSecond());
   }
 
+  @Override
+  public int hashCode() {
+    return first.hashCode() * 31 + second.hashCode();
+  }
+
   public String toString() {
     return first + ":" + second;
   }
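
The reason for this hunk: ObjectPair already overrode equals(), and an
equals() without a matching hashCode() silently breaks hash-based
collections. A minimal, self-contained illustration (a stand-in Pair class,
not the real ObjectPair) using the same 31-multiplier formula; like the
patch, it assumes both members are non-null:

    import java.util.HashSet;
    import java.util.Set;

    public class PairHashDemo {
      static class Pair<F, S> {
        final F first;
        final S second;
        Pair(F first, S second) { this.first = first; this.second = second; }
        @Override public boolean equals(Object o) {
          if (!(o instanceof Pair)) return false;
          Pair<?, ?> that = (Pair<?, ?>) o;
          return first.equals(that.first) && second.equals(that.second);
        }
        // Mirrors the patch: logically equal pairs now share a hash bucket.
        @Override public int hashCode() {
          return first.hashCode() * 31 + second.hashCode();
        }
      }

      public static void main(String[] args) {
        Set<Pair<String, Integer>> set = new HashSet<Pair<String, Integer>>();
        set.add(new Pair<String, Integer>("db", 1));
        set.add(new Pair<String, Integer>("db", 1)); // duplicate of the first
        System.out.println(set.size()); // prints 1; without hashCode() it would be 2
      }
    }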

Modified: hive/branches/hbase-metastore/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1657394&r1=1657393&r2=1657394&view=diff
==============================================================================
--- hive/branches/hbase-metastore/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/hbase-metastore/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Wed Feb  4 20:00:49 2015
@@ -373,6 +373,17 @@ public class HiveConf extends Configurat
     METASTOREURIS("hive.metastore.uris", "",
         "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
 
+    METASTORE_FASTPATH("hive.metastore.fastpath", false,
+        "Used to avoid all of the proxies and object copies in the metastore.  Note, if this is " +
+            "set, you MUST use a local metastore (hive.metastore.uris must be empty) otherwise " +
+            "undefined and most likely undesired behavior will result"),
+    METASTORE_HBASE_CACHE_SIZE("hive.metastore.hbase.cache.size", 100000, "Maximum number of " +
+        "objects we will place in the hbase metastore cache.  The objects will be divided up by " +
+        "types that we need to cache."),
+    METASTORE_HBASE_CACHE_TIME_TO_LIVE("hive.metastore.hbase.cache.ttl", "600s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "Number of seconds for stats items to live in the cache"),
+
     METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3,
         "Number of retries while opening a connection to metastore"),
     METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
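
For readers wiring the new settings up in code: a hedged sketch of reading
all three, assuming the standard HiveConf accessors (getBoolVar, getIntVar,
getTimeVar) apply to the new ConfVars entries exactly as they do to the
existing ones:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hive.conf.HiveConf;

    public class HBaseMetastoreConfDemo {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Defaults per the declarations above: false / 100000 / 600s.
        boolean fastpath = conf.getBoolVar(HiveConf.ConfVars.METASTORE_FASTPATH);
        int cacheSize = conf.getIntVar(HiveConf.ConfVars.METASTORE_HBASE_CACHE_SIZE);
        long ttlSecs = conf.getTimeVar(
            HiveConf.ConfVars.METASTORE_HBASE_CACHE_TIME_TO_LIVE, TimeUnit.SECONDS);
        System.out.println(fastpath + " / " + cacheSize + " / " + ttlSecs + "s");
      }
    }

Note the caveat in the fastpath description: it is only safe with a local
(embedded) metastore, i.e. hive.metastore.uris left empty.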

Modified: hive/branches/hbase-metastore/itests/hive-unit/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/itests/hive-unit/pom.xml?rev=1657394&r1=1657393&r2=1657394&view=diff
==============================================================================
--- hive/branches/hbase-metastore/itests/hive-unit/pom.xml (original)
+++ hive/branches/hbase-metastore/itests/hive-unit/pom.xml Wed Feb  4 20:00:49 2015
@@ -245,6 +245,27 @@
           <scope>test</scope>
         </dependency>
         <dependency>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-server</artifactId>
+          <version>${hbase.hadoop2.version}</version>
+          <type>test-jar</type>
+          <scope>test</scope>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-hadoop-compat</artifactId>
+          <version>${hbase.hadoop2.version}</version>
+          <type>test-jar</type>
+          <scope>test</scope>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-hadoop2-compat</artifactId>
+          <version>${hbase.hadoop2.version}</version>
+          <type>test-jar</type>
+          <scope>test</scope>
+        </dependency>
+        <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-minicluster</artifactId>
           <scope>test</scope>

Added: hive/branches/hbase-metastore/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java?rev=1657394&view=auto
==============================================================================
--- hive/branches/hbase-metastore/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java (added)
+++ hive/branches/hbase-metastore/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java Wed Feb  4 20:00:49 2015
@@ -0,0 +1,793 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Decimal;
+import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Integration tests with HBase Mini-cluster for HBaseStore
+ */
+public class TestHBaseStoreIntegration {
+
+  private static final Log LOG = LogFactory.getLog(TestHBaseStoreIntegration.class.getName());
+
+  private static HBaseTestingUtility utility;
+  private static HTableInterface tblTable;
+  private static HTableInterface sdTable;
+  private static HTableInterface partTable;
+  private static HTableInterface dbTable;
+  private static HTableInterface roleTable;
+  private static Map<String, String> emptyParameters = new HashMap<String, String>();
+
+  @Rule public ExpectedException thrown = ExpectedException.none();
+  @Mock private HConnection hconn;
+  private HBaseStore store;
+  private HiveConf conf;
+
+  @BeforeClass
+  public static void startMiniCluster() throws Exception {
+    utility = new HBaseTestingUtility();
+    utility.startMiniCluster();
+    byte[][] families = new byte[][] {HBaseReadWrite.CATALOG_CF, HBaseReadWrite.STATS_CF};
+    tblTable = utility.createTable(HBaseReadWrite.TABLE_TABLE.getBytes(HBaseUtils.ENCODING),
+        families);
+    sdTable = utility.createTable(HBaseReadWrite.SD_TABLE.getBytes(HBaseUtils.ENCODING),
+        HBaseReadWrite.CATALOG_CF);
+    partTable = utility.createTable(HBaseReadWrite.PART_TABLE.getBytes(HBaseUtils.ENCODING),
+        families);
+    dbTable = utility.createTable(HBaseReadWrite.DB_TABLE.getBytes(HBaseUtils.ENCODING),
+        HBaseReadWrite.CATALOG_CF);
+    roleTable = utility.createTable(HBaseReadWrite.ROLE_TABLE.getBytes(HBaseUtils.ENCODING),
+        HBaseReadWrite.CATALOG_CF);
+  }
+
+  @AfterClass
+  public static void shutdownMiniCluster() throws Exception {
+    utility.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setupConnection() throws IOException {
+    MockitoAnnotations.initMocks(this);
+    Mockito.when(hconn.getTable(HBaseReadWrite.SD_TABLE)).thenReturn(sdTable);
+    Mockito.when(hconn.getTable(HBaseReadWrite.TABLE_TABLE)).thenReturn(tblTable);
+    Mockito.when(hconn.getTable(HBaseReadWrite.PART_TABLE)).thenReturn(partTable);
+    Mockito.when(hconn.getTable(HBaseReadWrite.DB_TABLE)).thenReturn(dbTable);
+    Mockito.when(hconn.getTable(HBaseReadWrite.ROLE_TABLE)).thenReturn(roleTable);
+    conf = new HiveConf();
+    // Turn off caching, as we want to test actual interaction with HBase
+    conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true);
+    HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf);
+    hbase.setConnection(hconn);
+    store = new HBaseStore();
+    store.setConf(conf);
+  }
+
+  @Test
+  public void createDb() throws Exception {
+    String dbname = "mydb";
+    Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters);
+    store.createDatabase(db);
+
+    Database d = store.getDatabase("mydb");
+    Assert.assertEquals(dbname, d.getName());
+    Assert.assertEquals("no description", d.getDescription());
+    Assert.assertEquals("file:///tmp", d.getLocationUri());
+  }
+
+  @Test
+  public void dropDb() throws Exception {
+    String dbname = "anotherdb";
+    Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters);
+    store.createDatabase(db);
+
+    Database d = store.getDatabase(dbname);
+    Assert.assertNotNull(d);
+
+    store.dropDatabase(dbname);
+    thrown.expect(NoSuchObjectException.class);
+    store.getDatabase(dbname);
+  }
+
+  @Test
+  public void createTable() throws Exception {
+    int startTime = (int)(System.currentTimeMillis() / 1000);
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(new FieldSchema("col1", "int", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, emptyParameters);
+    Table table = new Table("mytable", "default", "me", startTime, startTime, 0, sd, null,
+        emptyParameters, null, null, null);
+    store.createTable(table);
+
+    Table t = store.getTable("default", "mytable");
+    Assert.assertEquals(1, t.getSd().getColsSize());
+    Assert.assertEquals("col1", t.getSd().getCols().get(0).getName());
+    Assert.assertEquals("int", t.getSd().getCols().get(0).getType());
+    Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment());
+    Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName());
+    Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib());
+    Assert.assertEquals("file:/tmp", t.getSd().getLocation());
+    Assert.assertEquals("input", t.getSd().getInputFormat());
+    Assert.assertEquals("output", t.getSd().getOutputFormat());
+    Assert.assertEquals("me", t.getOwner());
+    Assert.assertEquals("default", t.getDbName());
+    Assert.assertEquals("mytable", t.getTableName());
+  }
+
+  @Test
+  public void alterTable() throws Exception {
+    String tableName = "alttable";
+    int startTime = (int)(System.currentTimeMillis() / 1000);
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(new FieldSchema("col1", "int", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, emptyParameters);
+    Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null,
+        emptyParameters, null, null, null);
+    store.createTable(table);
+
+    startTime += 10;
+    table.setLastAccessTime(startTime);
+    store.alterTable("default", tableName, table);
+
+    Table t = store.getTable("default", tableName);
+    Assert.assertEquals(1, t.getSd().getColsSize());
+    Assert.assertEquals("col1", t.getSd().getCols().get(0).getName());
+    Assert.assertEquals("int", t.getSd().getCols().get(0).getType());
+    Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment());
+    Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName());
+    Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib());
+    Assert.assertEquals("file:/tmp", t.getSd().getLocation());
+    Assert.assertEquals("input", t.getSd().getInputFormat());
+    Assert.assertEquals("output", t.getSd().getOutputFormat());
+    Assert.assertEquals("me", t.getOwner());
+    Assert.assertEquals("default", t.getDbName());
+    Assert.assertEquals(tableName, t.getTableName());
+    Assert.assertEquals(startTime, t.getLastAccessTime());
+  }
+
+  @Test
+  public void dropTable() throws Exception {
+    String tableName = "dtable";
+    int startTime = (int)(System.currentTimeMillis() / 1000);
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(new FieldSchema("col1", "int", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, emptyParameters);
+    Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null,
+        emptyParameters, null, null, null);
+    store.createTable(table);
+
+    Table t = store.getTable("default", tableName);
+    Assert.assertNotNull(t);
+
+    store.dropTable("default", tableName);
+    Assert.assertNull(store.getTable("default", tableName));
+  }
+
+  @Test
+  public void createPartition() throws Exception {
+    String dbName = "default";
+    String tableName = "myparttable";
+    int startTime = (int)(System.currentTimeMillis() / 1000);
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(new FieldSchema("col1", "int", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, emptyParameters);
+    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
+    partCols.add(new FieldSchema("pc", "string", ""));
+    Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols,
+        emptyParameters, null, null, null);
+    store.createTable(table);
+
+    List<String> vals = new ArrayList<String>();
+    vals.add("fred");
+    StorageDescriptor psd = new StorageDescriptor(sd);
+    psd.setLocation("file:/tmp/pc=fred");
+    Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd,
+        emptyParameters);
+    store.addPartition(part);
+
+    Partition p = store.getPartition(dbName, tableName, vals);
+    Assert.assertEquals(1, p.getSd().getColsSize());
+    Assert.assertEquals("col1", p.getSd().getCols().get(0).getName());
+    Assert.assertEquals("int", p.getSd().getCols().get(0).getType());
+    Assert.assertEquals("nocomment", p.getSd().getCols().get(0).getComment());
+    Assert.assertEquals("serde", p.getSd().getSerdeInfo().getName());
+    Assert.assertEquals("seriallib", p.getSd().getSerdeInfo().getSerializationLib());
+    Assert.assertEquals("file:/tmp/pc=fred", p.getSd().getLocation());
+    Assert.assertEquals("input", p.getSd().getInputFormat());
+    Assert.assertEquals("output", p.getSd().getOutputFormat());
+    Assert.assertEquals(dbName, p.getDbName());
+    Assert.assertEquals(tableName, p.getTableName());
+    Assert.assertEquals(1, p.getValuesSize());
+    Assert.assertEquals("fred", p.getValues().get(0));
+  }
+
+  // TODO - Fix this and the next test.  They depend on test execution order and are bogus.
+  @Test
+  public void createManyPartitions() throws Exception {
+    String dbName = "default";
+    String tableName = "manyParts";
+    int startTime = (int)(System.currentTimeMillis() / 1000);
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(new FieldSchema("col1", "int", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, emptyParameters);
+    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
+    partCols.add(new FieldSchema("pc", "string", ""));
+    Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols,
+        emptyParameters, null, null, null);
+    store.createTable(table);
+
+    List<String> partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan");
+    for (String val : partVals) {
+      List<String> vals = new ArrayList<String>();
+      vals.add(val);
+      StorageDescriptor psd = new StorageDescriptor(sd);
+      psd.setLocation("file:/tmp/pc=" + val);
+      Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd,
+          emptyParameters);
+      store.addPartition(part);
+
+      Partition p = store.getPartition(dbName, tableName, vals);
+      Assert.assertEquals("file:/tmp/pc=" + val, p.getSd().getLocation());
+    }
+
+    Assert.assertEquals(2, HBaseReadWrite.getInstance(conf).countStorageDescriptor());
+
+  }
+
+  @Test
+  public void createDifferentPartition() throws Exception {
+    int startTime = (int)(System.currentTimeMillis() / 1000);
+    Map<String, String> emptyParameters = new HashMap<String, String>();
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(new FieldSchema("col1", "int", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input2", "output", false, 0,
+        serde, null, null, emptyParameters);
+    Table table = new Table("differenttable", "default", "me", startTime, startTime, 0, sd, null,
+        emptyParameters, null, null, null);
+    store.createTable(table);
+
+    Assert.assertEquals(3, HBaseReadWrite.getInstance(conf).countStorageDescriptor());
+
+  }
+
+  @Test
+  public void getPartitions() throws Exception {
+    String dbName = "default";
+    String tableName = "manyParts";
+    int startTime = (int)(System.currentTimeMillis() / 1000);
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(new FieldSchema("col1", "int", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, emptyParameters);
+    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
+    partCols.add(new FieldSchema("pc", "string", ""));
+    Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols,
+        emptyParameters, null, null, null);
+    store.createTable(table);
+
+    List<String> partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan");
+    for (String val : partVals) {
+      List<String> vals = new ArrayList<String>();
+      vals.add(val);
+      StorageDescriptor psd = new StorageDescriptor(sd);
+      psd.setLocation("file:/tmp/pc=" + val);
+      Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd,
+          emptyParameters);
+      store.addPartition(part);
+
+      Partition p = store.getPartition(dbName, tableName, vals);
+      Assert.assertEquals("file:/tmp/pc=" + val, p.getSd().getLocation());
+    }
+
+    List<Partition> parts = store.getPartitions(dbName, tableName, -1);
+    Assert.assertEquals(5, parts.size());
+    String[] pv = new String[5];
+    for (int i = 0; i < 5; i++) pv[i] = parts.get(i).getValues().get(0);
+    Arrays.sort(pv);
+    Assert.assertArrayEquals(pv, partVals.toArray(new String[5]));
+  }
+
+  @Test
+  public void listPartitions() throws Exception {
+    String dbName = "default";
+    String tableName = "listParts";
+    int startTime = (int)(System.currentTimeMillis() / 1000);
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(new FieldSchema("col1", "int", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, emptyParameters);
+    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
+    partCols.add(new FieldSchema("pc", "string", ""));
+    partCols.add(new FieldSchema("region", "string", ""));
+    Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols,
+        emptyParameters, null, null, null);
+    store.createTable(table);
+
+    String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}};
+    for (String[] pv : partVals) {
+      List<String> vals = new ArrayList<String>();
+      for (String v : pv) vals.add(v);
+      StorageDescriptor psd = new StorageDescriptor(sd);
+      psd.setLocation("file:/tmp/pc=" + pv[0] + "/region=" + pv[1]);
+      Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd,
+          emptyParameters);
+      store.addPartition(part);
+    }
+
+    List<String> names = store.listPartitionNames(dbName, tableName, (short) -1);
+    Assert.assertEquals(2, names.size());
+    String[] resultNames = names.toArray(new String[names.size()]);
+    Arrays.sort(resultNames);
+    Assert.assertArrayEquals(resultNames, new String[]{"pc=today/region=north america",
+        "pc=tomorrow/region=europe"});
+
+    List<Partition> parts = store.getPartitionsByNames(dbName, tableName, names);
+    Assert.assertArrayEquals(partVals[0], parts.get(0).getValues().toArray(new String[2]));
+    Assert.assertArrayEquals(partVals[1], parts.get(1).getValues().toArray(new String[2]));
+
+    store.dropPartitions(dbName, tableName, names);
+    List<Partition> afterDropParts = store.getPartitions(dbName, tableName, -1);
+    Assert.assertEquals(0, afterDropParts.size());
+  }
+
+  @Test
+  public void dropPartition() throws Exception {
+    String dbName = "default";
+    String tableName = "myparttable2";
+    int startTime = (int)(System.currentTimeMillis() / 1000);
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(new FieldSchema("col1", "int", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, emptyParameters);
+    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
+    partCols.add(new FieldSchema("pc", "string", ""));
+    Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols,
+        emptyParameters, null, null, null);
+    store.createTable(table);
+
+    List<String> vals = Arrays.asList("fred");
+    StorageDescriptor psd = new StorageDescriptor(sd);
+    psd.setLocation("file:/tmp/pc=fred");
+    Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd,
+        emptyParameters);
+    store.addPartition(part);
+
+    Assert.assertNotNull(store.getPartition(dbName, tableName, vals));
+    store.dropPartition(dbName, tableName, vals);
+    thrown.expect(NoSuchObjectException.class);
+    store.getPartition(dbName, tableName, vals);
+  }
+
+  @Test
+  public void createRole() throws Exception {
+    int now = (int)(System.currentTimeMillis() / 1000);
+    String roleName = "myrole";
+    store.addRole(roleName, "me");
+
+    Role r = store.getRole(roleName);
+    Assert.assertEquals(roleName, r.getRoleName());
+    Assert.assertEquals("me", r.getOwnerName());
+    Assert.assertTrue(now <= r.getCreateTime());
+  }
+
+  @Test
+  public void dropRole() throws Exception {
+    int now = (int)(System.currentTimeMillis() / 1000);
+    String roleName = "anotherrole";
+    store.addRole(roleName, "me");
+
+    Role r = store.getRole(roleName);
+    Assert.assertEquals(roleName, r.getRoleName());
+    Assert.assertEquals("me", r.getOwnerName());
+    Assert.assertTrue(now <= r.getCreateTime());
+
+    store.removeRole(roleName);
+    thrown.expect(NoSuchObjectException.class);
+    store.getRole(roleName);
+  }
+
+  @Test
+  public void tableStatistics() throws Exception {
+    long now = System.currentTimeMillis();
+    String dbname = "default";
+    String tableName = "statstable";
+    String boolcol = "boolcol";
+    String longcol = "longcol";
+    String doublecol = "doublecol";
+    String stringcol = "stringcol";
+    String binarycol = "bincol";
+    String decimalcol = "deccol";
+    long trues = 37;
+    long falses = 12;
+    long booleanNulls = 2;
+    long longHigh = 120938479124L;
+    long longLow = -12341243213412124L;
+    long longNulls = 23;
+    long longDVs = 213L;
+    double doubleHigh = 123423.23423;
+    double doubleLow = 0.00001234233;
+    long doubleNulls = 92;
+    long doubleDVs = 1234123421L;
+    long strMaxLen = 1234;
+    double strAvgLen = 32.3;
+    long strNulls = 987;
+    long strDVs = 906;
+    long binMaxLen = 123412987L;
+    double binAvgLen = 76.98;
+    long binNulls = 976998797L;
+    Decimal decHigh = new Decimal();
+    decHigh.setScale((short)3);
+    decHigh.setUnscaled("3876".getBytes()); // I have no clue how this is translated, but it
+    // doesn't matter
+    Decimal decLow = new Decimal();
+    decLow.setScale((short)3);
+    decLow.setUnscaled("38".getBytes());
+    long decNulls = 13;
+    long decDVs = 923947293L;
+
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(new FieldSchema(boolcol, "boolean", "nocomment"));
+    cols.add(new FieldSchema(longcol, "long", "nocomment"));
+    cols.add(new FieldSchema(doublecol, "double", "nocomment"));
+    cols.add(new FieldSchema(stringcol, "varchar(32)", "nocomment"));
+    cols.add(new FieldSchema(binarycol, "binary", "nocomment"));
+    cols.add(new FieldSchema(decimalcol, "decimal(5, 3)", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, emptyParameters);
+    Table table = new Table(tableName, dbname, "me", (int)(now / 1000), (int)(now / 1000), 0, sd, null,
+        emptyParameters, null, null, null);
+    store.createTable(table);
+
+    ColumnStatistics stats = new ColumnStatistics();
+    ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
+    desc.setLastAnalyzed(now);
+    desc.setDbName(dbname);
+    desc.setTableName(tableName);
+    desc.setIsTblLevel(true);
+    stats.setStatsDesc(desc);
+
+    // Do one column of each type
+    ColumnStatisticsObj obj = new ColumnStatisticsObj();
+    obj.setColName(boolcol);
+    obj.setColType("boolean");
+    ColumnStatisticsData data = new ColumnStatisticsData();
+    BooleanColumnStatsData boolData = new BooleanColumnStatsData();
+    boolData.setNumTrues(trues);
+    boolData.setNumFalses(falses);
+    boolData.setNumNulls(booleanNulls);
+    data.setBooleanStats(boolData);
+    obj.setStatsData(data);
+    stats.addToStatsObj(obj);
+
+    obj = new ColumnStatisticsObj();
+    obj.setColName(longcol);
+    obj.setColType("long");
+    data = new ColumnStatisticsData();
+    LongColumnStatsData longData = new LongColumnStatsData();
+    longData.setHighValue(longHigh);
+    longData.setLowValue(longLow);
+    longData.setNumNulls(longNulls);
+    longData.setNumDVs(longDVs);
+    data.setLongStats(longData);
+    obj.setStatsData(data);
+    stats.addToStatsObj(obj);
+
+    obj = new ColumnStatisticsObj();
+    obj.setColName(doublecol);
+    obj.setColType("double");
+    data = new ColumnStatisticsData();
+    DoubleColumnStatsData doubleData = new DoubleColumnStatsData();
+    doubleData.setHighValue(doubleHigh);
+    doubleData.setLowValue(doubleLow);
+    doubleData.setNumNulls(doubleNulls);
+    doubleData.setNumDVs(doubleDVs);
+    data.setDoubleStats(doubleData);
+    obj.setStatsData(data);
+    stats.addToStatsObj(obj);
+
+    store.updateTableColumnStatistics(stats);
+
+    stats = store.getTableColumnStatistics(dbname, tableName,
+        Arrays.asList(boolcol, longcol, doublecol));
+
+    // We'll check all of the individual values later.
+    Assert.assertEquals(3, stats.getStatsObjSize());
+
+    // check that we can fetch just some of the columns
+    stats = store.getTableColumnStatistics(dbname, tableName, Arrays.asList(boolcol));
+    Assert.assertEquals(1, stats.getStatsObjSize());
+
+    stats = new ColumnStatistics();
+    stats.setStatsDesc(desc);
+
+
+    obj = new ColumnStatisticsObj();
+    obj.setColName(stringcol);
+    obj.setColType("string");
+    data = new ColumnStatisticsData();
+    StringColumnStatsData strData = new StringColumnStatsData();
+    strData.setMaxColLen(strMaxLen);
+    strData.setAvgColLen(strAvgLen);
+    strData.setNumNulls(strNulls);
+    strData.setNumDVs(strDVs);
+    data.setStringStats(strData);
+    obj.setStatsData(data);
+    stats.addToStatsObj(obj);
+
+    obj = new ColumnStatisticsObj();
+    obj.setColName(binarycol);
+    obj.setColType("binary");
+    data = new ColumnStatisticsData();
+    BinaryColumnStatsData binData = new BinaryColumnStatsData();
+    binData.setMaxColLen(binMaxLen);
+    binData.setAvgColLen(binAvgLen);
+    binData.setNumNulls(binNulls);
+    data.setBinaryStats(binData);
+    obj.setStatsData(data);
+    stats.addToStatsObj(obj);
+
+    obj = new ColumnStatisticsObj();
+    obj.setColName(decimalcol);
+    obj.setColType("decimal(5,3)");
+    data = new ColumnStatisticsData();
+    DecimalColumnStatsData decData = new DecimalColumnStatsData();
+    LOG.debug("Setting decimal high value to " + decHigh.getScale() + " <" + new String(decHigh.getUnscaled()) + ">");
+    decData.setHighValue(decHigh);
+    decData.setLowValue(decLow);
+    decData.setNumNulls(decNulls);
+    decData.setNumDVs(decDVs);
+    data.setDecimalStats(decData);
+    obj.setStatsData(data);
+    stats.addToStatsObj(obj);
+
+    store.updateTableColumnStatistics(stats);
+
+    stats = store.getTableColumnStatistics(dbname, tableName,
+        Arrays.asList(boolcol, longcol, doublecol, stringcol, binarycol, decimalcol));
+    Assert.assertEquals(now, stats.getStatsDesc().getLastAnalyzed());
+    Assert.assertEquals(dbname, stats.getStatsDesc().getDbName());
+    Assert.assertEquals(tableName, stats.getStatsDesc().getTableName());
+    Assert.assertTrue(stats.getStatsDesc().isIsTblLevel());
+
+    Assert.assertEquals(6, stats.getStatsObjSize());
+
+    ColumnStatisticsData colData = stats.getStatsObj().get(0).getStatsData();
+    Assert.assertEquals(ColumnStatisticsData._Fields.BOOLEAN_STATS, colData.getSetField());
+    boolData = colData.getBooleanStats();
+    Assert.assertEquals(trues, boolData.getNumTrues());
+    Assert.assertEquals(falses, boolData.getNumFalses());
+    Assert.assertEquals(booleanNulls, boolData.getNumNulls());
+
+    colData = stats.getStatsObj().get(1).getStatsData();
+    Assert.assertEquals(ColumnStatisticsData._Fields.LONG_STATS, colData.getSetField());
+    longData = colData.getLongStats();
+    Assert.assertEquals(longHigh, longData.getHighValue());
+    Assert.assertEquals(longLow, longData.getLowValue());
+    Assert.assertEquals(longNulls, longData.getNumNulls());
+    Assert.assertEquals(longDVs, longData.getNumDVs());
+
+    colData = stats.getStatsObj().get(2).getStatsData();
+    Assert.assertEquals(ColumnStatisticsData._Fields.DOUBLE_STATS, colData.getSetField());
+    doubleData = colData.getDoubleStats();
+    Assert.assertEquals(doubleHigh, doubleData.getHighValue(), 0.01);
+    Assert.assertEquals(doubleLow, doubleData.getLowValue(), 0.01);
+    Assert.assertEquals(doubleNulls, doubleData.getNumNulls());
+    Assert.assertEquals(doubleDVs, doubleData.getNumDVs());
+
+    colData = stats.getStatsObj().get(3).getStatsData();
+    Assert.assertEquals(ColumnStatisticsData._Fields.STRING_STATS, colData.getSetField());
+    strData = colData.getStringStats();
+    Assert.assertEquals(strMaxLen, strData.getMaxColLen());
+    Assert.assertEquals(strAvgLen, strData.getAvgColLen(), 0.01);
+    Assert.assertEquals(strNulls, strData.getNumNulls());
+    Assert.assertEquals(strDVs, strData.getNumDVs());
+
+    colData = stats.getStatsObj().get(4).getStatsData();
+    Assert.assertEquals(ColumnStatisticsData._Fields.BINARY_STATS, colData.getSetField());
+    binData = colData.getBinaryStats();
+    Assert.assertEquals(binMaxLen, binData.getMaxColLen());
+    Assert.assertEquals(binAvgLen, binData.getAvgColLen(), 0.01);
+    Assert.assertEquals(binNulls, binData.getNumNulls());
+
+    colData = stats.getStatsObj().get(5).getStatsData();
+    Assert.assertEquals(ColumnStatisticsData._Fields.DECIMAL_STATS, colData.getSetField());
+    decData = colData.getDecimalStats();
+    Assert.assertEquals(decHigh, decData.getHighValue());
+    Assert.assertEquals(decLow, decData.getLowValue());
+    Assert.assertEquals(decNulls, decData.getNumNulls());
+    Assert.assertEquals(decDVs, decData.getNumDVs());
+
+  }
+
+  @Test
+  public void partitionStatistics() throws Exception {
+    long now = System.currentTimeMillis();
+    String dbname = "default";
+    String tableName = "statspart";
+    String[] partNames = {"ds=today", "ds=yesterday"};
+    String[] partVals = {"today", "yesterday"};
+    String boolcol = "boolcol";
+    String longcol = "longcol";
+    String doublecol = "doublecol";
+    String stringcol = "stringcol";
+    String binarycol = "bincol";
+    String decimalcol = "deccol";
+    long trues = 37;
+    long falses = 12;
+    long booleanNulls = 2;
+    long strMaxLen = 1234;
+    double strAvgLen = 32.3;
+    long strNulls = 987;
+    long strDVs = 906;
+
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(new FieldSchema(boolcol, "boolean", "nocomment"));
+    cols.add(new FieldSchema(longcol, "long", "nocomment"));
+    cols.add(new FieldSchema(doublecol, "double", "nocomment"));
+    cols.add(new FieldSchema(stringcol, "varchar(32)", "nocomment"));
+    cols.add(new FieldSchema(binarycol, "binary", "nocomment"));
+    cols.add(new FieldSchema(decimalcol, "decimal(5, 3)", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, emptyParameters);
+    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
+    partCols.add(new FieldSchema("ds", "string", ""));
+    Table table = new Table(tableName, dbname, "me", (int)(now / 1000), (int)(now / 1000), 0, sd, partCols,
+        emptyParameters, null, null, null);
+    store.createTable(table);
+
+    for (int i = 0; i < partNames.length; i++) {
+      ColumnStatistics stats = new ColumnStatistics();
+      ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
+      desc.setLastAnalyzed(now);
+      desc.setDbName(dbname);
+      desc.setTableName(tableName);
+      desc.setIsTblLevel(false);
+      desc.setPartName(partNames[i]);
+      stats.setStatsDesc(desc);
+
+      ColumnStatisticsObj obj = new ColumnStatisticsObj();
+      obj.setColName(boolcol);
+      obj.setColType("boolean");
+      ColumnStatisticsData data = new ColumnStatisticsData();
+      BooleanColumnStatsData boolData = new BooleanColumnStatsData();
+      boolData.setNumTrues(trues);
+      boolData.setNumFalses(falses);
+      boolData.setNumNulls(booleanNulls);
+      data.setBooleanStats(boolData);
+      obj.setStatsData(data);
+      stats.addToStatsObj(obj);
+
+      store.updatePartitionColumnStatistics(stats, Arrays.asList(partVals[i]));
+    }
+
+    List<ColumnStatistics> statsList = store.getPartitionColumnStatistics(dbname, tableName,
+        Arrays.asList(partNames), Arrays.asList(boolcol));
+
+    Assert.assertEquals(2, statsList.size());
+    for (int i = 0; i < partNames.length; i++) {
+      Assert.assertEquals(1, statsList.get(i).getStatsObjSize());
+    }
+
+    for (int i = 0; i < partNames.length; i++) {
+      ColumnStatistics stats = new ColumnStatistics();
+      ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
+      desc.setLastAnalyzed(now);
+      desc.setDbName(dbname);
+      desc.setTableName(tableName);
+      desc.setIsTblLevel(false);
+      desc.setPartName(partNames[i]);
+      stats.setStatsDesc(desc);
+
+      ColumnStatisticsObj obj = new ColumnStatisticsObj();
+      obj.setColName(stringcol);
+      obj.setColType("string");
+      ColumnStatisticsData data = new ColumnStatisticsData();
+      StringColumnStatsData strData = new StringColumnStatsData();
+      strData.setMaxColLen(strMaxLen);
+      strData.setAvgColLen(strAvgLen);
+      strData.setNumNulls(strNulls);
+      strData.setNumDVs(strDVs);
+      data.setStringStats(strData);
+      obj.setStatsData(data);
+      stats.addToStatsObj(obj);
+
+      store.updatePartitionColumnStatistics(stats, Arrays.asList(partVals[i]));
+    }
+
+    // Make sure when we ask for one we only get one
+    statsList = store.getPartitionColumnStatistics(dbname, tableName,
+        Arrays.asList(partNames), Arrays.asList(boolcol));
+
+    Assert.assertEquals(2, statsList.size());
+    for (int i = 0; i < partNames.length; i++) {
+      Assert.assertEquals(1, statsList.get(i).getStatsObjSize());
+    }
+
+    statsList = store.getPartitionColumnStatistics(dbname, tableName,
+        Arrays.asList(partNames), Arrays.asList(boolcol, stringcol));
+
+    Assert.assertEquals(2, statsList.size());
+    for (int i = 0; i < partNames.length; i++) {
+      Assert.assertEquals(2, statsList.get(i).getStatsObjSize());
+      // Just check one piece of the data, I don't need to check it all again
+      Assert.assertEquals(booleanNulls,
+          statsList.get(i).getStatsObj().get(0).getStatsData().getBooleanStats().getNumNulls());
+      Assert.assertEquals(strDVs,
+          statsList.get(i).getStatsObj().get(1).getStatsData().getStringStats().getNumDVs());
+    }
+  }
+}
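
Two details of the file above deserve a callout. The countStorageDescriptor
assertions in createManyPartitions and createDifferentPartition expose the
storage layout: partitions with identical layouts are stored against one
shared descriptor (see SharedStorageDescriptor in the Added list), which is
why five partitions leave the count at two and only a table with a
different input format pushes it to three (the TODO flags that these counts
are order-dependent). The setup pattern is also worth restating: a
Mockito-mocked HConnection hands back real tables from the mini-cluster, so
HBaseStore does live HBase I/O without real connection management. A
condensed sketch of that wiring, assuming the same package as the test
(HBaseStore and HBaseReadWrite are classes this commit adds):

    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.mockito.Mockito;

    public class HBaseStoreWiring {
      // Route a table lookup on the mocked connection to a real table, then
      // hand the connection to HBaseReadWrite before building the store.
      public static HBaseStore wire(HiveConf conf, String tableName,
                                    HTableInterface realTable) throws Exception {
        HConnection conn = Mockito.mock(HConnection.class);
        Mockito.when(conn.getTable(tableName)).thenReturn(realTable);
        conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); // test HBase, not the cache
        HBaseReadWrite.getInstance(conf).setConnection(conn);
        HBaseStore store = new HBaseStore();
        store.setConf(conf);
        return store;
      }
    }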

Modified: hive/branches/hbase-metastore/metastore/if/hive_metastore.thrift
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/if/hive_metastore.thrift?rev=1657394&r1=1657393&r2=1657394&view=diff
==============================================================================
--- hive/branches/hbase-metastore/metastore/if/hive_metastore.thrift (original)
+++ hive/branches/hbase-metastore/metastore/if/hive_metastore.thrift Wed Feb  4 20:00:49 2015
@@ -1134,6 +1134,8 @@ service ThriftHiveMetastore extends fb30
   // Notification logging calls
   NotificationEventResponse get_next_notification(1:NotificationEventRequest rqst) 
   CurrentNotificationEventId get_current_notificationEventId()
+
+  void flushCache()
 }
 
 // * Note about the DDL_TIME: When creating or altering a table or a partition,
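
From Java, the regenerated client (ThriftHiveMetastore.java in the Modified
list) exposes the new call directly. A minimal hedged sketch of invoking it
over a plain binary-protocol connection; a secured metastore (SASL, framed
transport) needs the usual extra transport wrapping:

    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;

    public class FlushCacheClientDemo {
      public static void main(String[] args) throws Exception {
        TSocket transport = new TSocket("localhost", 9083); // conventional metastore port
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        client.flushCache(); // new in this patch: no arguments, no result
        transport.close();
      }
    }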

Modified: hive/branches/hbase-metastore/metastore/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/pom.xml?rev=1657394&r1=1657393&r2=1657394&view=diff
==============================================================================
--- hive/branches/hbase-metastore/metastore/pom.xml (original)
+++ hive/branches/hbase-metastore/metastore/pom.xml Wed Feb  4 20:00:49 2015
@@ -163,6 +163,11 @@
           <version>${hadoop-23.version}</version>
           <optional>true</optional>
         </dependency>
+        <dependency>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-client</artifactId>
+          <version>${hbase.hadoop2.version}</version>
+        </dependency>
       </dependencies>
     </profile>
     <profile>

Modified: hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp?rev=1657394&r1=1657393&r2=1657394&view=diff
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp (original)
+++ hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp Wed Feb  4 20:00:49 2015
@@ -27397,6 +27397,116 @@ uint32_t ThriftHiveMetastore_get_current
   return xfer;
 }
 
+uint32_t ThriftHiveMetastore_flushCache_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    xfer += iprot->skip(ftype);
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_flushCache_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_flushCache_args");
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_flushCache_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_flushCache_pargs");
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_flushCache_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    xfer += iprot->skip(ftype);
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_flushCache_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_flushCache_result");
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_flushCache_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    xfer += iprot->skip(ftype);
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
 void ThriftHiveMetastoreClient::getMetaConf(std::string& _return, const std::string& key)
 {
   send_getMetaConf(key);
@@ -35023,6 +35133,58 @@ void ThriftHiveMetastoreClient::recv_get
   throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_current_notificationEventId failed: unknown result");
 }
 
+void ThriftHiveMetastoreClient::flushCache()
+{
+  send_flushCache();
+  recv_flushCache();
+}
+
+void ThriftHiveMetastoreClient::send_flushCache()
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  ThriftHiveMetastore_flushCache_pargs args;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void ThriftHiveMetastoreClient::recv_flushCache()
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("flushCache") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  ThriftHiveMetastore_flushCache_presult result;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  return;
+}
+
 bool ThriftHiveMetastoreProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext) {
   ProcessMap::iterator pfn;
   pfn = processMap_.find(fname);
@@ -42141,6 +42303,59 @@ void ThriftHiveMetastoreProcessor::proce
   }
 }
 
+void ThriftHiveMetastoreProcessor::process_flushCache(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+  void* ctx = NULL;
+  if (this->eventHandler_.get() != NULL) {
+    ctx = this->eventHandler_->getContext("ThriftHiveMetastore.flushCache", callContext);
+  }
+  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.flushCache");
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.flushCache");
+  }
+
+  ThriftHiveMetastore_flushCache_args args;
+  args.read(iprot);
+  iprot->readMessageEnd();
+  uint32_t bytes = iprot->getTransport()->readEnd();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.flushCache", bytes);
+  }
+
+  ThriftHiveMetastore_flushCache_result result;
+  try {
+    iface_->flushCache();
+  } catch (const std::exception& e) {
+    if (this->eventHandler_.get() != NULL) {
+      this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.flushCache");
+    }
+
+    ::apache::thrift::TApplicationException x(e.what());
+    oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+    x.write(oprot);
+    oprot->writeMessageEnd();
+    oprot->getTransport()->writeEnd();
+    oprot->getTransport()->flush();
+    return;
+  }
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.flushCache");
+  }
+
+  oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_REPLY, seqid);
+  result.write(oprot);
+  oprot->writeMessageEnd();
+  bytes = oprot->getTransport()->writeEnd();
+  oprot->getTransport()->flush();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.flushCache", bytes);
+  }
+}
+
 ::boost::shared_ptr< ::apache::thrift::TProcessor > ThriftHiveMetastoreProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) {
   ::apache::thrift::ReleaseHandler< ThriftHiveMetastoreIfFactory > cleanup(handlerFactory_);
   ::boost::shared_ptr< ThriftHiveMetastoreIf > handler(handlerFactory_->getHandler(connInfo), cleanup);
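
A reading aid for the generated C++ above: flushCache takes no arguments
and returns nothing, so all four generated structs (args, pargs, result,
presult) reduce to a struct header plus a field stop, and
process_flushCache boils down to iface_->flushCache() wrapped in the usual
event-handler and exception plumbing. The regenerated Java classes make the
wire format easy to inspect; a small sketch, assuming the generated
flushCache_args inner class and libthrift's TSerializer:

    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TBinaryProtocol;

    public class FlushCacheWireDemo {
      public static void main(String[] args) throws Exception {
        TSerializer ser = new TSerializer(new TBinaryProtocol.Factory());
        byte[] wire = ser.serialize(new ThriftHiveMetastore.flushCache_args());
        // Expect one byte: the field-stop marker is the entire payload.
        System.out.println("flushCache_args serializes to " + wire.length + " byte(s)");
      }
    }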

Modified: hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h?rev=1657394&r1=1657393&r2=1657394&view=diff
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h (original)
+++ hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h Wed Feb  4 20:00:49 2015
@@ -135,6 +135,7 @@ class ThriftHiveMetastoreIf : virtual pu
   virtual void show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) = 0;
   virtual void get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) = 0;
   virtual void get_current_notificationEventId(CurrentNotificationEventId& _return) = 0;
+  virtual void flushCache() = 0;
 };
 
 class ThriftHiveMetastoreIfFactory : virtual public  ::facebook::fb303::FacebookServiceIfFactory {
@@ -544,6 +545,9 @@ class ThriftHiveMetastoreNull : virtual
   void get_current_notificationEventId(CurrentNotificationEventId& /* _return */) {
     return;
   }
+  void flushCache() {
+    return;
+  }
 };
 
 typedef struct _ThriftHiveMetastore_getMetaConf_args__isset {
@@ -16777,6 +16781,80 @@ class ThriftHiveMetastore_get_current_no
 
 };
 
+
+class ThriftHiveMetastore_flushCache_args {
+ public:
+
+  ThriftHiveMetastore_flushCache_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_flushCache_args() throw() {}
+
+
+  bool operator == (const ThriftHiveMetastore_flushCache_args & /* rhs */) const
+  {
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_flushCache_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_flushCache_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_flushCache_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_flushCache_pargs() throw() {}
+
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_flushCache_result {
+ public:
+
+  ThriftHiveMetastore_flushCache_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_flushCache_result() throw() {}
+
+
+  bool operator == (const ThriftHiveMetastore_flushCache_result & /* rhs */) const
+  {
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_flushCache_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_flushCache_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_flushCache_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_flushCache_presult() throw() {}
+
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
 class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public  ::facebook::fb303::FacebookServiceClient {
  public:
   ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) :
@@ -17146,6 +17224,9 @@ class ThriftHiveMetastoreClient : virtua
   void get_current_notificationEventId(CurrentNotificationEventId& _return);
   void send_get_current_notificationEventId();
   void recv_get_current_notificationEventId(CurrentNotificationEventId& _return);
+  void flushCache();
+  void send_flushCache();
+  void recv_flushCache();
 };
 
 class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceProcessor {
@@ -17275,6 +17356,7 @@ class ThriftHiveMetastoreProcessor : pub
   void process_show_compact(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_next_notification(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_current_notificationEventId(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_flushCache(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
  public:
   ThriftHiveMetastoreProcessor(boost::shared_ptr<ThriftHiveMetastoreIf> iface) :
      ::facebook::fb303::FacebookServiceProcessor(iface),
@@ -17398,6 +17480,7 @@ class ThriftHiveMetastoreProcessor : pub
     processMap_["show_compact"] = &ThriftHiveMetastoreProcessor::process_show_compact;
     processMap_["get_next_notification"] = &ThriftHiveMetastoreProcessor::process_get_next_notification;
     processMap_["get_current_notificationEventId"] = &ThriftHiveMetastoreProcessor::process_get_current_notificationEventId;
+    processMap_["flushCache"] = &ThriftHiveMetastoreProcessor::process_flushCache;
   }
 
   virtual ~ThriftHiveMetastoreProcessor() {}
@@ -18572,6 +18655,15 @@ class ThriftHiveMetastoreMultiface : vir
     return;
   }
 
+  void flushCache() {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->flushCache();
+    }
+    ifaces_[i]->flushCache();
+  }
+
 };
 
 }}} // namespace

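The header changes above add the usual four generated companion classes (flushCache_args, flushCache_pargs, flushCache_result, flushCache_presult; all field-less, since the method takes no arguments and returns void) plus the multiface fan-out, whose loop stops one short and issues the final call separately only so that non-void methods can propagate the last handler's return value. A rough Java analogue of that fan-out, assuming the handlers implement the ThriftHiveMetastore.Iface added by this patch:

    // Invoke flushCache() on each backing handler, in registration order.
    void flushCacheAll(java.util.List<ThriftHiveMetastore.Iface> handlers)
        throws org.apache.thrift.TException {
      for (ThriftHiveMetastore.Iface handler : handlers) {
        handler.flushCache();
      }
    }
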
Modified: hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp?rev=1657394&r1=1657393&r2=1657394&view=diff
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (original)
+++ hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp Wed Feb  4 20:00:49 2015
@@ -617,6 +617,11 @@ class ThriftHiveMetastoreHandler : virtu
     printf("get_current_notificationEventId\n");
   }
 
+  void flushCache() {
+    // Your implementation goes here
+    printf("flushCache\n");
+  }
+
 };
 
 int main(int argc, char **argv) {

Modified: hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java?rev=1657394&r1=1657393&r2=1657394&view=diff
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java (original)
+++ hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java Wed Feb  4 20:00:49 2015
@@ -184,7 +184,7 @@ public class SkewedInfo implements org.a
 
         __this__skewedColValueLocationMaps.put(__this__skewedColValueLocationMaps_copy_key, __this__skewedColValueLocationMaps_copy_value);
       }
-        this.skewedColValueLocationMaps = __this__skewedColValueLocationMaps;
+      this.skewedColValueLocationMaps = __this__skewedColValueLocationMaps;
     }
   }
 

Modified: hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java?rev=1657394&r1=1657393&r2=1657394&view=diff
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (original)
+++ hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java Wed Feb  4 20:00:49 2015
@@ -276,6 +276,8 @@ public class ThriftHiveMetastore {
 
     public CurrentNotificationEventId get_current_notificationEventId() throws org.apache.thrift.TException;
 
+    public void flushCache() throws org.apache.thrift.TException;
+
   }
 
   public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface {
@@ -518,6 +520,8 @@ public class ThriftHiveMetastore {
 
     public void get_current_notificationEventId(org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_current_notificationEventId_call> resultHandler) throws org.apache.thrift.TException;
 
+    public void flushCache(org.apache.thrift.async.AsyncMethodCallback<AsyncClient.flushCache_call> resultHandler) throws org.apache.thrift.TException;
+
   }
 
   public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface {
@@ -4053,6 +4057,25 @@ public class ThriftHiveMetastore {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_current_notificationEventId failed: unknown result");
     }
 
+    public void flushCache() throws org.apache.thrift.TException
+    {
+      send_flushCache();
+      recv_flushCache();
+    }
+
+    public void send_flushCache() throws org.apache.thrift.TException
+    {
+      flushCache_args args = new flushCache_args();
+      sendBase("flushCache", args);
+    }
+
+    public void recv_flushCache() throws org.apache.thrift.TException
+    {
+      flushCache_result result = new flushCache_result();
+      receiveBase(result, "flushCache");
+      return;
+    }
+
   }
   public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface {
     public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
@@ -8317,6 +8340,35 @@ public class ThriftHiveMetastore {
       }
     }
 
+    public void flushCache(org.apache.thrift.async.AsyncMethodCallback<flushCache_call> resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      flushCache_call method_call = new flushCache_call(resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class flushCache_call extends org.apache.thrift.async.TAsyncMethodCall {
+      public flushCache_call(org.apache.thrift.async.AsyncMethodCallback<flushCache_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("flushCache", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        flushCache_args args = new flushCache_args();
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_flushCache();
+      }
+    }
+
   }
 
   public static class Processor<I extends Iface> extends com.facebook.fb303.FacebookService.Processor<I> implements org.apache.thrift.TProcessor {
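
The asynchronous variant frames the same empty args struct over a non-blocking transport and hands the reply to a callback. A hedged sketch of driving it, assuming libthrift's TAsyncClientManager and TNonblockingSocket and the generated AsyncClient.Factory(manager, protocolFactory) constructor; host and port are again placeholders:

    void flushMetastoreCacheAsync() throws java.io.IOException, org.apache.thrift.TException {
      org.apache.thrift.async.TAsyncClientManager manager =
          new org.apache.thrift.async.TAsyncClientManager();
      ThriftHiveMetastore.AsyncClient client =
          new ThriftHiveMetastore.AsyncClient.Factory(manager,
              new org.apache.thrift.protocol.TBinaryProtocol.Factory()).getAsyncClient(
              new org.apache.thrift.transport.TNonblockingSocket("metastore-host", 9083));
      client.flushCache(
          new org.apache.thrift.async.AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.flushCache_call>() {
            public void onComplete(ThriftHiveMetastore.AsyncClient.flushCache_call call) {
              try {
                call.getResult();  // rethrows anything the server sent back
              } catch (org.apache.thrift.TException e) {
                // handle the remote failure
              }
            }
            public void onError(Exception e) {
              // transport or framing failure
            }
          });
    }
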
@@ -8449,6 +8501,7 @@ public class ThriftHiveMetastore {
       processMap.put("show_compact", new show_compact());
       processMap.put("get_next_notification", new get_next_notification());
       processMap.put("get_current_notificationEventId", new get_current_notificationEventId());
+      processMap.put("flushCache", new flushCache());
       return processMap;
     }
 
@@ -11547,6 +11600,26 @@ public class ThriftHiveMetastore {
       }
     }
 
+    public static class flushCache<I extends Iface> extends org.apache.thrift.ProcessFunction<I, flushCache_args> {
+      public flushCache() {
+        super("flushCache");
+      }
+
+      public flushCache_args getEmptyArgsInstance() {
+        return new flushCache_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public flushCache_result getResult(I iface, flushCache_args args) throws org.apache.thrift.TException {
+        flushCache_result result = new flushCache_result();
+        iface.flushCache();
+        return result;
+      }
+    }
+
   }
 
   public static class getMetaConf_args implements org.apache.thrift.TBase<getMetaConf_args, getMetaConf_args._Fields>, java.io.Serializable, Cloneable   {
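
Server-side, the flushCache ProcessFunction above is registered in the processor map under its literal method name, and isOneway() returning false means a reply frame is always written even though the result carries no fields. A rough sketch of wiring a handler into that processor, assuming libthrift's TServerSocket and TThreadPoolServer; the handler is whatever ThriftHiveMetastore.Iface implementation the metastore supplies:

    org.apache.thrift.server.TServer buildServer(ThriftHiveMetastore.Iface handler, int port)
        throws org.apache.thrift.transport.TTransportException {
      org.apache.thrift.transport.TServerSocket serverTransport =
          new org.apache.thrift.transport.TServerSocket(port);
      ThriftHiveMetastore.Processor<ThriftHiveMetastore.Iface> processor =
          new ThriftHiveMetastore.Processor<ThriftHiveMetastore.Iface>(handler);
      // serve() will route incoming "flushCache" messages through the processMap entry above
      return new org.apache.thrift.server.TThreadPoolServer(
          new org.apache.thrift.server.TThreadPoolServer.Args(serverTransport).processor(processor));
    }
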
@@ -137530,6 +137603,498 @@ public class ThriftHiveMetastore {
       }
     }
 
+  }
+
+  public static class flushCache_args implements org.apache.thrift.TBase<flushCache_args, flushCache_args._Fields>, java.io.Serializable, Cloneable   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_args");
+
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new flushCache_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new flushCache_argsTupleSchemeFactory());
+    }
+
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_args.class, metaDataMap);
+    }
+
+    public flushCache_args() {
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public flushCache_args(flushCache_args other) {
+    }
+
+    public flushCache_args deepCopy() {
+      return new flushCache_args(this);
+    }
+
+    @Override
+    public void clear() {
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof flushCache_args)
+        return this.equals((flushCache_args)that);
+      return false;
+    }
+
+    public boolean equals(flushCache_args that) {
+      if (that == null)
+        return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      HashCodeBuilder builder = new HashCodeBuilder();
+
+      return builder.toHashCode();
+    }
+
+    public int compareTo(flushCache_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+      flushCache_args typedOther = (flushCache_args)other;
+
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("flushCache_args(");
+      boolean first = true;
+
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class flushCache_argsStandardSchemeFactory implements SchemeFactory {
+      public flushCache_argsStandardScheme getScheme() {
+        return new flushCache_argsStandardScheme();
+      }
+    }
+
+    private static class flushCache_argsStandardScheme extends StandardScheme<flushCache_args> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_args struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_args struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class flushCache_argsTupleSchemeFactory implements SchemeFactory {
+      public flushCache_argsTupleScheme getScheme() {
+        return new flushCache_argsTupleScheme();
+      }
+    }
+
+    private static class flushCache_argsTupleScheme extends TupleScheme<flushCache_args> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+      }
+    }
+
+  }
+
+  public static class flushCache_result implements org.apache.thrift.TBase<flushCache_result, flushCache_result._Fields>, java.io.Serializable, Cloneable   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_result");
+
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new flushCache_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new flushCache_resultTupleSchemeFactory());
+    }
+
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_result.class, metaDataMap);
+    }
+
+    public flushCache_result() {
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public flushCache_result(flushCache_result other) {
+    }
+
+    public flushCache_result deepCopy() {
+      return new flushCache_result(this);
+    }
+
+    @Override
+    public void clear() {
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof flushCache_result)
+        return this.equals((flushCache_result)that);
+      return false;
+    }
+
+    public boolean equals(flushCache_result that) {
+      if (that == null)
+        return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      HashCodeBuilder builder = new HashCodeBuilder();
+
+      return builder.toHashCode();
+    }
+
+    public int compareTo(flushCache_result other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+      flushCache_result typedOther = (flushCache_result)other;
+
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("flushCache_result(");
+      boolean first = true;
+
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class flushCache_resultStandardSchemeFactory implements SchemeFactory {
+      public flushCache_resultStandardScheme getScheme() {
+        return new flushCache_resultStandardScheme();
+      }
+    }
+
+    private static class flushCache_resultStandardScheme extends StandardScheme<flushCache_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class flushCache_resultTupleSchemeFactory implements SchemeFactory {
+      public flushCache_resultTupleScheme getScheme() {
+        return new flushCache_resultTupleScheme();
+      }
+    }
+
+    private static class flushCache_resultTupleScheme extends TupleScheme<flushCache_result> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+      }
+    }
+
   }
 
 }

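Both flushCache_args and flushCache_result above are field-less TBase structs: their _Fields enums are empty, read() simply skips to the field-stop marker, and write() emits only the struct framing. A small round-trip sketch over an in-memory transport, assuming libthrift's TMemoryBuffer:

    void roundTripEmptyArgs() throws org.apache.thrift.TException {
      org.apache.thrift.transport.TMemoryBuffer buf =
          new org.apache.thrift.transport.TMemoryBuffer(32);
      new ThriftHiveMetastore.flushCache_args()
          .write(new org.apache.thrift.protocol.TCompactProtocol(buf));
      ThriftHiveMetastore.flushCache_args copy = new ThriftHiveMetastore.flushCache_args();
      copy.read(new org.apache.thrift.protocol.TCompactProtocol(buf));
      // With no fields, the wire form is just the struct framing plus a stop
      // byte, and any two instances compare equal.
      assert copy.equals(new ThriftHiveMetastore.flushCache_args());
    }
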
Modified: hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php?rev=1657394&r1=1657393&r2=1657394&view=diff
==============================================================================
--- hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php (original)
+++ hive/branches/hbase-metastore/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php Wed Feb  4 20:00:49 2015
@@ -135,6 +135,7 @@ interface ThriftHiveMetastoreIf extends
   public function show_compact(\metastore\ShowCompactRequest $rqst);
   public function get_next_notification(\metastore\NotificationEventRequest $rqst);
   public function get_current_notificationEventId();
+  public function flushCache();
 }
 
 class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf {
@@ -6987,6 +6988,53 @@ class ThriftHiveMetastoreClient extends
     throw new \Exception("get_current_notificationEventId failed: unknown result");
   }
 
+  public function flushCache()
+  {
+    $this->send_flushCache();
+    $this->recv_flushCache();
+  }
+
+  public function send_flushCache()
+  {
+    $args = new \metastore\ThriftHiveMetastore_flushCache_args();
+    $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'flushCache', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('flushCache', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_flushCache()
+  {
+    $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_flushCache_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_flushCache_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    return;
+  }
+
 }
 
 // HELPER FUNCTIONS AND STRUCTURES
@@ -33611,6 +33659,106 @@ class ThriftHiveMetastore_get_current_no
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_flushCache_args {
+  static $_TSPEC;
+
+
+  public function __construct() {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        );
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_flushCache_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_flushCache_args');
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_flushCache_result {
+  static $_TSPEC;
+
+
+  public function __construct() {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        );
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_flushCache_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_flushCache_result');
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
   }
 
 }
