hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ser...@apache.org
Subject [21/31] hive git commit: HIVE-17488 Move first set of classes to standalone metastore. This closes #244. (Alan Gates, reviewed by Owen O'Malley)
Date Fri, 15 Sep 2017 19:52:22 GMT
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
deleted file mode 100644
index 6123a1e..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;
-import org.apache.hadoop.util.ReflectionUtils;
-
-/**
- * MetaStoreInit defines functions to init/update MetaStore connection url.
- *
- */
-public class MetaStoreInit {
-
-  private static final Logger LOG = LoggerFactory.getLogger(MetaStoreInit.class);
-
-  static class MetaStoreInitData {
-    JDOConnectionURLHook urlHook = null;
-    String urlHookClassName = "";
-  }
-
-  /**
-   * Updates the connection URL in hiveConf using the hook (if a hook has been
-   * set using hive.metastore.ds.connection.url.hook property)
-   * @param originalConf - original configuration used to look up hook settings
-   * @param activeConf - the configuration file in use for looking up db url
-   * @param badUrl
-   * @param updateData - hook information
-   * @return true if a new connection URL was loaded into the thread local
-   *         configuration
-   * @throws MetaException
-   */
-  static boolean updateConnectionURL(HiveConf originalConf, Configuration activeConf,
-    String badUrl, MetaStoreInitData updateData)
-      throws MetaException {
-    String connectUrl = null;
-    String currentUrl = MetaStoreInit.getConnectionURL(activeConf);
-    try {
-      // We always call init because the hook name in the configuration could
-      // have changed.
-      MetaStoreInit.initConnectionUrlHook(originalConf, updateData);
-      if (updateData.urlHook != null) {
-        if (badUrl != null) {
-          updateData.urlHook.notifyBadConnectionUrl(badUrl);
-        }
-        connectUrl = updateData.urlHook.getJdoConnectionUrl(originalConf);
-      }
-    } catch (Exception e) {
-      LOG.error("Exception while getting connection URL from the hook: " +
-          e);
-    }
-
-    if (connectUrl != null && !connectUrl.equals(currentUrl)) {
-      LOG.error(
-          String.format("Overriding %s with %s",
-              HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
-              connectUrl));
-      activeConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
-          connectUrl);
-      return true;
-    }
-    return false;
-  }
-
-  static String getConnectionURL(Configuration conf) {
-    return conf.get(
-        HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(), "");
-  }
-
-  // Multiple threads could try to initialize at the same time.
-  synchronized private static void initConnectionUrlHook(HiveConf hiveConf,
-    MetaStoreInitData updateData) throws ClassNotFoundException {
-
-    String className =
-        hiveConf.get(HiveConf.ConfVars.METASTORECONNECTURLHOOK.toString(), "").trim();
-    if (className.equals("")) {
-      updateData.urlHookClassName = "";
-      updateData.urlHook = null;
-      return;
-    }
-    boolean urlHookChanged = !updateData.urlHookClassName.equals(className);
-    if (updateData.urlHook == null || urlHookChanged) {
-      updateData.urlHookClassName = className.trim();
-
-      Class<?> urlHookClass = Class.forName(updateData.urlHookClassName, true,
-          JavaUtils.getClassLoader());
-      updateData.urlHook = (JDOConnectionURLHook) ReflectionUtils.newInstance(urlHookClass, null);
-    }
-    return;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
deleted file mode 100644
index 8117ca1..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper;
-import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo;
-import org.apache.hive.common.util.HiveVersionInfo;
-
-import com.google.common.collect.ImmutableMap;
-
-
-public class MetaStoreSchemaInfo implements IMetaStoreSchemaInfo {
-  protected static final String UPGRADE_FILE_PREFIX = "upgrade-";
-  private static final String INIT_FILE_PREFIX = "hive-schema-";
-  private static final String VERSION_UPGRADE_LIST = "upgrade.order";
-  private static final String PRE_UPGRADE_PREFIX = "pre-";
-  protected final String dbType;
-  private String[] hiveSchemaVersions;
-  private final String hiveHome;
-
-  // Some version upgrades often don't change schema. So they are equivalent to
-  // a version
-  // that has a corresponding schema. eg "0.13.1" is equivalent to "0.13.0"
-  private static final Map<String, String> EQUIVALENT_VERSIONS =
-      ImmutableMap.of("0.13.1", "0.13.0",
-          "1.0.0", "0.14.0",
-          "1.0.1", "1.0.0",
-          "1.1.1", "1.1.0",
-          "1.2.1", "1.2.0"
-      );
-
-  public MetaStoreSchemaInfo(String hiveHome, String dbType) throws HiveMetaException {
-    this.hiveHome = hiveHome;
-    this.dbType = dbType;
-  }
-
-  private void loadAllUpgradeScripts(String dbType) throws HiveMetaException {
-    // load upgrade order for the given dbType
-    List<String> upgradeOrderList = new ArrayList<String>();
-    String upgradeListFile = getMetaStoreScriptDir() + File.separator +
-        VERSION_UPGRADE_LIST + "." + dbType;
-    try (FileReader fr = new FileReader(upgradeListFile);
-        BufferedReader bfReader = new BufferedReader(fr)) {
-      String currSchemaVersion;
-      while ((currSchemaVersion = bfReader.readLine()) != null) {
-        upgradeOrderList.add(currSchemaVersion.trim());
-      }
-    } catch (FileNotFoundException e) {
-      throw new HiveMetaException("File " + upgradeListFile + "not found ", e);
-    } catch (IOException e) {
-      throw new HiveMetaException("Error reading " + upgradeListFile, e);
-    }
-    hiveSchemaVersions = upgradeOrderList.toArray(new String[0]);
-  }
-
-  /***
-   * Get the list of sql scripts required to upgrade from the give version to current
-   * @param fromVersion
-   * @return
-   * @throws HiveMetaException
-   */
-  @Override
-  public List<String> getUpgradeScripts(String fromVersion)
-      throws HiveMetaException {
-    List <String> upgradeScriptList = new ArrayList<String>();
-
-    // check if we are already at current schema level
-    if (getHiveSchemaVersion().equals(fromVersion)) {
-      return upgradeScriptList;
-    }
-    loadAllUpgradeScripts(dbType);
-    // Find the list of scripts to execute for this upgrade
-    int firstScript = hiveSchemaVersions.length;
-    for (int i=0; i < hiveSchemaVersions.length; i++) {
-      if (hiveSchemaVersions[i].startsWith(fromVersion)) {
-        firstScript = i;
-      }
-    }
-    if (firstScript == hiveSchemaVersions.length) {
-      throw new HiveMetaException("Unknown version specified for upgrade " +
-              fromVersion + " Metastore schema may be too old or newer");
-    }
-
-    for (int i=firstScript; i < hiveSchemaVersions.length; i++) {
-      String scriptFile = generateUpgradeFileName(hiveSchemaVersions[i]);
-      upgradeScriptList.add(scriptFile);
-    }
-    return upgradeScriptList;
-  }
-
-  /***
-   * Get the name of the script to initialize the schema for given version
-   * @param toVersion Target version. If it's null, then the current server version is used
-   * @return
-   * @throws HiveMetaException
-   */
-  @Override
-  public String generateInitFileName(String toVersion) throws HiveMetaException {
-    if (toVersion == null) {
-      toVersion = getHiveSchemaVersion();
-    }
-    String initScriptName = INIT_FILE_PREFIX + toVersion + "." +
-        dbType + SQL_FILE_EXTENSION;
-    // check if the file exists
-    if (!(new File(getMetaStoreScriptDir() + File.separatorChar +
-          initScriptName).exists())) {
-      throw new HiveMetaException("Unknown version specified for initialization: " + toVersion);
-    }
-    return initScriptName;
-  }
-
-  /**
-   * Find the directory of metastore scripts
-   * @return
-   */
-  @Override
-  public String getMetaStoreScriptDir() {
-    return  hiveHome + File.separatorChar +
-     "scripts" + File.separatorChar + "metastore" +
-    File.separatorChar + "upgrade" + File.separatorChar + dbType;
-  }
-
-  // format the upgrade script name eg upgrade-x-y-dbType.sql
-  private String generateUpgradeFileName(String fileVersion) {
-    return UPGRADE_FILE_PREFIX +  fileVersion + "." + dbType + SQL_FILE_EXTENSION;
-  }
-
-  @Override
-  public String getPreUpgradeScriptName(int index, String upgradeScriptName) {
-    return PRE_UPGRADE_PREFIX + index + "-" + upgradeScriptName;
-  }
-
-  @Override
-  public String getHiveSchemaVersion() {
-    String hiveVersion = HiveVersionInfo.getShortVersion();
-    return getEquivalentVersion(hiveVersion);
-  }
-
-  private static String getEquivalentVersion(String hiveVersion) {
-    // if there is an equivalent version, return that, else return this version
-    String equivalentVersion = EQUIVALENT_VERSIONS.get(hiveVersion);
-    if (equivalentVersion != null) {
-      return equivalentVersion;
-    } else {
-      return hiveVersion;
-    }
-  }
-
-  @Override
-  public boolean isVersionCompatible(String hiveVersion, String dbVersion) {
-    hiveVersion = getEquivalentVersion(hiveVersion);
-    dbVersion = getEquivalentVersion(dbVersion);
-    if (hiveVersion.equals(dbVersion)) {
-      return true;
-    }
-    String[] hiveVerParts = hiveVersion.split("\\.");
-    String[] dbVerParts = dbVersion.split("\\.");
-    if (hiveVerParts.length != 3 || dbVerParts.length != 3) {
-      // these are non standard version numbers. can't perform the
-      // comparison on these, so assume that they are incompatible
-      return false;
-    }
-
-    for (int i = 0; i < dbVerParts.length; i++) {
-      int dbVerPart = Integer.parseInt(dbVerParts[i]);
-      int hiveVerPart = Integer.parseInt(hiveVerParts[i]);
-      if (dbVerPart > hiveVerPart) {
-        return true;
-      } else if (dbVerPart < hiveVerPart) {
-        return false;
-      } else {
-        continue; // compare next part
-      }
-    }
-
-    return true;
-  }
-
-  @Override
-  public String getMetaStoreSchemaVersion(MetaStoreConnectionInfo connectionInfo)
-      throws HiveMetaException {
-    String versionQuery;
-    boolean needsQuotedIdentifier =
-        HiveSchemaHelper.getDbCommandParser(connectionInfo.getDbType()).needsQuotedIdentifier();
-    if (needsQuotedIdentifier) {
-      versionQuery = "select t.\"SCHEMA_VERSION\" from \"VERSION\" t";
-    } else {
-      versionQuery = "select t.SCHEMA_VERSION from VERSION t";
-    }
-    try (Connection metastoreDbConnection =
-        HiveSchemaHelper.getConnectionToMetastore(connectionInfo); Statement stmt =
-        metastoreDbConnection.createStatement()) {
-      ResultSet res = stmt.executeQuery(versionQuery);
-      if (!res.next()) {
-        throw new HiveMetaException("Could not find version info in metastore VERSION table.");
-      }
-      String currentSchemaVersion = res.getString(1);
-      if (res.next()) {
-        throw new HiveMetaException("Multiple versions were found in metastore.");
-      }
-      return currentSchemaVersion;
-    } catch (SQLException e) {
-      throw new HiveMetaException("Failed to get schema version, Cause:" + e.getMessage());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfoFactory.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfoFactory.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfoFactory.java
deleted file mode 100644
index 1133cf2..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfoFactory.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Factory class implementation to create instances of IMetaStoreSchemaInfo
- * based on the provided configuration
- */
-public class MetaStoreSchemaInfoFactory {
-  public static final Logger LOG = LoggerFactory.getLogger(MetaStoreSchemaInfoFactory.class);
-
-  public static IMetaStoreSchemaInfo get(Configuration conf) {
-    String hiveHome = System.getenv("HIVE_HOME");
-    if (hiveHome == null) {
-      LOG.debug("HIVE_HOME is not set. Using current directory instead");
-      hiveHome = ".";
-    }
-    return get(conf, hiveHome, null);
-  }
-
-  public static IMetaStoreSchemaInfo get(Configuration conf, String hiveHome, String dbType) {
-    String className = conf.get(HiveConf.ConfVars.METASTORE_SCHEMA_INFO_CLASS.varname,
-        HiveConf.ConfVars.METASTORE_SCHEMA_INFO_CLASS.defaultStrVal);
-    Class<?> clasz = null;
-    try {
-      clasz = conf.getClassByName(className);
-    } catch (ClassNotFoundException e) {
-      LOG.error("Unable to load class " + className, e);
-      throw new IllegalArgumentException(e);
-    }
-    Constructor<?> constructor = null;
-    try {
-      constructor = clasz.getConstructor(String.class, String.class);
-      constructor.setAccessible(true);
-      return (IMetaStoreSchemaInfo) constructor.newInstance(hiveHome, dbType);
-    } catch (NoSuchMethodException | InstantiationException | IllegalAccessException
-        | IllegalArgumentException | InvocationTargetException e) {
-      LOG.error("Unable to create instance of class " + className, e);
-      throw new IllegalArgumentException(e);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
deleted file mode 100644
index b62c45f..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * A thread that runs in the metastore, separate from the threads in the thrift service.
- */
-public interface MetaStoreThread {
-
-  /**
-   * Set the Hive configuration for this thread.
-   * @param conf
-   */
-  void setHiveConf(HiveConf conf);
-
-  /**
-   * Set the id for this thread.
-   * @param threadId
-   */
-  void setThreadId(int threadId);
-
-  /**
-   * Initialize the thread.  This must not be called until after
-   * {@link #setHiveConf(org.apache.hadoop.hive.conf.HiveConf)} and  {@link #setThreadId(int)}
-   * have been called.
-   * @param stop a flag to watch for when to stop.  If this value is set to true,
-   *             the thread will terminate the next time through its main loop.
-   * @param looped a flag that is set to true everytime a thread goes through it's main loop.
-   *               This is purely for testing so that tests can assure themselves that the thread
-   *               has run through it's loop once.  The test can set this value to false.  The
-   *               thread should then assure that the loop has been gone completely through at
-   *               least once.
-   */
-  void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException;
-
-  /**
-   * Run the thread in the background.  This must not be called until
-   * {@link MetaStoreThread#init(java.util.concurrent.atomic.AtomicBoolean,java.util.concurrent.atomic.AtomicBoolean)} has
-   * been called.
-   */
-  void start();
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java b/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java
deleted file mode 100644
index e8ffbd5..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-/**
- * Class to generalize the switches for dropPartitions().
- */
-public class PartitionDropOptions {
-
-  public boolean deleteData = true;
-  public boolean ifExists = false;
-  public boolean returnResults = true;
-  public boolean purgeData = false;
-
-  public static PartitionDropOptions instance() { return new PartitionDropOptions(); }
-
-  public PartitionDropOptions deleteData(boolean deleteData) {
-    this.deleteData = deleteData;
-    return this;
-  }
-
-  public PartitionDropOptions ifExists(boolean ifExists) {
-    this.ifExists = ifExists;
-    return this;
-  }
-
-  public PartitionDropOptions returnResults(boolean returnResults) {
-    this.returnResults = returnResults;
-    return this;
-  }
-
-  public PartitionDropOptions purgeData(boolean purgeData) {
-    this.purgeData = purgeData;
-    return this;
-  }
-
-} // class PartitionDropSwitches;
-

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java
deleted file mode 100644
index af0a6bd..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
-
-/**
- * The proxy interface that metastore uses for variety of QL operations (metastore can't depend
- * on QL because QL depends on metastore; creating metastore-client module would be a proper way
- * to solve this problem).
- */
-public interface PartitionExpressionProxy {
-
-  /**
-   * Converts serialized Hive expression into filter in the format suitable for Filter.g.
-   * @param expr Serialized expression.
-   * @return The filter string.
-   */
-  public String convertExprToFilter(byte[] expr) throws MetaException;
-
-  /**
-   * Filters the partition names via serialized Hive expression.
-   * @param partColumns Partition columns in the underlying table.
-   * @param expr Serialized expression.
-   * @param defaultPartitionName Default partition name from job or server configuration.
-   * @param partitionNames Partition names; the list is modified in place.
-   * @return Whether there were any unknown partitions preserved in the name list.
-   */
-  boolean filterPartitionsByExpr(List<FieldSchema> partColumns,
-      byte[] expr, String defaultPartitionName, List<String> partitionNames) throws MetaException;
-
-  /**
-   * Determines the file metadata type from input format of the source table or partition.
-   * @param inputFormat Input format name.
-   * @return The file metadata type.
-   */
-  FileMetadataExprType getMetadataType(String inputFormat);
-
-  /**
-   * Gets a separate proxy that can be used to call file-format-specific methods.
-   * @param type The file metadata type.
-   * @return The proxy.
-   */
-  FileFormatProxy getFileFormatProxy(FileMetadataExprType type);
-
-  /**
-   * Creates SARG from serialized representation.
-   * @param expr SARG, serialized as Kryo.
-   * @return SARG.
-   */
-  SearchArgument createSarg(byte[] expr);
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/TServerSocketKeepAlive.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TServerSocketKeepAlive.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TServerSocketKeepAlive.java
deleted file mode 100644
index 95bd76e..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/TServerSocketKeepAlive.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.net.SocketException;
-
-import org.apache.thrift.transport.TServerSocket;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransportException;
-
-/**
- * TServerSocketKeepAlive - like TServerSocket, but will enable keepalive for
- * accepted sockets.
- *
- */
-public class TServerSocketKeepAlive extends TServerSocket {
-  public TServerSocketKeepAlive(TServerSocket serverSocket) throws TTransportException {
-    super(serverSocket.getServerSocket());
-  }
-
-  @Override
-  protected TSocket acceptImpl() throws TTransportException {
-    TSocket ts = super.acceptImpl();
-    try {
-      ts.getSocket().setKeepAlive(true);
-    } catch (SocketException e) {
-      throw new TTransportException(e);
-    }
-    return ts;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/TableType.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TableType.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TableType.java
deleted file mode 100644
index e9e16d7..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/TableType.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-/**
- * Typesafe enum for types of tables described by the metastore.
- */
-public enum TableType {
-  MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW, INDEX_TABLE, MATERIALIZED_VIEW
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/annotation/NoReconnect.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/annotation/NoReconnect.java b/metastore/src/java/org/apache/hadoop/hive/metastore/annotation/NoReconnect.java
deleted file mode 100644
index edf0831..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/annotation/NoReconnect.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hive.metastore.annotation;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-@Target({ElementType.METHOD})
-@Retention(RetentionPolicy.RUNTIME)
-public @interface NoReconnect {
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
deleted file mode 100644
index a1a2fb9..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore.hooks;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.common.classification.InterfaceStability;
-
-/**
- * JDOConnectURLHook is used to get the URL that JDO uses to connect to the
- * database that stores the metastore data. Classes implementing this must be
- * thread-safe (for Thrift server).
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public interface JDOConnectionURLHook {
-
-  /**
-   * Gets the connection URL to supply to JDO. In addition to initialization,
-   * this method will be called after a connection failure for each reconnect
-   * attempt.
-   *
-   * @param conf The configuration used to initialize this instance of the HMS
-   * @return the connection URL
-   * @throws Exception
-   */
-  public String getJdoConnectionUrl(Configuration conf)
-  throws Exception;
-
-  /**
-   * Alerts this that the connection URL was bad. Can be used to collect stats,
-   * etc.
-   *
-   * @param url
-   */
-  public void notifyBadConnectionUrl(String url);
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java
deleted file mode 100644
index 7e94e34..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore.partition.spec;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Implementation of PartitionSpecProxy that composes a list of PartitionSpecProxy.
- */
-public class CompositePartitionSpecProxy extends PartitionSpecProxy {
-
-  private String dbName;
-  private String tableName;
-  private List<PartitionSpec> partitionSpecs;
-  private List<PartitionSpecProxy> partitionSpecProxies;
-  private int size = 0;
-
-  protected CompositePartitionSpecProxy(List<PartitionSpec> partitionSpecs) {
-    this.partitionSpecs = partitionSpecs;
-    if (partitionSpecs.isEmpty()) {
-      dbName = null;
-      tableName = null;
-    }
-    else {
-      dbName = partitionSpecs.get(0).getDbName();
-      tableName = partitionSpecs.get(0).getTableName();
-      this.partitionSpecProxies = new ArrayList<PartitionSpecProxy>(partitionSpecs.size());
-      for (PartitionSpec partitionSpec : partitionSpecs) {
-        PartitionSpecProxy partitionSpecProxy = Factory.get(partitionSpec);
-        this.partitionSpecProxies.add(partitionSpecProxy);
-        size += partitionSpecProxy.size();
-      }
-    }
-    // Assert class-invariant.
-    assert isValid() : "Invalid CompositePartitionSpecProxy!";
-  }
-
-  protected CompositePartitionSpecProxy(String dbName, String tableName, List<PartitionSpec> partitionSpecs) {
-    this.dbName = dbName;
-    this.tableName = tableName;
-    this.partitionSpecs = partitionSpecs;
-    this.partitionSpecProxies = new ArrayList<PartitionSpecProxy>(partitionSpecs.size());
-    for (PartitionSpec partitionSpec : partitionSpecs) {
-      this.partitionSpecProxies.add(PartitionSpecProxy.Factory.get(partitionSpec));
-    }
-    // Assert class-invariant.
-    assert isValid() : "Invalid CompositePartitionSpecProxy!";
-  }
-
-  private boolean isValid() {
-    for (PartitionSpecProxy partitionSpecProxy : partitionSpecProxies) {
-      if (partitionSpecProxy instanceof CompositePartitionSpecProxy) {
-        return false;
-      }
-    }
-
-    return true;
-  }
-
-  @Override
-  public int size() {
-    return size;
-  }
-
-  /**
-   * Iterator to iterate over all Partitions, across all PartitionSpecProxy instances within the Composite.
-   */
-  public static class Iterator implements PartitionIterator {
-
-    private CompositePartitionSpecProxy composite;
-    private List<PartitionSpecProxy> partitionSpecProxies;
-    private int index = -1; // Index into partitionSpecs.
-    private PartitionIterator iterator = null;
-
-    public Iterator(CompositePartitionSpecProxy composite) {
-      this.composite = composite;
-      this.partitionSpecProxies = composite.partitionSpecProxies;
-
-      if (this.partitionSpecProxies != null && !this.partitionSpecProxies.isEmpty()) {
-        this.index = 0;
-        this.iterator = this.partitionSpecProxies.get(this.index).getPartitionIterator();
-      }
-    }
-
-    @Override
-    public boolean hasNext() {
-
-      if (iterator == null) {
-        return false;
-      }
-
-      if (iterator.hasNext()) {
-        return true;
-      }
-
-      while ( ++index < partitionSpecProxies.size()
-          && !(iterator = partitionSpecProxies.get(index).getPartitionIterator()).hasNext());
-
-      return index < partitionSpecProxies.size() && iterator.hasNext();
-
-    }
-
-    @Override
-    public Partition next() {
-
-        if (iterator.hasNext())
-          return iterator.next();
-
-        while (++index < partitionSpecProxies.size()
-            && !(iterator = partitionSpecProxies.get(index).getPartitionIterator()).hasNext());
-
-        return index == partitionSpecProxies.size()? null : iterator.next();
-
-    }
-
-    @Override
-    public void remove() {
-      iterator.remove();
-    }
-
-    @Override
-    public Partition getCurrent() {
-      return iterator.getCurrent();
-    }
-
-    @Override
-    public String getDbName() {
-      return composite.dbName;
-    }
-
-    @Override
-    public String getTableName() {
-      return composite.tableName;
-    }
-
-    @Override
-    public Map<String, String> getParameters() {
-      return iterator.getParameters();
-    }
-
-    @Override
-    public void setParameters(Map<String, String> parameters) {
-      iterator.setParameters(parameters);
-    }
-
-    @Override
-    public String getLocation() {
-      return iterator.getLocation();
-    }
-
-    @Override
-    public void putToParameters(String key, String value) {
-      iterator.putToParameters(key, value);
-    }
-
-    @Override
-    public void setCreateTime(long time) {
-      iterator.setCreateTime(time);
-    }
-  }
-
-  @Override
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-    for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) {
-      partSpecProxy.setDbName(dbName);
-    }
-  }
-
-  @Override
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-    for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) {
-      partSpecProxy.setTableName(tableName);
-    }
-  }
-
-  @Override
-  public String getDbName() {
-    return dbName;
-  }
-
-  @Override
-  public String getTableName() {
-    return tableName;
-  }
-
-  @Override
-  public PartitionIterator getPartitionIterator() {
-    return new Iterator(this);
-  }
-
-  @Override
-  public List<PartitionSpec> toPartitionSpec() {
-    return partitionSpecs;
-  }
-
-  @Override
-  public void setRootLocation(String rootLocation) throws MetaException {
-    for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) {
-      partSpecProxy.setRootLocation(rootLocation);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java
deleted file mode 100644
index 154011e..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore.partition.spec;
-
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-
-/**
- * PartitionSpecProxy implementation that composes a List of Partitions.
- */
-public class PartitionListComposingSpecProxy extends PartitionSpecProxy {
-
-  private PartitionSpec partitionSpec;
-
-  protected PartitionListComposingSpecProxy(PartitionSpec partitionSpec) {
-    assert partitionSpec.isSetPartitionList()
-        : "Partition-list should have been set.";
-    this.partitionSpec = partitionSpec;
-  }
-
-  @Override
-  public String getDbName() {
-    return partitionSpec.getDbName();
-  }
-
-  @Override
-  public String getTableName() {
-    return partitionSpec.getTableName();
-  }
-
-  @Override
-  public PartitionIterator getPartitionIterator() {
-    return new Iterator(this);
-  }
-
-  @Override
-  public List<PartitionSpec> toPartitionSpec() {
-    return Arrays.asList(partitionSpec);
-  }
-
-  @Override
-  public int size() {
-    return partitionSpec.getPartitionList().getPartitionsSize();
-  }
-
-  @Override
-  public void setDbName(String dbName) {
-    partitionSpec.setDbName(dbName);
-    for (Partition partition : partitionSpec.getPartitionList().getPartitions()) {
-      partition.setDbName(dbName);
-    }
-  }
-
-  @Override
-  public void setTableName(String tableName) {
-    partitionSpec.setTableName(tableName);
-    for (Partition partition : partitionSpec.getPartitionList().getPartitions()) {
-      partition.setTableName(tableName);
-    }
-  }
-
-  @Override
-  public void setRootLocation(String newRootPath) throws MetaException {
-
-    String oldRootPath = partitionSpec.getRootPath();
-
-    if (oldRootPath == null) {
-      throw new MetaException("No common root-path. Can't replace root-path!");
-    }
-
-    for (Partition partition : partitionSpec.getPartitionList().getPartitions()) {
-      String location = partition.getSd().getLocation();
-      if (location.startsWith(oldRootPath)) {
-        partition.getSd().setLocation(location.replace(oldRootPath, newRootPath));
-      }
-      else {
-        throw new MetaException("Common root-path not found. Can't replace root-path!");
-      }
-    }
-  }
-
-  public static class Iterator implements PartitionIterator {
-
-    PartitionListComposingSpecProxy partitionSpecProxy;
-    List<Partition> partitionList;
-    int index;
-
-    public Iterator(PartitionListComposingSpecProxy partitionSpecProxy) {
-      this.partitionSpecProxy = partitionSpecProxy;
-      this.partitionList = partitionSpecProxy.partitionSpec.getPartitionList().getPartitions();
-      this.index = 0;
-    }
-
-    @Override
-    public Partition getCurrent() {
-      return partitionList.get(index);
-    }
-
-    @Override
-    public String getDbName() {
-      return partitionSpecProxy.getDbName();
-    }
-
-    @Override
-    public String getTableName() {
-      return partitionSpecProxy.getTableName();
-    }
-
-    @Override
-    public Map<String, String> getParameters() {
-      return partitionList.get(index).getParameters();
-    }
-
-    @Override
-    public void setParameters(Map<String, String> parameters) {
-      partitionList.get(index).setParameters(parameters);
-    }
-
-    @Override
-    public String getLocation() {
-      return partitionList.get(index).getSd().getLocation();
-    }
-
-    @Override
-    public void putToParameters(String key, String value) {
-      partitionList.get(index).putToParameters(key, value);
-    }
-
-    @Override
-    public void setCreateTime(long time) {
-      partitionList.get(index).setCreateTime((int)time);
-    }
-
-    @Override
-    public boolean hasNext() {
-      return index < partitionList.size();
-    }
-
-    @Override
-    public Partition next() {
-      return partitionList.get(index++);
-    }
-
-    @Override
-    public void remove() {
-      partitionList.remove(index);
-    }
-  } // class Iterator;
-
-} // class PartitionListComposingSpecProxy;

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java
deleted file mode 100644
index fdb0867..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore.partition.spec;
-
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
-
-import java.util.List;
-import java.util.Map;
-
-/**
- * Polymorphic proxy class, equivalent to org.apache.hadoop.hive.metastore.api.PartitionSpec.
- */
-public abstract class PartitionSpecProxy {
-
-  /**
-   * The number of Partition instances represented by the PartitionSpec.
-   * @return Number of partitions.
-   */
-  public abstract int size();
-
-  /**
-   * Setter for name of the DB.
-   * @param dbName The name of the DB.
-   */
-  public abstract void setDbName(String dbName);
-
-  /**
-   * Setter for name of the table.
-   * @param tableName The name of the table.
-   */
-  public abstract void setTableName(String tableName);
-
-  /**
-   * Getter for name of the DB.
-   * @return The name of the DB.
-   */
-  public abstract String getDbName();
-
-  /**
-   * Getter for name of the table.
-   * @return The name of the table.
-   */
-  public abstract String getTableName();
-
-  /**
-   * Iterator to the (virtual) sequence of Partitions represented by the PartitionSpec.
-   * @return A PartitionIterator to the beginning of the Partition sequence.
-   */
-  public abstract PartitionIterator getPartitionIterator();
-
-  /**
-   * Conversion to a org.apache.hadoop.hive.metastore.api.PartitionSpec sequence.
-   * @return A list of org.apache.hadoop.hive.metastore.api.PartitionSpec instances.
-   */
-  public abstract List<PartitionSpec> toPartitionSpec();
-
-  /**
-   * Setter for the common root-location for all partitions in the PartitionSet.
-   * @param rootLocation The new common root-location.
-   * @throws MetaException
-   */
-  public abstract void setRootLocation(String rootLocation) throws MetaException;
-
-  /**
-   * Factory to construct PartitionSetProxy instances, from PartitionSets.
-   */
-  public static class Factory {
-
-    /**
-     * Factory method. Construct PartitionSpecProxy from raw PartitionSpec.
-     * @param partSpec Raw PartitionSpec from the Thrift API.
-     * @return PartitionSpecProxy instance.
-     */
-    public static PartitionSpecProxy get(PartitionSpec partSpec) {
-
-      if (partSpec == null) {
-        return null;
-      }
-      else
-      if (partSpec.isSetPartitionList()) {
-        return new PartitionListComposingSpecProxy(partSpec);
-      }
-      else
-      if (partSpec.isSetSharedSDPartitionSpec()) {
-        return new PartitionSpecWithSharedSDProxy(partSpec);
-      }
-
-      assert false : "Unsupported type of PartitionSpec!";
-      return null;
-    }
-
-    /**
-     * Factory method to construct CompositePartitionSpecProxy.
-     * @param partitionSpecs List of raw PartitionSpecs.
-     * @return A CompositePartitionSpecProxy instance.
-     */
-    public static PartitionSpecProxy get(List<PartitionSpec> partitionSpecs) {
-      return new CompositePartitionSpecProxy(partitionSpecs);
-    }
-
-  } // class Factory;
-
-  /**
-   * Iterator to iterate over Partitions corresponding to a PartitionSpec.
-   */
-  public static interface PartitionIterator extends java.util.Iterator<Partition> {
-
-    /**
-     * Getter for the Partition "pointed to" by the iterator.
-     * Like next(), but without advancing the iterator.
-     * @return The "current" partition object.
-     */
-    public Partition getCurrent();
-
-    /**
-     * Getter for the name of the DB.
-     * @return Name of the DB.
-     */
-    public String getDbName();
-
-    /**
-     * Getter for the name of the table.
-     * @return Name of the table.
-     */
-    public String getTableName();
-
-    /**
-     * Getter for the Partition parameters.
-     * @return Key-value map for Partition-level parameters.
-     */
-    public Map<String, String> getParameters();
-
-    /**
-     * Setter for Partition parameters.
-     * @param parameters Key-value map fo Partition-level parameters.
-     */
-    public void setParameters(Map<String, String> parameters);
-
-    /**
-     * Insert an individual parameter to a Partition's parameter-set.
-     * @param key
-     * @param value
-     */
-    public void putToParameters(String key, String value);
-
-    /**
-     * Getter for Partition-location.
-     * @return Partition's location.
-     */
-    public String getLocation();
-
-    /**
-     * Setter for creation-time of a Partition.
-     * @param time Timestamp indicating the time of creation of the Partition.
-     */
-    public void setCreateTime(long time);
-
-  } // class PartitionIterator;
-
-  /**
-   * Simple wrapper class for pre-constructed Partitions, to expose a PartitionIterator interface,
-   * where the iterator-sequence consists of just one Partition.
-   */
-  public static class SimplePartitionWrapperIterator implements PartitionIterator {
-    private Partition partition;
-    public SimplePartitionWrapperIterator(Partition partition) {this.partition = partition;}
-
-    @Override public Partition getCurrent() { return partition; }
-    @Override public String getDbName() { return partition.getDbName(); }
-    @Override public String getTableName() { return partition.getTableName(); }
-    @Override public Map<String, String> getParameters() { return partition.getParameters(); }
-    @Override public void setParameters(Map<String, String> parameters) { partition.setParameters(parameters); }
-    @Override public void putToParameters(String key, String value) { partition.putToParameters(key, value);}
-    @Override public String getLocation() { return partition.getSd().getLocation(); }
-    @Override public void setCreateTime(long time) { partition.setCreateTime((int)time);}
-    @Override public boolean hasNext() { return false; } // No next partition.
-    @Override public Partition next() { return null; } // No next partition.
-    @Override public void remove() {} // Do nothing.
-  } // P
-
-} // class PartitionSpecProxy;

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java
deleted file mode 100644
index 6a3e147..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore.partition.spec;
-
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
-import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD;
-import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Subclass of PartitionSpecProxy that pulls out commonality of
- * StorageDescriptor properties within a Partition-list into a common
- * StorageDescriptor instance.
- */
-public class PartitionSpecWithSharedSDProxy extends PartitionSpecProxy {
-
-  private PartitionSpec partitionSpec;
-
-  public PartitionSpecWithSharedSDProxy(PartitionSpec partitionSpec) {
-    assert partitionSpec.isSetSharedSDPartitionSpec();
-    this.partitionSpec = partitionSpec;
-  }
-
-  @Override
-  public int size() {
-    return partitionSpec.getSharedSDPartitionSpec().getPartitionsSize();
-  }
-
-  @Override
-  public void setDbName(String dbName) {
-    partitionSpec.setDbName(dbName);
-  }
-
-  @Override
-  public void setTableName(String tableName) {
-    partitionSpec.setTableName(tableName);
-  }
-
-  @Override
-  public String getDbName() {
-    return partitionSpec.getDbName();
-  }
-
-  @Override
-  public String getTableName() {
-    return partitionSpec.getTableName();
-  }
-
-  public PartitionIterator getPartitionIterator() {
-    return new Iterator(this);
-  }
-
-  @Override
-  public List<PartitionSpec> toPartitionSpec() {
-    return Arrays.asList(partitionSpec);
-  }
-
-  @Override
-  public void setRootLocation(String rootLocation) throws MetaException {
-    partitionSpec.setRootPath(rootLocation);
-    partitionSpec.getSharedSDPartitionSpec().getSd().setLocation(rootLocation);
-  }
-
-  /**
-   * Iterator implementation to iterate over all Partitions within the PartitionSpecWithSharedSDProxy.
-   */
-  public static class Iterator implements PartitionIterator {
-
-    private PartitionSpecWithSharedSDProxy partitionSpecWithSharedSDProxy;
-    private PartitionSpecWithSharedSD pSpec;
-    private int index;
-
-    Iterator(PartitionSpecWithSharedSDProxy partitionSpecWithSharedSDProxy) {
-      this.partitionSpecWithSharedSDProxy = partitionSpecWithSharedSDProxy;
-      this.pSpec = this.partitionSpecWithSharedSDProxy.partitionSpec.getSharedSDPartitionSpec();
-      this.index = 0;
-    }
-
-    @Override
-    public boolean hasNext() {
-      return index < pSpec.getPartitions().size();
-    }
-
-    @Override
-    public Partition next() {
-      Partition partition = getCurrent();
-      ++index;
-      return partition;
-    }
-
-    @Override
-    public void remove() {
-      pSpec.getPartitions().remove(index);
-    }
-
-    @Override
-    public Partition getCurrent() {
-      PartitionWithoutSD partWithoutSD = pSpec.getPartitions().get(index);
-      StorageDescriptor partSD = new StorageDescriptor(pSpec.getSd());
-      partSD.setLocation(partSD.getLocation() + partWithoutSD.getRelativePath());
-
-      return new Partition(
-          partWithoutSD.getValues(),
-          partitionSpecWithSharedSDProxy.partitionSpec.getDbName(),
-          partitionSpecWithSharedSDProxy.partitionSpec.getTableName(),
-          partWithoutSD.getCreateTime(),
-          partWithoutSD.getLastAccessTime(),
-          partSD,
-          partWithoutSD.getParameters()
-      );
-    }
-
-    @Override
-    public String getDbName() {
-      return partitionSpecWithSharedSDProxy.partitionSpec.getDbName();
-    }
-
-    @Override
-    public String getTableName() {
-      return partitionSpecWithSharedSDProxy.partitionSpec.getTableName();
-    }
-
-    @Override
-    public Map<String, String> getParameters() {
-      return pSpec.getPartitions().get(index).getParameters();
-    }
-
-    @Override
-    public void setParameters(Map<String, String> parameters) {
-      pSpec.getPartitions().get(index).setParameters(parameters);
-    }
-
-    @Override
-    public String getLocation() {
-      return pSpec.getSd().getLocation() + pSpec.getPartitions().get(index).getRelativePath();
-    }
-
-    @Override
-    public void putToParameters(String key, String value) {
-      pSpec.getPartitions().get(index).putToParameters(key, value);
-    }
-
-    @Override
-    public void setCreateTime(long time) {
-      pSpec.getPartitions().get(index).setCreateTime((int)time);
-    }
-
-  } // static class Iterator;
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java b/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
deleted file mode 100644
index 620ea5f..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
+++ /dev/null
@@ -1,640 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.tools;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaException;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.util.IllegalFormatException;
-import java.util.List;
-
-public class HiveSchemaHelper {
-  public static final String DB_DERBY = "derby";
-  public static final String DB_HIVE = "hive";
-  public static final String DB_MSSQL = "mssql";
-  public static final String DB_MYSQL = "mysql";
-  public static final String DB_POSTGRACE = "postgres";
-  public static final String DB_ORACLE = "oracle";
-
-  /***
-   * Get JDBC connection to metastore db
-   *
-   * @param userName metastore connection username
-   * @param password metastore connection password
-   * @param printInfo print connection parameters
-   * @param hiveConf hive config object
-   * @return metastore connection object
-   * @throws org.apache.hadoop.hive.metastore.HiveMetaException
-   */
-  public static Connection getConnectionToMetastore(String userName,
-      String password, String url, String driver, boolean printInfo,
-      HiveConf hiveConf)
-      throws HiveMetaException {
-    try {
-      url = url == null ? getValidConfVar(
-        HiveConf.ConfVars.METASTORECONNECTURLKEY, hiveConf) : url;
-      driver = driver == null ? getValidConfVar(
-        HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, hiveConf) : driver;
-      if (printInfo) {
-        System.out.println("Metastore connection URL:\t " + url);
-        System.out.println("Metastore Connection Driver :\t " + driver);
-        System.out.println("Metastore connection User:\t " + userName);
-      }
-      if ((userName == null) || userName.isEmpty()) {
-        throw new HiveMetaException("UserName empty ");
-      }
-
-      // load required JDBC driver
-      Class.forName(driver);
-
-      // Connect using the JDBC URL and user/pass from conf
-      return DriverManager.getConnection(url, userName, password);
-    } catch (IOException e) {
-      throw new HiveMetaException("Failed to get schema version.", e);
-    } catch (SQLException e) {
-      throw new HiveMetaException("Failed to get schema version.", e);
-    } catch (ClassNotFoundException e) {
-      throw new HiveMetaException("Failed to load driver", e);
-    }
-  }
-
-  public static Connection getConnectionToMetastore(MetaStoreConnectionInfo info) throws HiveMetaException {
-    return getConnectionToMetastore(info.getUsername(), info.getPassword(), info.getUrl(),
-        info.getDriver(), info.getPrintInfo(), info.getHiveConf());
-  }
-
-  public static String getValidConfVar(HiveConf.ConfVars confVar, HiveConf hiveConf)
-      throws IOException {
-    String confVarStr = hiveConf.get(confVar.varname);
-    if (confVarStr == null || confVarStr.isEmpty()) {
-      throw new IOException("Empty " + confVar.varname);
-    }
-    return confVarStr.trim();
-  }
-
-  public interface NestedScriptParser {
-
-    public enum CommandType {
-      PARTIAL_STATEMENT,
-      TERMINATED_STATEMENT,
-      COMMENT
-    }
-
-    static final String DEFAULT_DELIMITER = ";";
-    static final String DEFAULT_QUOTE = "\"";
-
-    /**
-     * Find the type of given command
-     *
-     * @param dbCommand
-     * @return
-     */
-    public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException;
-
-    /**
-     * Parse the DB specific nesting format and extract the inner script name if any
-     *
-     * @param dbCommand command from parent script
-     * @return
-     * @throws IllegalFormatException
-     */
-    public String getScriptName(String dbCommand) throws IllegalArgumentException;
-
-    /**
-     * Find if the given command is a nested script execution
-     *
-     * @param dbCommand
-     * @return
-     */
-    public boolean isNestedScript(String dbCommand);
-
-    /**
-     * Find if the given command should not be passed to DB
-     *
-     * @param dbCommand
-     * @return
-     */
-    public boolean isNonExecCommand(String dbCommand);
-
-    /**
-     * Get the SQL statement delimiter
-     *
-     * @return
-     */
-    public String getDelimiter();
-
-    /**
-     * Get the SQL indentifier quotation character
-     *
-     * @return
-     */
-    public String getQuoteCharacter();
-
-    /**
-     * Clear any client specific tags
-     *
-     * @return
-     */
-    public String cleanseCommand(String dbCommand);
-
-    /**
-     * Does the DB required table/column names quoted
-     *
-     * @return
-     */
-    public boolean needsQuotedIdentifier();
-
-    /**
-     * Flatten the nested upgrade script into a buffer
-     *
-     * @param scriptDir  upgrade script directory
-     * @param scriptFile upgrade script file
-     * @return string of sql commands
-     */
-    public String buildCommand(String scriptDir, String scriptFile)
-        throws IllegalFormatException, IOException;
-
-    /**
-     * Flatten the nested upgrade script into a buffer
-     *
-     * @param scriptDir  upgrade script directory
-     * @param scriptFile upgrade script file
-     * @param fixQuotes whether to replace quote characters
-     * @return string of sql commands
-     */
-    public String buildCommand(String scriptDir, String scriptFile, boolean fixQuotes)
-        throws IllegalFormatException, IOException;
-  }
-
-  /***
-   * Base implementation of NestedScriptParser
-   * abstractCommandParser.
-   *
-   */
-  private static abstract class AbstractCommandParser implements NestedScriptParser {
-    private List<String> dbOpts;
-    private String msUsername;
-    private String msPassword;
-    private HiveConf hiveConf;
-
-    public AbstractCommandParser(String dbOpts, String msUsername, String msPassword,
-        HiveConf hiveConf) {
-      setDbOpts(dbOpts);
-      this.msUsername = msUsername;
-      this.msPassword = msPassword;
-      this.hiveConf = hiveConf;
-    }
-
-    @Override
-    public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException{
-      if (dbCommand == null || dbCommand.isEmpty()) {
-        throw new IllegalArgumentException("invalid command line " + dbCommand);
-      }
-      dbCommand = dbCommand.trim();
-      if (dbCommand.endsWith(getDelimiter()) || isNonExecCommand(dbCommand)) {
-        return false;
-      } else {
-        return true;
-      }
-    }
-
-    @Override
-    public boolean isNonExecCommand(String dbCommand) {
-      return (dbCommand.startsWith("--") || dbCommand.startsWith("#"));
-    }
-
-    @Override
-    public String getDelimiter() {
-      return DEFAULT_DELIMITER;
-    }
-
-    @Override
-    public String getQuoteCharacter() {
-      return DEFAULT_QUOTE;
-    }
-
-
-    @Override
-    public String cleanseCommand(String dbCommand) {
-      // strip off the delimiter
-      if (dbCommand.endsWith(getDelimiter())) {
-        dbCommand = dbCommand.substring(0,
-            dbCommand.length() - getDelimiter().length());
-      }
-      return dbCommand;
-    }
-
-    @Override
-    public boolean needsQuotedIdentifier() {
-      return false;
-    }
-
-    @Override
-    public String buildCommand(
-      String scriptDir, String scriptFile) throws IllegalFormatException, IOException {
-      return buildCommand(scriptDir, scriptFile, false);
-    }
-
-    @Override
-    public String buildCommand(
-      String scriptDir, String scriptFile, boolean fixQuotes) throws IllegalFormatException, IOException {
-      BufferedReader bfReader =
-          new BufferedReader(new FileReader(scriptDir + File.separatorChar + scriptFile));
-      String currLine;
-      StringBuilder sb = new StringBuilder();
-      String currentCommand = null;
-      while ((currLine = bfReader.readLine()) != null) {
-        currLine = currLine.trim();
-
-        if (fixQuotes && !getQuoteCharacter().equals(DEFAULT_QUOTE)) {
-          currLine = currLine.replace("\\\"", getQuoteCharacter());
-        }
-
-        if (currLine.isEmpty()) {
-          continue; // skip empty lines
-        }
-
-        if (currentCommand == null) {
-          currentCommand = currLine;
-        } else {
-          currentCommand = currentCommand + " " + currLine;
-        }
-        if (isPartialCommand(currLine)) {
-          // if its a partial line, continue collecting the pieces
-          continue;
-        }
-
-        // if this is a valid executable command then add it to the buffer
-        if (!isNonExecCommand(currentCommand)) {
-          currentCommand = cleanseCommand(currentCommand);
-          if (isNestedScript(currentCommand)) {
-            // if this is a nested sql script then flatten it
-            String currScript = getScriptName(currentCommand);
-            sb.append(buildCommand(scriptDir, currScript));
-          } else {
-            // Now we have a complete statement, process it
-            // write the line to buffer
-            sb.append(currentCommand);
-            sb.append(System.getProperty("line.separator"));
-          }
-        }
-        currentCommand = null;
-      }
-      bfReader.close();
-      return sb.toString();
-    }
-
-    private void setDbOpts(String dbOpts) {
-      if (dbOpts != null) {
-        this.dbOpts = Lists.newArrayList(dbOpts.split(","));
-      } else {
-        this.dbOpts = Lists.newArrayList();
-      }
-    }
-
-    protected List<String> getDbOpts() {
-      return dbOpts;
-    }
-
-    protected String getMsUsername() {
-      return msUsername;
-    }
-
-    protected String getMsPassword() {
-      return msPassword;
-    }
-
-    protected HiveConf getHiveConf() {
-      return hiveConf;
-    }
-  }
-
-  // Derby commandline parser
-  public static class DerbyCommandParser extends AbstractCommandParser {
-    private static final String DERBY_NESTING_TOKEN = "RUN";
-
-    public DerbyCommandParser(String dbOpts, String msUsername, String msPassword,
-        HiveConf hiveConf) {
-      super(dbOpts, msUsername, msPassword, hiveConf);
-    }
-
-    @Override
-    public String getScriptName(String dbCommand) throws IllegalArgumentException {
-
-      if (!isNestedScript(dbCommand)) {
-        throw new IllegalArgumentException("Not a script format " + dbCommand);
-      }
-      String[] tokens = dbCommand.split(" ");
-      if (tokens.length != 2) {
-        throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
-      }
-      return tokens[1].replace(";", "").replaceAll("'", "");
-    }
-
-    @Override
-    public boolean isNestedScript(String dbCommand) {
-      // Derby script format is RUN '<file>'
-     return dbCommand.startsWith(DERBY_NESTING_TOKEN);
-    }
-  }
-
-  // Derby commandline parser
-  public static class HiveCommandParser extends AbstractCommandParser {
-    private static String HIVE_NESTING_TOKEN = "SOURCE";
-    private final NestedScriptParser nestedDbCommandParser;
-
-    public HiveCommandParser(String dbOpts, String msUsername, String msPassword,
-        HiveConf hiveConf, String metaDbType) {
-      super(dbOpts, msUsername, msPassword, hiveConf);
-      nestedDbCommandParser = getDbCommandParser(metaDbType);
-    }
-
-    @Override
-    public String getQuoteCharacter() {
-      return nestedDbCommandParser.getQuoteCharacter();
-    }
-
-    @Override
-    public String getScriptName(String dbCommand) throws IllegalArgumentException {
-
-      if (!isNestedScript(dbCommand)) {
-        throw new IllegalArgumentException("Not a script format " + dbCommand);
-      }
-      String[] tokens = dbCommand.split(" ");
-      if (tokens.length != 2) {
-        throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
-      }
-      return tokens[1].replace(";", "");
-    }
-
-    @Override
-    public boolean isNestedScript(String dbCommand) {
-     return dbCommand.startsWith(HIVE_NESTING_TOKEN);
-    }
-  }
-
-  // MySQL parser
-  public static class MySqlCommandParser extends AbstractCommandParser {
-    private static final String MYSQL_NESTING_TOKEN = "SOURCE";
-    private static final String DELIMITER_TOKEN = "DELIMITER";
-    private String delimiter = DEFAULT_DELIMITER;
-
-    public MySqlCommandParser(String dbOpts, String msUsername, String msPassword,
-        HiveConf hiveConf) {
-      super(dbOpts, msUsername, msPassword, hiveConf);
-    }
-
-    @Override
-    public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException{
-      boolean isPartial = super.isPartialCommand(dbCommand);
-      // if this is a delimiter directive, reset our delimiter
-      if (dbCommand.startsWith(DELIMITER_TOKEN)) {
-        String[] tokens = dbCommand.split(" ");
-        if (tokens.length != 2) {
-          throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
-        }
-        delimiter = tokens[1];
-      }
-      return isPartial;
-    }
-
-    @Override
-    public String getScriptName(String dbCommand) throws IllegalArgumentException {
-      String[] tokens = dbCommand.split(" ");
-      if (tokens.length != 2) {
-        throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
-      }
-      // remove ending ';'
-      return tokens[1].replace(";", "");
-    }
-
-    @Override
-    public boolean isNestedScript(String dbCommand) {
-      return dbCommand.startsWith(MYSQL_NESTING_TOKEN);
-    }
-
-    @Override
-    public String getDelimiter() {
-      return delimiter;
-    }
-
-    @Override
-    public String getQuoteCharacter() {
-      return "`";
-    }
-
-    @Override
-    public boolean isNonExecCommand(String dbCommand) {
-      return super.isNonExecCommand(dbCommand) ||
-          (dbCommand.startsWith("/*") && dbCommand.endsWith("*/")) ||
-          dbCommand.startsWith(DELIMITER_TOKEN);
-    }
-
-    @Override
-    public String cleanseCommand(String dbCommand) {
-      return super.cleanseCommand(dbCommand).replaceAll("/\\*.*?\\*/[^;]", "");
-    }
-
-  }
-
-  // Postgres specific parser
-  public static class PostgresCommandParser extends AbstractCommandParser {
-    private static final String POSTGRES_NESTING_TOKEN = "\\i";
-    @VisibleForTesting
-    public static final String POSTGRES_STANDARD_STRINGS_OPT = "SET standard_conforming_strings";
-    @VisibleForTesting
-    public static final String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81";
-
-    public PostgresCommandParser(String dbOpts, String msUsername, String msPassword,
-        HiveConf hiveConf) {
-      super(dbOpts, msUsername, msPassword, hiveConf);
-    }
-
-    @Override
-    public String getScriptName(String dbCommand) throws IllegalArgumentException {
-      String[] tokens = dbCommand.split(" ");
-      if (tokens.length != 2) {
-        throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
-      }
-      // remove ending ';'
-      return tokens[1].replace(";", "");
-    }
-
-    @Override
-    public boolean isNestedScript(String dbCommand) {
-      return dbCommand.startsWith(POSTGRES_NESTING_TOKEN);
-    }
-
-    @Override
-    public boolean needsQuotedIdentifier() {
-      return true;
-    }
-
-    @Override
-    public boolean isNonExecCommand(String dbCommand) {
-      // Skip "standard_conforming_strings" command which is read-only in older
-      // Postgres versions like 8.1
-      // See: http://www.postgresql.org/docs/8.2/static/release-8-1.html
-      if (getDbOpts().contains(POSTGRES_SKIP_STANDARD_STRINGS_DBOPT)) {
-        if (dbCommand.startsWith(POSTGRES_STANDARD_STRINGS_OPT)) {
-          return true;
-        }
-      }
-      return super.isNonExecCommand(dbCommand);
-    }
-  }
-
-  //Oracle specific parser
-  public static class OracleCommandParser extends AbstractCommandParser {
-    private static final String ORACLE_NESTING_TOKEN = "@";
-
-    public OracleCommandParser(String dbOpts, String msUsername, String msPassword,
-        HiveConf hiveConf) {
-      super(dbOpts, msUsername, msPassword, hiveConf);
-    }
-
-    @Override
-    public String getScriptName(String dbCommand) throws IllegalArgumentException {
-      if (!isNestedScript(dbCommand)) {
-        throw new IllegalArgumentException("Not a nested script format " + dbCommand);
-      }
-      // remove ending ';' and starting '@'
-      return dbCommand.replace(";", "").replace(ORACLE_NESTING_TOKEN, "");
-    }
-
-    @Override
-    public boolean isNestedScript(String dbCommand) {
-      return dbCommand.startsWith(ORACLE_NESTING_TOKEN);
-    }
-  }
-
-  //MSSQL specific parser
-  public static class MSSQLCommandParser extends AbstractCommandParser {
-    private static final String MSSQL_NESTING_TOKEN = ":r";
-
-    public MSSQLCommandParser(String dbOpts, String msUsername, String msPassword,
-        HiveConf hiveConf) {
-      super(dbOpts, msUsername, msPassword, hiveConf);
-    }
-
-    @Override
-    public String getScriptName(String dbCommand) throws IllegalArgumentException {
-      String[] tokens = dbCommand.split(" ");
-      if (tokens.length != 2) {
-        throw new IllegalArgumentException("Couldn't parse line " + dbCommand);
-      }
-      return tokens[1];
-    }
-
-    @Override
-    public boolean isNestedScript(String dbCommand) {
-      return dbCommand.startsWith(MSSQL_NESTING_TOKEN);
-    }
-  }
-
-  public static NestedScriptParser getDbCommandParser(String dbName) {
-    return getDbCommandParser(dbName, null);
-  }
-
-  public static NestedScriptParser getDbCommandParser(String dbName, String metaDbName) {
-    return getDbCommandParser(dbName, null, null, null, null, metaDbName);
-  }
-
-  public static NestedScriptParser getDbCommandParser(String dbName,
-      String dbOpts, String msUsername, String msPassword,
-      HiveConf hiveConf, String metaDbType) {
-    if (dbName.equalsIgnoreCase(DB_DERBY)) {
-      return new DerbyCommandParser(dbOpts, msUsername, msPassword, hiveConf);
-    } else if (dbName.equalsIgnoreCase(DB_HIVE)) {
-      return new HiveCommandParser(dbOpts, msUsername, msPassword, hiveConf, metaDbType);
-    } else if (dbName.equalsIgnoreCase(DB_MSSQL)) {
-      return new MSSQLCommandParser(dbOpts, msUsername, msPassword, hiveConf);
-    } else if (dbName.equalsIgnoreCase(DB_MYSQL)) {
-      return new MySqlCommandParser(dbOpts, msUsername, msPassword, hiveConf);
-    } else if (dbName.equalsIgnoreCase(DB_POSTGRACE)) {
-      return new PostgresCommandParser(dbOpts, msUsername, msPassword, hiveConf);
-    } else if (dbName.equalsIgnoreCase(DB_ORACLE)) {
-      return new OracleCommandParser(dbOpts, msUsername, msPassword, hiveConf);
-    } else {
-      throw new IllegalArgumentException("Unknown dbType " + dbName);
-    }
-  }
-
-  public static class MetaStoreConnectionInfo {
-    private final String userName;
-    private final String password;
-    private final String url;
-    private final String driver;
-    private final boolean printInfo;
-    private final HiveConf hiveConf;
-    private final String dbType;
-
-    public MetaStoreConnectionInfo(String userName, String password, String url, String driver,
-        boolean printInfo, HiveConf hiveConf, String dbType) {
-      super();
-      this.userName = userName;
-      this.password = password;
-      this.url = url;
-      this.driver = driver;
-      this.printInfo = printInfo;
-      this.hiveConf = hiveConf;
-      this.dbType = dbType;
-    }
-
-    public String getPassword() {
-      return password;
-    }
-
-    public String getUrl() {
-      return url;
-    }
-
-    public String getDriver() {
-      return driver;
-    }
-
-    public boolean isPrintInfo() {
-      return printInfo;
-    }
-
-    public HiveConf getHiveConf() {
-      return hiveConf;
-    }
-
-    public String getUsername() {
-      return userName;
-    }
-
-    public boolean getPrintInfo() {
-      return printInfo;
-    }
-
-    public String getDbType() {
-      return dbType;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
----------------------------------------------------------------------
diff --git a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/metastore.proto b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
deleted file mode 100644
index 29b99b4..0000000
--- a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-
// Describes a single split: a byte range (offset/length) within a file plus
// its ordinal position among the splits.
message SplitInfo {
  required int64 offset = 1;
  required int64 length = 2;
  required int32 index = 3;
}
-
// Container for an ordered list of SplitInfo entries.
message SplitInfos {
  repeated SplitInfo infos = 1;
}


Mime
View raw message