spark-commits mailing list archives

From: r...@apache.org
Subject: [1/2] spark git commit: [SPARK-13477][SQL] Expose new user-facing Catalog interface
Date: Wed, 27 Apr 2016 04:29:31 GMT
Repository: spark
Updated Branches:
  refs/heads/master d93976d86 -> d8a83a564


http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala
index 6b1d413..855e7e2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala
@@ -183,8 +183,8 @@ case class ShowPartitionsCommand(
        * 2. If it is a datasource table.
        * 3. If it is a view or index table.
        */
-      if (tab.tableType == CatalogTableType.VIRTUAL_VIEW ||
-        tab.tableType == CatalogTableType.INDEX_TABLE) {
+      if (tab.tableType == CatalogTableType.VIEW ||
+        tab.tableType == CatalogTableType.INDEX) {
         throw new AnalysisException("SHOW PARTITIONS is not allowed on a view or index table: " +
           s"${tab.qualifiedName}")
       }

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
index 31900b4..f670f63 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
@@ -323,10 +323,10 @@ object CreateDataSourceTableUtils extends Logging {
 
     val tableType = if (isExternal) {
       tableProperties.put("EXTERNAL", "TRUE")
-      CatalogTableType.EXTERNAL_TABLE
+      CatalogTableType.EXTERNAL
     } else {
       tableProperties.put("EXTERNAL", "FALSE")
-      CatalogTableType.MANAGED_TABLE
+      CatalogTableType.MANAGED
     }
 
     val maybeSerDe = HiveSerDe.sourceToSerDe(provider, sparkSession.sessionState.conf)

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
index ecde332..12167ee 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
@@ -204,10 +204,10 @@ case class DropTable(
       // If the command DROP VIEW is to drop a table or DROP TABLE is to drop a view
       // issue an exception.
       catalog.getTableMetadataOption(tableName).map(_.tableType match {
-        case CatalogTableType.VIRTUAL_VIEW if !isView =>
+        case CatalogTableType.VIEW if !isView =>
           throw new AnalysisException(
             "Cannot drop a view with DROP TABLE. Please use DROP VIEW instead")
-        case o if o != CatalogTableType.VIRTUAL_VIEW && isView =>
+        case o if o != CatalogTableType.VIEW && isView =>
           throw new AnalysisException(
             s"Cannot drop a table with DROP VIEW. Please use DROP TABLE instead")
         case _ =>
@@ -527,10 +527,10 @@ private[sql] object DDLUtils {
       tableIdentifier: TableIdentifier,
       isView: Boolean): Unit = {
     catalog.getTableMetadataOption(tableIdentifier).map(_.tableType match {
-      case CatalogTableType.VIRTUAL_VIEW if !isView =>
+      case CatalogTableType.VIEW if !isView =>
         throw new AnalysisException(
           "Cannot alter a view with ALTER TABLE. Please use ALTER VIEW instead")
-      case o if o != CatalogTableType.VIRTUAL_VIEW && isView =>
+      case o if o != CatalogTableType.VIEW && isView =>
         throw new AnalysisException(
           s"Cannot alter a table with ALTER VIEW. Please use ALTER TABLE instead")
       case _ =>

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
index 700a704..8d9feec 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
@@ -73,7 +73,7 @@ case class CreateTableLike(
 
     val tableToCreate = catalog.getTableMetadata(sourceTable).copy(
       identifier = targetTable,
-      tableType = CatalogTableType.MANAGED_TABLE,
+      tableType = CatalogTableType.MANAGED,
       createTime = System.currentTimeMillis,
       lastAccessTime = -1).withNewStorage(locationUri = None)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
index f42b56f..1641780 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
@@ -52,7 +52,7 @@ case class CreateViewCommand(
 
   override def output: Seq[Attribute] = Seq.empty[Attribute]
 
-  require(tableDesc.tableType == CatalogTableType.VIRTUAL_VIEW)
+  require(tableDesc.tableType == CatalogTableType.VIEW)
   require(tableDesc.viewText.isDefined)
 
   private val tableIdentifier = tableDesc.identifier

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/core/src/main/scala/org/apache/spark/sql/internal/CatalogImpl.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/CatalogImpl.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/CatalogImpl.scala
new file mode 100644
index 0000000..976c9c5
--- /dev/null
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/CatalogImpl.scala
@@ -0,0 +1,352 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.internal
+
+import scala.collection.JavaConverters._
+import scala.reflect.runtime.universe.TypeTag
+
+import org.apache.spark.annotation.Experimental
+import org.apache.spark.sql.{AnalysisException, DataFrame, Dataset, SparkSession}
+import org.apache.spark.sql.catalog.{Catalog, Column, Database, Function, Table}
+import org.apache.spark.sql.catalyst.{DefinedByConstructorParams, TableIdentifier}
+import org.apache.spark.sql.catalyst.catalog.SessionCatalog
+import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
+import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
+import org.apache.spark.sql.execution.datasources.CreateTableUsing
+import org.apache.spark.sql.types.StructType
+
+
+/**
+ * Internal implementation of the user-facing [[Catalog]].
+ */
+class CatalogImpl(sparkSession: SparkSession) extends Catalog {
+
+  private def sessionCatalog: SessionCatalog = sparkSession.sessionState.catalog
+
+  private def requireDatabaseExists(dbName: String): Unit = {
+    if (!sessionCatalog.databaseExists(dbName)) {
+      throw new AnalysisException(s"Database '$dbName' does not exist.")
+    }
+  }
+
+  private def requireTableExists(dbName: String, tableName: String): Unit = {
+    if (!sessionCatalog.tableExists(TableIdentifier(tableName, Some(dbName)))) {
+      throw new AnalysisException(s"Table '$tableName' does not exist in database '$dbName'.")
+    }
+  }
+
+  private def makeDataset[T <: DefinedByConstructorParams: TypeTag](data: Seq[T]): Dataset[T] = {
+    val enc = ExpressionEncoder[T]()
+    val encoded = data.map(d => enc.toRow(d).copy())
+    val plan = new LocalRelation(enc.schema.toAttributes, encoded)
+    val queryExecution = sparkSession.executePlan(plan)
+    new Dataset[T](sparkSession, queryExecution, enc)
+  }
+
+  /**
+   * Returns the current default database in this session.
+   */
+  override def currentDatabase: String = sessionCatalog.getCurrentDatabase
+
+  /**
+   * Sets the current default database in this session.
+   */
+  @throws[AnalysisException]("database does not exist")
+  override def setCurrentDatabase(dbName: String): Unit = {
+    requireDatabaseExists(dbName)
+    sessionCatalog.setCurrentDatabase(dbName)
+  }
+
+  /**
+   * Returns a list of databases available across all sessions.
+   */
+  override def listDatabases(): Dataset[Database] = {
+    val databases = sessionCatalog.listDatabases().map { dbName =>
+      val metadata = sessionCatalog.getDatabaseMetadata(dbName)
+      new Database(
+        name = metadata.name,
+        description = metadata.description,
+        locationUri = metadata.locationUri)
+    }
+    makeDataset(databases)
+  }
+
+  /**
+   * Returns a list of tables in the current database.
+   * This includes all temporary tables.
+   */
+  override def listTables(): Dataset[Table] = {
+    listTables(currentDatabase)
+  }
+
+  /**
+   * Returns a list of tables in the specified database.
+   * This includes all temporary tables.
+   */
+  @throws[AnalysisException]("database does not exist")
+  override def listTables(dbName: String): Dataset[Table] = {
+    requireDatabaseExists(dbName)
+    val tables = sessionCatalog.listTables(dbName).map { tableIdent =>
+      val isTemp = tableIdent.database.isEmpty
+      val metadata = if (isTemp) None else Some(sessionCatalog.getTableMetadata(tableIdent))
+      new Table(
+        name = tableIdent.identifier,
+        database = metadata.flatMap(_.identifier.database).orNull,
+        description = metadata.flatMap(_.comment).orNull,
+        tableType = metadata.map(_.tableType.name).getOrElse("TEMPORARY"),
+        isTemporary = isTemp)
+    }
+    makeDataset(tables)
+  }
+
+  /**
+   * Returns a list of functions registered in the current database.
+   * This includes all temporary functions
+   */
+  override def listFunctions(): Dataset[Function] = {
+    listFunctions(currentDatabase)
+  }
+
+  /**
+   * Returns a list of functions registered in the specified database.
+   * This includes all temporary functions
+   */
+  @throws[AnalysisException]("database does not exist")
+  override def listFunctions(dbName: String): Dataset[Function] = {
+    requireDatabaseExists(dbName)
+    val functions = sessionCatalog.listFunctions(dbName).map { funcIdent =>
+      val metadata = sessionCatalog.lookupFunctionInfo(funcIdent)
+      new Function(
+        name = funcIdent.identifier,
+        description = null, // for now, this is always undefined
+        className = metadata.getClassName,
+        isTemporary = funcIdent.database.isEmpty)
+    }
+    makeDataset(functions)
+  }
+
+  /**
+   * Returns a list of columns for the given table in the current database.
+   */
+  @throws[AnalysisException]("table does not exist")
+  override def listColumns(tableName: String): Dataset[Column] = {
+    listColumns(currentDatabase, tableName)
+  }
+
+  /**
+   * Returns a list of columns for the given table in the specified database.
+   */
+  @throws[AnalysisException]("database or table does not exist")
+  override def listColumns(dbName: String, tableName: String): Dataset[Column] = {
+    requireTableExists(dbName, tableName)
+    val tableMetadata = sessionCatalog.getTableMetadata(TableIdentifier(tableName, Some(dbName)))
+    val partitionColumnNames = tableMetadata.partitionColumnNames.toSet
+    val bucketColumnNames = tableMetadata.bucketColumnNames.toSet
+    val columns = tableMetadata.schema.map { c =>
+      new Column(
+        name = c.name,
+        description = c.comment.orNull,
+        dataType = c.dataType,
+        nullable = c.nullable,
+        isPartition = partitionColumnNames.contains(c.name),
+        isBucket = bucketColumnNames.contains(c.name))
+    }
+    makeDataset(columns)
+  }
+
+  /**
+   * :: Experimental ::
+   * Creates an external table from the given path and returns the corresponding DataFrame.
+   * It will use the default data source configured by spark.sql.sources.default.
+   *
+   * @group ddl_ops
+   * @since 2.0.0
+   */
+  @Experimental
+  override def createExternalTable(tableName: String, path: String): DataFrame = {
+    val dataSourceName = sparkSession.sessionState.conf.defaultDataSourceName
+    createExternalTable(tableName, path, dataSourceName)
+  }
+
+  /**
+   * :: Experimental ::
+   * Creates an external table from the given path based on a data source
+   * and returns the corresponding DataFrame.
+   *
+   * @group ddl_ops
+   * @since 2.0.0
+   */
+  @Experimental
+  override def createExternalTable(tableName: String, path: String, source: String): DataFrame = {
+    createExternalTable(tableName, source, Map("path" -> path))
+  }
+
+  /**
+   * :: Experimental ::
+   * Creates an external table from the given path based on a data source and a set of options.
+   * Then, returns the corresponding DataFrame.
+   *
+   * @group ddl_ops
+   * @since 2.0.0
+   */
+  @Experimental
+  override def createExternalTable(
+      tableName: String,
+      source: String,
+      options: java.util.Map[String, String]): DataFrame = {
+    createExternalTable(tableName, source, options.asScala.toMap)
+  }
+
+  /**
+   * :: Experimental ::
+   * (Scala-specific)
+   * Creates an external table from the given path based on a data source and a set of options.
+   * Then, returns the corresponding DataFrame.
+   *
+   * @group ddl_ops
+   * @since 2.0.0
+   */
+  @Experimental
+  override def createExternalTable(
+      tableName: String,
+      source: String,
+      options: Map[String, String]): DataFrame = {
+    val tableIdent = sparkSession.sessionState.sqlParser.parseTableIdentifier(tableName)
+    val cmd =
+      CreateTableUsing(
+        tableIdent,
+        userSpecifiedSchema = None,
+        source,
+        temporary = false,
+        options,
+        allowExisting = false,
+        managedIfNoPath = false)
+    sparkSession.executePlan(cmd).toRdd
+    sparkSession.table(tableIdent)
+  }
+
+  /**
+   * :: Experimental ::
+   * Create an external table from the given path based on a data source, a schema and
+   * a set of options. Then, returns the corresponding DataFrame.
+   *
+   * @group ddl_ops
+   * @since 2.0.0
+   */
+  @Experimental
+  override def createExternalTable(
+      tableName: String,
+      source: String,
+      schema: StructType,
+      options: java.util.Map[String, String]): DataFrame = {
+    createExternalTable(tableName, source, schema, options.asScala.toMap)
+  }
+
+  /**
+   * :: Experimental ::
+   * (Scala-specific)
+   * Create an external table from the given path based on a data source, a schema and
+   * a set of options. Then, returns the corresponding DataFrame.
+   *
+   * @group ddl_ops
+   * @since 2.0.0
+   */
+  @Experimental
+  override def createExternalTable(
+      tableName: String,
+      source: String,
+      schema: StructType,
+      options: Map[String, String]): DataFrame = {
+    val tableIdent = sparkSession.sessionState.sqlParser.parseTableIdentifier(tableName)
+    val cmd =
+      CreateTableUsing(
+        tableIdent,
+        userSpecifiedSchema = Some(schema),
+        source,
+        temporary = false,
+        options,
+        allowExisting = false,
+        managedIfNoPath = false)
+    sparkSession.executePlan(cmd).toRdd
+    sparkSession.table(tableIdent)
+  }
+
+  /**
+   * Drops the temporary table with the given table name in the catalog.
+   * If the table has been cached/persisted before, it's also unpersisted.
+   *
+   * @param tableName the name of the table to be unregistered.
+   * @group ddl_ops
+   * @since 2.0.0
+   */
+  override def dropTempTable(tableName: String): Unit = {
+    sparkSession.cacheManager.tryUncacheQuery(sparkSession.table(tableName))
+    sessionCatalog.dropTable(TableIdentifier(tableName), ignoreIfNotExists = true)
+  }
+
+  /**
+   * Returns true if the table is currently cached in-memory.
+   *
+   * @group cachemgmt
+   * @since 2.0.0
+   */
+  override def isCached(tableName: String): Boolean = {
+    sparkSession.cacheManager.lookupCachedData(sparkSession.table(tableName)).nonEmpty
+  }
+
+  /**
+   * Caches the specified table in-memory.
+   *
+   * @group cachemgmt
+   * @since 2.0.0
+   */
+  override def cacheTable(tableName: String): Unit = {
+    sparkSession.cacheManager.cacheQuery(sparkSession.table(tableName), Some(tableName))
+  }
+
+  /**
+   * Removes the specified table from the in-memory cache.
+   *
+   * @group cachemgmt
+   * @since 2.0.0
+   */
+  override def uncacheTable(tableName: String): Unit = {
+    sparkSession.cacheManager.uncacheQuery(sparkSession.table(tableName))
+  }
+
+  /**
+   * Removes all cached tables from the in-memory cache.
+   *
+   * @group cachemgmt
+   * @since 2.0.0
+   */
+  override def clearCache(): Unit = {
+    sparkSession.cacheManager.clearCache()
+  }
+
+  /**
+   * Returns true if the [[Dataset]] is currently cached in-memory.
+   *
+   * @group cachemgmt
+   * @since 2.0.0
+   */
+  protected[sql] def isCached(qName: Dataset[_]): Boolean = {
+    sparkSession.cacheManager.lookupCachedData(qName).nonEmpty
+  }
+
+}
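
For context, a rough sketch of how the user-facing Catalog exposed by this commit might be exercised from a SparkSession. Method names and signatures follow CatalogImpl above and the CatalogSuite below; the `demoCatalog` helper, the table names, and the path are illustrative placeholders, not part of the commit.

    import org.apache.spark.sql.SparkSession

    // Sketch only: `spark` is assumed to be an existing SparkSession supplied by the caller.
    def demoCatalog(spark: SparkSession): Unit = {
      // Current database and database listing (returned as a Dataset[Database]).
      val current = spark.catalog.currentDatabase
      val dbNames = spark.catalog.listDatabases().collect().map(_.name)

      // Tables, functions, and columns come back as Datasets of the lightweight
      // classes in org.apache.spark.sql.catalog (Table, Function, Column).
      val tableNames = spark.catalog.listTables().collect().map(_.name)
      val funcNames  = spark.catalog.listFunctions().collect().map(_.name)
      val columns    = spark.catalog.listColumns("some_table").collect()  // "some_table" is a placeholder

      // Register an external table backed by a data source path, then manage its cache.
      spark.catalog.createExternalTable("ext_table", "/tmp/some/path", "parquet")
      spark.catalog.cacheTable("ext_table")
      assert(spark.catalog.isCached("ext_table"))
      spark.catalog.uncacheTable("ext_table")
    }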

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
index e601ff1..58330c4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
@@ -69,7 +69,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
   private def createTable(catalog: SessionCatalog, name: TableIdentifier): Unit = {
     catalog.createTable(CatalogTable(
       identifier = name,
-      tableType = CatalogTableType.EXTERNAL_TABLE,
+      tableType = CatalogTableType.EXTERNAL,
       storage = CatalogStorageFormat(None, None, None, None, Map()),
       schema = Seq()), ignoreIfExists = false)
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala
new file mode 100644
index 0000000..986d8f5
--- /dev/null
+++ b/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.internal
+
+import org.scalatest.BeforeAndAfterEach
+
+import org.apache.spark.SparkFunSuite
+import org.apache.spark.sql.{AnalysisException, SparkSession}
+import org.apache.spark.sql.catalog.{Column, Database, Function, Table}
+import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
+import org.apache.spark.sql.catalyst.catalog._
+import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionInfo}
+import org.apache.spark.sql.catalyst.plans.logical.Range
+import org.apache.spark.sql.test.SharedSQLContext
+
+
+/**
+ * Tests for the user-facing [[org.apache.spark.sql.catalog.Catalog]].
+ */
+class CatalogSuite
+  extends SparkFunSuite
+  with BeforeAndAfterEach
+  with SharedSQLContext {
+
+  private def sparkSession: SparkSession = sqlContext.sparkSession
+  private def sessionCatalog: SessionCatalog = sparkSession.sessionState.catalog
+
+  private val utils = new CatalogTestUtils {
+    override val tableInputFormat: String = "com.fruit.eyephone.CameraInputFormat"
+    override val tableOutputFormat: String = "com.fruit.eyephone.CameraOutputFormat"
+    override def newEmptyCatalog(): ExternalCatalog = sparkSession.sharedState.externalCatalog
+  }
+
+  private def createDatabase(name: String): Unit = {
+    sessionCatalog.createDatabase(utils.newDb(name), ignoreIfExists = false)
+  }
+
+  private def dropDatabase(name: String): Unit = {
+    sessionCatalog.dropDatabase(name, ignoreIfNotExists = false, cascade = true)
+  }
+
+  private def createTable(name: String, db: Option[String] = None): Unit = {
+    sessionCatalog.createTable(utils.newTable(name, db), ignoreIfExists = false)
+  }
+
+  private def createTempTable(name: String): Unit = {
+    sessionCatalog.createTempTable(name, Range(1, 2, 3, 4, Seq()), overrideIfExists = true)
+  }
+
+  private def dropTable(name: String, db: Option[String] = None): Unit = {
+    sessionCatalog.dropTable(TableIdentifier(name, db), ignoreIfNotExists = false)
+  }
+
+  private def createFunction(name: String, db: Option[String] = None): Unit = {
+    sessionCatalog.createFunction(utils.newFunc(name, db), ignoreIfExists = false)
+  }
+
+  private def createTempFunction(name: String): Unit = {
+    val info = new ExpressionInfo("className", name)
+    val tempFunc = (e: Seq[Expression]) => e.head
+    sessionCatalog.createTempFunction(name, info, tempFunc, ignoreIfExists = false)
+  }
+
+  private def dropFunction(name: String, db: Option[String] = None): Unit = {
+    sessionCatalog.dropFunction(FunctionIdentifier(name, db), ignoreIfNotExists = false)
+  }
+
+  private def dropTempFunction(name: String): Unit = {
+    sessionCatalog.dropTempFunction(name, ignoreIfNotExists = false)
+  }
+
+  private def testListColumns(tableName: String, dbName: Option[String]): Unit = {
+    val tableMetadata = sessionCatalog.getTableMetadata(TableIdentifier(tableName, dbName))
+    val columns = dbName
+      .map { db => sparkSession.catalog.listColumns(db, tableName) }
+      .getOrElse { sparkSession.catalog.listColumns(tableName) }
+    assume(tableMetadata.schema.nonEmpty, "bad test")
+    assume(tableMetadata.partitionColumnNames.nonEmpty, "bad test")
+    assume(tableMetadata.bucketColumnNames.nonEmpty, "bad test")
+    assert(columns.collect().map(_.name).toSet == tableMetadata.schema.map(_.name).toSet)
+    columns.collect().foreach { col =>
+      assert(col.isPartition == tableMetadata.partitionColumnNames.contains(col.name))
+      assert(col.isBucket == tableMetadata.bucketColumnNames.contains(col.name))
+    }
+  }
+
+  override def afterEach(): Unit = {
+    try {
+      sessionCatalog.reset()
+    } finally {
+      super.afterEach()
+    }
+  }
+
+  test("current database") {
+    assert(sparkSession.catalog.currentDatabase == "default")
+    assert(sessionCatalog.getCurrentDatabase == "default")
+    createDatabase("my_db")
+    sparkSession.catalog.setCurrentDatabase("my_db")
+    assert(sparkSession.catalog.currentDatabase == "my_db")
+    assert(sessionCatalog.getCurrentDatabase == "my_db")
+    val e = intercept[AnalysisException] {
+      sparkSession.catalog.setCurrentDatabase("unknown_db")
+    }
+    assert(e.getMessage.contains("unknown_db"))
+  }
+
+  test("list databases") {
+    assert(sparkSession.catalog.listDatabases().collect().map(_.name).toSet == Set("default"))
+    createDatabase("my_db1")
+    createDatabase("my_db2")
+    assert(sparkSession.catalog.listDatabases().collect().map(_.name).toSet ==
+      Set("default", "my_db1", "my_db2"))
+    dropDatabase("my_db1")
+    assert(sparkSession.catalog.listDatabases().collect().map(_.name).toSet ==
+      Set("default", "my_db2"))
+  }
+
+  test("list tables") {
+    assert(sparkSession.catalog.listTables().collect().isEmpty)
+    createTable("my_table1")
+    createTable("my_table2")
+    createTempTable("my_temp_table")
+    assert(sparkSession.catalog.listTables().collect().map(_.name).toSet ==
+      Set("my_table1", "my_table2", "my_temp_table"))
+    dropTable("my_table1")
+    assert(sparkSession.catalog.listTables().collect().map(_.name).toSet ==
+      Set("my_table2", "my_temp_table"))
+    dropTable("my_temp_table")
+    assert(sparkSession.catalog.listTables().collect().map(_.name).toSet == Set("my_table2"))
+  }
+
+  test("list tables with database") {
+    assert(sparkSession.catalog.listTables("default").collect().isEmpty)
+    createDatabase("my_db1")
+    createDatabase("my_db2")
+    createTable("my_table1", Some("my_db1"))
+    createTable("my_table2", Some("my_db2"))
+    createTempTable("my_temp_table")
+    assert(sparkSession.catalog.listTables("default").collect().map(_.name).toSet ==
+      Set("my_temp_table"))
+    assert(sparkSession.catalog.listTables("my_db1").collect().map(_.name).toSet ==
+      Set("my_table1", "my_temp_table"))
+    assert(sparkSession.catalog.listTables("my_db2").collect().map(_.name).toSet ==
+      Set("my_table2", "my_temp_table"))
+    dropTable("my_table1", Some("my_db1"))
+    assert(sparkSession.catalog.listTables("my_db1").collect().map(_.name).toSet ==
+      Set("my_temp_table"))
+    assert(sparkSession.catalog.listTables("my_db2").collect().map(_.name).toSet ==
+      Set("my_table2", "my_temp_table"))
+    dropTable("my_temp_table")
+    assert(sparkSession.catalog.listTables("default").collect().map(_.name).isEmpty)
+    assert(sparkSession.catalog.listTables("my_db1").collect().map(_.name).isEmpty)
+    assert(sparkSession.catalog.listTables("my_db2").collect().map(_.name).toSet ==
+      Set("my_table2"))
+    val e = intercept[AnalysisException] {
+      sparkSession.catalog.listTables("unknown_db")
+    }
+    assert(e.getMessage.contains("unknown_db"))
+  }
+
+  test("list functions") {
+    assert(Set("+", "current_database", "window").subsetOf(
+      sparkSession.catalog.listFunctions().collect().map(_.name).toSet))
+    createFunction("my_func1")
+    createFunction("my_func2")
+    createTempFunction("my_temp_func")
+    val funcNames1 = sparkSession.catalog.listFunctions().collect().map(_.name).toSet
+    assert(funcNames1.contains("my_func1"))
+    assert(funcNames1.contains("my_func2"))
+    assert(funcNames1.contains("my_temp_func"))
+    dropFunction("my_func1")
+    dropTempFunction("my_temp_func")
+    val funcNames2 = sparkSession.catalog.listFunctions().collect().map(_.name).toSet
+    assert(!funcNames2.contains("my_func1"))
+    assert(funcNames2.contains("my_func2"))
+    assert(!funcNames2.contains("my_temp_func"))
+  }
+
+  test("list functions with database") {
+    assert(Set("+", "current_database", "window").subsetOf(
+      sparkSession.catalog.listFunctions("default").collect().map(_.name).toSet))
+    createDatabase("my_db1")
+    createDatabase("my_db2")
+    createFunction("my_func1", Some("my_db1"))
+    createFunction("my_func2", Some("my_db2"))
+    createTempFunction("my_temp_func")
+    val funcNames1 = sparkSession.catalog.listFunctions("my_db1").collect().map(_.name).toSet
+    val funcNames2 = sparkSession.catalog.listFunctions("my_db2").collect().map(_.name).toSet
+    assert(funcNames1.contains("my_func1"))
+    assert(!funcNames1.contains("my_func2"))
+    assert(funcNames1.contains("my_temp_func"))
+    assert(!funcNames2.contains("my_func1"))
+    assert(funcNames2.contains("my_func2"))
+    assert(funcNames2.contains("my_temp_func"))
+    dropFunction("my_func1", Some("my_db1"))
+    dropTempFunction("my_temp_func")
+    val funcNames1b = sparkSession.catalog.listFunctions("my_db1").collect().map(_.name).toSet
+    val funcNames2b = sparkSession.catalog.listFunctions("my_db2").collect().map(_.name).toSet
+    assert(!funcNames1b.contains("my_func1"))
+    assert(!funcNames1b.contains("my_temp_func"))
+    assert(funcNames2b.contains("my_func2"))
+    assert(!funcNames2b.contains("my_temp_func"))
+    val e = intercept[AnalysisException] {
+      sparkSession.catalog.listFunctions("unknown_db")
+    }
+    assert(e.getMessage.contains("unknown_db"))
+  }
+
+  test("list columns") {
+    createTable("tab1")
+    testListColumns("tab1", dbName = None)
+  }
+
+  test("list columns in database") {
+    createDatabase("db1")
+    createTable("tab1", Some("db1"))
+    testListColumns("tab1", dbName = Some("db1"))
+  }
+
+  test("Database.toString") {
+    assert(new Database("cool_db", "cool_desc", "cool_path").toString ==
+      "Database[name='cool_db', description='cool_desc', path='cool_path']")
+    assert(new Database("cool_db", null, "cool_path").toString ==
+      "Database[name='cool_db', path='cool_path']")
+  }
+
+  test("Table.toString") {
+    assert(new Table("volley", "databasa", "one", "world", isTemporary = true).toString ==
+      "Table[name='volley', database='databasa', description='one', " +
+        "tableType='world', isTemporary='true']")
+    assert(new Table("volley", null, null, "world", isTemporary = true).toString ==
+      "Table[name='volley', tableType='world', isTemporary='true']")
+  }
+
+  test("Function.toString") {
+    assert(new Function("nama", "commenta", "classNameAh", isTemporary = true).toString ==
+      "Function[name='nama', description='commenta', className='classNameAh', isTemporary='true']")
+    assert(new Function("nama", null, "classNameAh", isTemporary = false).toString ==
+      "Function[name='nama', className='classNameAh', isTemporary='false']")
+  }
+
+  test("Column.toString") {
+    assert(new Column("namama", "descaca", "datatapa",
+      nullable = true, isPartition = false, isBucket = true).toString ==
+        "Column[name='namama', description='descaca', dataType='datatapa', " +
+          "nullable='true', isPartition='false', isBucket='true']")
+    assert(new Column("namama", null, "datatapa",
+      nullable = false, isPartition = true, isBucket = true).toString ==
+      "Column[name='namama', dataType='datatapa', " +
+        "nullable='false', isPartition='true', isBucket='true']")
+  }
+
+  // TODO: add tests for the rest of them
+
+}

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index 01b7cfb..c4db4f3 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -173,7 +173,7 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log
       // Then, if alias is specified, wrap the table with a Subquery using the alias.
       // Otherwise, wrap the table with a Subquery using the table name.
       alias.map(a => SubqueryAlias(a, qualifiedTable)).getOrElse(qualifiedTable)
-    } else if (table.tableType == CatalogTableType.VIRTUAL_VIEW) {
+    } else if (table.tableType == CatalogTableType.VIEW) {
       val viewText = table.viewText.getOrElse(sys.error("Invalid view without text."))
       alias match {
         // because hive use things like `_c0` to build the expanded text

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala
index 367fcf1..5b580d0 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala
@@ -77,10 +77,10 @@ private[hive] case class MetastoreRelation(
     catalogTable.properties.foreach { case (k, v) => tableParameters.put(k, v) }
 
     tTable.setTableType(catalogTable.tableType match {
-      case CatalogTableType.EXTERNAL_TABLE => HiveTableType.EXTERNAL_TABLE.toString
-      case CatalogTableType.MANAGED_TABLE => HiveTableType.MANAGED_TABLE.toString
-      case CatalogTableType.INDEX_TABLE => HiveTableType.INDEX_TABLE.toString
-      case CatalogTableType.VIRTUAL_VIEW => HiveTableType.VIRTUAL_VIEW.toString
+      case CatalogTableType.EXTERNAL => HiveTableType.EXTERNAL_TABLE.toString
+      case CatalogTableType.MANAGED => HiveTableType.MANAGED_TABLE.toString
+      case CatalogTableType.INDEX => HiveTableType.INDEX_TABLE.toString
+      case CatalogTableType.VIEW => HiveTableType.VIRTUAL_VIEW.toString
     })
 
     val sd = new org.apache.hadoop.hive.metastore.api.StorageDescriptor()

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
index 6a7345f..d651791 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
@@ -317,10 +317,10 @@ private[hive] class HiveClientImpl(
       CatalogTable(
         identifier = TableIdentifier(h.getTableName, Option(h.getDbName)),
         tableType = h.getTableType match {
-          case HiveTableType.EXTERNAL_TABLE => CatalogTableType.EXTERNAL_TABLE
-          case HiveTableType.MANAGED_TABLE => CatalogTableType.MANAGED_TABLE
-          case HiveTableType.INDEX_TABLE => CatalogTableType.INDEX_TABLE
-          case HiveTableType.VIRTUAL_VIEW => CatalogTableType.VIRTUAL_VIEW
+          case HiveTableType.EXTERNAL_TABLE => CatalogTableType.EXTERNAL
+          case HiveTableType.MANAGED_TABLE => CatalogTableType.MANAGED
+          case HiveTableType.INDEX_TABLE => CatalogTableType.INDEX
+          case HiveTableType.VIRTUAL_VIEW => CatalogTableType.VIEW
         },
         schema = schema,
         partitionColumnNames = partCols.map(_.name),
@@ -696,13 +696,13 @@ private[hive] class HiveClientImpl(
     // Otherwise, Hive metastore will change the table to a MANAGED_TABLE.
     // (metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java#L1095-L1105)
     hiveTable.setTableType(table.tableType match {
-      case CatalogTableType.EXTERNAL_TABLE =>
+      case CatalogTableType.EXTERNAL =>
         hiveTable.setProperty("EXTERNAL", "TRUE")
         HiveTableType.EXTERNAL_TABLE
-      case CatalogTableType.MANAGED_TABLE =>
+      case CatalogTableType.MANAGED =>
         HiveTableType.MANAGED_TABLE
-      case CatalogTableType.INDEX_TABLE => HiveTableType.INDEX_TABLE
-      case CatalogTableType.VIRTUAL_VIEW => HiveTableType.VIRTUAL_VIEW
+      case CatalogTableType.INDEX => HiveTableType.INDEX_TABLE
+      case CatalogTableType.VIEW => HiveTableType.VIRTUAL_VIEW
     })
     // Note: In Hive the schema and partition columns must be disjoint sets
     val (partCols, schema) = table.schema.map(toHiveColumn).partition { c =>

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
index 97bd47a..4ca5619 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
@@ -166,7 +166,7 @@ class CachedTableSuite extends QueryTest with TestHiveSingleton {
     tempPath.delete()
     table("src").write.mode(SaveMode.Overwrite).parquet(tempPath.toString)
     sql("DROP TABLE IF EXISTS refreshTable")
-    createExternalTable("refreshTable", tempPath.toString, "parquet")
+    sparkSession.catalog.createExternalTable("refreshTable", tempPath.toString, "parquet")
     checkAnswer(
       table("refreshTable"),
       table("src").collect())
@@ -190,7 +190,7 @@ class CachedTableSuite extends QueryTest with TestHiveSingleton {
 
     // Drop the table and create it again.
     sql("DROP TABLE refreshTable")
-    createExternalTable("refreshTable", tempPath.toString, "parquet")
+    sparkSession.catalog.createExternalTable("refreshTable", tempPath.toString, "parquet")
     // It is not cached.
     assert(!isCached("refreshTable"), "refreshTable should not be cached.")
     // Refresh the table. REFRESH TABLE command should not make a uncached

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDDLCommandSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDDLCommandSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDDLCommandSuite.scala
index cff1127..ec581b6 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDDLCommandSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDDLCommandSuite.scala
@@ -70,7 +70,7 @@ class HiveDDLCommandSuite extends PlanTest {
     assert(exists)
     assert(desc.identifier.database == Some("mydb"))
     assert(desc.identifier.table == "page_view")
-    assert(desc.tableType == CatalogTableType.EXTERNAL_TABLE)
+    assert(desc.tableType == CatalogTableType.EXTERNAL)
     assert(desc.storage.locationUri == Some("/user/external/page_view"))
     assert(desc.schema ==
       CatalogColumn("viewtime", "int") ::
@@ -120,7 +120,7 @@ class HiveDDLCommandSuite extends PlanTest {
     assert(exists)
     assert(desc.identifier.database == Some("mydb"))
     assert(desc.identifier.table == "page_view")
-    assert(desc.tableType == CatalogTableType.EXTERNAL_TABLE)
+    assert(desc.tableType == CatalogTableType.EXTERNAL)
     assert(desc.storage.locationUri == Some("/user/external/page_view"))
     assert(desc.schema ==
       CatalogColumn("viewtime", "int") ::
@@ -151,7 +151,7 @@ class HiveDDLCommandSuite extends PlanTest {
     assert(exists == false)
     assert(desc.identifier.database == None)
     assert(desc.identifier.table == "page_view")
-    assert(desc.tableType == CatalogTableType.MANAGED_TABLE)
+    assert(desc.tableType == CatalogTableType.MANAGED)
     assert(desc.storage.locationUri == None)
     assert(desc.schema == Seq.empty[CatalogColumn])
     assert(desc.viewText == None) // TODO will be SQLText
@@ -187,7 +187,7 @@ class HiveDDLCommandSuite extends PlanTest {
     assert(exists == false)
     assert(desc.identifier.database == None)
     assert(desc.identifier.table == "ctas2")
-    assert(desc.tableType == CatalogTableType.MANAGED_TABLE)
+    assert(desc.tableType == CatalogTableType.MANAGED)
     assert(desc.storage.locationUri == None)
     assert(desc.schema == Seq.empty[CatalogColumn])
     assert(desc.viewText == None) // TODO will be SQLText
@@ -318,7 +318,7 @@ class HiveDDLCommandSuite extends PlanTest {
     assert(!allowExisting)
     assert(desc.identifier.database.isEmpty)
     assert(desc.identifier.table == "my_table")
-    assert(desc.tableType == CatalogTableType.MANAGED_TABLE)
+    assert(desc.tableType == CatalogTableType.MANAGED)
     assert(desc.schema == Seq(CatalogColumn("id", "int"), CatalogColumn("name", "string")))
     assert(desc.partitionColumnNames.isEmpty)
     assert(desc.sortColumnNames.isEmpty)
@@ -353,7 +353,7 @@ class HiveDDLCommandSuite extends PlanTest {
   test("create table - external") {
     val query = "CREATE EXTERNAL TABLE tab1 (id int, name string)"
     val (desc, _) = extractTableDesc(query)
-    assert(desc.tableType == CatalogTableType.EXTERNAL_TABLE)
+    assert(desc.tableType == CatalogTableType.EXTERNAL)
   }
 
   test("create table - if not exists") {
@@ -480,7 +480,7 @@ class HiveDDLCommandSuite extends PlanTest {
     assert(allowExisting)
     assert(desc.identifier.database == Some("dbx"))
     assert(desc.identifier.table == "my_table")
-    assert(desc.tableType == CatalogTableType.EXTERNAL_TABLE)
+    assert(desc.tableType == CatalogTableType.EXTERNAL)
     assert(desc.schema == Seq(
       CatalogColumn("id", "int"),
       CatalogColumn("name", "string"),
@@ -506,7 +506,7 @@ class HiveDDLCommandSuite extends PlanTest {
     assert(!exists)
     assert(desc.identifier.database.isEmpty)
     assert(desc.identifier.table == "view1")
-    assert(desc.tableType == CatalogTableType.VIRTUAL_VIEW)
+    assert(desc.tableType == CatalogTableType.VIEW)
     assert(desc.storage.locationUri.isEmpty)
     assert(desc.schema == Seq.empty[CatalogColumn])
     assert(desc.viewText == Option("SELECT * FROM tab1"))
@@ -530,7 +530,7 @@ class HiveDDLCommandSuite extends PlanTest {
     val (desc, exists) = extractTableDesc(v1)
     assert(desc.identifier.database.isEmpty)
     assert(desc.identifier.table == "view1")
-    assert(desc.tableType == CatalogTableType.VIRTUAL_VIEW)
+    assert(desc.tableType == CatalogTableType.VIEW)
     assert(desc.storage.locationUri.isEmpty)
     assert(desc.schema ==
       CatalogColumn("col1", null, nullable = true, None) ::

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala
index cb60a2c..bf9935a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala
@@ -26,7 +26,7 @@ import org.apache.spark.sql.hive.client.HiveClient
 /**
  * Test suite for the [[HiveExternalCatalog]].
  */
-class HiveExternalCatalogSuite extends CatalogTestCases {
+class HiveExternalCatalogSuite extends ExternalCatalogSuite {
 
   private val client: HiveClient = {
     // We create a metastore at a temp location to avoid any potential

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
index d1a1490..0d6a2e7 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
@@ -90,7 +90,7 @@ class DataSourceWithHiveMetastoreCatalogSuite
         assert(hiveTable.storage.serde === Some(serde))
 
         assert(hiveTable.partitionColumnNames.isEmpty)
-        assert(hiveTable.tableType === CatalogTableType.MANAGED_TABLE)
+        assert(hiveTable.tableType === CatalogTableType.MANAGED)
 
         val columns = hiveTable.schema
         assert(columns.map(_.name) === Seq("d1", "d2"))
@@ -121,7 +121,7 @@ class DataSourceWithHiveMetastoreCatalogSuite
           assert(hiveTable.storage.outputFormat === Some(outputFormat))
           assert(hiveTable.storage.serde === Some(serde))
 
-          assert(hiveTable.tableType === CatalogTableType.EXTERNAL_TABLE)
+          assert(hiveTable.tableType === CatalogTableType.EXTERNAL)
           assert(hiveTable.storage.locationUri ===
             Some(path.toURI.toString.stripSuffix(File.separator)))
 
@@ -153,7 +153,7 @@ class DataSourceWithHiveMetastoreCatalogSuite
           assert(hiveTable.storage.serde === Some(serde))
 
           assert(hiveTable.partitionColumnNames.isEmpty)
-          assert(hiveTable.tableType === CatalogTableType.EXTERNAL_TABLE)
+          assert(hiveTable.tableType === CatalogTableType.EXTERNAL)
 
           val columns = hiveTable.schema
           assert(columns.map(_.name) === Seq("d1", "d2"))

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index 7cd01c9..31ba735 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -502,13 +502,13 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
         }
 
         withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "json") {
-          createExternalTable("createdJsonTable", tempPath.toString)
+          sparkSession.catalog.createExternalTable("createdJsonTable", tempPath.toString)
           assert(table("createdJsonTable").schema === df.schema)
           checkAnswer(sql("SELECT * FROM createdJsonTable"), df)
 
           assert(
             intercept[AnalysisException] {
-              createExternalTable("createdJsonTable", jsonFilePath.toString)
+              sparkSession.catalog.createExternalTable("createdJsonTable", jsonFilePath.toString)
             }.getMessage.contains("Table createdJsonTable already exists."),
             "We should complain that createdJsonTable already exists")
         }
@@ -520,7 +520,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
         // Try to specify the schema.
         withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "not a source name") {
           val schema = StructType(StructField("b", StringType, true) :: Nil)
-          createExternalTable(
+          sparkSession.catalog.createExternalTable(
             "createdJsonTable",
             "org.apache.spark.sql.json",
             schema,
@@ -539,7 +539,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
   test("path required error") {
     assert(
       intercept[AnalysisException] {
-        createExternalTable(
+        sparkSession.catalog.createExternalTable(
           "createdJsonTable",
           "org.apache.spark.sql.json",
           Map.empty[String, String])
@@ -725,7 +725,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
       val schema = StructType(StructField("int", IntegerType, true) :: Nil)
       val hiveTable = CatalogTable(
         identifier = TableIdentifier(tableName, Some("default")),
-        tableType = CatalogTableType.MANAGED_TABLE,
+        tableType = CatalogTableType.MANAGED,
         schema = Seq.empty,
         storage = CatalogStorageFormat(
           locationUri = None,

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala
index 916a470..9341b38 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala
@@ -149,7 +149,7 @@ class VersionsSuite extends SparkFunSuite with Logging {
       val table =
         CatalogTable(
           identifier = TableIdentifier("src", Some("default")),
-          tableType = CatalogTableType.MANAGED_TABLE,
+          tableType = CatalogTableType.MANAGED,
           schema = Seq(CatalogColumn("key", "int")),
           storage = CatalogStorageFormat(
             locationUri = None,

http://git-wip-us.apache.org/repos/asf/spark/blob/d8a83a56/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
index fd19fcb..e23272d 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
@@ -73,7 +73,7 @@ class HiveDDLSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
           hiveContext.sessionState.catalog
             .getTableMetadata(TableIdentifier(tabName, Some("default")))
         // It is a managed table, although it uses external in SQL
-        assert(hiveTable.tableType == CatalogTableType.MANAGED_TABLE)
+        assert(hiveTable.tableType == CatalogTableType.MANAGED)
 
         assert(tmpDir.listFiles.nonEmpty)
         sql(s"DROP TABLE $tabName")
@@ -102,7 +102,7 @@ class HiveDDLSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
           hiveContext.sessionState.catalog
             .getTableMetadata(TableIdentifier(tabName, Some("default")))
         // This data source table is external table
-        assert(hiveTable.tableType == CatalogTableType.EXTERNAL_TABLE)
+        assert(hiveTable.tableType == CatalogTableType.EXTERNAL)
 
         assert(tmpDir.listFiles.nonEmpty)
         sql(s"DROP TABLE $tabName")
@@ -166,7 +166,7 @@ class HiveDDLSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
         }
 
         val hiveTable = catalog.getTableMetadata(TableIdentifier(externalTab, Some("default")))
-        assert(hiveTable.tableType == CatalogTableType.EXTERNAL_TABLE)
+        assert(hiveTable.tableType == CatalogTableType.EXTERNAL)
         // After data insertion, all the directory are not empty
         assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
 



