atlas-commits mailing list archives

From jma...@apache.org
Subject [21/51] [abbrv] incubator-atlas git commit: Refactor packages and scripts to Atlas (cherry picked from commit 414beba)
Date Sun, 14 Jun 2015 17:45:01 GMT
http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/30711973/tools/src/test/scala/org/apache/atlas/tools/hive/HiveMockMetadataService.scala
----------------------------------------------------------------------
diff --git a/tools/src/test/scala/org/apache/atlas/tools/hive/HiveMockMetadataService.scala b/tools/src/test/scala/org/apache/atlas/tools/hive/HiveMockMetadataService.scala
new file mode 100755
index 0000000..6c90082
--- /dev/null
+++ b/tools/src/test/scala/org/apache/atlas/tools/hive/HiveMockMetadataService.scala
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.atlas.tools.hive
+
+object HiveMockMetadataService {
+
+    def getTable(dbName: String, table: String): Table = {
+        return Table(dbName, table,
+            StorageDescriptor(
+                List[FieldSchema](
+                    FieldSchema("d_date_sk", "int", null),
+                    FieldSchema("d_date_id", "string", null),
+                    FieldSchema("d_date", "string", null),
+                    FieldSchema("d_month_seq", "int", null),
+                    FieldSchema("d_week_seq", "int", null),
+                    FieldSchema("d_quarter_seq", "int", null),
+                    FieldSchema("d_year", "int", null),
+                    FieldSchema("d_dow", "int", null),
+                    FieldSchema("d_moy", "int", null),
+                    FieldSchema("d_dom", "int", null),
+                    FieldSchema("d_qoy", "int", null),
+                    FieldSchema("d_fy_year", "int", null),
+                    FieldSchema("d_fy_quarter_seq", "int", null),
+                    FieldSchema("d_fy_week_seq", "int", null),
+                    FieldSchema("d_day_name", "string", null),
+                    FieldSchema("d_quarter_name", "string", null),
+                    FieldSchema("d_holiday", "string", null),
+                    FieldSchema("d_weekend", "string", null),
+                    FieldSchema("d_following_holiday", "string", null),
+                    FieldSchema("d_first_dom", "int", null),
+                    FieldSchema("d_last_dom", "int", null),
+                    FieldSchema("d_same_day_ly", "int", null),
+                    FieldSchema("d_same_day_lq", "int", null),
+                    FieldSchema("d_current_day", "string", null),
+                    FieldSchema("d_current_week", "string", null),
+                    FieldSchema("d_current_month", "string", null),
+                    FieldSchema("d_current_quarter", "string", null),
+                    FieldSchema("d_current_year", "string", null)
+                ),
+                "file:/tmp/warehouse/tpcds.db/date_dim",
+                "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
+                "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat",
+                false,
+                0, List[String](), List[String](),
+                Map[String, String](),
+                false
+            ),
+            Map[String, String](),
+            "Table")
+    }
+
+    case class FieldSchema(name: String, typeName: String, comment: String)
+
+    case class SerDe(name: String, serializationLib: String, parameters: Map[String, String])
+
+    case class StorageDescriptor(fields: List[FieldSchema],
+                                 location: String, inputFormat: String,
+                                 outputFormat: String, compressed: Boolean,
+                                 numBuckets: Int, bucketColumns: List[String],
+                                 sortColumns: List[String],
+                                 parameters: Map[String, String],
+                                 storedAsSubDirs: Boolean
+                                    )
+
+    case class Table(dbName: String, tableName: String, storageDesc: StorageDescriptor,
+                     parameters: Map[String, String],
+                     tableType: String)
+}

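For readers skimming the archive: a minimal usage sketch, not part of the commit, showing how the mock above might be exercised. It relies only on the object and case classes in this diff; the HiveMockExample name and the particular assertions are illustrative.

    import org.apache.atlas.tools.hive.HiveMockMetadataService

    object HiveMockExample {
        def main(args: Array[String]): Unit = {
            // getTable echoes the db/table names into the result; everything
            // else is the fixed TPC-DS date_dim descriptor defined above.
            val table = HiveMockMetadataService.getTable("tpcds", "date_dim")
            assert(table.dbName == "tpcds")
            assert(table.storageDesc.fields.size == 28) // the 28 FieldSchema entries above
            assert(table.storageDesc.inputFormat.endsWith("OrcInputFormat"))
            println(s"${table.dbName}.${table.tableName}: ${table.storageDesc.fields.size} columns")
        }
    }
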
http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/30711973/tools/src/test/scala/org/apache/atlas/tools/thrift/ThriftLexerTest.scala
----------------------------------------------------------------------
diff --git a/tools/src/test/scala/org/apache/atlas/tools/thrift/ThriftLexerTest.scala b/tools/src/test/scala/org/apache/atlas/tools/thrift/ThriftLexerTest.scala
new file mode 100755
index 0000000..000b883
--- /dev/null
+++ b/tools/src/test/scala/org/apache/atlas/tools/thrift/ThriftLexerTest.scala
@@ -0,0 +1,525 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.atlas.tools.thrift
+
+import org.junit.{Assert, Test}
+
+import scala.util.parsing.input.CharArrayReader
+
+class ThriftLexerTest {
+
+    @Test def testSimple {
+        val p = new ThriftParser
+        val r = scan(p, """efg abc""")
+        Assert.assertTrue(r.successful)
+
+    }
+
+    @Test def testStruct {
+        val p = new ThriftParser
+        val r = scan(p, """struct PartitionSpecWithSharedSD {
+           1: list<PartitionWithoutSD> partitions,
+           2: StorageDescriptor sd,
+         }""")
+        Assert.assertTrue(r.successful)
+
+    }
+
+    @Test def testTableStruct {
+        val p = new ThriftParser
+        val r = scan(p, """// table information
+         struct Table {
+           1: string tableName,                // name of the table
+           2: string dbName,                   // database name ('default')
+           3: string owner,                    // owner of this table
+           4: i32    createTime,               // creation time of the table
+           5: i32    lastAccessTime,           // last access time (usually this will be filled from HDFS and shouldn't be relied on)
+           6: i32    retention,                // retention time
+           7: StorageDescriptor sd,            // storage descriptor of the table
+           8: list<FieldSchema> partitionKeys, // partition keys of the table. only primitive types are supported
+           9: map<string, string> parameters,   // to store comments or any other user level parameters
+           10: string viewOriginalText,         // original view text, null for non-view
+           11: string viewExpandedText,         // expanded view text, null for non-view
+           12: string tableType,                 // table type enum, e.g. EXTERNAL_TABLE
+           13: optional PrincipalPrivilegeSet privileges,
+           14: optional bool temporary=false
+         }""")
+        Assert.assertTrue(r.successful)
+
+    }
+
+    @Test def testIncorrectStruct {
+        val p = new ThriftParser
+        val r = scan(p, """// table information
+         struct Table {
+                          | 1: string tableName,                // name of the table
+                          | 2: string dbName
+          }""")
+        Assert.assertFalse(r.successful)
+
+    }
+
+    @Test def testNegativeInt {
+        val p = new ThriftParser
+        val r = scan(p, """-1""")
+        Assert.assertTrue(r.successful)
+
+    }
+
+    @Test def testComment {
+        val p = new ThriftParser
+        val r = scan(p, """/**
+                      * Licensed to the Apache Software Foundation (ASF) under one
+                      * or more contributor license agreements.  See the NOTICE file
+                      * distributed with this work for additional information
+                      * regarding copyright ownership.  The ASF licenses this file
+                      * to you under the Apache License, Version 2.0 (the
+                      * "License"); you may not use this file except in compliance
+                      * with the License.  You may obtain a copy of the License at
+                      *
+                      *     http://www.apache.org/licenses/LICENSE-2.0
+                      *
+                      * Unless required by applicable law or agreed to in writing, software
+                      * distributed under the License is distributed on an "AS IS" BASIS,
+                      * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+                      * See the License for the specific language governing permissions and
+                      * limitations under the License.
+                      */""")
+    }
+
+    def scan(p: ThriftParser, str: String): p.lexical.ParseResult[_] = {
+        val l = p.lexical
+        var s: l.Input = new CharArrayReader(str.toCharArray)
+        var r = (l.whitespace.? ~ l.token)(s)
+        s = r.next
+
+        while (r.successful && !s.atEnd) {
+            s = r.next
+            if (!s.atEnd) {
+                r = (l.whitespace.? ~ l.token)(s)
+            }
+        }
+        r.asInstanceOf[p.lexical.ParseResult[_]]
+    }
+
+    @Test def testService {
+        val p = new ThriftParser
+        val r = scan(p, """/**
+             * This interface is live.
+             */
+             service ThriftHiveMetastore extends fb303.FacebookService
+             {
+               string getMetaConf(1:string key) throws(1:MetaException o1)
+               void setMetaConf(1:string key, 2:string value) throws(1:MetaException o1)
+
+               void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+               Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+               void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+               list<string> get_databases(1:string pattern) throws(1:MetaException o1)
+               list<string> get_all_databases() throws(1:MetaException o1)
+               void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+               // returns the type with given name (make separate calls for the dependent types if needed)
+               Type get_type(1:string name)  throws(1:MetaException o1, 2:NoSuchObjectException o2)
+               bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+               bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+               map<string, Type> get_type_all(1:string name)
+                                             throws(1:MetaException o2)
+
+               // Gets a list of FieldSchemas describing the columns of a particular table
+               list<FieldSchema> get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3),
+
+               // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table
+               list<FieldSchema> get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
+
+               // create a Hive table. Following fields must be set
+               // tableName
+               // database        (only 'default' for now until Hive QL supports databases)
+               // owner           (not needed, but good to have for tracking purposes)
+               // sd.cols         (list of field schemas)
+               // sd.inputFormat  (SequenceFileInputFormat (binary like falcon tables or u_full) or TextInputFormat)
+               // sd.outputFormat (SequenceFileOutputFormat (binary) or TextOutputFormat)
+               // sd.serdeInfo.serializationLib (SerDe class name, e.g. org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe)
+               // * See notes on DDL_TIME
+               void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4)
+               void create_table_with_environment_context(1:Table tbl,
+                   2:EnvironmentContext environment_context)
+                   throws (1:AlreadyExistsException o1,
+                           2:InvalidObjectException o2, 3:MetaException o3,
+                           4:NoSuchObjectException o4)
+               // drops the table and all the partitions associated with it if the table has partitions
+               // delete data (including partitions) if deleteData is set to true
+               void drop_table(1:string dbname, 2:string name, 3:bool deleteData)
+                                    throws(1:NoSuchObjectException o1, 2:MetaException o3)
+               void drop_table_with_environment_context(1:string dbname, 2:string name, 3:bool deleteData,
+                   4:EnvironmentContext environment_context)
+                                    throws(1:NoSuchObjectException o1, 2:MetaException o3)
+               list<string> get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1)
+               list<string> get_all_tables(1: string db_name) throws (1: MetaException o1)
+
+               Table get_table(1:string dbname, 2:string tbl_name)
+                                    throws (1:MetaException o1, 2:NoSuchObjectException o2)
+               list<Table> get_table_objects_by_name(1:string dbname, 2:list<string> tbl_names)
+             				   throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
+
+               // Get a list of table names that match a filter.
+               // The filter operators are LIKE, <, <=, >, >=, =, <>
+               //
+               // In the filter statement, values interpreted as strings must be enclosed in quotes,
+               // while values interpreted as integers should not be.  Strings and integers are the only
+               // supported value types.
+               //
+               // The currently supported key names in the filter are:
+               // Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
+               //   and supports all filter operators
+               // Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
+               //   and supports all filter operators except LIKE
+               // Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
+               //   and only supports the filter operators = and <>.
+               //   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
+               //   For example, to filter on parameter keys called "retention", the key name in the filter
+               //   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
+               //   Also, = and <> only work for keys that exist
+               //   in the tables. E.g., if you are looking for tables where key1 <> value, it will only
+               //   look at tables that have a value for the parameter key1.
+               // Some example filter statements include:
+               // filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
+               //   Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
+               // filter = Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
+               //   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\""
+               // @param dbName
+               //          The name of the database from which you will retrieve the table names
+               // @param filterType
+               //          The type of filter
+               // @param filter
+               //          The filter string
+               // @param max_tables
+               //          The maximum number of tables returned
+               // @return  A list of table names that match the desired filter
+               list<string> get_table_names_by_filter(1:string dbname, 2:string filter, 3:i16 max_tables=-1)
+                                    throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
+
+               // alter table applies to only future partitions not for existing partitions
+               // * See notes on DDL_TIME
+               void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl)
+                                    throws (1:InvalidOperationException o1, 2:MetaException o2)
+               void alter_table_with_environment_context(1:string dbname, 2:string tbl_name,
+                   3:Table new_tbl, 4:EnvironmentContext environment_context)
+                   throws (1:InvalidOperationException o1, 2:MetaException o2)
+               // the following applies to only tables that have partitions
+               // * See notes on DDL_TIME
+               Partition add_partition(1:Partition new_part)
+                                    throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+               Partition add_partition_with_environment_context(1:Partition new_part,
+                   2:EnvironmentContext environment_context)
+                   throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2,
+                   3:MetaException o3)
+               i32 add_partitions(1:list<Partition> new_parts)
+                                    throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+               i32 add_partitions_pspec(1:list<PartitionSpec> new_parts)
+                                    throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+               Partition append_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
+                                    throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+               AddPartitionsResult add_partitions_req(1:AddPartitionsRequest request)
+                                    throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+               Partition append_partition_with_environment_context(1:string db_name, 2:string tbl_name,
+                   3:list<string> part_vals, 4:EnvironmentContext environment_context)
+                                    throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+               Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name)
+                                    throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+               Partition append_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name,
+                   3:string part_name, 4:EnvironmentContext environment_context)
+                                    throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+               bool drop_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:bool deleteData)
+                                    throws(1:NoSuchObjectException o1, 2:MetaException o2)
+               bool drop_partition_with_environment_context(1:string db_name, 2:string tbl_name,
+                   3:list<string> part_vals, 4:bool deleteData, 5:EnvironmentContext environment_context)
+                                    throws(1:NoSuchObjectException o1, 2:MetaException o2)
+               bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData)
+                                    throws(1:NoSuchObjectException o1, 2:MetaException o2)
+               bool drop_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name,
+                   3:string part_name, 4:bool deleteData, 5:EnvironmentContext environment_context)
+                                    throws(1:NoSuchObjectException o1, 2:MetaException o2)
+               DropPartitionsResult drop_partitions_req(1: DropPartitionsRequest req)
+                                    throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
+               Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
+                                    throws(1:MetaException o1, 2:NoSuchObjectException o2)
+               Partition exchange_partition(1:map<string, string> partitionSpecs, 2:string source_db,
+                   3:string source_table_name, 4:string dest_db, 5:string dest_table_name)
+                   throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3,
+                   4:InvalidInputException o4)
+
+               Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals,
+                   4: string user_name, 5: list<string> group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+               Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name)
+                                    throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+               // returns all the partitions for this table in reverse chronological order.
+               // If max parts is given then it will return only that many.
+               list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
+                                    throws(1:NoSuchObjectException o1, 2:MetaException o2)
+               list<Partition> get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1,
+                  4: string user_name, 5: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
+               list<PartitionSpec> get_partitions_pspec(1:string db_name, 2:string tbl_name, 3:i32 max_parts=-1)
+                                    throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
+               list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
+                                    throws(1:MetaException o2)
+
+               // get_partition*_ps methods allow filtering by a partial partition specification,
+               // as needed for dynamic partitions. The values that are not restricted should
+               // be empty strings. Nulls were considered (instead of "") but caused errors in
+               // generated Python code. The size of part_vals may be smaller than the
+               // number of partition columns - the unspecified values are considered the same
+               // as "".
+               list<Partition> get_partitions_ps(1:string db_name 2:string tbl_name
+               	3:list<string> part_vals, 4:i16 max_parts=-1)
+                                    throws(1:MetaException o1, 2:NoSuchObjectException o2)
+               list<Partition> get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1,
+                  5: string user_name, 6: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
+               list<string> get_partition_names_ps(1:string db_name,
+               	2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1)
+               	                   throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+               // get the partitions matching the given partition filter
+               list<Partition> get_partitions_by_filter(1:string db_name 2:string tbl_name
+                 3:string filter, 4:i16 max_parts=-1)
+                                    throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+               // List partitions as PartitionSpec instances.
+               list<PartitionSpec> get_part_specs_by_filter(1:string db_name 2:string tbl_name
+                 3:string filter, 4:i32 max_parts=-1)
+                                    throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+               // get the partitions matching the given partition filter
+               // unlike get_partitions_by_filter, takes a serialized hive expression and with that can work
+               // with any filter (get_partitions_by_filter only works if the filter can be pushed down to JDOQL).
+               PartitionsByExprResult get_partitions_by_expr(1:PartitionsByExprRequest req)
+                                    throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+               // get partitions given a list of partition names
+               list<Partition> get_partitions_by_names(1:string db_name 2:string tbl_name 3:list<string> names)
+                                    throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+               // changes the partition to the new partition object. partition is identified from the part values
+               // in the new_part
+               // * See notes on DDL_TIME
+               void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part)
+                                    throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+               // change a list of partitions. All partitions are altered atomically and all
+               // prehooks are fired together followed by all post hooks
+               void alter_partitions(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts)
+                                    throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+               void alter_partition_with_environment_context(1:string db_name,
+                   2:string tbl_name, 3:Partition new_part,
+                   4:EnvironmentContext environment_context)
+                   throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+               // rename the old partition to the new partition object by changing old part values to the part values
+               // in the new_part. old partition is identified from part_vals.
+               // partition keys in new_part should be the same as those in old partition.
+               void rename_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:Partition new_part)
+                                    throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+               // returns whether or not the partition name is valid based on the value of the config
+               // hive.metastore.partition.name.whitelist.pattern
+               bool partition_name_has_valid_characters(1:list<string> part_vals, 2:bool throw_exception)
+              	throws(1: MetaException o1)
+
+               // gets the value of the configuration key in the metastore server. returns
+               // defaultValue if the key does not exist. if the configuration key does not
+               // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is
+               // thrown.
+               string get_config_value(1:string name, 2:string defaultValue)
+                                       throws(1:ConfigValSecurityException o1)
+
+               // converts a partition name into a partition values array
+               list<string> partition_name_to_vals(1: string part_name)
+                                       throws(1: MetaException o1)
+               // converts a partition name into a partition specification (a mapping from
+               // the partition cols to the values)
+               map<string, string> partition_name_to_spec(1: string part_name)
+                                       throws(1: MetaException o1)
+
+               void markPartitionForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
+                               4:PartitionEventType eventType) throws (1: MetaException o1, 2: NoSuchObjectException o2,
+                               3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
+                               6: InvalidPartitionException o6)
+               bool isPartitionMarkedForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
+                               4: PartitionEventType eventType) throws (1: MetaException o1, 2:NoSuchObjectException o2,
+                               3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
+                               6: InvalidPartitionException o6)
+
+               //index
+               Index add_index(1:Index new_index, 2: Table index_table)
+                                    throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+               void alter_index(1:string dbname, 2:string base_tbl_name, 3:string idx_name, 4:Index new_idx)
+                                    throws (1:InvalidOperationException o1, 2:MetaException o2)
+               bool drop_index_by_name(1:string db_name, 2:string tbl_name, 3:string index_name, 4:bool deleteData)
+                                    throws(1:NoSuchObjectException o1, 2:MetaException o2)
+               Index get_index_by_name(1:string db_name 2:string tbl_name, 3:string index_name)
+                                    throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+               list<Index> get_indexes(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
+                                    throws(1:NoSuchObjectException o1, 2:MetaException o2)
+               list<string> get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
+                                    throws(1:MetaException o2)
+
+               // column statistics interfaces
+
+               // update APIs persist the column statistics object(s) that are passed in. If statistics already
+               // exists for one or more columns, the existing statistics will be overwritten. The update APIs
+               // validate that the dbName, tableName, partName, colName[] passed in as part of the ColumnStatistics
+               // struct are valid, throws InvalidInputException/NoSuchObjectException if found to be invalid
+               bool update_table_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1,
+                           2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+               bool update_partition_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1,
+                           2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+
+               // get APIs return the column statistics corresponding to db_name, tbl_name, [part_name], col_name if
+               // such statistics exists. If the required statistics doesn't exist, get APIs throw NoSuchObjectException
+               // For instance, if get_table_column_statistics is called on a partitioned table for which only
+               // partition level column stats exist, get_table_column_statistics will throw NoSuchObjectException
+               ColumnStatistics get_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
+                           (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidInputException o3, 4:InvalidObjectException o4)
+               ColumnStatistics get_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name,
+                            4:string col_name) throws (1:NoSuchObjectException o1, 2:MetaException o2,
+                            3:InvalidInputException o3, 4:InvalidObjectException o4)
+               TableStatsResult get_table_statistics_req(1:TableStatsRequest request) throws
+                           (1:NoSuchObjectException o1, 2:MetaException o2)
+               PartitionsStatsResult get_partitions_statistics_req(1:PartitionsStatsRequest request) throws
+                           (1:NoSuchObjectException o1, 2:MetaException o2)
+               AggrStats get_aggr_stats_for(1:PartitionsStatsRequest request) throws
+                           (1:NoSuchObjectException o1, 2:MetaException o2)
+               bool set_aggr_stats_for(1:SetPartitionsStatsRequest request) throws
+                           (1:NoSuchObjectException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+
+
+               // delete APIs attempt to delete column statistics, if found, associated with a given db_name, tbl_name, [part_name]
+               // and col_name. If the delete API doesn't find the statistics record in the metastore, throws NoSuchObjectException
+               // Delete API validates the input and if the input is invalid throws InvalidInputException/InvalidObjectException.
+               bool delete_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string col_name) throws
+                           (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
+                            4:InvalidInputException o4)
+               bool delete_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
+                           (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
+                            4:InvalidInputException o4)
+
+               //
+               // user-defined functions
+               //
+
+               void create_function(1:Function func)
+                   throws (1:AlreadyExistsException o1,
+                           2:InvalidObjectException o2,
+                           3:MetaException o3,
+                           4:NoSuchObjectException o4)
+
+               void drop_function(1:string dbName, 2:string funcName)
+                   throws (1:NoSuchObjectException o1, 2:MetaException o3)
+
+               void alter_function(1:string dbName, 2:string funcName, 3:Function newFunc)
+                   throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+               list<string> get_functions(1:string dbName, 2:string pattern)
+                   throws (1:MetaException o1)
+               Function get_function(1:string dbName, 2:string funcName)
+                   throws (1:MetaException o1, 2:NoSuchObjectException o2)
+
+               //authorization privileges
+
+               bool create_role(1:Role role) throws(1:MetaException o1)
+               bool drop_role(1:string role_name) throws(1:MetaException o1)
+               list<string> get_role_names() throws(1:MetaException o1)
+               // Deprecated, use grant_revoke_role()
+               bool grant_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type,
+                 4:string grantor, 5:PrincipalType grantorType, 6:bool grant_option) throws(1:MetaException o1)
+               // Deprecated, use grant_revoke_role()
+               bool revoke_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type)
+                                     throws(1:MetaException o1)
+               list<Role> list_roles(1:string principal_name, 2:PrincipalType principal_type) throws(1:MetaException o1)
+               GrantRevokeRoleResponse grant_revoke_role(1:GrantRevokeRoleRequest request) throws(1:MetaException o1)
+
+               // get all role-grants for users/roles that have been granted the given role
+               // Note that in the returned list of RolePrincipalGrants, the roleName is
+               // redundant as it would match the role_name argument of this function
+               GetPrincipalsInRoleResponse get_principals_in_role(1: GetPrincipalsInRoleRequest request) throws(1:MetaException o1)
+
+               // get grant information of all roles granted to the given principal
+               // Note that in the returned list of RolePrincipalGrants, the principal name,type is
+               // redundant as it would match the principal name,type arguments of this function
+               GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(1: GetRoleGrantsForPrincipalRequest request) throws(1:MetaException o1)
+
+               PrincipalPrivilegeSet get_privilege_set(1:HiveObjectRef hiveObject, 2:string user_name,
+                 3: list<string> group_names) throws(1:MetaException o1)
+               list<HiveObjectPrivilege> list_privileges(1:string principal_name, 2:PrincipalType principal_type,
+                 3: HiveObjectRef hiveObject) throws(1:MetaException o1)
+
+               // Deprecated, use grant_revoke_privileges()
+               bool grant_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
+               // Deprecated, use grant_revoke_privileges()
+               bool revoke_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
+               GrantRevokePrivilegeResponse grant_revoke_privileges(1:GrantRevokePrivilegeRequest request) throws(1:MetaException o1);
+
+               // this is used by metastore client to send UGI information to metastore server immediately
+               // after setting up a connection.
+               list<string> set_ugi(1:string user_name, 2:list<string> group_names) throws (1:MetaException o1)
+
+               //Authentication (delegation token) interfaces
+
+               // get metastore server delegation token for use from the map/reduce tasks to authenticate
+               // to metastore server
+               string get_delegation_token(1:string token_owner, 2:string renewer_kerberos_principal_name)
+                 throws (1:MetaException o1)
+
+               // method to renew delegation token obtained from metastore server
+               i64 renew_delegation_token(1:string token_str_form) throws (1:MetaException o1)
+
+               // method to cancel delegation token obtained from metastore server
+               void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1)
+
+               // Transaction and lock management calls
+               // Get just list of open transactions
+               GetOpenTxnsResponse get_open_txns()
+               // Get list of open transactions with state (open, aborted)
+               GetOpenTxnsInfoResponse get_open_txns_info()
+               OpenTxnsResponse open_txns(1:OpenTxnRequest rqst)
+               void abort_txn(1:AbortTxnRequest rqst) throws (1:NoSuchTxnException o1)
+               void commit_txn(1:CommitTxnRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
+               LockResponse lock(1:LockRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
+               LockResponse check_lock(1:CheckLockRequest rqst)
+                 throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:NoSuchLockException o3)
+               void unlock(1:UnlockRequest rqst) throws (1:NoSuchLockException o1, 2:TxnOpenException o2)
+               ShowLocksResponse show_locks(1:ShowLocksRequest rqst)
+               void heartbeat(1:HeartbeatRequest ids) throws (1:NoSuchLockException o1, 2:NoSuchTxnException o2, 3:TxnAbortedException o3)
+               HeartbeatTxnRangeResponse heartbeat_txn_range(1:HeartbeatTxnRangeRequest txns)
+               void compact(1:CompactionRequest rqst)
+               ShowCompactResponse show_compact(1:ShowCompactRequest rqst)
+             }""")
+        Assert.assertTrue(r.successful)
+
+    }
+
+}

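A note on the scan helper in the test above: it bypasses the grammar entirely and drives the lexer one token at a time by repeatedly applying whitespace.? ~ token. A rough self-contained sketch of the same loop, assuming scala-parser-combinators' StdLexical stands in for ThriftParser's lexical member (whose keyword/delimiter setup is not shown in this diff):

    import scala.util.parsing.combinator.lexical.StdLexical
    import scala.util.parsing.input.CharArrayReader

    object ScanLoopSketch {
        val lexical = new StdLexical
        // The real ThriftParser presumably registers its own keywords and
        // delimiters; these few are just enough for the demo strings below.
        lexical.reserved ++= List("struct")
        lexical.delimiters ++= List("{", "}", ":", ",")

        // Same shape as ThriftLexerTest.scan: consume optional whitespace plus
        // one token until the input is exhausted or a token fails to lex.
        def scan(str: String): Boolean = {
            var r = (lexical.whitespace.? ~ lexical.token)(new CharArrayReader(str.toCharArray))
            while (r.successful && !r.next.atEnd) {
                r = (lexical.whitespace.? ~ lexical.token)(r.next)
            }
            r.successful
        }

        def main(args: Array[String]): Unit = {
            println(scan("struct Foo { 1: i32 bar }")) // true: every token lexes
            println(scan("struct Foo { 1: i32 bar"))   // also true: lexing checks tokens, not grammar
        }
    }
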
http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/30711973/tools/src/test/scala/org/apache/atlas/tools/thrift/ThriftParserTest.scala
----------------------------------------------------------------------
diff --git a/tools/src/test/scala/org/apache/atlas/tools/thrift/ThriftParserTest.scala b/tools/src/test/scala/org/apache/atlas/tools/thrift/ThriftParserTest.scala
new file mode 100755
index 0000000..82e5383
--- /dev/null
+++ b/tools/src/test/scala/org/apache/atlas/tools/thrift/ThriftParserTest.scala
@@ -0,0 +1,772 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.atlas.tools.thrift
+
+import com.google.gson.JsonParser
+import org.json4s.native.JsonMethods._
+import org.json4s.native.Serialization.{write => swrite}
+import org.json4s.{NoTypeHints, _}
+import org.junit.{Assert, Test}
+
+import scala.io.Source
+import scala.reflect.ClassTag
+
+/**
+ * Copied from
+ * [[https://github.com/json4s/json4s/blob/master/ext/src/main/scala/org/json4s/ext/EnumSerializer.scala json4s github]]
+ * to avoid dependency on json4s-ext.
+ */
+class EnumNameSerializer[E <: Enumeration: ClassTag](enum: E) extends Serializer[E#Value] {
+    import JsonDSL._
+
+    val EnumerationClass = classOf[E#Value]
+
+    def deserialize(implicit format: Formats): PartialFunction[(TypeInfo, JValue), E#Value] = {
+        case (t@TypeInfo(EnumerationClass, _), json) if (isValid(json)) => {
+            json match {
+                case JString(value) => enum.withName(value)
+                case value => throw new MappingException("Can't convert " +
+                    value + " to " + EnumerationClass)
+            }
+        }
+    }
+
+    private[this] def isValid(json: JValue) = json match {
+        case JString(value) if (enum.values.exists(_.toString == value)) => true
+        case _ => false
+    }
+
+    def serialize(implicit format: Formats): PartialFunction[Any, JValue] = {
+        case i: E#Value => i.toString
+    }
+}
+
+class ThriftParserTest {
+
+    @Test def testSimple {
+        var p = new ThriftParser
+        val parser = new JsonParser
+
+        var td: Option[ThriftDef] = p( """include "share/fb303/if/fb303.thrift"
+
+                             namespace java org.apache.hadoop.hive.metastore.api
+                             namespace php metastore
+                             namespace cpp Apache.Hadoop.Hive
+                                       """)
+
+        val parsed = parser.parse(toJson(td.get))
+        val sample = parser.parse( """{
+  "includes":[{
+    "value":"share/fb303/if/fb303.thrift"
+  }],
+  "cppIncludes":[],
+  "namespaces":[{
+    "lang":"",
+    "name":"Apache.Hadoop.Hive",
+    "otherLang":"cpp"
+  },{
+    "lang":"",
+    "name":"metastore",
+    "otherLang":"php"
+  },{
+    "lang":"",
+    "name":"org.apache.hadoop.hive.metastore.api",
+    "otherLang":"java"
+  }],
+  "constants":[],
+  "typedefs":[],
+  "enums":[],
+  "senums":[],
+  "structs":[],
+  "unions":[],
+  "xceptions":[],
+  "services":[]
+}""")
+
+        Assert.assertEquals(parsed.toString, sample.toString)
+    }
+
+    @Test def testStruct {
+        val p = new ThriftParser
+        val parser = new JsonParser
+
+        var td: Option[ThriftDef] = p( """struct PartitionSpecWithSharedSD {
+           1: list<PartitionWithoutSD> partitions,
+           2: StorageDescriptor sd
+         }""")
+
+        val parsed = parser.parse(toJson(td.get))
+
+        val sample = parser.parse( """{
+  "includes":[],
+  "cppIncludes":[],
+  "namespaces":[],
+  "constants":[],
+  "typedefs":[],
+  "enums":[],
+  "senums":[],
+  "structs":[{
+    "name":"PartitionSpecWithSharedSD",
+    "xsdAll":false,
+    "fields":[{
+      "id":{
+        "value":1
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "elemType":{
+          "name":"PartitionWithoutSD"
+        }
+      },
+      "name":"partitions",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":2
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "name":"StorageDescriptor"
+      },
+      "name":"sd",
+      "xsdOptional":false,
+      "xsdNillable":false
+    }]
+  }],
+  "unions":[],
+  "xceptions":[],
+  "services":[]
+}""")
+
+        Assert.assertEquals(parsed.toString, sample.toString)
+    }
+
+    def toJson(td: ThriftDef) = {
+        implicit val formats = org.json4s.native.Serialization.formats(NoTypeHints) + new EnumNameSerializer(BASE_TYPES) +
+            new EnumNameSerializer(THRIFT_LANG)
+        val ser = swrite(td)
+        pretty(render(parse(ser)))
+    }
+
+    @Test def testTableStruct {
+        val p = new ThriftParser
+        val parser = new JsonParser
+
+        var td: Option[ThriftDef] = p( """// table information
+         struct Table {
+           1: string tableName,                // name of the table
+           2: string dbName,                   // database name ('default')
+           3: string owner,                    // owner of this table
+           4: i32    createTime,               // creation time of the table
+           5: i32    lastAccessTime,           // last access time (usually this will be filled from HDFS and shouldn't be relied on)
+           6: i32    retention,                // retention time
+           7: StorageDescriptor sd,            // storage descriptor of the table
+           8: list<FieldSchema> partitionKeys, // partition keys of the table. only primitive types are supported
+           9: map<string, string> parameters,   // to store comments or any other user level parameters
+           10: string viewOriginalText,         // original view text, null for non-view
+           11: string viewExpandedText,         // expanded view text, null for non-view
+           12: string tableType,                 // table type enum, e.g. EXTERNAL_TABLE
+           13: optional PrincipalPrivilegeSet privileges,
+           14: optional bool temporary=false
+         }""")
+
+        val parsed = parser.parse(toJson(td.get))
+        val sample = parser.parse( """{
+  "includes":[],
+  "cppIncludes":[],
+  "namespaces":[],
+  "constants":[],
+  "typedefs":[],
+  "enums":[],
+  "senums":[],
+  "structs":[{
+    "name":"Table",
+    "xsdAll":false,
+    "fields":[{
+      "id":{
+        "value":1
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "typ":"string"
+      },
+      "name":"tableName",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":2
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "typ":"string"
+      },
+      "name":"dbName",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":3
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "typ":"string"
+      },
+      "name":"owner",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":4
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "typ":"i32"
+      },
+      "name":"createTime",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":5
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "typ":"i32"
+      },
+      "name":"lastAccessTime",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":6
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "typ":"i32"
+      },
+      "name":"retention",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":7
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "name":"StorageDescriptor"
+      },
+      "name":"sd",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":8
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "elemType":{
+          "name":"FieldSchema"
+        }
+      },
+      "name":"partitionKeys",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":9
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "keyType":{
+          "typ":"string"
+        },
+        "valueType":{
+          "typ":"string"
+        }
+      },
+      "name":"parameters",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":10
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "typ":"string"
+      },
+      "name":"viewOriginalText",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":11
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "typ":"string"
+      },
+      "name":"viewExpandedText",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":12
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "typ":"string"
+      },
+      "name":"tableType",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":13
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "name":"PrincipalPrivilegeSet"
+      },
+      "name":"privileges",
+      "xsdOptional":false,
+      "xsdNillable":false
+    },{
+      "id":{
+        "value":14
+      },
+      "requiredNess":false,
+      "fieldType":{
+        "typ":"bool"
+      },
+      "name":"temporary",
+      "fieldValue":{
+        "value":"false"
+      },
+      "xsdOptional":false,
+      "xsdNillable":false
+    }]
+  }],
+  "unions":[],
+  "xceptions":[],
+  "services":[]
+}""")
+
+        Assert.assertEquals(parsed.toString, sample.toString)
+    }
+
+    @Test def testHiveThrift {
+        val p = new ThriftParser
+        val is = getClass().getResourceAsStream("/test.thrift")
+        val src: Source = Source.fromInputStream(is)
+        val t: String = src.getLines().mkString("\n")
+        var td: Option[ThriftDef] = p(t)
+        Assert.assertTrue(td.isDefined)
+        //println(toJson(td.get))
+    }
+
+    @Test def testService {
+        val p = new ThriftParser
+        val parser = new JsonParser
+
+        var td: Option[ThriftDef] = p( """/**
+             * This interface is live.
+             */
+             service ThriftHiveMetastore extends fb303.FacebookService
+             {
+               string getMetaConf(1:string key) throws(1:MetaException o1)
+               void setMetaConf(1:string key, 2:string value) throws(1:MetaException o1)
+
+               void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+               Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+               void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+               list<string> get_databases(1:string pattern) throws(1:MetaException o1)
+               list<string> get_all_databases() throws(1:MetaException o1)
+               void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+             }""")
+
+        val parsed = parser.parse(toJson(td.get))
+        val sample = parser.parse( """{
+  "includes":[],
+  "cppIncludes":[],
+  "namespaces":[],
+  "constants":[],
+  "typedefs":[],
+  "enums":[],
+  "senums":[],
+  "structs":[],
+  "unions":[],
+  "xceptions":[],
+  "services":[{
+    "name":"ThriftHiveMetastore",
+    "superName":"fb303.FacebookService",
+    "functions":[{
+      "oneway":false,
+      "returnType":{
+        "typ":"string"
+      },
+      "name":"getMetaConf",
+      "parameters":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "typ":"string"
+        },
+        "name":"key",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }],
+      "throwFields":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"MetaException"
+        },
+        "name":"o1",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }]
+    },{
+      "oneway":false,
+      "returnType":{
+
+      },
+      "name":"setMetaConf",
+      "parameters":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "typ":"string"
+        },
+        "name":"key",
+        "xsdOptional":false,
+        "xsdNillable":false
+      },{
+        "id":{
+          "value":2
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "typ":"string"
+        },
+        "name":"value",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }],
+      "throwFields":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"MetaException"
+        },
+        "name":"o1",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }]
+    },{
+      "oneway":false,
+      "returnType":{
+
+      },
+      "name":"create_database",
+      "parameters":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"Database"
+        },
+        "name":"database",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }],
+      "throwFields":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"AlreadyExistsException"
+        },
+        "name":"o1",
+        "xsdOptional":false,
+        "xsdNillable":false
+      },{
+        "id":{
+          "value":2
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"InvalidObjectException"
+        },
+        "name":"o2",
+        "xsdOptional":false,
+        "xsdNillable":false
+      },{
+        "id":{
+          "value":3
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"MetaException"
+        },
+        "name":"o3",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }]
+    },{
+      "oneway":false,
+      "returnType":{
+        "name":"Database"
+      },
+      "name":"get_database",
+      "parameters":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "typ":"string"
+        },
+        "name":"name",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }],
+      "throwFields":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"NoSuchObjectException"
+        },
+        "name":"o1",
+        "xsdOptional":false,
+        "xsdNillable":false
+      },{
+        "id":{
+          "value":2
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"MetaException"
+        },
+        "name":"o2",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }]
+    },{
+      "oneway":false,
+      "returnType":{
+
+      },
+      "name":"drop_database",
+      "parameters":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "typ":"string"
+        },
+        "name":"name",
+        "xsdOptional":false,
+        "xsdNillable":false
+      },{
+        "id":{
+          "value":2
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "typ":"bool"
+        },
+        "name":"deleteData",
+        "xsdOptional":false,
+        "xsdNillable":false
+      },{
+        "id":{
+          "value":3
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "typ":"bool"
+        },
+        "name":"cascade",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }],
+      "throwFields":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"NoSuchObjectException"
+        },
+        "name":"o1",
+        "xsdOptional":false,
+        "xsdNillable":false
+      },{
+        "id":{
+          "value":2
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"InvalidOperationException"
+        },
+        "name":"o2",
+        "xsdOptional":false,
+        "xsdNillable":false
+      },{
+        "id":{
+          "value":3
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"MetaException"
+        },
+        "name":"o3",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }]
+    },{
+      "oneway":false,
+      "returnType":{
+        "elemType":{
+          "typ":"string"
+        }
+      },
+      "name":"get_databases",
+      "parameters":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "typ":"string"
+        },
+        "name":"pattern",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }],
+      "throwFields":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"MetaException"
+        },
+        "name":"o1",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }]
+    },{
+      "oneway":false,
+      "returnType":{
+        "elemType":{
+          "typ":"string"
+        }
+      },
+      "name":"get_all_databases",
+      "parameters":[],
+      "throwFields":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"MetaException"
+        },
+        "name":"o1",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }]
+    },{
+      "oneway":false,
+      "returnType":{
+
+      },
+      "name":"alter_database",
+      "parameters":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "typ":"string"
+        },
+        "name":"dbname",
+        "xsdOptional":false,
+        "xsdNillable":false
+      },{
+        "id":{
+          "value":2
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"Database"
+        },
+        "name":"db",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }],
+      "throwFields":[{
+        "id":{
+          "value":1
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"MetaException"
+        },
+        "name":"o1",
+        "xsdOptional":false,
+        "xsdNillable":false
+      },{
+        "id":{
+          "value":2
+        },
+        "requiredNess":false,
+        "fieldType":{
+          "name":"NoSuchObjectException"
+        },
+        "name":"o2",
+        "xsdOptional":false,
+        "xsdNillable":false
+      }]
+    }]
+  }]
+}""")
+
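+        // sample is compared via its parsed form, so the formatting of the JSON literal above does not affect the assertion.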
+        Assert.assertEquals(parsed.toString, sample.toString)
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/30711973/tools/src/test/scala/org/apache/hadoop/metadata/tools/dsl/DSLTest.scala
----------------------------------------------------------------------
diff --git a/tools/src/test/scala/org/apache/hadoop/metadata/tools/dsl/DSLTest.scala b/tools/src/test/scala/org/apache/hadoop/metadata/tools/dsl/DSLTest.scala
deleted file mode 100755
index fdab63e..0000000
--- a/tools/src/test/scala/org/apache/hadoop/metadata/tools/dsl/DSLTest.scala
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metadata.tools.dsl
-
-import org.apache.hadoop.metadata.dsl._
-import org.apache.hadoop.metadata.tools.hive.HiveMockMetadataService
-import org.apache.hadoop.metadata.typesystem.types.utils.TypesUtil
-import org.apache.hadoop.metadata.typesystem.types.{DataTypes, StructType, TypeSystem}
-import org.json4s.native.JsonMethods._
-import org.junit.{Assert, Before, Test}
-
-/**
- * DSL Test.
- */
-class DSLTest {
-    val STRUCT_TYPE_1: String = "t1"
-    val STRUCT_TYPE_2: String = "t2"
-
-
-    @Before
-    def setup {
-        val ts: TypeSystem = TypeSystem.getInstance
-        ts.reset()
-
-        val structType: StructType = ts.defineStructType(
-            STRUCT_TYPE_1, true,
-            TypesUtil.createRequiredAttrDef("a", DataTypes.INT_TYPE),
-            TypesUtil.createOptionalAttrDef("b", DataTypes.BOOLEAN_TYPE),
-            TypesUtil.createOptionalAttrDef("c", DataTypes.BYTE_TYPE),
-            TypesUtil.createOptionalAttrDef("d", DataTypes.SHORT_TYPE),
-            TypesUtil.createOptionalAttrDef("e", DataTypes.INT_TYPE),
-            TypesUtil.createOptionalAttrDef("f", DataTypes.INT_TYPE),
-            TypesUtil.createOptionalAttrDef("g", DataTypes.LONG_TYPE),
-            TypesUtil.createOptionalAttrDef("h", DataTypes.FLOAT_TYPE),
-            TypesUtil.createOptionalAttrDef("i", DataTypes.DOUBLE_TYPE),
-            TypesUtil.createOptionalAttrDef("j", DataTypes.BIGINTEGER_TYPE),
-            TypesUtil.createOptionalAttrDef("k", DataTypes.BIGDECIMAL_TYPE),
-            TypesUtil.createOptionalAttrDef("l", DataTypes.DATE_TYPE),
-            TypesUtil.createOptionalAttrDef("m", ts.defineArrayType(DataTypes.INT_TYPE)),
-            TypesUtil.createOptionalAttrDef("n", ts.defineArrayType(DataTypes.BIGDECIMAL_TYPE)),
-            TypesUtil.createOptionalAttrDef("o",
-                ts.defineMapType(DataTypes.STRING_TYPE, DataTypes.DOUBLE_TYPE)))
-
-        val recursiveStructType: StructType = ts.defineStructType(
-            STRUCT_TYPE_2, true,
-            TypesUtil.createRequiredAttrDef("a", DataTypes.INT_TYPE),
-            TypesUtil.createOptionalAttrDef("s", STRUCT_TYPE_2))
-    }
-
-    @Test def test1 {
-
-        // 1. Existing Types in System
-        //Assert.assertEquals(s"${listTypes}", "[t2, t1, int, array<bigdecimal>, long, double, date, float, short, biginteger, byte, string, boolean, bigdecimal, map<string,double>, array<int>]")
-
-        defineStructType("mytype",
-            attrDef("a", INT_TYPE, ATTR_REQUIRED),
-            attrDef("b", BOOLEAN_TYPE),
-            attrDef("c", BYTE_TYPE),
-            attrDef("d", SHORT_TYPE),
-            attrDef("e", INT_TYPE),
-            attrDef("f", INT_TYPE),
-            attrDef("g", LONG_TYPE),
-            attrDef("h", FLOAT_TYPE),
-            attrDef("i", DOUBLE_TYPE),
-            attrDef("j", BIGINT_TYPE),
-            attrDef("k", BIGDECIMAL_TYPE),
-            attrDef("l", DATE_TYPE),
-            attrDef("m", arrayType(INT_TYPE)),
-            attrDef("n", arrayType(BIGDECIMAL_TYPE)),
-            attrDef("o", mapType(STRING_TYPE, DOUBLE_TYPE)))
-
-        // 2. 'mytype' is now available as a Type
-        Assert.assertEquals(s"${listTypes}", "[array<bigdecimal>,array<int>,map<string,double>,mytype,t1,t2]")
-
-        // 3. Create a 'mytype' instance from Json
-        val i = createInstance("mytype", """
-        {
-                               "$typeName$":"mytype",
-                               "e":1,
-                               "n":[1,1.1],
-                               "h":1.0,
-                               "b":true,
-                               "k":1,
-                               "j":1,
-                               "d":2,
-                               "m":[1,1],
-                               "g":1,
-                               "a":1,
-                               "i":1.0,
-                               "c":1,
-                               "l":"2014-12-03T08:00:00.000Z",
-                               "f":1,
-                               "o":{
-                                 "b":2.0,
-                                 "a":1.0
-                               }
-                             }
-                                         """)
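-        // Note: field order in the JSON is arbitrary; values bind to attributes by name.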
-
-        // 4. Navigate the mytype instance in code;
-        // examples of accessing its fields:
-        Assert.assertEquals(s"${i.a}", "1")
-        Assert.assertEquals(s"${i.o}", "{b=2.0, a=1.0}")
-        Assert.assertEquals(s"${i.o.asInstanceOf[java.util.Map[_, _]].keySet}", "[b, a]")
-
-        // 5. Serialize mytype instance to Json
-        Assert.assertEquals(s"${pretty(render(i))}", "{\n  \"$typeName$\":\"mytype\",\n  \"e\":1," + "\n  \"n\":[1,1.100000000000000088817841970012523233890533447265625],\n  \"h\":1.0,\n  \"b\":true,\n  \"k\":1,\n  \"j\":1,\n  \"d\":2,\n  \"m\":[1,1],\n  \"g\":1,\n  \"a\":1,\n  \"i\":1.0,\n  \"c\":1,\n  \"l\":\"2014-12-03T08:00:00.000Z\",\n  \"f\":1,\n  \"o\":{\n    \"b\":2.0,\n    \"a\":1.0\n  }\n}")
-    }
-
-    @Test def test2 {
-
-        // 1. Existing Types in System
-        Assert.assertEquals(s"${listTypes}", "[array<bigdecimal>,array<int>,map<string,double>,t1,t2]")
-
-        val addrType = defineStructType("addressType",
-            attrDef("houseNum", INT_TYPE, ATTR_REQUIRED),
-            attrDef("street", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("city", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("state", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("zip", INT_TYPE, ATTR_REQUIRED),
-            attrDef("country", STRING_TYPE, ATTR_REQUIRED)
-        )
-
-        val personType = defineStructType("personType",
-            attrDef("first_name", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("last_name", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("address", addrType)
-        )
-
-        // 2. Updated Types in System
-        Assert.assertEquals(s"${listTypes}", "[addressType,array<bigdecimal>,array<int>,map<string,double>,personType,t1,t2]")
-
-
-        // 3. Construct a Person in Code
-        val person = createInstance("personType")
-        val address = createInstance("addressType")
-
-        person.first_name = "Meta"
-        person.last_name = "Hadoop"
-
-        address.houseNum = 3460
-        address.street = "W Bayshore Rd"
-        address.city = "Palo Alto"
-        address.state = "CA"
-        address.zip = 94303
-        address.country = "USA"
-
-        person.address = address
-
-        // 4. Convert to Json
-        Assert.assertEquals(s"${pretty(render(person))}", "{\n  \"$typeName$\":\"personType\",\n  \"first_name\":\"Meta\",\n  \"address\":{\n    \"$typeName$\":\"addressType\",\n    \"houseNum\":3460,\n    \"city\":\"Palo Alto\",\n    \"country\":\"USA\",\n    \"state\":\"CA\",\n    \"zip\":94303,\n    \"street\":\"W Bayshore Rd\"\n  },\n  \"last_name\":\"Hadoop\"\n}");
-
-        val p2 = createInstance("personType", """{
-                                              "first_name":"Meta",
-                                              "address":{
-                                                "houseNum":3460,
-                                                "city":"Palo Alto",
-                                                "country":"USA",
-                                                "state":"CA",
-                                                "zip":94303,
-                                                "street":"W Bayshore Rd"
-                                              },
-                                              "last_name":"Hadoop"
-                                            }""")
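-        // p2 is not asserted against; successful construction from the JSON literal is the point here.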
-
-    }
-
-    @Test def testHive(): Unit = {
-        val hiveTable = HiveMockMetadataService.getTable("tpcds", "date_dim")
-        //println(hiveTable)
-
-        // FieldSchema(name : String, typeName : String, comment : String)
-        val fieldType = defineStructType("FieldSchema",
-            attrDef("name", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("typeName", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("comment", STRING_TYPE)
-        )
-        /*
-        SerDe(name : String, serializationLib : String, parameters : Map[String, String])
-         */
-        defineStructType("SerDe",
-            attrDef("name", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("serializationLib", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("parameters", mapType(STRING_TYPE, STRING_TYPE))
-        )
-
-        /*
-        StorageDescriptor(fields : List[FieldSchema],
-                                   location : String, inputFormat : String,
-                                    outputFormat : String, compressed : Boolean,
-                                    numBuckets : Int, bucketColumns : List[String],
-                                    sortColumns : List[String],
-                                    parameters : Map[String, String],
-                                    storedAsSubDirs : Boolean
-                                    )
-         */
-        val sdType = defineStructType("StorageDescriptor",
-            attrDef("location", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("inputFormat", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("outputFormat", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("compressed", BOOLEAN_TYPE),
-            attrDef("numBuckets", INT_TYPE),
-            attrDef("bucketColumns", arrayType(STRING_TYPE)),
-            attrDef("sortColumns", arrayType(STRING_TYPE)),
-            attrDef("parameters", mapType(STRING_TYPE, STRING_TYPE)),
-            attrDef("storedAsSubDirs", BOOLEAN_TYPE)
-        )
-
-        /*
-        case class Table(dbName : String, tableName : String, storageDesc : StorageDescriptor,
-                       parameters : Map[String, String],
-                        tableType : String)
-         */
-        defineStructType("Table",
-            attrDef("dbName", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("tableName", STRING_TYPE, ATTR_REQUIRED),
-            attrDef("storageDesc", sdType, ATTR_REQUIRED),
-            attrDef("compressed", BOOLEAN_TYPE),
-            attrDef("numBuckets", INT_TYPE),
-            attrDef("bucketColumns", arrayType(STRING_TYPE)),
-            attrDef("sortColumns", arrayType(STRING_TYPE)),
-            attrDef("parameters", mapType(STRING_TYPE, STRING_TYPE)),
-            attrDef("storedAsSubDirs", BOOLEAN_TYPE)
-        )
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-atlas/blob/30711973/tools/src/test/scala/org/apache/hadoop/metadata/tools/hive/HiveMockMetadataService.scala
----------------------------------------------------------------------
diff --git a/tools/src/test/scala/org/apache/hadoop/metadata/tools/hive/HiveMockMetadataService.scala b/tools/src/test/scala/org/apache/hadoop/metadata/tools/hive/HiveMockMetadataService.scala
deleted file mode 100755
index 3a8215d..0000000
--- a/tools/src/test/scala/org/apache/hadoop/metadata/tools/hive/HiveMockMetadataService.scala
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metadata.tools.hive
-
-object HiveMockMetadataService {
-
-    def getTable(dbName: String, table: String): Table = {
-        return Table(dbName, table,
-            StorageDescriptor(
-                List[FieldSchema](
-                    FieldSchema("d_date_sk", "int", null),
-                    FieldSchema("d_date_id", "string", null),
-                    FieldSchema("d_date", "string", null),
-                    FieldSchema("d_month_seq", "int", null),
-                    FieldSchema("d_week_seq", "int", null),
-                    FieldSchema("d_quarter_seq", "int", null),
-                    FieldSchema("d_year", "int", null),
-                    FieldSchema("d_dow", "int", null),
-                    FieldSchema("d_moy", "int", null),
-                    FieldSchema("d_dom", "int", null),
-                    FieldSchema("d_qoy", "int", null),
-                    FieldSchema("d_fy_year", "int", null),
-                    FieldSchema("d_fy_quarter_seq", "int", null),
-                    FieldSchema("d_fy_week_seq", "int", null),
-                    FieldSchema("d_day_name", "string", null),
-                    FieldSchema("d_quarter_name", "string", null),
-                    FieldSchema("d_holiday", "string", null),
-                    FieldSchema("d_weekend", "string", null),
-                    FieldSchema("d_following_holiday", "string", null),
-                    FieldSchema("d_first_dom", "int", null),
-                    FieldSchema("d_last_dom", "int", null),
-                    FieldSchema("d_same_day_ly", "int", null),
-                    FieldSchema("d_same_day_lq", "int", null),
-                    FieldSchema("d_current_day", "string", null),
-                    FieldSchema("d_current_week", "string", null),
-                    FieldSchema("d_current_month", "string", null),
-                    FieldSchema("d_current_quarter", "string", null),
-                    FieldSchema("d_current_year", "string", null)
-                ),
-                "file:/tmp/warehouse/tpcds.db/date_dim",
-                "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
-                "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat",
-                false,
-                0, List[String](), List[String](),
-                Map[String, String](),
-                false
-            ),
-            Map[String, String](),
-            "Table")
-    }
-
-    case class FieldSchema(name: String, typeName: String, comment: String)
-
-    case class SerDe(name: String, serializationLib: String, parameters: Map[String, String])
-
-    case class StorageDescriptor(fields: List[FieldSchema],
-                                 location: String, inputFormat: String,
-                                 outputFormat: String, compressed: Boolean,
-                                 numBuckets: Int, bucketColumns: List[String],
-                                 sortColumns: List[String],
-                                 parameters: Map[String, String],
-                                 storedAsSubDirs: Boolean
-                                    )
-
-    case class Table(dbName: String, tableName: String, storageDesc: StorageDescriptor,
-                     parameters: Map[String, String],
-                     tableType: String)
-}

