carbondata-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From chenliang...@apache.org
Subject carbondata git commit: [CARBONDATA-1327] Add carbon sort column examples
Date Sat, 29 Jul 2017 14:36:34 GMT
Repository: carbondata
Updated Branches:
  refs/heads/master 66d0b3930 -> a4fe9b869


[CARBONDATA-1327] Add carbon sort column examples

Add carbon sort column examples and update the introduction document

This closes #1193


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/a4fe9b86
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/a4fe9b86
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/a4fe9b86

Branch: refs/heads/master
Commit: a4fe9b8697199e18f77442b91a0d8296fc247fe6
Parents: 66d0b39
Author: mayun <simafengyun1984@163.com>
Authored: Mon Jul 24 18:01:29 2017 +0800
Committer: chenliang613 <chenliang613@apache.org>
Committed: Sat Jul 29 22:36:20 2017 +0800

----------------------------------------------------------------------
 docs/ddl-operation-on-carbondata.md             |  11 +-
 .../examples/CarbonSortColumnsExample.scala     | 127 +++++++++++++++++++
 2 files changed, 137 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/a4fe9b86/docs/ddl-operation-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/ddl-operation-on-carbondata.md b/docs/ddl-operation-on-carbondata.md
index 79d1139..180132d 100644
--- a/docs/ddl-operation-on-carbondata.md
+++ b/docs/ddl-operation-on-carbondata.md
@@ -101,6 +101,14 @@ The following DDL operations are supported in CarbonData :
 
    - All dimensions except complex datatype columns are part of multi dimensional key(MDK).
This behavior can be overridden by using TBLPROPERTIES. If the user wants to keep any column
(except columns of complex datatype) in multi dimensional key then he can keep the columns
either in DICTIONARY_EXCLUDE or DICTIONARY_INCLUDE.
 
+   - **Sort Columns Configuration**
+
+     "SORT_COLUMNS" property is for users to specify which columns belong to the MDK index.
If the user doesn't specify the "SORT_COLUMNS" property, by default the MDK index is built by using all
dimension columns except complex datatype columns.
+
+```
+       TBLPROPERTIES ('SORT_COLUMNS'='column1, column3')
+```
+
 ### Example:
 ```
     CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
@@ -115,7 +123,8 @@ The following DDL operations are supported in CarbonData :
       STORED BY 'carbondata'
       TBLPROPERTIES ('DICTIONARY_EXCLUDE'='storeCity',
                      'DICTIONARY_INCLUDE'='productNumber',
-                     'NO_INVERTED_INDEX'='productBatch')
+                     'NO_INVERTED_INDEX'='productBatch',
+                     'SORT_COLUMNS'='productName,storeCity')
 ```
 
 ## SHOW TABLE

http://git-wip-us.apache.org/repos/asf/carbondata/blob/a4fe9b86/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonSortColumnsExample.scala
----------------------------------------------------------------------
diff --git a/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonSortColumnsExample.scala
b/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonSortColumnsExample.scala
new file mode 100644
index 0000000..7baee56
--- /dev/null
+++ b/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonSortColumnsExample.scala
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.examples
+
+import java.io.File
+
+import org.apache.spark.sql.SparkSession
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+object CarbonSortColumnsExample {
+
+  def main(args: Array[String]) {
+    val rootPath = new File(this.getClass.getResource("/").getPath
+                            + "../../../..").getCanonicalPath
+    val storeLocation = s"$rootPath/examples/spark2/target/store"
+    val warehouse = s"$rootPath/examples/spark2/target/warehouse"
+    val metastoredb = s"$rootPath/examples/spark2/target"
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd HH:mm:ss")
+      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy/MM/dd")
+
+    import org.apache.spark.sql.CarbonSession._
+    val spark = SparkSession
+      .builder()
+      .master("local")
+      .appName("CarbonSortColumnsExample")
+      .config("spark.sql.warehouse.dir", warehouse)
+      .config("spark.driver.host", "localhost")
+      .getOrCreateCarbonSession(storeLocation, metastoredb)
+
+    spark.sparkContext.setLogLevel("WARN")
+
+    spark.sql("DROP TABLE IF EXISTS no_sort_columns_table")
+    spark.sql("DROP TABLE IF EXISTS sort_columns_table")
+
+    // Create table with no sort columns
+    spark.sql(
+      s"""
+         | CREATE TABLE no_sort_columns_table(
+         | shortField SHORT,
+         | intField INT,
+         | bigintField LONG,
+         | doubleField DOUBLE,
+         | stringField STRING,
+         | timestampField TIMESTAMP,
+         | decimalField DECIMAL(18,2),
+         | dateField DATE,
+         | charField CHAR(5),
+         | floatField FLOAT,
+         | complexData ARRAY<STRING>
+         | )
+         | STORED BY 'carbondata'
+         | TBLPROPERTIES('SORT_COLUMNS'='')
+       """.stripMargin)
+
+    // Create table with sort columns
+    // you can specify any columns as sort columns for building the MDK index; remark: currently
+    // sort columns don't support FLOAT, DOUBLE or DECIMAL datatypes
+    spark.sql(
+      s"""
+         | CREATE TABLE sort_columns_table(
+         | shortField SHORT,
+         | intField INT,
+         | bigintField LONG,
+         | doubleField DOUBLE,
+         | stringField STRING,
+         | timestampField TIMESTAMP,
+         | decimalField DECIMAL(18,2),
+         | dateField DATE,
+         | charField CHAR(5),
+         | floatField FLOAT,
+         | complexData ARRAY<STRING>
+         | )
+         | STORED BY 'carbondata'
+         | TBLPROPERTIES('SORT_COLUMNS'='intField, stringField, charField')
+       """.stripMargin)
+
+    val path = s"$rootPath/examples/spark2/src/main/resources/data.csv"
+
+    // scalastyle:off
+    spark.sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$path'
+         | INTO TABLE no_sort_columns_table
+         | OPTIONS('FILEHEADER'='shortField,intField,bigintField,doubleField,stringField,timestampField,decimalField,dateField,charField,floatField,complexData',
+         | 'COMPLEX_DELIMITER_LEVEL_1'='#')
+       """.stripMargin)
+    spark.sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$path'
+         | INTO TABLE sort_columns_table
+         | OPTIONS('FILEHEADER'='shortField,intField,bigintField,doubleField,stringField,timestampField,decimalField,dateField,charField,floatField,complexData',
+         | 'COMPLEX_DELIMITER_LEVEL_1'='#')
+       """.stripMargin)
+    // scalastyle:on
+
+    spark.sql(
+      s"""SELECT * FROM no_sort_columns_table""".stripMargin).show()
+
+    spark.sql(
+      s"""SELECT * FROM sort_columns_table""".stripMargin).show()
+
+    // Drop table
+    spark.sql("DROP TABLE IF EXISTS no_sort_columns_table")
+    spark.sql("DROP TABLE IF EXISTS sort_columns_table")
+
+    spark.stop()
+  }
+
+}


Mime
View raw message