drill-commits mailing list archives

From bridg...@apache.org
Subject [1/5] drill git commit: DRILL-1820
Date Sat, 20 Jun 2015 00:07:04 GMT
Repository: drill
Updated Branches:
  refs/heads/gh-pages d476ce583 -> 3d1c00554


DRILL-1820

consistent acronym def

query > select statement

new config spotfire server file

minor edits

CTAS partitioning


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/00efc0f1
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/00efc0f1
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/00efc0f1

Branch: refs/heads/gh-pages
Commit: 00efc0f1935dee9b8020f9fc778aeb90c7905ded
Parents: cfd4573
Author: Kristine Hahn <khahn@maprtech.com>
Authored: Thu Jun 18 17:36:47 2015 -0700
Committer: Kristine Hahn <khahn@maprtech.com>
Committed: Fri Jun 19 14:43:21 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 | 232 ++++++++++++++++---
 .../040-parquet-format.md                       |   2 +-
 .../010-compiling-drill-from-source.md          |   9 +-
 .../020-drill-patch-review-tool.md              |   2 +-
 .../050-configuring-spotfire-server.md          | 123 +++++++++-
 .../performance-tuning/020-partition-pruning.md |  27 ++-
 _docs/query-data/010-query-data-introduction.md |  38 +--
 _docs/sql-reference/090-sql-extensions.md       |   2 +-
 .../data-types/010-supported-data-types.md      |   2 +-
 .../sql-commands/030-create-table-as.md         |   4 +-
 .../sql-commands/035-partition-by-clause.md     |  37 +++
 .../sql-functions/020-data-type-conversion.md   |   2 +-
 .../030-analyzing-the-yelp-academic-dataset.md  |   2 +-
 13 files changed, 409 insertions(+), 73 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 6989a98..ddd3a4e 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -601,8 +601,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "CREATE VIEW", 
-            "next_url": "/docs/create-view/", 
+            "next_title": "PARTITION BY Clause", 
+            "next_url": "/docs/partition-by-clause/", 
             "parent": "SQL Commands", 
             "previous_title": "ALTER SYSTEM", 
             "previous_url": "/docs/alter-system/", 
@@ -625,8 +625,8 @@
             "next_title": "DROP VIEW", 
             "next_url": "/docs/drop-view/", 
             "parent": "SQL Commands", 
-            "previous_title": "CREATE TABLE AS (CTAS)", 
-            "previous_url": "/docs/create-table-as-ctas/", 
+            "previous_title": "PARTITION BY Clause", 
+            "previous_url": "/docs/partition-by-clause/", 
             "relative_path": "_docs/sql-reference/sql-commands/050-create-view.md", 
             "title": "CREATE VIEW", 
             "url": "/docs/create-view/"
@@ -1373,6 +1373,27 @@
             "title": "Configuring Resources for a Shared Drillbit", 
             "url": "/docs/configuring-resources-for-a-shared-drillbit/"
         }, 
+        "Configuring Tibco Spotfire Server with Drill": {
+            "breadcrumbs": [
+                {
+                    "title": "Using Drill with BI Tools", 
+                    "url": "/docs/using-drill-with-bi-tools/"
+                }, 
+                {
+                    "title": "ODBC/JDBC Interfaces", 
+                    "url": "/docs/odbc-jdbc-interfaces/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Using Apache Drill with Tableau 9 Desktop", 
+            "next_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
+            "parent": "Using Drill with BI Tools", 
+            "previous_title": "Using Tibco Spotfire with Drill", 
+            "previous_url": "/docs/using-tibco-spotfire-with-drill/", 
+            "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/050-configuring-spotfire-server.md", 
+            "title": "Configuring Tibco Spotfire Server with Drill", 
+            "url": "/docs/configuring-tibco-spotfire-server-with-drill/"
+        }, 
         "Configuring User Authentication": {
             "breadcrumbs": [
                 {
@@ -5352,8 +5373,8 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "Using Apache Drill with Tableau 9 Desktop", 
-                            "next_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
+                            "next_title": "Configuring Tibco Spotfire Server with Drill", 
+                            "next_url": "/docs/configuring-tibco-spotfire-server-with-drill/", 
                             "parent": "Using Drill with BI Tools", 
                             "previous_title": "Using MicroStrategy Analytics with Apache Drill", 
                             "previous_url": "/docs/using-microstrategy-analytics-with-apache-drill/", 
@@ -5373,11 +5394,32 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "Using Apache Drill with Tableau 9 Server", 
-                            "next_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+                            "next_title": "Using Apache Drill with Tableau 9 Desktop", 
+                            "next_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
                             "parent": "Using Drill with BI Tools", 
                             "previous_title": "Using Tibco Spotfire with Drill", 
                             "previous_url": "/docs/using-tibco-spotfire-with-drill/", 
+                            "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/050-configuring-spotfire-server.md", 
+                            "title": "Configuring Tibco Spotfire Server with Drill", 
+                            "url": "/docs/configuring-tibco-spotfire-server-with-drill/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "Using Drill with BI Tools", 
+                                    "url": "/docs/using-drill-with-bi-tools/"
+                                }, 
+                                {
+                                    "title": "ODBC/JDBC Interfaces", 
+                                    "url": "/docs/odbc-jdbc-interfaces/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "Using Apache Drill with Tableau 9 Server", 
+                            "next_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+                            "parent": "Using Drill with BI Tools", 
+                            "previous_title": "Configuring Tibco Spotfire Server with Drill", 
+                            "previous_url": "/docs/configuring-tibco-spotfire-server-with-drill/", 
                             "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/060-using-apache-drill-with-tableau-9-desktop.md", 
                             "title": "Using Apache Drill with Tableau 9 Desktop", 
                             "url": "/docs/using-apache-drill-with-tableau-9-desktop/"
@@ -5503,6 +5545,27 @@
             "title": "Operators", 
             "url": "/docs/operators/"
         }, 
+        "PARTITION BY Clause": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Commands", 
+                    "url": "/docs/sql-commands/"
+                }, 
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "CREATE VIEW", 
+            "next_url": "/docs/create-view/", 
+            "parent": "SQL Commands", 
+            "previous_title": "CREATE TABLE AS (CTAS)", 
+            "previous_url": "/docs/create-table-as-ctas/", 
+            "relative_path": "_docs/sql-reference/sql-commands/035-partition-by-clause.md", 
+            "title": "PARTITION BY Clause", 
+            "url": "/docs/partition-by-clause/"
+        }, 
         "Parquet Format": {
             "breadcrumbs": [
                 {
@@ -7738,8 +7801,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "CREATE VIEW", 
-                    "next_url": "/docs/create-view/", 
+                    "next_title": "PARTITION BY Clause", 
+                    "next_url": "/docs/partition-by-clause/", 
                     "parent": "SQL Commands", 
                     "previous_title": "ALTER SYSTEM", 
                     "previous_url": "/docs/alter-system/", 
@@ -7759,11 +7822,32 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "DROP VIEW", 
-                    "next_url": "/docs/drop-view/", 
+                    "next_title": "CREATE VIEW", 
+                    "next_url": "/docs/create-view/", 
                     "parent": "SQL Commands", 
                     "previous_title": "CREATE TABLE AS (CTAS)", 
                     "previous_url": "/docs/create-table-as-ctas/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/035-partition-by-clause.md", 
+                    "title": "PARTITION BY Clause", 
+                    "url": "/docs/partition-by-clause/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "SQL Commands", 
+                            "url": "/docs/sql-commands/"
+                        }, 
+                        {
+                            "title": "SQL Reference", 
+                            "url": "/docs/sql-reference/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "DROP VIEW", 
+                    "next_url": "/docs/drop-view/", 
+                    "parent": "SQL Commands", 
+                    "previous_title": "PARTITION BY Clause", 
+                    "previous_url": "/docs/partition-by-clause/", 
                     "relative_path": "_docs/sql-reference/sql-commands/050-create-view.md", 
                     "title": "CREATE VIEW", 
                     "url": "/docs/create-view/"
@@ -8900,8 +8984,8 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "CREATE VIEW", 
-                            "next_url": "/docs/create-view/", 
+                            "next_title": "PARTITION BY Clause", 
+                            "next_url": "/docs/partition-by-clause/", 
                             "parent": "SQL Commands", 
                             "previous_title": "ALTER SYSTEM", 
                             "previous_url": "/docs/alter-system/", 
@@ -8921,11 +9005,32 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "DROP VIEW", 
-                            "next_url": "/docs/drop-view/", 
+                            "next_title": "CREATE VIEW", 
+                            "next_url": "/docs/create-view/", 
                             "parent": "SQL Commands", 
                             "previous_title": "CREATE TABLE AS (CTAS)", 
                             "previous_url": "/docs/create-table-as-ctas/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/035-partition-by-clause.md", 
+                            "title": "PARTITION BY Clause", 
+                            "url": "/docs/partition-by-clause/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "DROP VIEW", 
+                            "next_url": "/docs/drop-view/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "PARTITION BY Clause", 
+                            "previous_url": "/docs/partition-by-clause/", 
                             "relative_path": "_docs/sql-reference/sql-commands/050-create-view.md", 
                             "title": "CREATE VIEW", 
                             "url": "/docs/create-view/"
@@ -10328,8 +10433,8 @@
             "next_title": "Using Apache Drill with Tableau 9 Server", 
             "next_url": "/docs/using-apache-drill-with-tableau-9-server/", 
             "parent": "Using Drill with BI Tools", 
-            "previous_title": "Using Tibco Spotfire with Drill", 
-            "previous_url": "/docs/using-tibco-spotfire-with-drill/", 
+            "previous_title": "Configuring Tibco Spotfire Server with Drill", 
+            "previous_url": "/docs/configuring-tibco-spotfire-server-with-drill/", 
             "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/060-using-apache-drill-with-tableau-9-desktop.md", 
             "title": "Using Apache Drill with Tableau 9 Desktop", 
             "url": "/docs/using-apache-drill-with-tableau-9-desktop/"
@@ -10536,8 +10641,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Using Apache Drill with Tableau 9 Desktop", 
-                    "next_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
+                    "next_title": "Configuring Tibco Spotfire Server with Drill", 
+                    "next_url": "/docs/configuring-tibco-spotfire-server-with-drill/", 
                     "parent": "Using Drill with BI Tools", 
                     "previous_title": "Using MicroStrategy Analytics with Apache Drill", 
                     "previous_url": "/docs/using-microstrategy-analytics-with-apache-drill/", 
@@ -10557,11 +10662,32 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Using Apache Drill with Tableau 9 Server", 
-                    "next_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+                    "next_title": "Using Apache Drill with Tableau 9 Desktop", 
+                    "next_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
                     "parent": "Using Drill with BI Tools", 
                     "previous_title": "Using Tibco Spotfire with Drill", 
                     "previous_url": "/docs/using-tibco-spotfire-with-drill/", 
+                    "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/050-configuring-spotfire-server.md", 
+                    "title": "Configuring Tibco Spotfire Server with Drill", 
+                    "url": "/docs/configuring-tibco-spotfire-server-with-drill/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Using Drill with BI Tools", 
+                            "url": "/docs/using-drill-with-bi-tools/"
+                        }, 
+                        {
+                            "title": "ODBC/JDBC Interfaces", 
+                            "url": "/docs/odbc-jdbc-interfaces/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Using Apache Drill with Tableau 9 Server", 
+                    "next_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+                    "parent": "Using Drill with BI Tools", 
+                    "previous_title": "Configuring Tibco Spotfire Server with Drill", 
+                    "previous_url": "/docs/configuring-tibco-spotfire-server-with-drill/", 
                     "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/060-using-apache-drill-with-tableau-9-desktop.md", 
                     "title": "Using Apache Drill with Tableau 9 Desktop", 
                     "url": "/docs/using-apache-drill-with-tableau-9-desktop/"
@@ -10710,8 +10836,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Using Apache Drill with Tableau 9 Desktop", 
-            "next_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
+            "next_title": "Configuring Tibco Spotfire Server with Drill", 
+            "next_url": "/docs/configuring-tibco-spotfire-server-with-drill/", 
             "parent": "Using Drill with BI Tools", 
             "previous_title": "Using MicroStrategy Analytics with Apache Drill", 
             "previous_url": "/docs/using-microstrategy-analytics-with-apache-drill/", 
@@ -12464,8 +12590,8 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "Using Apache Drill with Tableau 9 Desktop", 
-                            "next_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
+                            "next_title": "Configuring Tibco Spotfire Server with Drill", 
+                            "next_url": "/docs/configuring-tibco-spotfire-server-with-drill/", 
                             "parent": "Using Drill with BI Tools", 
                             "previous_title": "Using MicroStrategy Analytics with Apache Drill", 
                             "previous_url": "/docs/using-microstrategy-analytics-with-apache-drill/", 
@@ -12485,11 +12611,32 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "Using Apache Drill with Tableau 9 Server", 
-                            "next_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+                            "next_title": "Using Apache Drill with Tableau 9 Desktop", 
+                            "next_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
                             "parent": "Using Drill with BI Tools", 
                             "previous_title": "Using Tibco Spotfire with Drill", 
                             "previous_url": "/docs/using-tibco-spotfire-with-drill/", 
+                            "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/050-configuring-spotfire-server.md", 
+                            "title": "Configuring Tibco Spotfire Server with Drill", 
+                            "url": "/docs/configuring-tibco-spotfire-server-with-drill/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "Using Drill with BI Tools", 
+                                    "url": "/docs/using-drill-with-bi-tools/"
+                                }, 
+                                {
+                                    "title": "ODBC/JDBC Interfaces", 
+                                    "url": "/docs/odbc-jdbc-interfaces/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "Using Apache Drill with Tableau 9 Server", 
+                            "next_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+                            "parent": "Using Drill with BI Tools", 
+                            "previous_title": "Configuring Tibco Spotfire Server with Drill", 
+                            "previous_url": "/docs/configuring-tibco-spotfire-server-with-drill/", 
                             "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/060-using-apache-drill-with-tableau-9-desktop.md", 
                             "title": "Using Apache Drill with Tableau 9 Desktop", 
                             "url": "/docs/using-apache-drill-with-tableau-9-desktop/"
@@ -13908,8 +14055,8 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "CREATE VIEW", 
-                            "next_url": "/docs/create-view/", 
+                            "next_title": "PARTITION BY Clause", 
+                            "next_url": "/docs/partition-by-clause/", 
                             "parent": "SQL Commands", 
                             "previous_title": "ALTER SYSTEM", 
                             "previous_url": "/docs/alter-system/", 
@@ -13929,11 +14076,32 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "DROP VIEW", 
-                            "next_url": "/docs/drop-view/", 
+                            "next_title": "CREATE VIEW", 
+                            "next_url": "/docs/create-view/", 
                             "parent": "SQL Commands", 
                             "previous_title": "CREATE TABLE AS (CTAS)", 
                             "previous_url": "/docs/create-table-as-ctas/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/035-partition-by-clause.md", 
+                            "title": "PARTITION BY Clause", 
+                            "url": "/docs/partition-by-clause/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "DROP VIEW", 
+                            "next_url": "/docs/drop-view/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "PARTITION BY Clause", 
+                            "previous_url": "/docs/partition-by-clause/", 
                             "relative_path": "_docs/sql-reference/sql-commands/050-create-view.md", 
                             "title": "CREATE VIEW", 
                             "url": "/docs/create-view/"

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/data-sources-and-file-formats/040-parquet-format.md
----------------------------------------------------------------------
diff --git a/_docs/data-sources-and-file-formats/040-parquet-format.md b/_docs/data-sources-and-file-formats/040-parquet-format.md
index 5cfc83f..c9070b7 100644
--- a/_docs/data-sources-and-file-formats/040-parquet-format.md
+++ b/_docs/data-sources-and-file-formats/040-parquet-format.md
@@ -21,7 +21,7 @@ Apache Drill includes the following support for Parquet:
 When a read of Parquet data occurs, Drill loads only the necessary columns of data, which reduces I/O. Reading only a small piece of the Parquet data from a data file or table, Drill can examine and analyze all values for a column across multiple files. You can create a Drill table from one format and store the data in another format, including Parquet.
 
 ## Writing Parquet Files
-CREATE TABLE AS SELECT (CTAS) can use any data source provided by the storage plugin. To write Parquet data using the CTAS command, set the session store.format option as shown in the next section. Alternatively, configure the storage plugin to point to the directory containing the Parquet files.
+CREATE TABLE AS (CTAS) can use any data source provided by the storage plugin. To write Parquet data using the CTAS command, set the session store.format option as shown in the next section. Alternatively, configure the storage plugin to point to the directory containing the Parquet files.
 
 Although the data resides in a single table, Parquet output generally consists of multiple files that resemble MapReduce output having numbered file names,  such as 0_0_0.parquet in a directory.
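As a concrete sketch of the workflow this paragraph describes (the table and file names are hypothetical, and `dfs.tmp` is assumed to be a writable workspace), the session option and a CTAS statement look like this:

    ALTER SESSION SET `store.format` = 'parquet';

    CREATE TABLE dfs.tmp.sales_parquet AS
        SELECT * FROM dfs.`/data/sales.json`;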
 

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/developer-information/develop-drill/010-compiling-drill-from-source.md
----------------------------------------------------------------------
diff --git a/_docs/developer-information/develop-drill/010-compiling-drill-from-source.md b/_docs/developer-information/develop-drill/010-compiling-drill-from-source.md
index 248ab81..a10a842 100644
--- a/_docs/developer-information/develop-drill/010-compiling-drill-from-source.md
+++ b/_docs/developer-information/develop-drill/010-compiling-drill-from-source.md
@@ -24,15 +24,10 @@ Maven and JDK installed:
 
 ## 2\. Compile the Code
 
-    cd incubator-drill
+    cd drill
     mvn clean install -DskipTests
 
-## 3\. Explode the Tarball in the Installation Directory
-
-    mkdir ~/compiled-drill
-    tar xvzf distribution/target/*.tar.gz --strip=1 -C ~/compiled-drill
-
-Now that you have Drill installed, you can connect to Drill and query sample
+The tarball appears in the distribution/target directory. Move the tarball to a directory of your choice, unpack it, and then you can connect to Drill and query sample
 data or you can connect Drill to your data sources.
 
   * To connect Drill to your data sources, refer to [Connect to Data Sources]({{ site.baseurl }}/docs/connect-a-data-source-introduction) for instructions.

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/developer-information/develop-drill/020-drill-patch-review-tool.md
----------------------------------------------------------------------
diff --git a/_docs/developer-information/develop-drill/020-drill-patch-review-tool.md b/_docs/developer-information/develop-drill/020-drill-patch-review-tool.md
index 5144d36..8edf2bf 100644
--- a/_docs/developer-information/develop-drill/020-drill-patch-review-tool.md
+++ b/_docs/developer-information/develop-drill/020-drill-patch-review-tool.md
@@ -128,7 +128,7 @@ review board. So you need to configure an override to use the non-http url.
 You can do this by adding a config file like this:
 
 	jkreps$ cat ~/.reviewboardrc
-	REPOSITORY = 'git://git.apache.org/incubator-drill.git'
+	REPOSITORY = 'git://git.apache.org/drill.git'
 	TARGET_GROUPS = 'drill-git'
	GUESS_FIELDS = True
 

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/050-configuring-spotfire-server.md
----------------------------------------------------------------------
diff --git a/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/050-configuring-spotfire-server.md b/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/050-configuring-spotfire-server.md
index 436776e..c68b07d 100644
--- a/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/050-configuring-spotfire-server.md
+++ b/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/050-configuring-spotfire-server.md
@@ -1 +1,122 @@
----
title: "Configuring Tibco Spotfire Server with Drill"
parent: "Using Drill with BI Tools"
---
This document describes how to configure Tibco Spotfire Server (TSS) to integrate with Apache Drill and explore multiple data formats instantly on Hadoop. Users can combine these powerful platforms to rapidly gain analytical access to a wide variety of data types. 

Complete the following steps to configure and use Apache Drill with TSS: 

1. Install the Drill JDBC driver with TSS.
2. Configure the Drill Data Source Template in TSS with the TSS configuration tool.
3. Configure Drill data sources with Tibco Spotfire Desktop and Information Designer.
4. Query and analyze various data formats with Tibco Spotfire and Drill.


----------


### Step 1: Install and Configure the Drill JDBC Driver 


Drill provides standard JDBC connectivity, making it easy to integrate data exploration capabilities on complex, schema-less data sets. Tibco Spotfire Server (TSS) requires Drill 1.0 or later, which incudes the JDBC driver. The JDBC driver is bundled with the Drill configuration files, and it is recommended that you use the JDBC driver that is shipped with the specific Drill version.

For general instructions to install the Drill JDBC driver, see [Using JDBC](http://drill.apache.org/docs/using-jdbc/).
Complete the following steps to install and configure the JDBC driver for TSS:

1. Locate the JDBC driver in the Drill installation directory:  
   `<drill-home>/jars/jdbc-driver/drill-jdbc-all-<drill-version>.jar`  
   For example, on a MapR cluster:  
   `/opt/mapr/drill/drill-1.0.0/jars/jdbc-driver/drill-jdbc-all-1.0.0-SNAPSHOT.jar`

2. Locate the TSS library directory and copy the JDBC driver file to that directory: 
   `<TSS-home-directory>/tomcat/lib`  
   For example, on a Linux server:  
   `/usr/local/bin/tibco/tss/6.0.3/tomcat/lib`  
   For example, on a Windows server:  
   `C:\Program Files\apache-tomcat\lib`

3. Restart TSS to load the JDBC driver.
4. Verify that the TSS system can resolve the hostnames of the ZooKeeper nodes for the Drill cluster. You can do this by validating that DNS is properly configured for the TSS system and all the ZooKeeper nodes. Alternatively, you can add the hostnames and IP addresses of the ZooKeeper nodes to the TSS system hosts file.  
   For Linux systems, the hosts file is located here: 
   `/etc/hosts`  
   For Windows systems, the hosts file is located here: 
   `%WINDIR%\system32\drivers\etc\hosts`

----------

### Step 2: Configure the Drill Data Source Template in TSS

The Drill Data Source template can now be configured with the TSS Configuration Tool. The Windows-based TSS Configuration Tool is recommended. If TSS is installed on a Linux system, you also need to install TSS on a small Windows-based system so you can utilize the Configuration Tool. In this case, it is also recommended that you install the Drill JDBC driver on the TSS Windows system.

1. Click **Start > All Programs > TIBCO Spotfire Server > Configure TIBCO Spotfire Server**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-start.png)
2. Enter the Configuration Tool password that was specified when TSS was initially installed.
3. Once the Configuration Tool has connected to TSS, click the **Configuration** tab, then **Data Source Templates**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-configtab.png)
4. In the Data Source Templates window, click the **New** button at the bottom of the window. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-new.png)
5. Provide a name for the data source template, then copy the following XML template into the **Data Source Template** box. When complete, click **OK**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-template.png)
6. The new entry will now be available in the data source template. Check the box next to the new entry, then click **Save Configuration**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-saveconfig.png)
7. Select Database as the destination and click Next. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-saveconfig2.png) 
8. Add a comment to the updated configuration and click **Finish**. 
9. A response window is displayed to state that the configuration was successfully uploaded to TSS. Click **OK**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-importconfig.png)
10. Restart TSS to enable it to use the Drill data source template.
   
#### XML Template

Make sure that you enter the correct ZooKeeper node name instead of `<zk-node>`, as well as the correct Drill cluster name instead of `<drill-cluster-name>` in the example below. This is just a template that will appear whenever a data source is configured. The hostnames of ZooKeeper nodes and the Drill cluster name can be found in the `$DRILL_HOME/conf/drill-override.conf` file on any of the Drill nodes in the cluster.
     
    <jdbc-type-settings>
    <type-name>drill</type-name>
    <driver>org.apache.drill.jdbc.Driver</driver> 
    <connection-url-pattern>jdbc:drill:zk=<zk-node>:5181/drill/<drill-cluster-name>-drillbits</connection-url-pattern> 
    <ping-command>SELECT 1 FROM sys.version</ping-command>
    <supports-catalogs>true</supports-catalogs>
    <supports-schemas>true</supports-schemas>
    <supports-procedures>false</supports-procedures>
    <table-expression-pattern>[$$schema$$.]$$table$$</table-expression-pattern>
 
    <column-name-pattern>`$$name$$`</column-name-pattern>
    <table-name-pattern>`$$name$$`</table-name-pattern>
    <schema-name-pattern>`$$name$$`</schema-name-pattern>
    <catalog-name-pattern>`$$name$$`</catalog-name-pattern>
    <procedure-name-pattern>`$$name$$`</procedure-name-pattern>
    <column-alias-pattern>`$$name$$`</column-alias-pattern>

    <java-to-sql-type-conversions>
     <type-mapping>
      <from max-length="32672">String</from>
      <to>VARCHAR($$value$$)</to>
     </type-mapping>
     <type-mapping>
      <from>String</from>
      <to>VARCHAR(32672)</to>
     </type-mapping>
     <type-mapping>
      <from>Integer</from>
      <to>INTEGER</to>
     </type-mapping>
    </java-to-sql-type-conversions>
    </jdbc-type-settings>


----------

### Step 3: Configure Drill Data Sources with Tibco Spotfire Desktop 

To configure Drill data sources in TSS, you need to use the Tibco Spotfire Desktop client.

1. Open Tibco Spotfire Desktop. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-client.png)
2. Log into TSS. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-tss.png)
3. Select the deployment area in TSS to be used. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-deployment.png)
4. Click **Tools > Information Designer**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-infodesigner.png)
5. In the Information Designer, click **New > Data Source**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-infodesigner2.png)
6. In the Data Source window, enter the name for the data source. Select the Drill Data Source template created in Step 2 as the type. Update the connection URL with the correct hostname of the ZooKeeper node(s) and the Drill cluster name. Note: The Zookeeper node(s) hostname(s) and Drill cluster name can be found in the `$DRILL_HOME/conf/drill-override.conf` file on any of the Drill nodes in the cluster. Enter the username and password used to connect to Drill. When completed, click **Save**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-connectionURL.png)
7. In the Save As window, verify the name and the folder where you want to save the new data source in TSS. Click **Save** when done. TSS will now validate the information and save the new data source in TSS.
8. When the data source is saved, it will appear in the **Data Sources** tab, and you will be able to navigate the schema. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-datasources-tab.png)


----------

### Step 4: Query and Analyze the Data

After the Drill data source has been configured in the Information Designer, the information elements can be defined. 

1.	In this example all the columns of a Hive table have been defined, using the Drill data source, and added to an information link. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-infolink.png)
2.	The SQL syntax to retrieve the data can be validated by clicking the **SQL** button. Many other operations can be performed in Information Link,  including joins, filters, and so on. See the Tibco Spotfire documentation for details.
3.	You can now import the data of this table into TSS by clicking the **Open Data** button. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-hiveorders.png)
The data is now available in Tibco Spotfire Desktop to create various reports and tables as needed, and to be shared. For more information about creating charts, tables and reports, see the Tibco Spotfire documentation.


...
\ No newline at end of file
+---
+title: "Configuring Tibco Spotfire Server with Drill"
+parent: "Using Drill with BI Tools"
+---
+
+This document describes how to configure Tibco Spotfire Server (TSS) to integrate with Apache Drill and explore multiple data formats instantly on Hadoop. Users can combine these powerful platforms to rapidly gain analytical access to a wide variety of data types. 
+
+Complete the following steps to configure and use Apache Drill with TSS: 
+
+1. Install the Drill JDBC driver with TSS.
+2. Configure the Drill Data Source Template in TSS with the TSS configuration tool.
+3. Configure Drill data sources with Tibco Spotfire Desktop and Information Designer.
+4. Query and analyze various data formats with Tibco Spotfire and Drill.
+
+----------
+
+### Step 1: Install and Configure the Drill JDBC Driver 
+
+Drill provides standard JDBC connectivity, making it easy to integrate data exploration capabilities on complex, schema-less data sets. Tibco Spotfire Server (TSS) requires Drill 1.0 or later, which includes the JDBC driver. The JDBC driver is bundled with the Drill configuration files, and it is recommended that you use the JDBC driver that is shipped with the specific Drill version.
+
+For general instructions to install the Drill JDBC driver, see [Using JDBC]({{site.baseurl}}/docs/using-jdbc/).
+Complete the following steps to install and configure the JDBC driver for TSS:
+
+1. Locate the JDBC driver in the Drill installation directory:  
+   `<drill-home>/jars/jdbc-driver/drill-jdbc-all-<drill-version>.jar`  
+   For example, on a MapR cluster:  
+   `/opt/mapr/drill/drill-1.0.0/jars/jdbc-driver/drill-jdbc-all-1.0.0-SNAPSHOT.jar`
+
+2. Locate the TSS library directory and copy the JDBC driver file to that directory: 
+   `<TSS-home-directory>/tomcat/lib`  
+   For example, on a Linux server:  
+   `/usr/local/bin/tibco/tss/6.0.3/tomcat/lib`  
+   For example, on a Windows server:  
+   `C:\Program Files\apache-tomcat\lib`
+
+3. Restart TSS to load the JDBC driver.
+4. Verify that the TSS system can resolve the hostnames of the ZooKeeper nodes for the Drill cluster. You can do this by validating that DNS is properly configured for the TSS system and all the ZooKeeper nodes. Alternatively, you can add the hostnames and IP addresses of the ZooKeeper nodes to the TSS system hosts file.  
+   For Linux systems, the hosts file is located here: 
+   `/etc/hosts`  
+   For Windows systems, the hosts file is located here: 
+   `%WINDIR%\system32\drivers\etc\hosts`
+
+----------
+
+### Step 2: Configure the Drill Data Source Template in TSS
+
+The Drill Data Source template can now be configured with the TSS Configuration Tool. The Windows-based TSS Configuration Tool is recommended. If TSS is installed on a Linux system, you also need to install TSS on a small Windows-based system so you can utilize the Configuration Tool. In this case, it is also recommended that you install the Drill JDBC driver on the TSS Windows system.
+
+1. Click **Start > All Programs > TIBCO Spotfire Server > Configure TIBCO Spotfire Server**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-start.png)
+2. Enter the Configuration Tool password that was specified when TSS was initially installed.
+3. Once the Configuration Tool has connected to TSS, click the **Configuration** tab, then **Data Source Templates**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-configtab.png)
+4. In the Data Source Templates window, click the **New** button at the bottom of the window. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-new.png)
+5. Provide a name for the data source template, then copy the following XML template into the **Data Source Template** box. When complete, click **OK**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-template.png)
+6. The new entry will now be available in the data source template. Check the box next to the new entry, then click **Save Configuration**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-saveconfig.png)
+7. Select Database as the destination and click Next. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-saveconfig2.png) 
+8. Add a comment to the updated configuration and click **Finish**. 
+9. A response window is displayed to state that the configuration was successfully uploaded to TSS. Click **OK**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-importconfig.png)
+10. Restart TSS to enable it to use the Drill data source template.
+   
+#### XML Template
+
+Make sure that you enter the correct ZooKeeper node name instead of `<zk-node>`, as well as the correct Drill cluster name instead of `<drill-cluster-name>` in the example below. This is just a template that will appear whenever a data source is configured. The hostnames of ZooKeeper nodes and the Drill cluster name can be found in the `$DRILL_HOME/conf/drill-override.conf` file on any of the Drill nodes in the cluster.
+     
+      <jdbc-type-settings>
+      <type-name>drill</type-name>
+      <driver>org.apache.drill.jdbc.Driver</driver> 
+      <connection-url-pattern>jdbc:drill:zk=<zk-node>:5181/drill/<drill-cluster-name>-drillbits</connection-url-pattern> 
+      <ping-command>SELECT 1 FROM sys.version</ping-command>
+      <supports-catalogs>true</supports-catalogs>
+      <supports-schemas>true</supports-schemas>
+      <supports-procedures>false</supports-procedures>
+      <table-expression-pattern>[$$schema$$.]$$table$$</table-expression-pattern>
+   
+      <column-name-pattern>`$$name$$`</column-name-pattern>
+      <table-name-pattern>`$$name$$`</table-name-pattern>
+      <schema-name-pattern>`$$name$$`</schema-name-pattern>
+      <catalog-name-pattern>`$$name$$`</catalog-name-pattern>
+      <procedure-name-pattern>`$$name$$`</procedure-name-pattern>
+      <column-alias-pattern>`$$name$$`</column-alias-pattern>
+
+      <java-to-sql-type-conversions>
+       <type-mapping>
+        <from max-length="32672">String</from>
+        <to>VARCHAR($$value$$)</to>
+       </type-mapping>
+       <type-mapping>
+        <from>String</from>
+        <to>VARCHAR(32672)</to>
+       </type-mapping>
+       <type-mapping>
+        <from>Integer</from>
+        <to>INTEGER</to>
+       </type-mapping>
+      </java-to-sql-type-conversions>
+      </jdbc-type-settings>
+
+
+----------
+
+### Step 3: Configure Drill Data Sources with Tibco Spotfire Desktop 
+
+To configure Drill data sources in TSS, you need to use the Tibco Spotfire Desktop client.
+
+1. Open Tibco Spotfire Desktop. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-client.png)
+2. Log into TSS. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-tss.png)
+3. Select the deployment area in TSS to be used. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-deployment.png)
+4. Click **Tools > Information Designer**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-infodesigner.png)
+5. In the Information Designer, click **New > Data Source**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-infodesigner2.png)
+6. In the Data Source window, enter the name for the data source. Select the Drill Data Source template created in Step 2 as the type. Update the connection URL with the correct hostname of the ZooKeeper node(s) and the Drill cluster name. Note: The Zookeeper node(s) hostname(s) and Drill cluster name can be found in the `$DRILL_HOME/conf/drill-override.conf` file on any of the Drill nodes in the cluster. Enter the username and password used to connect to Drill. When completed, click **Save**. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-connectionURL.png)
+7. In the Save As window, verify the name and the folder where you want to save the new data source in TSS. Click **Save** when done. TSS will now validate the information and save the new data source in TSS.
+8. When the data source is saved, it will appear in the **Data Sources** tab, and you will be able to navigate the schema. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-datasources-tab.png)
+
+----------
+
+### Step 4: Query and Analyze the Data
+
+After the Drill data source has been configured in the Information Designer, the information elements can be defined. 
+
+1.  In this example all the columns of a Hive table have been defined, using the Drill data source, and added to an information link. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-infolink.png)
+2.  The SQL syntax to retrieve the data can be validated by clicking the **SQL** button. Many other operations can be performed in the Information Link, including joins, filters, and so on. See the Tibco Spotfire documentation for details.
+3.  You can now import the data of this table into TSS by clicking the **Open Data** button. ![drill query flow]({{ site.baseurl }}/docs/img/spotfire-server-hiveorders.png)
+The data is now available in Tibco Spotfire Desktop to create various reports and tables as needed, and to be shared. For more information about creating charts, tables and reports, see the Tibco Spotfire documentation.
\ No newline at end of file
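The `<ping-command>` in the template above is a plain Drill query; as a quick sanity check before wiring up TSS, the same query can be run from the Drill shell (a sketch, assuming shell access to a Drillbit):

    0: jdbc:drill:zk=local> SELECT 1 FROM sys.version;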

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/performance-tuning/020-partition-pruning.md
----------------------------------------------------------------------
diff --git a/_docs/performance-tuning/020-partition-pruning.md b/_docs/performance-tuning/020-partition-pruning.md
index 307110f..49be254 100755
--- a/_docs/performance-tuning/020-partition-pruning.md
+++ b/_docs/performance-tuning/020-partition-pruning.md
@@ -5,17 +5,30 @@ parent: "Performance Tuning"
 
 Partition pruning is a performance optimization that limits the number of files and partitions that Drill reads when querying file systems and Hive tables. When you partition data, Drill only reads a subset of the files that reside in a file system or a subset of the partitions in a Hive table when a query matches certain filter criteria.
  
-The query planner in Drill evaluates the filters as part of a Filter operator. If no partition filters are present, the underlying Scan operator reads all files in all directories and then sends the data to operators downstream, such as Filter. When partition filters are present, the query planner determines if it can push the filters down to the Scan such that the Scan only reads the directories that match the partition filters, thus reducing disk I/O.
+The query planner in Drill performs partition pruning by evaluating the filters. If no partition filters are present, the underlying Scan operator reads all files in all directories and then sends the data downstream to operators such as Filter. When partition filters are present, the query planner pushes the filters down to the Scan if possible. The Scan then reads only the directories that match the partition filters, thus reducing disk I/O.
 
-## Determining a Partitioning Scheme  
+## How to Use Partition Pruning
 
-You can organize your data in such a way that maximizes partition pruning in Drill to optimize performance. Currently, you must partition data manually for a query to take advantage of partition pruning in Drill.
+You can partition data manually or automatically to take advantage of partition pruning in Drill. In Drill 1.0 and earlier, you must organize your data in a way that takes advantage of partition pruning. In Drill 1.1.0 and later, if the data source is Parquet, you can partition data automatically using CTAS--no data organization tasks are required. 
+
+## Automatic Partitioning
+Automatic partitioning in Drill 1.1.0 and later occurs when you write Parquet data using the [PARTITION BY]({{site.baseurl}}/docs/partition-by-clause/) clause in the CTAS statement.
+
+Automatic partitioning creates separate files, but not separate directories, for different partitions. Each file contains exactly one partition value, but there could be multiple files for the same partition value.
+
+Partition pruning uses the Parquet column statistics to determine which columns to use for pruning.
+
+## Manual Partitioning
  
-Partitioning data requires you to determine a partitioning scheme, or a logical way to store the data in a hierarchy of directories. You can then use CTAS to create Parquet files from the original data, specifying filter conditions, and then move the files into the correlating directories in the hierarchy. Once you have partitioned the data, you can create and query views on the data.
+1. Devise a logical way to store the data in a hierarchy of directories. 
+2. Use CTAS to create Parquet files from the original data, specifying filter conditions.
+3. Move the files into directories in the hierarchy. 
+
+After partitioning the data, create and query views on the data.
  
-### Partitioning Example  
+### Manual Partitioning Example  
 
-If you have several text files with log data which span multiple years, and you want to partition the data by year and quarter, you could create the following hierarchy of directories:  
+Suppose you have text files containing several years of log data. To partition the data by year and quarter, create the following hierarchy of directories:  
        
        …/logs/1994/Q1  
        …/logs/1994/Q2  
@@ -30,7 +43,7 @@ If you have several text files with log data which span multiple years, and you
        …/logs/1996/Q3  
        …/logs/1996/Q4  
 
-Once the directory structure is in place, run CTAS with a filter condition in the year and quarter for Q1 1994.
+Run the following CTAS statement, filtering on the Q1 1994 data.
  
           CREATE TABLE TT_1994_Q1 
               AS SELECT * FROM <raw table data in text format >
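For contrast with the manual steps, a minimal sketch of the automatic partitioning described earlier in this file (Drill 1.1.0 and later, Parquet output only; the table, column, and path names are hypothetical):

    ALTER SESSION SET `store.format` = 'parquet';

    CREATE TABLE dfs.tmp.logs_by_year
    PARTITION BY (log_year)
    AS SELECT log_year, quarter, entry
       FROM dfs.`/logs/staging`;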

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/query-data/010-query-data-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/010-query-data-introduction.md b/_docs/query-data/010-query-data-introduction.md
index 980c975..1d5ae5d 100644
--- a/_docs/query-data/010-query-data-introduction.md
+++ b/_docs/query-data/010-query-data-introduction.md
@@ -2,39 +2,41 @@
 title: "Query Data Introduction"
 parent: "Query Data"
 ---
-You can query local and distributed file systems, Hive, and HBase data sources
-registered with Drill. You issue the `USE
-<storage plugin>` statement to run your queries against a particular storage plugin. You use dot notation and back ticks to specify the storage plugin name and sometimes the workspace name. For example, to use the dfs storage plugin and default workspace, issue this command: ``USE dfs.`default``
+You can query local and distributed file systems, Hive, HBase data, complex data, INFORMATION SCHEMA, and system tables as described in the subtopics of this section. 
 
-Alternatively, you can omit the USE statement, and specify the storage plugin and workspace name using dot notation and back ticks. For example:
+The query specifies the data source location and may include data casting. 
+
+## Specifying the Data Source Location
+The optional [USE statement]({{site.baseurl}}/docs/use) runs subsequent queries against a particular [storage plugin]({{site.baseurl}}/docs/connect-a-data-source-introduction/). The USE statement typically saves you from typing some of the storage plugin information in the FROM clause. If you omit the USE statement, specify a storage plugin, such as dfs, optionally a workspace, such as default, and a path to the data source using dot notation and back ticks. For example:
 
 ``dfs.`default`.`/Users/drill-user/apache-drill-1.0.0/log/sqlline_queries.json```;
 
-You may need to use casting functions in some queries. For example, you may
-have to cast a string `"100"` to an integer in order to apply a math function
+## Casting Data
+In some cases, Drill converts schema-less data to correctly typed data implicitly. In such cases, you do not need to [cast the data]({{site.baseurl}}/docs/supported-data-types/#casting-and-converting-data-types) to another type. The file format of the data and the nature of your query determine the requirement for casting or converting. Differences in casting depend on the data source. 
+
+For example, you have to cast a string `"100"` in a JSON file to an integer in order to apply a math function
 or an aggregate function.
 
-You can use the EXPLAIN command to analyze errors and troubleshoot queries
+Use CONVERT_TO and CONVERT_FROM instead of the CAST function for converting binary data types, as described in section "[CONVERT_TO and CONVERT_FROM Usage Notes](/docs/data-type-conversion/#convert_to-and-convert_from-usage-notes)".
+
+## Troubleshooting Queries
+
+In addition to testing queries interactively in the Drill shell, and examining error messages, use the [EXPLAIN command]({{site.baseurl}}/docs/explain/) to analyze errors and troubleshoot queries
 that do not run. For example, if you run into a casting error, the query plan
 text may help you isolate the problem.
 
     0: jdbc:drill:zk=local> !set maxwidth 10000
     0: jdbc:drill:zk=local> explain plan for select ... ;
 
-The set command increases the default text display (number of characters). By
+[Drill shell commands]({{site.baseurl}}/docs/configuring-the-drill-shell/) include `!set <variable> <value>`, which you can use to increase the default text display (number of characters). By
 default, most of the plan output is hidden.
 
+## Query Syntax Tips
+
 Remember the following tips when querying data with Drill:
 
-  * Include a semicolon at the end of SQL statements, except when you issue a command with an exclamation point `(!).   
+  * Include a semicolon at the end of SQL statements, except when you issue a [Drill shell command]({{site.baseurl}}/docs/configuring-the-drill-shell/).   
    Example: `!set maxwidth 10000`
-  * Use backticks around file and directory names that contain special characters and also around reserved words when you query a file system.   
-    The following special characters require backticks:
-
-    * . (period)
-    * / (forward slash)
-    * _ (underscore)
-    Example: ``SELECT * FROM dfs.default.`sample_data/my_sample.json`; ``
-  * `CAST` data to `VARCHAR` if an expression in a query returns `VARBINARY` as the result type in order to view the `VARBINARY` types as readable data. If you do not use the `CAST` function, Drill returns the results as byte data.    
-     Example: `CAST (VARBINARY_expr as VARCHAR(50))`
+  * Use backticks around [keywords]({{site.baseurl}}/docs/reserved-keywords), special characters, and [identifiers]({{site.baseurl}}/docs/lexical-structure/#identifier) that SQL cannot parse, such as the keyword default and a path that contains a forward slash character:
+    Example: ``SELECT * FROM dfs.`default`.`/Users/drilluser/apache-drill-1.1.0-SNAPSHOT/sample-data/nation.parquet`;``
   * When selecting all (SELECT *) schema-less data, the order of returned columns might differ from the stored order and might vary from query to query.
\ No newline at end of file
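Putting the USE, backtick, and casting tips together, a short sketch (the sample file path and column name are hypothetical):

    USE dfs.`default`;
    SELECT CAST(t.amount AS INT) + 10 AS amount
    FROM `sample_data/my_sample.json` t;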

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/sql-reference/090-sql-extensions.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/090-sql-extensions.md b/_docs/sql-reference/090-sql-extensions.md
index ed97611..4896abd 100644
--- a/_docs/sql-reference/090-sql-extensions.md
+++ b/_docs/sql-reference/090-sql-extensions.md
@@ -6,7 +6,7 @@ Drill extends SQL to work with Hadoop-scale data and to explore smaller-scale da
 
 Drill provides language support for pointing to [storage plugin]({{site.baseurl}}/docs/connect-a-data-source-introduction) interfaces that Drill uses to interact with data sources. Use the name of a storage plugin to specify a file system *database* as a prefix in queries when you refer to objects across databases. Query files, including compressed .gz files, and [directories]({{ site.baseurl }}/docs/querying-directories), as you would query an SQL table. You can query multiple files in a directory.
 
-Drill extends the SELECT statement for reading complex, multi-structured data. The extended CREATE TABLE AS SELECT provides the capability to write data of complex/multi-structured data types. Drill extends the [lexical rules](http://drill.apache.org/docs/lexical-structure) for working with files and directories, such as using back ticks for including file names, directory names, and reserved words in queries. Drill syntax supports using the file system as a persistent store for query profiles and diagnostic information.
+Drill extends the SELECT statement for reading complex, multi-structured data. The extended CREATE TABLE AS provides the capability to write data of complex/multi-structured data types. Drill extends the [lexical rules](http://drill.apache.org/docs/lexical-structure) for working with files and directories, such as using backticks to include file names, directory names, and reserved words in queries. Drill syntax supports using the file system as a persistent store for query profiles and diagnostic information.
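+
+For example, a compressed JSON file can be queried in place much like a table (the path is illustrative):
+
+    SELECT * FROM dfs.`/tmp/logs/errors.json.gz`;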
 
 ## Extensions for Hive- and HBase-related Data Sources
 

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/sql-reference/data-types/010-supported-data-types.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/data-types/010-supported-data-types.md b/_docs/sql-reference/data-types/010-supported-data-types.md
index a30c4b9..89e001c 100644
--- a/_docs/sql-reference/data-types/010-supported-data-types.md
+++ b/_docs/sql-reference/data-types/010-supported-data-types.md
@@ -71,7 +71,7 @@ changes in the data processing, Drill regenerates the code as necessary.
 ## Casting and Converting Data Types
 
 In Drill, you cast or convert data to the required type for moving data from one data source to another or to make the data readable.
-You do not assign a data type to every column name in a CREATE TABLE statement to define the table as you do in database software. Instead, you use the CREATE TABLE AS SELECT (CTAS) statement with one or more of the following functions to define the table:
+You do not assign a data type to every column name in a CREATE TABLE statement to define the table as you do in database software. Instead, you use the CREATE TABLE AS (CTAS) statement with one or more of the following functions to define the table:
 
 * [CAST]({{ site.baseurl }}/docs/data-type-conversion#cast)    
 * [CONVERT TO/FROM]({{ site.baseurl }}/docs/data-type-conversion#convert_to-and-convert_from)   

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/sql-reference/sql-commands/030-create-table-as.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/030-create-table-as.md b/_docs/sql-reference/sql-commands/030-create-table-as.md
index f5ba9d3..1669cf6 100644
--- a/_docs/sql-reference/sql-commands/030-create-table-as.md
+++ b/_docs/sql-reference/sql-commands/030-create-table-as.md
@@ -6,7 +6,7 @@ You can create tables in Drill by using the CTAS command:
 
     CREATE TABLE new_table_name AS <query>;
 
-where query is any valid Drill query. Each table you create must have a unique
+where query is a SELECT statement. Each table you create must have a unique
 name. You can include an optional column list for the new table. For example:
 
     create table logtable(transid, prodid) as select transaction_id, product_id from ...
@@ -17,7 +17,7 @@ You can store table data in one of three formats:
   * parquet
   * json
 
-The parquet and json formats can be used to store complex data.
+The parquet and json formats can be used to store complex data. Drill automatically partitions data stored in parquet when you use the [PARTITION BY]({{site.baseurl}}/docs/partition-by-clause) clause.
 
 To set the output format for a Drill table, set the `store.format` option with
 the ALTER SYSTEM or ALTER SESSION command. For example:

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/sql-reference/sql-commands/035-partition-by-clause.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/035-partition-by-clause.md b/_docs/sql-reference/sql-commands/035-partition-by-clause.md
new file mode 100644
index 0000000..b34e5e7
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/035-partition-by-clause.md
@@ -0,0 +1,37 @@
+---
+title: "PARTITION BY Clause"
+parent: "SQL Commands"
+---
+You can take advantage of automatic partitioning in Drill 1.1 by using the PARTITION BY clause in the CTAS command:
+
+	CREATE TABLE table_name [ (column_name, . . .) ] 
+	[ PARTITION BY (column_name, . . .) ]
+	AS SELECT_statement;
+
+A CTAS statement that uses the PARTITION BY clause must store the data in Parquet format and must meet one of the following requirements:
+
+* The columns listed in the PARTITION BY clause must be included in the column list that follows the table_name.
+* The SELECT statement must use a * column if the base table in the SELECT statement is schema-less. When the partition column resolves to the * column in a schema-less query, that * column cannot be the result of a join operation.
+
+
+To create a partitioned table and verify its contents:
+
+  1. Set the workspace to a writable workspace.
+  2. Set the `store.format` option to Parquet (see the sketch after these steps).
+  3. Run a CTAS statement with the PARTITION BY clause.
+  4. Go to the directory where the table is stored and check the contents of the file.
+  5. Run a query against the new table.
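+
+For instance, steps 1 and 2 might look like this (the writable `dfs.tmp` workspace is an assumption of this sketch):
+
+	USE dfs.tmp;
+	ALTER SESSION SET `store.format` = 'parquet';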
+
+Examples:
+
+	CREATE TABLE mytable1 PARTITION BY (r_regionkey) AS
+	  SELECT r_regionkey, r_name FROM cp.`tpch/region.parquet`;
+	CREATE TABLE mytable2 PARTITION BY (r_regionkey) AS
+	  SELECT * FROM cp.`tpch/region.parquet`;
+	CREATE TABLE mytable3 PARTITION BY (r_regionkey) AS
+	  SELECT r.r_regionkey, r.r_name, n.n_nationkey, n.n_name
+	  FROM cp.`tpch/nation.parquet` n, cp.`tpch/region.parquet` r
+	  WHERE n.n_regionkey = r.r_regionkey;
+
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/sql-reference/sql-functions/020-data-type-conversion.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/020-data-type-conversion.md b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
index 77466a3..77770fe 100644
--- a/_docs/sql-reference/sql-functions/020-data-type-conversion.md
+++ b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
@@ -261,7 +261,7 @@ This example assumes you are working in the Drill Sandbox. The `maprdb` storage
 
 ### Convert the Binary HBase Students Table to JSON Data
 
-First, you set the storage format to JSON. Next, you use the CREATE TABLE AS SELECT (CTAS) statement to convert from a selected file of a different format, HBase in this example, to the storage format. You then convert the JSON file to Parquet using a similar procedure. Set the storage format to Parquet, and use a CTAS statement to convert to Parquet from JSON. In each case, you [select UTF8]({{ site.baseurl }}/docs/data-type-conversion/#convert_to-and-convert_from-data-types) as the file format because the data you are converting from and then to consists of strings.
+First, you set the storage format to JSON. Next, you use the CREATE TABLE AS (CTAS) statement to convert from a selected file of a different format, HBase in this example, to the storage format. You then convert the JSON file to Parquet using a similar procedure. Set the storage format to Parquet, and use a CTAS statement to convert to Parquet from JSON. In each case, you [select UTF8]({{ site.baseurl }}/docs/data-type-conversion/#convert_to-and-convert_from-data-types) as the file format because the data you are converting from and then to consists of strings.
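+
+A condensed sketch of the first conversion (the table and column names are illustrative, not the tutorial's exact names):
+
+    CREATE TABLE dfs.tmp.`students_json` AS
+      SELECT CONVERT_FROM(row_key, 'UTF8') AS student_id
+      FROM students;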
 
 1. Start Drill on the Drill Sandbox and set the default storage format from Parquet to JSON.
 

http://git-wip-us.apache.org/repos/asf/drill/blob/00efc0f1/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md b/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
index c16964b..308c8c3 100644
--- a/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
+++ b/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
@@ -315,7 +315,7 @@ Note that Drill views are lightweight, and can just be created in the local
 file system. Drill in standalone mode comes with a dfs.tmp workspace, which we
can use to create views (or you can define your own workspaces on a local
 or distributed file system). If you want to persist the data physically
-instead of in a logical view, you can use CREATE TABLE AS SELECT syntax.
+instead of in a logical view, you can use CREATE TABLE AS syntax.
 
     0: jdbc:drill:zk=local> create or replace view dfs.tmp.businessreviews as 
     Select b.name,b.stars,b.state,b.city,r.votes.funny,r.votes.useful,r.votes.cool, r.`date` 

