carbondata-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From chenliang...@apache.org
Subject [18/39] carbondata-site git commit: Added new page layout & updated as per new md files
Date Fri, 07 Sep 2018 16:54:05 GMT
http://git-wip-us.apache.org/repos/asf/carbondata-site/blob/44eed099/content/configuration-parameters.html
----------------------------------------------------------------------
diff --git a/content/configuration-parameters.html b/content/configuration-parameters.html
index 4f4f1cd..ba73b0d 100644
--- a/content/configuration-parameters.html
+++ b/content/configuration-parameters.html
@@ -22,6 +22,7 @@
     <![endif]-->
     <script src="js/jquery.min.js"></script>
     <script src="js/bootstrap.min.js"></script>
+    <script defer src="https://use.fontawesome.com/releases/v5.0.8/js/all.js"></script>
 
 
 </head>
@@ -67,7 +68,7 @@
                                    target="_blank">Release Archive</a></li>
                         </ul>
                     </li>
-                    <li><a href="mainpage.html" class="active">Documentation</a></li>
+                    <li><a href="documentation.html" class="active">Documentation</a></li>
                     <li class="dropdown">
                         <a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true"
                            aria-expanded="false">Community <span class="caret"></span></a>
@@ -152,7 +153,57 @@
 <section><!-- Dashboard nav -->
     <div class="container-fluid q">
         <div class="col-sm-12  col-md-12 maindashboard">
-            <div class="row">
+            <div class="verticalnavbar">
+                <nav class="b-sticky-nav">
+                    <div class="nav-scroller">
+                        <div class="nav__inner">
+                            <a class="b-nav__intro nav__item" href="./introduction.html">introduction</a>
+                            <a class="b-nav__quickstart nav__item" href="./quick-start-guide.html">quick start</a>
+                            <a class="b-nav__uses nav__item" href="./usescases.html">use cases</a>
+
+                            <div class="nav__item nav__item__with__subs">
+                                <a class="b-nav__docs nav__item nav__sub__anchor" href="./language-manual.html">Language Reference</a>
+                                <a class="nav__item nav__sub__item" href="./ddl-of-carbondata.html">DDL</a>
+                                <a class="nav__item nav__sub__item" href="./dml-of-carbondata.html">DML</a>
+                                <a class="nav__item nav__sub__item" href="./streaming-guide.html">Streaming</a>
+                                <a class="nav__item nav__sub__item" href="./configuration-parameters.html">Configuration</a>
+                                <a class="nav__item nav__sub__item" href="./datamap-developer-guide.html">Datamaps</a>
+                                <a class="nav__item nav__sub__item" href="./supported-data-types-in-carbondata.html">Data Types</a>
+                            </div>
+
+                            <div class="nav__item nav__item__with__subs">
+                                <a class="b-nav__datamap nav__item nav__sub__anchor" href="./datamap-management.html">DataMaps</a>
+                                <a class="nav__item nav__sub__item" href="./bloomfilter-datamap-guide.html">Bloom Filter</a>
+                                <a class="nav__item nav__sub__item" href="./lucene-datamap-guide.html">Lucene</a>
+                                <a class="nav__item nav__sub__item" href="./preaggregate-datamap-guide.html">Pre-Aggregate</a>
+                                <a class="nav__item nav__sub__item" href="./timeseries-datamap-guide.html">Time Series</a>
+                            </div>
+
+                            <a class="b-nav__s3 nav__item" href="./s3-guide.html">S3 Support</a>
+                            <a class="b-nav__api nav__item" href="./sdk-guide.html">API</a>
+                            <a class="b-nav__perf nav__item" href="./performance-tuning.html">Performance Tuning</a>
+                            <a class="b-nav__faq nav__item" href="./faq.html">FAQ</a>
+                            <a class="b-nav__contri nav__item" href="./how-to-contribute-to-apache-carbondata.html">Contribute</a>
+                            <a class="b-nav__security nav__item" href="./security.html">Security</a>
+                            <a class="b-nav__release nav__item" href="./release-guide.html">Release Guide</a>
+                        </div>
+                    </div>
+                    <div class="navindicator">
+                        <div class="b-nav__intro navindicator__item"></div>
+                        <div class="b-nav__quickstart navindicator__item"></div>
+                        <div class="b-nav__uses navindicator__item"></div>
+                        <div class="b-nav__docs navindicator__item"></div>
+                        <div class="b-nav__datamap navindicator__item"></div>
+                        <div class="b-nav__s3 navindicator__item"></div>
+                        <div class="b-nav__api navindicator__item"></div>
+                        <div class="b-nav__perf navindicator__item"></div>
+                        <div class="b-nav__faq navindicator__item"></div>
+                        <div class="b-nav__contri navindicator__item"></div>
+                        <div class="b-nav__security navindicator__item"></div>
+                    </div>
+                </nav>
+            </div>
+            <div class="mdcontent">
                 <section>
                     <div style="padding:10px 15px;">
                         <div id="viewpage" name="viewpage">
@@ -161,18 +212,18 @@
                                     <div>
 <h1>
 <a id="configuring-carbondata" class="anchor" href="#configuring-carbondata" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Configuring CarbonData</h1>
-<p>This tutorial guides you through the advanced configurations of CarbonData :</p>
+<p>This guide explains the configurations that can be used to tune CarbonData to achieve better performance. Some of the properties can be set dynamically and are explained in the section Dynamic Configuration In CarbonData Using SET-RESET. Most of the properties that control the internal settings have reasonable default values. They are listed below along with an explanation of each property.</p>
 <ul>
 <li><a href="#system-configuration">System Configuration</a></li>
-<li><a href="#performance-configuration">Performance Configuration</a></li>
-<li><a href="#miscellaneous-configuration">Miscellaneous Configuration</a></li>
-<li><a href="#spark-configuration">Spark Configuration</a></li>
+<li><a href="#data-loading-configuration">Data Loading Configuration</a></li>
+<li><a href="#compaction-configuration">Compaction Configuration</a></li>
+<li><a href="#query-configuration">Query Configuration</a></li>
+<li><a href="#data-mutation-configuration">Data Mutation Configuration</a></li>
 <li><a href="#dynamic-configuration-in-carbondata-using-set-reset">Dynamic Configuration In CarbonData Using SET-RESET</a></li>
 </ul>
 <h2>
 <a id="system-configuration" class="anchor" href="#system-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>System Configuration</h2>
 <p>This section provides the details of all the configurations required for the CarbonData System.</p>
-<p><b></b></p><p align="center">System Configuration in carbon.properties</p>
 <table>
 <thead>
 <tr>
@@ -184,411 +235,303 @@
 <tbody>
 <tr>
 <td>carbon.storelocation</td>
-<td></td>
-<td>Location where CarbonData will create the store, and write the data in its own format. If not specified then it takes spark.sql.warehouse.dir path. NOTE: Store location should be in HDFS.</td>
+<td>spark.sql.warehouse.dir property value</td>
+<td>Location where CarbonData will create the store, and write the data in its custom format. If not specified,the path defaults to spark.sql.warehouse.dir property. NOTE: Store location should be in HDFS.</td>
 </tr>
 <tr>
 <td>carbon.ddl.base.hdfs.url</td>
-<td></td>
-<td>This property is used to configure the HDFS relative path, the path configured in carbon.ddl.base.hdfs.url will be appended to the HDFS path configured in fs.defaultFS. If this path is configured, then user need not pass the complete path while dataload. For example: If absolute path of the csv file is hdfs://10.18.101.155:54310/data/cnbc/2016/xyz.csv, the path "hdfs://10.18.101.155:54310" will come from property fs.defaultFS and user can configure the /data/cnbc/ as carbon.ddl.base.hdfs.url. Now while dataload user can specify the csv path as /2016/xyz.csv.</td>
+<td>(none)</td>
+<td>To simplify and shorten the path to be specified in DDL/DML commands, this property is supported.This property is used to configure the HDFS relative path, the path configured in carbon.ddl.base.hdfs.url will be appended to the HDFS path configured in fs.defaultFS of core-site.xml. If this path is configured, then user need not pass the complete path while dataload. For example: If absolute path of the csv file is hdfs://10.18.101.155:54310/data/cnbc/2016/xyz.csv, the path "hdfs://10.18.101.155:54310" will come from property fs.defaultFS and user can configure the /data/cnbc/ as carbon.ddl.base.hdfs.url. Now while dataload user can specify the csv path as /2016/xyz.csv.</td>
 </tr>
 <tr>
 <td>carbon.badRecords.location</td>
-<td></td>
-<td>Path where the bad records are stored.</td>
-</tr>
-<tr>
-<td>carbon.data.file.version</td>
-<td>V3</td>
-<td>If this parameter value is set to 1, then CarbonData will support the data load which is in old format(0.x version). If the value is set to 2(1.x onwards version), then CarbonData will support the data load of new format only.</td>
+<td>(none)</td>
+<td>CarbonData can detect the records not conforming to defined table schema and isolate them as bad records.This property is used to specify where to store such bad records.</td>
 </tr>
 <tr>
 <td>carbon.streaming.auto.handoff.enabled</td>
 <td>true</td>
-<td>If this parameter value is set to true, auto trigger handoff function will be enabled.</td>
+<td>CarbonData supports storing of streaming data.To have high throughput for streaming, the data is written in Row format which is highly optimized for write, but performs poorly for query.When this property is true and when the streaming data size reaches <em><strong>carbon.streaming.segment.max.size</strong></em>, CarbonData will automatically convert the data to columnar format and optimize it for faster querying.<strong>NOTE:</strong> It is not recommended to keep the default value which is true.</td>
 </tr>
 <tr>
 <td>carbon.streaming.segment.max.size</td>
 <td>1024000000</td>
-<td>This parameter defines the maximum size of the streaming segment. Setting this parameter to appropriate value will avoid impacting the streaming ingestion. The value is in bytes.</td>
+<td>CarbonData writes streaming data in row format which is optimized for high write throughput.This property defines the maximum size of data to be held in row format, beyond which it will be converted to columnar format in order to support high performance query, provided <em><strong>carbon.streaming.auto.handoff.enabled</strong></em> is true. <strong>NOTE:</strong> Setting higher value will impact the streaming ingestion. The value has to be configured in bytes.</td>
 </tr>
 <tr>
 <td>carbon.query.show.datamaps</td>
 <td>true</td>
-<td>If this parameter value is set to true, show tables command will list all the tables including datatmaps(eg: Preaggregate table), else datamaps will be excluded from the table list.</td>
+<td>CarbonData stores datamaps as independent tables so as to allow independent maintenance to some extent.When this property is true,which is by default, show tables command will list all the tables including datamaps(eg: Preaggregate table), else datamaps will be excluded from the table list.<strong>NOTE:</strong>  It is generally not required for the user to do any maintenance operations on these tables and hence not required to be seen.But it is shown by default so that user or admin can get clear understanding of the system for capacity planning.</td>
 </tr>
 <tr>
 <td>carbon.segment.lock.files.preserve.hours</td>
 <td>48</td>
-<td>This property value indicates the number of hours the segment lock files will be preserved after dataload. These lock files will be deleted with the clean command after the configured number of hours.</td>
+<td>In order to support parallel data loading onto the same table, CarbonData sequences(locks) at the granularity of segments.Operations affecting the segment(like IUD, alter) are blocked from parallel operations.This property value indicates the number of hours the segment lock files will be preserved after dataload. These lock files will be deleted with the clean command after the configured number of hours.</td>
+</tr>
+<tr>
+<td>carbon.timestamp.format</td>
+<td>yyyy-MM-dd HH:mm:ss</td>
+<td>CarbonData can understand data of timestamp type and process it in special manner.It can be so that the format of Timestamp data is different from that understood by CarbonData by default.This configuration allows users to specify the format of Timestamp in their data.</td>
+</tr>
+<tr>
+<td>carbon.lock.type</td>
+<td>LOCALLOCK</td>
+<td>This configuration specifies the type of lock to be acquired during concurrent operations on table. There are following types of lock implementation: - LOCALLOCK: Lock is created on local file system as file. This lock is useful when only one spark driver (thrift server) runs on a machine and no other CarbonData spark application is launched concurrently. - HDFSLOCK: Lock is created on HDFS file system as file. This lock is useful when multiple CarbonData spark applications are launched and no ZooKeeper is running on cluster and HDFS supports file based locking.</td>
+</tr>
+<tr>
+<td>carbon.lock.path</td>
+<td>TABLEPATH</td>
+<td>This configuration specifies the path where lock files have to be created. Recommended to configure zookeeper lock type or configure HDFS lock path(to this property) in case of S3 file system as locking is not feasible on S3.</td>
 </tr>
 <tr>
 <td>carbon.unsafe.working.memory.in.mb</td>
 <td>512</td>
-<td>Specifies the size of executor unsafe working memory. Used for sorting data, storing column pages,etc. This value is expressed in MB.</td>
+<td>CarbonData supports storing data in off-heap memory for certain operations during data loading and query.This helps to avoid the Java GC and thereby improve the overall performance.The Minimum value recommended is 512MB.Any value below this is reset to default value of 512MB.<strong>NOTE:</strong> The below formulas explain how to arrive at the off-heap size required.Memory Required For Data Loading:(<em>carbon.number.of.cores.while.loading</em>) * (Number of tables to load in parallel) * (<em>offheap.sort.chunk.size.inmb</em> + <em>carbon.blockletgroup.size.in.mb</em> + <em>carbon.blockletgroup.size.in.mb</em>/3.5 ). Memory required for Query:SPARK_EXECUTOR_INSTANCES * (<em>carbon.blockletgroup.size.in.mb</em> + <em>carbon.blockletgroup.size.in.mb</em> * 3.5) * spark.executor.cores</td>
 </tr>
 <tr>
-<td>carbon.unsafe.driver.working.memory.in.mb</td>
-<td>512</td>
-<td>Specifies the size of driver unsafe working memory. Used for storing block or blocklet datamap cache. If not configured then carbon.unsafe.working.memory.in.mb value is considered. This value is expressed in MB.</td>
+<td>carbon.update.sync.folder</td>
+<td>/tmp/carbondata</td>
+<td>CarbonData maintains last modification time entries in modifiedTime.mdt to determine the schema changes and reload only when necessary.This configuration specifies the path where the file needs to be written.</td>
+</tr>
+<tr>
+<td>carbon.invisible.segments.preserve.count</td>
+<td>200</td>
+<td>CarbonData maintains each data load entry in tablestatus file. The entries from this file are not deleted for those segments that are compacted or dropped, but are made invisible.If the number of data loads are very high, the size and number of entries in tablestatus file can become too many causing unnecessary reading of all data.This configuration specifies the number of segment entries to be maintained after they are compacted or dropped.Beyond this, the entries are moved to a separate history tablestatus file.<strong>NOTE:</strong> The entries in tablestatus file help to identify the operations performed on CarbonData table and is also used for checkpointing during various data manipulation operations.This is similar to AUDIT file maintaining all the operations and its status.Hence the entries are never deleted but moved to a separate history file.</td>
+</tr>
+<tr>
+<td>carbon.lock.retries</td>
+<td>3</td>
+<td>CarbonData ensures consistency of operations by blocking certain operations from running in parallel.In order to block the operations from running in parallel, lock is obtained on the table.This configuration specifies the maximum number of retries to obtain the lock for any operations other than load.<strong>NOTE:</strong> Data manipulation operations like Compaction,UPDATE,DELETE  or LOADING,UPDATE,DELETE are not allowed to run in parallel.However data loading can happen in parallel to compaction.</td>
+</tr>
+<tr>
+<td>carbon.lock.retry.timeout.sec</td>
+<td>5</td>
+<td>Specifies the interval between the retries to obtain the lock for any operation other than load.<strong>NOTE:</strong> Refer to <em><strong>carbon.lock.retries</strong></em> for understanding why CarbonData uses locks for operations.</td>
 </tr>
 </tbody>
 </table>
 <h2>
-<a id="performance-configuration" class="anchor" href="#performance-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Performance Configuration</h2>
-<p>This section provides the details of all the configurations required for CarbonData Performance Optimization.</p>
-<p><b></b></p><p align="center">Performance Configuration in carbon.properties</p>
-<ul>
-<li><strong>Data Loading Configuration</strong></li>
-</ul>
+<a id="data-loading-configuration" class="anchor" href="#data-loading-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Data Loading Configuration</h2>
 <table>
 <thead>
 <tr>
 <th>Parameter</th>
 <th>Default Value</th>
 <th>Description</th>
-<th>Range</th>
 </tr>
 </thead>
 <tbody>
 <tr>
 <td>carbon.number.of.cores.while.loading</td>
 <td>2</td>
-<td>Number of cores to be used while loading data.</td>
-<td></td>
+<td>Number of cores to be used while loading data.This also determines the number of threads to be used to read the input files (csv) in parallel.<strong>NOTE:</strong> This configured value is used in every data loading step to parallelize the operations. Configuring a higher value can lead to increased early thread pre-emption by OS and there by reduce the overall performance.</td>
 </tr>
 <tr>
 <td>carbon.sort.size</td>
 <td>100000</td>
-<td>Record count to sort and write intermediate files to temp.</td>
-<td></td>
-</tr>
-<tr>
-<td>carbon.max.driver.lru.cache.size</td>
-<td>-1</td>
-<td>Max LRU cache size upto which data will be loaded at the driver side. This value is expressed in MB. Default value of -1 means there is no memory limit for caching. Only integer values greater than 0 are accepted.</td>
-<td></td>
-</tr>
-<tr>
-<td>carbon.max.executor.lru.cache.size</td>
-<td>-1</td>
-<td>Max LRU cache size upto which data will be loaded at the executor side. This value is expressed in MB. Default value of -1 means there is no memory limit for caching. Only integer values greater than 0 are accepted. If this parameter is not configured, then the carbon.max.driver.lru.cache.size value will be considered.</td>
-<td></td>
-</tr>
-<tr>
-<td>carbon.merge.sort.prefetch</td>
-<td>true</td>
-<td>Enable prefetch of data during merge sort while reading data from sort temp files in data loading.</td>
-<td></td>
-</tr>
-<tr>
-<td>carbon.insert.persist.enable</td>
-<td>false</td>
-<td>Enabling this parameter considers persistent data. If we are executing insert into query from source table using select statement &amp; loading the same source table concurrently, when select happens on source table during the data load, it gets new record for which dictionary is not generated, so there will be inconsistency. To avoid this condition we can persist the dataframe into MEMORY_AND_DISK(default value) and perform insert into operation. By default this value will be false because no need to persist the dataframe in all cases. If user wants to run load and insert queries on source table concurrently then user can enable this parameter.</td>
-<td></td>
-</tr>
-<tr>
-<td>carbon.insert.storage.level</td>
-<td>MEMORY_AND_DISK</td>
-<td>Which storage level to persist dataframe when 'carbon.insert.persist.enable'=true, if user's executor has less memory, set this parameter to 'MEMORY_AND_DISK_SER' or other storage level to correspond to different environment. <a href="http://spark.apache.org/docs/latest/rdd-programming-guide.html#rdd-persistence" rel="nofollow">See detail</a>.</td>
-<td></td>
-</tr>
-<tr>
-<td>carbon.update.persist.enable</td>
-<td>true</td>
-<td>Enabling this parameter considers persistent data. Enabling this will reduce the execution time of UPDATE operation.</td>
-<td></td>
-</tr>
-<tr>
-<td>carbon.update.storage.level</td>
-<td>MEMORY_AND_DISK</td>
-<td>Which storage level to persist dataframe when 'carbon.update.persist.enable'=true, if user's executor has less memory, set this parameter to 'MEMORY_AND_DISK_SER' or other storage level to correspond to different environment. <a href="http://spark.apache.org/docs/latest/rdd-programming-guide.html#rdd-persistence" rel="nofollow">See detail</a>.</td>
-<td></td>
+<td>Number of records to hold in memory to sort and write intermediate temp files.<strong>NOTE:</strong> Memory required for data loading increases with increase in configured value as each thread would cache configured number of records.</td>
 </tr>
 <tr>
 <td>carbon.global.sort.rdd.storage.level</td>
 <td>MEMORY_ONLY</td>
-<td>Which storage level to persist rdd when loading data with 'sort_scope'='global_sort', if user's executor has less memory, set this parameter to 'MEMORY_AND_DISK_SER' or other storage level to correspond to different environment. <a href="http://spark.apache.org/docs/latest/rdd-programming-guide.html#rdd-persistence" rel="nofollow">See detail</a>.</td>
-<td></td>
+<td>Storage level to persist dataset of RDD/dataframe when loading data with 'sort_scope'='global_sort', if user's executor has less memory, set this parameter to 'MEMORY_AND_DISK_SER' or other storage level to correspond to different environment. <a href="http://spark.apache.org/docs/latest/rdd-programming-guide.html#rdd-persistence" rel="nofollow">See detail</a>.</td>
 </tr>
 <tr>
 <td>carbon.load.global.sort.partitions</td>
 <td>0</td>
-<td>The Number of partitions to use when shuffling data for sort. If user don't configurate or configurate it less than 1, it uses the number of map tasks as reduce tasks. In general, we recommend 2-3 tasks per CPU core in your cluster.</td>
-<td></td>
+<td>The Number of partitions to use when shuffling data for sort. Default value 0 means to use same number of map tasks as reduce tasks.<strong>NOTE:</strong> In general, it is recommended to have 2-3 tasks per CPU core in your cluster.</td>
 </tr>
 <tr>
 <td>carbon.options.bad.records.logger.enable</td>
 <td>false</td>
-<td>Whether to create logs with details about bad records.</td>
-<td></td>
+<td>CarbonData can identify the records that are not conformant to schema and isolate them as bad records.Enabling this configuration will make CarbonData to log such bad records.<strong>NOTE:</strong> If the input data contains many bad records, logging them will slow down the over all data loading throughput.The data load operation status would depend on the configuration in <em><strong>carbon.bad.records.action</strong></em>.</td>
 </tr>
 <tr>
 <td>carbon.bad.records.action</td>
-<td>FORCE</td>
-<td>This property can have four types of actions for bad records FORCE, REDIRECT, IGNORE and FAIL. If set to FORCE then it auto-corrects the data by storing the bad records as NULL. If set to REDIRECT then bad records are written to the raw CSV instead of being loaded. If set to IGNORE then bad records are neither loaded nor written to the raw CSV. If set to FAIL then data loading fails if any bad records are found.</td>
-<td></td>
+<td>FAIL</td>
+<td>CarbonData in addition to identifying the bad records, can take certain actions on such data.This configuration can have four types of actions for bad records namely FORCE, REDIRECT, IGNORE and FAIL. If set to FORCE then it auto-corrects the data by storing the bad records as NULL. If set to REDIRECT then bad records are written to the raw CSV instead of being loaded. If set to IGNORE then bad records are neither loaded nor written to the raw CSV. If set to FAIL then data loading fails if any bad records are found.</td>
 </tr>
 <tr>
 <td>carbon.options.is.empty.data.bad.record</td>
 <td>false</td>
-<td>If false, then empty ("" or '' or ,,) data will not be considered as bad record and vice versa.</td>
-<td></td>
+<td>Based on the business scenarios, empty("" or '' or ,,) data can be valid or invalid. This configuration controls how empty data should be treated by CarbonData. If false, then empty ("" or '' or ,,) data will not be considered as bad record and vice versa.</td>
 </tr>
 <tr>
 <td>carbon.options.bad.record.path</td>
-<td></td>
-<td>Specifies the HDFS path where bad records are stored. By default the value is Null. This path must to be configured by the user if bad record logger is enabled or bad record action redirect.</td>
-<td></td>
-</tr>
-<tr>
-<td>carbon.enable.vector.reader</td>
-<td>true</td>
-<td>This parameter increases the performance of select queries as it fetch columnar batch of size 4*1024 rows instead of fetching data row by row.</td>
-<td></td>
+<td>(none)</td>
+<td>Specifies the HDFS path where bad records are to be stored. By default the value is Null. This path must to be configured by the user if <em><strong>carbon.options.bad.records.logger.enable</strong></em> is <strong>true</strong> or <em><strong>carbon.bad.records.action</strong></em> is <strong>REDIRECT</strong>.</td>
 </tr>
 <tr>
 <td>carbon.blockletgroup.size.in.mb</td>
-<td>64 MB</td>
-<td>The data are read as a group of blocklets which are called blocklet groups. This parameter specifies the size of the blocklet group. Higher value results in better sequential IO access.The minimum value is 16MB, any value lesser than 16MB will reset to the default value (64MB).</td>
-<td></td>
+<td>64</td>
+<td>Please refer to <a href="./file-structure-of-carbondata.html">file-structure-of-carbondata</a> to understand the storage format of CarbonData.The data are read as a group of blocklets which are called blocklet groups. This parameter specifies the size of each blocklet group. Higher value results in better sequential IO access.The minimum value is 16MB, any value lesser than 16MB will reset to the default value (64MB).<strong>NOTE:</strong> Configuring a higher value might lead to poor performance as an entire blocklet group will have to read into memory before processing.For filter queries with limit, it is <strong>not advisable</strong> to have a bigger blocklet size.For Aggregation queries which need to return more number of rows,bigger blocklet size is advisable.</td>
 </tr>
 <tr>
-<td>carbon.task.distribution</td>
-<td>block</td>
-<td>
-<strong>block</strong>: Setting this value will launch one task per block. This setting is suggested in case of concurrent queries and queries having big shuffling scenarios. <strong>custom</strong>: Setting this value will group the blocks and distribute it uniformly to the available resources in the cluster. This enhances the query performance but not suggested in case of concurrent queries and queries having big shuffling scenarios. <strong>blocklet</strong>: Setting this value will launch one task per blocklet. This setting is suggested in case of concurrent queries and queries having big shuffling scenarios. <strong>merge_small_files</strong>: Setting this value will merge all the small partitions to a size of (128 MB is the default value of "spark.sql.files.maxPartitionBytes",it is configurable) during querying. The small partitions are combined to a map task to reduce the number of read task. This enhances the performance.</td>
-<td></td>
-</tr>
-<tr>
-<td>carbon.load.sortmemory.spill.percentage</td>
-<td>0</td>
-<td>If we use unsafe memory during data loading, this configuration will be used to control the behavior of spilling inmemory pages to disk. Internally in Carbondata, during sorting carbondata will sort data in pages and add them in unsafe memory. If the memory is insufficient, carbondata will spill the pages to disk and generate sort temp file. This configuration controls how many pages in memory will be spilled to disk based size. The size can be calculated by multiplying this configuration value with 'carbon.sort.storage.inmemory.size.inmb'. For example, default value 0 means that no pages in unsafe memory will be spilled and all the newly sorted data will be spilled to disk; Value 50 means that if the unsafe memory is insufficient, about half of pages in the unsafe memory will be spilled to disk while value 100 means that almost all pages in unsafe memory will be spilled. <strong>Note</strong>: This configuration only works for 'LOCAL_SORT' and 'BATCH_SORT' and the actual spilli
 ng behavior may slightly be different in each data loading.</td>
-<td>Integer values between 0 and 100</td>
-</tr>
-</tbody>
-</table>
-<ul>
-<li><strong>Compaction Configuration</strong></li>
-</ul>
-<table>
-<thead>
-<tr>
-<th>Parameter</th>
-<th>Default Value</th>
-<th>Description</th>
-<th>Range</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>carbon.number.of.cores.while.compacting</td>
-<td>2</td>
-<td>Number of cores which are used to write data during compaction.</td>
-<td></td>
+<td>carbon.sort.file.write.buffer.size</td>
+<td>16384</td>
+<td>CarbonData sorts and writes data to intermediate files to limit the memory usage.This configuration determines the buffer size to be used for reading and writing such files. <strong>NOTE:</strong> This configuration is useful to tune IO and derive optimal performance.Based on the OS and underlying harddisk type, these values can significantly affect the overall performance.It is ideal to tune the buffersize equivalent to the IO buffer size of the OS.Recommended range is between 10240 to 10485760 bytes.</td>
 </tr>
 <tr>
-<td>carbon.compaction.level.threshold</td>
-<td>4, 3</td>
-<td>This property is for minor compaction which decides how many segments to be merged. Example: If it is set as 2, 3 then minor compaction will be triggered for every 2 segments. 3 is the number of level 1 compacted segment which is further compacted to new segment.</td>
-<td>Valid values are from 0-100.</td>
+<td>carbon.sort.intermediate.files.limit</td>
+<td>20</td>
+<td>CarbonData sorts and writes data to intermediate files to limit the memory usage.Before writing the target carbondata file, the data in these intermediate files needs to be sorted again so as to ensure the entire data in the data load is sorted.This configuration determines the minimum number of intermediate files after which merged sort is applied on them to sort the data.<strong>NOTE:</strong> Intermediate merging happens on a separate thread in the background.Number of threads used is determined by <em><strong>carbon.merge.sort.reader.thread</strong></em>.Configuring a low value will cause more time to be spent in merging these intermediate merged files which can cause more IO.Configuring a high value would cause not to use the idle threads to do intermediate sort merges.Range of recommended values are between 2 and 50</td>
 </tr>
 <tr>
-<td>carbon.major.compaction.size</td>
-<td>1024</td>
-<td>Major compaction size can be configured using this parameter. Sum of the segments which is below this threshold will be merged. This value is expressed in MB.</td>
-<td></td>
+<td>carbon.csv.read.buffersize.byte</td>
+<td>1048576</td>
+<td>CarbonData uses Hadoop InputFormat to read the csv files.This configuration value is used to pass buffer size as input for the Hadoop MR job when reading the csv files.This value is configured in bytes.<strong>NOTE:</strong> Refer to <em><strong>org.apache.hadoop.mapreduce.InputFormat</strong></em> documentation for additional information.</td>
 </tr>
 <tr>
-<td>carbon.horizontal.compaction.enable</td>
-<td>true</td>
-<td>This property is used to turn ON/OFF horizontal compaction. After every DELETE and UPDATE statement, horizontal compaction may occur in case the delta (DELETE/ UPDATE) files becomes more than specified threshold.</td>
-<td></td>
+<td>carbon.merge.sort.reader.thread</td>
+<td>3</td>
+<td>CarbonData sorts and writes data to intermediate files to limit the memory usage.When the intermediate files reaches <em><strong>carbon.sort.intermediate.files.limit</strong></em> the files will be merged,the number of threads specified in this configuration will be used to read the intermediate files for performing merge sort.<strong>NOTE:</strong> Refer to <em><strong>carbon.sort.intermediate.files.limit</strong></em> for operation description.Configuring less  number of threads can cause merging to slow down over loading process where as configuring more number of threads can cause thread contention with threads in other data loading steps.Hence configure a fraction of <em><strong>carbon.number.of.cores.while.loading</strong></em>.</td>
 </tr>
 <tr>
-<td>carbon.horizontal.UPDATE.compaction.threshold</td>
-<td>1</td>
-<td>This property specifies the threshold limit on number of UPDATE delta files within a segment. In case the number of delta files goes beyond the threshold, the UPDATE delta files within the segment becomes eligible for horizontal compaction and compacted into single UPDATE delta file.</td>
-<td>Values between 1 to 10000.</td>
+<td>carbon.concurrent.lock.retries</td>
+<td>100</td>
+<td>CarbonData supports concurrent data loading onto same table.To ensure the loading status is correctly updated into the system,locks are used to sequence the status updation step.This configuration specifies the maximum number of retries to obtain the lock for updating the load status.<strong>NOTE:</strong> This value is high as more number of concurrent loading happens,more the chances of not able to obtain the lock when tried.Adjust this value according to the number of concurrent loading to be supported by the system.</td>
 </tr>
 <tr>
-<td>carbon.horizontal.DELETE.compaction.threshold</td>
+<td>carbon.concurrent.lock.retry.timeout.sec</td>
 <td>1</td>
-<td>This property specifies the threshold limit on number of DELETE delta files within a block of a segment. In case the number of delta files goes beyond the threshold, the DELETE delta files for the particular block of the segment becomes eligible for horizontal compaction and compacted into single DELETE delta file.</td>
-<td>Values between 1 to 10000.</td>
+<td>Specifies the interval between the retries to obtain the lock for concurrent operations.<strong>NOTE:</strong> Refer to <em><strong>carbon.concurrent.lock.retries</strong></em> for understanding why CarbonData uses locks during data loading operations.</td>
 </tr>
 <tr>
-<td>carbon.update.segment.parallelism</td>
-<td>1</td>
-<td>This property specifies the parallelism for each segment during update. If there are segments that contain too many records to update and the spark job encounter data-spill related errors, it is better to increase this property value. It is recommended to set this value to a multiple of the number of executors for balance.</td>
-<td>Values between 1 to 1000.</td>
+<td>carbon.skip.empty.line</td>
+<td>false</td>
+<td>The csv files given to CarbonData for loading can contain empty lines.Based on the business scenario, this empty line might have to be ignored or needs to be treated as NULL value for all columns.In order to define this business behavior, this configuration is provided.<strong>NOTE:</strong> In order to consider NULL values for non string columns and continue with data load, <em><strong>carbon.bad.records.action</strong></em> need to be set to <strong>FORCE</strong>;else data load will be failed as bad records encountered.</td>
 </tr>
 <tr>
-<td>carbon.merge.index.in.segment</td>
+<td>carbon.enable.calculate.size</td>
 <td>true</td>
-<td>This property is used to merge all carbon index files (.carbonindex) inside a segment to a single carbon index merge file (.carbonindexmerge).</td>
-<td>Values true or false</td>
+<td>
+<strong>For Load Operation</strong>: Setting this property calculates the size of the carbon data file (.carbondata) and carbon index file (.carbonindex) for every load and updates the table status file. <strong>For Describe Formatted</strong>: Setting this property calculates the total size of the carbon data files and carbon index files for the respective table and displays in describe formatted command.<strong>NOTE:</strong> This is useful to determine the overall size of the carbondata table and also get an idea of how the table is growing in order to take up other backup strategy decisions.</td>
 </tr>
-</tbody>
-</table>
-<ul>
-<li><strong>Query Configuration</strong></li>
-</ul>
-<table>
-<thead>
 <tr>
-<th>Parameter</th>
-<th>Default Value</th>
-<th>Description</th>
-<th>Range</th>
+<td>carbon.cutOffTimestamp</td>
+<td>(none)</td>
+<td>CarbonData has capability to generate the Dictionary values for the timestamp columns from the data itself without the need to store the computed dictionary values. This configuration sets the start date for calculating the timestamp. Java counts the number of milliseconds from start of "1970-01-01 00:00:00". This property is used to customize the start of position. For example "2000-01-01 00:00:00". <strong>NOTE:</strong> The date must be in the form <em><strong>carbon.timestamp.format</strong></em>. CarbonData supports storing data for upto 68 years.For example, if the cut-off time is 1970-01-01 05:30:00, then data upto 2038-01-01 05:30:00 will be supported by CarbonData.</td>
 </tr>
-</thead>
-<tbody>
 <tr>
-<td>carbon.number.of.cores</td>
-<td>4</td>
-<td>Number of cores to be used while querying.</td>
-<td></td>
+<td>carbon.timegranularity</td>
+<td>SECOND</td>
+<td>The configuration is used to specify the data granularity level such as DAY, HOUR, MINUTE, or SECOND.This helps to store more than 68 years of data into CarbonData.</td>
 </tr>
 <tr>
-<td>carbon.enable.quick.filter</td>
+<td>carbon.use.local.dir</td>
 <td>false</td>
-<td>Improves the performance of filter query.</td>
-<td></td>
+<td>CarbonData during data loading, writes files to local temp directories before copying the files to HDFS.This configuration is used to specify whether CarbonData can write locally to tmp directory of the container or to the YARN application directory.</td>
 </tr>
-</tbody>
-</table>
-<h2>
-<a id="miscellaneous-configuration" class="anchor" href="#miscellaneous-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Miscellaneous Configuration</h2>
-<p><b></b></p><p align="center">Extra Configuration in carbon.properties</p>
-<ul>
-<li><strong>Time format for CarbonData</strong></li>
-</ul>
-<table>
-<thead>
 <tr>
-<th>Parameter</th>
-<th>Default Format</th>
-<th>Description</th>
+<td>carbon.use.multiple.temp.dir</td>
+<td>false</td>
+<td>When multiple disks are present in the system, YARN is generally configured with multiple disks to be used as temp directories for managing the containers.This configuration specifies whether to use multiple YARN local directories during data loading for disk IO load balancing.Enable <em><strong>carbon.use.local.dir</strong></em> for this configuration to take effect.<strong>NOTE:</strong> Data Loading is an IO intensive operation whose performance can be limited by the disk IO threshold, particularly during multi table concurrent data load.Configuring this parameter, balances the disk IO across multiple disks there by improving the over all load performance.</td>
 </tr>
-</thead>
-<tbody>
 <tr>
-<td>carbon.timestamp.format</td>
-<td>yyyy-MM-dd HH:mm:ss</td>
-<td>Timestamp format of input data used for timestamp data type.</td>
+<td>carbon.sort.temp.compressor</td>
+<td>(none)</td>
+<td>CarbonData writes every <em><strong>carbon.sort.size</strong></em> number of records to intermediate temp files during data loading to ensure memory footprint is within limits.These temporary files can be compressed and written in order to save the storage space.This configuration specifies the name of compressor to be used to compress the intermediate sort temp files during sort procedure in data loading.The valid values are 'SNAPPY','GZIP','BZIP2','LZ4','ZSTD' and empty. By default, empty means that Carbondata will not compress the sort temp files.<strong>NOTE:</strong> Compressor will be useful if you encounter disk bottleneck.Since the data needs to be compressed and decompressed,it involves additional CPU cycles,but is compensated by the high IO throughput due to less data to be written or read from the disks.</td>
 </tr>
-</tbody>
-</table>
-<ul>
-<li><strong>Dataload Configuration</strong></li>
-</ul>
-<table>
-<thead>
 <tr>
-<th>Parameter</th>
-<th>Default Value</th>
-<th>Description</th>
+<td>carbon.load.skewedDataOptimization.enabled</td>
+<td>false</td>
+<td>During data loading,CarbonData would divide the number of blocks equally so as to ensure all executors process same number of blocks.This mechanism satisfies most of the scenarios and ensures maximum parallel processing for optimal data loading performance.In some business scenarios, there might be scenarios where the size of blocks vary significantly and hence some executors would have to do more work if they get blocks containing more data. This configuration enables size based block allocation strategy for data loading.When loading, carbondata will use file size based block allocation strategy for task distribution. It will make sure that all the executors process the same size of data.<strong>NOTE:</strong> This configuration is useful if the size of your input data files varies widely, say 1MB~1GB.For this configuration to work effectively,knowing the data pattern and size is important and necessary.</td>
 </tr>
-</thead>
-<tbody>
 <tr>
-<td>carbon.sort.file.write.buffer.size</td>
-<td>16384</td>
-<td>File write buffer size used during sorting. Minimum allowed buffer size is 10240 byte and Maximum allowed buffer size is 10485760 byte.</td>
+<td>carbon.load.min.size.enabled</td>
+<td>false</td>
+<td>During Data Loading, CarbonData would divide the number of files among the available executors to parallelize the loading operation.When the input data files are very small, this action causes to generate many small carbondata files.This configuration determines whether to enable node minimum input data size allocation strategy for data loading.It will make sure that the node load the minimum amount of data there by reducing number of carbondata files.<strong>NOTE:</strong> This configuration is useful if the size of the input data files are very small, like 1MB~256MB.Refer to <em><strong>load_min_size_inmb</strong></em> to configure the minimum size to be considered for splitting files among executors.</td>
 </tr>
 <tr>
-<td>carbon.lock.type</td>
-<td>LOCALLOCK</td>
-<td>This configuration specifies the type of lock to be acquired during concurrent operations on table. There are following types of lock implementation: - LOCALLOCK: Lock is created on local file system as file. This lock is useful when only one spark driver (thrift server) runs on a machine and no other CarbonData spark application is launched concurrently. - HDFSLOCK: Lock is created on HDFS file system as file. This lock is useful when multiple CarbonData spark applications are launched and no ZooKeeper is running on cluster and HDFS supports file based locking.</td>
+<td>enable.data.loading.statistics</td>
+<td>false</td>
+<td>CarbonData has extensive logging which would be useful for debugging issues related to performance or hard to locate issues.This configuration when made <em><strong>true</strong></em> would log additional data loading statistics information to more accurately locate the issues being debugged.<strong>NOTE:</strong> Enabling this would log more debug information to log files, there by increasing the log files size significantly in short span of time.It is advised to configure the log files size, retention of log files parameters in log4j properties appropriately.Also extensive logging is an increased IO operation and hence over all data loading performance might get reduced.Therefore it is recommended to enable this configuration only for the duration of debugging.</td>
 </tr>
 <tr>
-<td>carbon.lock.path</td>
-<td>TABLEPATH</td>
-<td>Locks on the files are used to prevent concurrent operation from modifying the same files. This</td>
+<td>carbon.dictionary.chunk.size</td>
+<td>10000</td>
+<td>CarbonData generates dictionary keys and writes them to separate dictionary file during data loading.To optimize the IO, this configuration determines the number of dictionary keys to be persisted to dictionary file at a time.<strong>NOTE:</strong> Writing to file also serves as a commit point to the dictionary generated.Increasing more values in memory causes more data loss during system or application failure.It is advised to alter this configuration judiciously.</td>
 </tr>
 <tr>
-<td>configuration specifies the path where lock files have to be created. Recommended to configure</td>
-<td></td>
-<td></td>
+<td>dictionary.worker.threads</td>
+<td>1</td>
+<td>CarbonData supports Optimized data loading by relying on a dictionary server.Dictionary server helps  to maintain dictionary values independent of the data loading and there by avoids reading the same input data multiple times.This configuration determines the number of concurrent dictionary generation or request that needs to be served by the dictionary server.<strong>NOTE:</strong> This configuration takes effect when <em><strong>carbon.options.single.pass</strong></em> is configured as true.Please refer to <em>carbon.options.single.pass</em> to understand how dictionary server optimizes data loading.</td>
 </tr>
 <tr>
-<td>HDFS lock path(to this property) in case of S3 file system as locking is not feasible on S3.</td>
-<td></td>
-<td></td>
+<td>enable.unsafe.sort</td>
+<td>true</td>
+<td>CarbonData supports unsafe operations of Java to avoid GC overhead for certain operations.This configuration enables to use unsafe functions in CarbonData.<strong>NOTE:</strong> For operations like data loading, which generates more short lived Java objects, Java GC can be a bottle neck.Using unsafe can overcome the GC overhead and improve the overall performance.</td>
 </tr>
 <tr>
-<td>
-<strong>Note:</strong> If this property is not set to HDFS location for S3 store, then there is a possibility</td>
-<td></td>
-<td></td>
+<td>enable.offheap.sort</td>
+<td>true</td>
+<td>CarbonData supports storing data in off-heap memory for certain operations during data loading and query.This helps to avoid the Java GC and thereby improve the overall performance.This configuration enables using off-heap memory for sorting of data during data loading.<strong>NOTE:</strong>  <em><strong>enable.unsafe.sort</strong></em> configuration needs to be configured to true for using off-heap</td>
 </tr>
 <tr>
-<td>of data corruption because multiple data manipulation calls might try to update the status file</td>
-<td></td>
-<td></td>
+<td>enable.inmemory.merge.sort</td>
+<td>false</td>
+<td>CarbonData sorts and writes data to intermediate files to limit the memory usage.These intermediate files needs to be sorted again using merge sort before writing to the final carbondata file.Performing merge sort in memory would increase the sorting performance at the cost of increased memory footprint. This Configuration specifies to do in-memory merge sort or to do file based merge sort.</td>
 </tr>
 <tr>
-<td>and as lock is not acquired before updation data might get overwritten.</td>
-<td></td>
-<td></td>
+<td>carbon.load.sort.scope</td>
+<td>LOCAL_SORT</td>
+<td>CarbonData can support various sorting options to match the balance between load and query performance.LOCAL_SORT:All the data given to an executor in the single load is fully sorted and written to carbondata files.Data loading performance is reduced a little as the entire data needs to be sorted in the executor.BATCH_SORT:Sorts the data in batches of configured size and writes to carbondata files.Data loading performance increases as the entire data need not be sorted.But query performance will get reduced due to false positives in block pruning and also due to more number of carbondata files written.Due to more number of carbondata files, if identified blocks &gt; cluster parallelism, query performance and concurrency will get reduced.GLOBAL SORT:Entire data in the data load is fully sorted and written to carbondata files.Data loading performance would get reduced as the entire data needs to be sorted.But the query performance increases significantly due to very less false positives and concurrency is also improved.<strong>NOTE:</strong> when BATCH_SORT is configured, it is recommended to keep <em><strong>carbon.load.batch.sort.size.inmb</strong></em> &gt; <em><strong>carbon.blockletgroup.size.in.mb</strong></em>
+</td>
 </tr>
 <tr>
-<td>carbon.sort.intermediate.files.limit</td>
-<td>20</td>
-<td>Minimum number of intermediate files after which merged sort can be started (minValue = 2, maxValue=50).</td>
+<td>carbon.load.batch.sort.size.inmb</td>
+<td>0</td>
+<td>When <em><strong>carbon.load.sort.scope</strong></em> is configured as <em><strong>BATCH_SORT</strong></em>, this configuration needs to be added to specify the batch size for sorting and writing to carbondata files.<strong>NOTE:</strong> It is recommended to keep the value around 45% of <em><strong>carbon.sort.storage.inmemory.size.inmb</strong></em> to avoid spill to disk.Also it is recommended to keep the value higher than <em><strong>carbon.blockletgroup.size.in.mb</strong></em>. Refer to <em>carbon.load.sort.scope</em> for more information on sort options and the advantages/disadvantages of each option.</td>
 </tr>
 <tr>
-<td>carbon.block.meta.size.reserved.percentage</td>
-<td>10</td>
-<td>Space reserved in percentage for writing block meta data in CarbonData file.</td>
+<td>carbon.dictionary.server.port</td>
+<td>2030</td>
+<td>Single Pass Loading enables single job to finish data loading with dictionary generation on the fly. It enhances performance in the scenarios where the subsequent data loading after initial load involves fewer incremental updates on the dictionary.Single pass loading can be enabled using the option <em><strong>carbon.options.single.pass</strong></em>.When this option is specified, a dictionary server will be internally started to handle the dictionary generation and query requests.This configuration specifies the port on which the server need to listen for incoming requests.Port value ranges between 0-65535</td>
 </tr>
 <tr>
-<td>carbon.csv.read.buffersize.byte</td>
-<td>1048576</td>
-<td>csv reading buffer size.</td>
+<td>carbon.merge.sort.prefetch</td>
+<td>true</td>
+<td>CarbonData writes every <em><strong>carbon.sort.size</strong></em> number of records to intermediate temp files during data loading to ensure memory footprint is within limits.These intermediate temp files will have to be sorted using merge sort before writing into CarbonData format.This configuration enables pre fetching of data from these temp files in order to optimize IO and speed up data loading process.</td>
 </tr>
 <tr>
-<td>carbon.merge.sort.reader.thread</td>
-<td>3</td>
-<td>Maximum no of threads used for reading intermediate files for final merging.</td>
+<td>carbon.loading.prefetch</td>
+<td>false</td>
+<td>CarbonData uses univocity parser to read csv files.This configuration is used to inform the parser whether it can prefetch the data from csv files to speed up the reading.<strong>NOTE:</strong> Enabling prefetch improves the data loading performance, but needs higher memory to keep more records which are read ahead from disk.</td>
 </tr>
 <tr>
-<td>carbon.concurrent.lock.retries</td>
-<td>100</td>
-<td>Specifies the maximum number of retries to obtain the lock for concurrent operations. This is used for concurrent loading.</td>
+<td>carbon.prefetch.buffersize</td>
+<td>1000</td>
+<td>When the configuration <em><strong>carbon.merge.sort.prefetch</strong></em> is configured to true, we need to set the number of records that can be prefetched.This configuration is used to specify the number of records to be prefetched.<strong>NOTE:</strong> Configuring more number of records to be prefetched increases memory footprint as more records will have to be kept in memory.</td>
 </tr>
 <tr>
-<td>carbon.concurrent.lock.retry.timeout.sec</td>
-<td>1</td>
-<td>Specifies the interval between the retries to obtain the lock for concurrent operations.</td>
+<td>load_min_size_inmb</td>
+<td>256</td>
+<td>This configuration is used along with <em><strong>carbon.load.min.size.enabled</strong></em>.This determines the minimum size of input files to be considered for distribution among executors while data loading.<strong>NOTE:</strong> Refer to <em><strong>carbon.load.min.size.enabled</strong></em> for understanding when this configuration needs to be used and its advantages and disadvantages.</td>
 </tr>
 <tr>
-<td>carbon.lock.retries</td>
-<td>3</td>
-<td>Specifies the maximum number of retries to obtain the lock for any operations other than load.</td>
+<td>carbon.load.sortmemory.spill.percentage</td>
+<td>0</td>
+<td>During data loading, some data pages are kept in memory upto memory configured in <em><strong>carbon.sort.storage.inmemory.size.inmb</strong></em> beyond which they are spilled to disk as intermediate temporary sort files.This configuration determines after what percentage data needs to be spilled to disk.<strong>NOTE:</strong> Without this configuration, when the data pages occupy upto configured memory, new data pages would be dumped to disk and old pages are still maintained in disk.</td>
 </tr>
 <tr>
-<td>carbon.lock.retry.timeout.sec</td>
-<td>5</td>
-<td>Specifies the interval between the retries to obtain the lock for any operation other than load.</td>
+<td>carbon.load.directWriteHdfs.enabled</td>
+<td>false</td>
+<td>During data load all the carbondata files are written to local disk and finally copied to the target location in HDFS.Enabling this parameter will make carrbondata files to be written directly onto target HDFS location bypassing the local disk.<strong>NOTE:</strong> Writing directly to HDFS saves local disk IO(once for writing the files and again for copying to HDFS) there by improving the performance.But the drawback is when data loading fails or the application crashes, unwanted carbondata files will remain in the target HDFS location until it is cleared during next data load or by running <em>CLEAN FILES</em> DDL command</td>
 </tr>
 <tr>
-<td>carbon.skip.empty.line</td>
-<td>false</td>
-<td>Setting this property ignores the empty lines in the CSV file during the data load</td>
+<td>carbon.options.serialization.null.format</td>
+<td>\N</td>
+<td>Based on the business scenarios, some columns might need to be loaded with null values.As null value cannot be written in csv files, some special characters might be adopted to specify null values.This configuration can be used to specify the null values format in the data being loaded.</td>
 </tr>
 <tr>
-<td>carbon.enable.calculate.size</td>
-<td>true</td>
-<td>
-<strong>For Load Operation</strong>: Setting this property calculates the size of the carbon data file (.carbondata) and carbon index file (.carbonindex) for every load and updates the table status file. <strong>For Describe Formatted</strong>: Setting this property calculates the total size of the carbon data files and carbon index files for the respective table and displays in describe formatted command.</td>
+<td>carbon.sort.storage.inmemory.size.inmb</td>
+<td>512</td>
+<td>CarbonData writes every <em><strong>carbon.sort.size</strong></em> number of records to intermediate temp files during data loading to ensure memory footprint is within limits.When <em><strong>enable.unsafe.sort</strong></em> configuration is enabled, instead of using <em><strong>carbon.sort.size</strong></em> which is based on rows count, size occupied in memory is used to determine when to flush data pages to intermediate temp files.This configuration determines the memory to be used for storing data pages in memory.<strong>NOTE:</strong> Configuring higher values ensures more data is maintained in memory and hence increases data loading performance due to reduced or no IO.Based on the memory availability in the nodes of the cluster, configure the values accordingly.</td>
 </tr>
 </tbody>
 </table>
-<ul>
-<li><strong>Compaction Configuration</strong></li>
-</ul>
+<h2>
+<a id="compaction-configuration" class="anchor" href="#compaction-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Compaction Configuration</h2>
 <table>
 <thead>
 <tr>
@@ -599,30 +542,80 @@
 </thead>
 <tbody>
 <tr>
+<td>carbon.number.of.cores.while.compacting</td>
+<td>2</td>
+<td>Number of cores to be used while compacting data.This also determines the number of threads to be used to read carbondata files in parallel.</td>
+</tr>
+<tr>
+<td>carbon.compaction.level.threshold</td>
+<td>4, 3</td>
+<td>Each CarbonData load will create one segment, if every load is small in size it will generate many small files over a period of time impacting the query performance.This configuration is for minor compaction which decides how many segments to be merged. Configuration is of the form (x,y). Compaction will be triggered for every x segments and form a single level 1 compacted segment.When the number of compacted level 1 segments reach y, compaction will be triggered again to merge them to form a single level 2 segment. For example: If it is set as 2, 3 then minor compaction will be triggered for every 2 segments. 3 is the number of level 1 compacted segments which is further compacted to new segment.<strong>NOTE:</strong> When <em><strong>carbon.enable.auto.load.merge</strong></em> is <strong>true</strong>, Configuring higher values cause overall data loading time to increase as compaction will be triggered after data loading is complete but status is not returned till compaction is complete. But compacting more number of segments can increase query performance.Hence optimal values needs to be configured based on the business scenario.Valid values are between 0 and 100.</td>
+</tr>
+<tr>
+<td>carbon.major.compaction.size</td>
+<td>1024</td>
+<td>To improve query performance, all the segments can be merged and compacted to a single segment upto configured size.This major compaction size can be configured using this parameter. Sum of the segments which is below this threshold will be merged. This value is expressed in MB.</td>
+</tr>
+<tr>
+<td>carbon.horizontal.compaction.enable</td>
+<td>true</td>
+<td>CarbonData supports DELETE/UPDATE functionality by creating delta data files for existing carbondata files.These delta files would grow as more number of DELETE/UPDATE operations are performed.Compaction of these delta files are termed as horizontal compaction.This configuration is used to turn ON/OFF horizontal compaction. After every DELETE and UPDATE statement, horizontal compaction may occur in case the delta (DELETE/ UPDATE) files becomes more than specified threshold.<strong>NOTE:</strong> Having many delta files will reduce the query performance as scan has to happen on all these files before the final state of data can be decided.Hence it is advisable to keep horizontal compaction enabled and configure reasonable values to <em><strong>carbon.horizontal.UPDATE.compaction.threshold</strong></em> and <em><strong>carbon.horizontal.DELETE.compaction.threshold</strong></em>
+</td>
+</tr>
+<tr>
+<td>carbon.horizontal.update.compaction.threshold</td>
+<td>1</td>
+<td>This configuration specifies the threshold limit on number of UPDATE delta files within a segment. In case the number of delta files goes beyond the threshold, the UPDATE delta files within the segment becomes eligible for horizontal compaction and are compacted into single UPDATE delta file.Values range between 1 to 10000.</td>
+</tr>
+<tr>
+<td>carbon.horizontal.delete.compaction.threshold</td>
+<td>1</td>
+<td>This configuration specifies the threshold limit on number of DELETE delta files within a block of a segment. In case the number of delta files goes beyond the threshold, the DELETE delta files for the particular block of the segment becomes eligible for horizontal compaction and are compacted into single DELETE delta file.Values range between 1 to 10000.</td>
+</tr>
+<tr>
+<td>carbon.update.segment.parallelism</td>
+<td>1</td>
+<td>CarbonData processes the UPDATE operations by grouping records belonging to a segment into a single executor task.When the amount of data to be updated is more, this behavior causes problems like restarting of executor due to low memory and data-spill related errors.This property specifies the parallelism for each segment during update.<strong>NOTE:</strong> It is recommended to set this value to a multiple of the number of executors for balance.Values range between 1 to 1000.</td>
+</tr>
+<tr>
 <td>carbon.numberof.preserve.segments</td>
 <td>0</td>
-<td>If the user wants to preserve some number of segments from being compacted then he can set this property. Example: carbon.numberof.preserve.segments = 2 then 2 latest segments will always be excluded from the compaction. No segments will be preserved by default.</td>
+<td>If the user wants to preserve some number of segments from being compacted then he can set this configuration. Example: carbon.numberof.preserve.segments = 2 then 2 latest segments will always be excluded from the compaction. No segments will be preserved by default.<strong>NOTE:</strong> This configuration is useful when the chances of input data can be wrong due to environment scenarios.Preserving some of the latest segments from being compacted can help to easily delete the wrongly loaded segments.Once compacted,it becomes more difficult to determine the exact data to be deleted(except when data is incrementing according to time)</td>
 </tr>
 <tr>
 <td>carbon.allowed.compaction.days</td>
 <td>0</td>
-<td>Compaction will merge the segments which are loaded with in the specific number of days configured. Example: If the configuration is 2, then the segments which are loaded in the time frame of 2 days only will get merged. Segments which are loaded 2 days apart will not be merged. This is disabled by default.</td>
+<td>This configuration is used to control on the number of recent segments that needs to be compacted, ignoring the older ones.This configuration is in days.For Example: If the configuration is 2, then the segments which are loaded in the time frame of past 2 days only will get merged. Segments which are loaded earlier than 2 days will not be merged. This configuration is disabled by default.<strong>NOTE:</strong> This configuration is useful when a bulk of history data is loaded into the carbondata.Query on this data is less frequent.In such cases involving these segments also into compaction will affect the resource consumption, increases overall compaction time.</td>
 </tr>
 <tr>
 <td>carbon.enable.auto.load.merge</td>
 <td>false</td>
-<td>To enable compaction while data loading.</td>
+<td>Compaction can be automatically triggered once data load completes.This ensures that the segments are merged in time and thus query times doesn't increase with increase in segments.This configuration enables to do compaction along with data loading.<strong>NOTE:</strong> Compaction will be triggered once the data load completes.But the status of data load wait till the compaction is completed.Hence it might look like data loading time has increased, but that's not the case.Moreover failure of compaction will not affect the data loading status.If data load had completed successfully, the status would be updated and segments are committed.However, failure while data loading, will not trigger compaction and error is returned immediately.</td>
 </tr>
 <tr>
 <td>carbon.enable.page.level.reader.in.compaction</td>
 <td>true</td>
-<td>Enabling page level reader for compaction reduces the memory usage while compacting more number of segments. It allows reading only page by page instead of reading whole blocklet to memory.</td>
+<td>Enabling page level reader for compaction reduces the memory usage while compacting more number of segments. It allows reading only page by page instead of reading whole blocklet to memory.<strong>NOTE:</strong> Please refer to <a href="./file-structure-of-carbondata.html">file-structure-of-carbondata</a> to understand the storage format of CarbonData and concepts of pages.</td>
+</tr>
+<tr>
+<td>carbon.concurrent.compaction</td>
+<td>true</td>
+<td>Compaction of different tables can be executed concurrently.This configuration determines whether to compact all qualifying tables in parallel or not.<strong>NOTE:</strong> Compacting concurrently is a resource demanding operation and needs more resources there by affecting the query performance also.This configuration is <strong>deprecated</strong> and might be removed in future releases.</td>
+</tr>
+<tr>
+<td>carbon.compaction.prefetch.enable</td>
+<td>false</td>
+<td>Compaction operation is similar to Query + data load where in data from qualifying segments are queried and data loading performed to generate a new single segment.This configuration determines whether to query ahead data from segments and feed it for data loading.<strong>NOTE:</strong> This configuration is disabled by default as it needs extra resources for querying ahead extra data.Based on the memory availability on the cluster, user can enable it to improve compaction performance.</td>
+</tr>
+<tr>
+<td>carbon.merge.index.in.segment</td>
+<td>true</td>
+<td>Each CarbonData file has a companion CarbonIndex file which maintains the metadata about the data.These CarbonIndex files are read and loaded into driver and is used subsequently for pruning of data during queries.These CarbonIndex files are very small in size(few KB) and are many.Reading many small files from HDFS is not efficient and leads to slow IO performance.Hence these CarbonIndex files belonging to a segment can be combined into  a single file and read once there by increasing the IO throughput.This configuration enables to merge all the CarbonIndex files into a single MergeIndex file upon data loading completion.<strong>NOTE:</strong> Reading a single big file is more efficient in HDFS and IO throughput is very high.Due to this the time needed to load the index files into memory when query is received for the first time on that table is significantly reduced and there by significantly reduces the delay in serving the first query.</td>
 </tr>
 </tbody>
 </table>
-<ul>
-<li><strong>Query Configuration</strong></li>
-</ul>
+<h2>
+<a id="query-configuration" class="anchor" href="#query-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query Configuration</h2>
 <table>
 <thead>
 <tr>
@@ -633,6 +626,16 @@
 </thead>
 <tbody>
 <tr>
+<td>carbon.max.driver.lru.cache.size</td>
+<td>-1</td>
+<td>Maximum memory <strong>(in MB)</strong> upto which the driver process can cache the data (BTree and dictionary values). Beyond this, least recently used data will be removed from cache before loading new set of values. Default value of -1 means there is no memory limit for caching. Only integer values greater than 0 are accepted.<strong>NOTE:</strong> Minimum number of entries that needs to be removed from cache in order to load the new set of data is determined and unloaded. i.e., for example if 3 cache entries qualify for pre-emption, out of these, those entries that free up more cache memory are removed prior to others.</td>
+</tr>
+<tr>
+<td>carbon.max.executor.lru.cache.size</td>
+<td>-1</td>
+<td>Maximum memory <strong>(in MB)</strong> upto which the executor process can cache the data (BTree and reverse dictionary values).Default value of -1 means there is no memory limit for caching. Only integer values greater than 0 are accepted.<strong>NOTE:</strong> If this parameter is not configured, then the value of <em><strong>carbon.max.driver.lru.cache.size</strong></em> will be used.</td>
+</tr>
+<tr>
 <td>max.query.execution.time</td>
 <td>60</td>
 <td>Maximum time allowed for one query to be executed. The value is in minutes.</td>
@@ -640,12 +643,12 @@
 <tr>
 <td>carbon.enableMinMax</td>
 <td>true</td>
-<td>Min max is feature added to enhance query performance. To disable this feature, set it false.</td>
+<td>CarbonData maintains the metadata which enables to prune unnecessary files from being scanned as per the query conditions. To achieve pruning, Min, Max of each column is maintained. Based on the filter condition in the query, certain data can be skipped from scanning by matching the filter value against the min, max values of the column(s) present in that carbondata file. This pruning enhances query performance significantly.</td>
 </tr>
 <tr>
 <td>carbon.dynamicallocation.schedulertimeout</td>
 <td>5</td>
-<td>Specifies the maximum time (unit in seconds) the scheduler can wait for executor to be active. Minimum value is 5 sec and maximum value is 15 sec.</td>
+<td>CarbonData has its own scheduling algorithm to suggest to Spark on how many tasks needs to be launched and how much work each task need to do in a Spark cluster for any query on CarbonData. To determine the number of tasks that can be scheduled, knowing the count of active executors is necessary. When dynamic allocation is enabled on a YARN based spark cluster, executor processes are shutdown if no request is received for a particular amount of time. The executors are brought up when the request is received again. This configuration specifies the maximum time (unit in seconds) the carbon scheduler can wait for executor to be active. Minimum value is 5 sec and maximum value is 15 sec.<strong>NOTE:</strong> Waiting for longer time leads to slow query response time. Moreover it might be possible that YARN is not able to start the executors and waiting is not beneficial.</td>
 </tr>
 <tr>
 <td>carbon.scheduler.minregisteredresourcesratio</td>
@@ -657,35 +660,76 @@
 <td>false</td>
 <td>If set to true, it will use CarbonReader to do distributed scan directly instead of using compute framework like spark, thus avoiding limitation of compute framework like SQL optimizer and task scheduling overhead.</td>
 </tr>
-</tbody>
-</table>
-<ul>
-<li><strong>Global Dictionary Configurations</strong></li>
-</ul>
-<table>
-<thead>
 <tr>
-<th>Parameter</th>
-<th>Default Value</th>
-<th>Description</th>
+<td>carbon.search.query.timeout</td>
+<td>10s</td>
+<td>Time within which the result is expected from the workers; beyond which the query is terminated</td>
 </tr>
-</thead>
-<tbody>
 <tr>
-<td>carbon.cutOffTimestamp</td>
-<td></td>
-<td>Sets the start date for calculating the timestamp. Java counts the number of milliseconds from start of "1970-01-01 00:00:00". This property is used to customize the start of position. For example "2000-01-01 00:00:00". The date must be in the form "carbon.timestamp.format".</td>
+<td>carbon.search.scan.thread</td>
+<td>num of cores available in worker node</td>
+<td>Number of cores to be used in each worker for performing scan.</td>
 </tr>
 <tr>
-<td>carbon.timegranularity</td>
-<td>SECOND</td>
-<td>The property used to set the data granularity level DAY, HOUR, MINUTE, or SECOND.</td>
+<td>carbon.search.master.port</td>
+<td>10020</td>
+<td>Port on which the search master listens for incoming query requests</td>
+</tr>
+<tr>
+<td>carbon.search.worker.port</td>
+<td>10021</td>
+<td>Port on which search master communicates with the workers.</td>
+</tr>
+<tr>
+<td>carbon.search.worker.workload.limit</td>
+<td>10 * <em>carbon.search.scan.thread</em>
+</td>
+<td>Maximum number of active requests that can be sent to a worker. Beyond this, the request needs to be rescheduled for a later time or to a different worker.</td>
+</tr>
+<tr>
+<td>carbon.detail.batch.size</td>
+<td>100</td>
+<td>The buffer size to store records, returned from the block scan. In limit scenario this parameter is very important. For example your query limit is 1000. But if we set this value to 3000 that means we get 3000 records from scan but spark will only take 1000 rows. So the 2000 remaining are useless. In one Finance test case after we set it to 100, in the limit 1000 scenario the performance increase about 2 times in comparison to if we set this value to 12000.</td>
+</tr>
+<tr>
+<td>carbon.enable.vector.reader</td>
+<td>true</td>
+<td>Spark added vector processing to optimize cpu cache miss and there by increase the query performance.This configuration enables to fetch data as columnar batch of size 4*1024 rows instead of fetching data row by row and provide it to spark so that there is improvement in  select queries performance.</td>
+</tr>
+<tr>
+<td>carbon.task.distribution</td>
+<td>block</td>
+<td>CarbonData has its own scheduling algorithm to suggest to Spark on how many tasks needs to be launched and how much work each task need to do in a Spark cluster for any query on CarbonData.Each of these task distribution suggestions has its own advantages and disadvantages.Based on the customer use case, appropriate task distribution can be configured.<strong>block</strong>: Setting this value will launch one task per block. This setting is suggested in case of concurrent queries and queries having big shuffling scenarios. <strong>custom</strong>: Setting this value will group the blocks and distribute it uniformly to the available resources in the cluster. This enhances the query performance but not suggested in case of concurrent queries and queries having big shuffling scenarios. <strong>blocklet</strong>: Setting this value will launch one task per blocklet. This setting is suggested in case of concurrent queries and queries having big shuffling scenarios. <strong>merge_smal
 l_files</strong>: Setting this value will merge all the small carbondata files upto a bigger size configured by <em><strong>spark.sql.files.maxPartitionBytes</strong></em> (128 MB is the default value,it is configurable) during querying. The small carbondata files are combined to a map task to reduce the number of read task. This enhances the performance.</td>
+</tr>
+<tr>
+<td>carbon.custom.block.distribution</td>
+<td>false</td>
+<td>CarbonData has its own scheduling algorithm to suggest to Spark on how many tasks needs to be launched and how much work each task need to do in a Spark cluster for any query on CarbonData.When this configuration is true, CarbonData would distribute the available blocks to be scanned among the available number of cores.For Example:If there are 10 blocks to be scanned and only 3 tasks can be run(only 3 executor cores available in the cluster), CarbonData would combine blocks as 4,3,3 and give it to 3 tasks to run.<strong>NOTE:</strong> When this configuration is false, as per the <em><strong>carbon.task.distribution</strong></em> configuration, each block/blocklet would be given to each task.</td>
+</tr>
+<tr>
+<td>enable.query.statistics</td>
+<td>false</td>
+<td>CarbonData has extensive logging which would be useful for debugging issues related to performance or hard to locate issues. This configuration when made <em><strong>true</strong></em> would log additional query statistics information to more accurately locate the issues being debugged.<strong>NOTE:</strong> Enabling this would log more debug information to log files, thereby increasing the log files size significantly in a short span of time. It is advised to configure the log files size, retention of log files parameters in log4j properties appropriately. Also extensive logging is an increased IO operation and hence over all query performance might get reduced. Therefore it is recommended to enable this configuration only for the duration of debugging.</td>
+</tr>
+<tr>
+<td>enable.unsafe.in.query.processing</td>
+<td>true</td>
+<td>CarbonData supports unsafe operations of Java to avoid GC overhead for certain operations.This configuration enables to use unsafe functions in CarbonData while scanning the  data during query.</td>
+</tr>
+<tr>
+<td>carbon.query.validate.directqueryondatamap</td>
+<td>true</td>
+<td>CarbonData supports creating pre-aggregate table datamaps as an independent tables.For some debugging purposes, it might be required to directly query from such datamap tables.This configuration allows to query on such datamaps.</td>
+</tr>
+<tr>
+<td>carbon.heap.memory.pooling.threshold.bytes</td>
+<td>1048576</td>
+<td>CarbonData supports unsafe operations of Java to avoid GC overhead for certain operations. Using unsafe, memory can be allocated on Java Heap or off heap. This configuration controls the allocation mechanism on Java HEAP. If the heap memory allocations of the given size is greater or equal than this value, it should go through the pooling mechanism. But if set this size to -1, it should not go through the pooling mechanism. Default value is 1048576 (1MB, the same as Spark). Value to be specified in bytes.</td>
 </tr>
 </tbody>
 </table>
 <h2>
-<a id="spark-configuration" class="anchor" href="#spark-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Spark Configuration</h2>
-<p><b></b></p><p align="center">Spark Configuration Reference in spark-defaults.conf</p>
+<a id="data-mutation-configuration" class="anchor" href="#data-mutation-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Data Mutation Configuration</h2>
 <table>
 <thead>
 <tr>
@@ -696,14 +740,24 @@
 </thead>
 <tbody>
 <tr>
-<td>spark.driver.memory</td>
-<td>1g</td>
-<td>Amount of memory to be used by the driver process.</td>
+<td>carbon.insert.persist.enable</td>
+<td>false</td>
+<td>CarbonData does loading in 2 major steps.1st step reads from the input source and generates the dictionary values.2nd step reads from the source again and encodes the data with the dictionary values, perform index calculations and writes in CarbonData format. Suppose we are loading the CarbonData table using another table as source(using insert into) and the source table is being loaded in parallel, there can be cases where some data got inserted into the source table after CarbonData generated for the target table in which case some new records which does not have dictionary values generated gets read leading to inconsistency. To avoid this condition we can persist the dataset of RDD/dataframe into MEMORY_AND_DISK(default value) and perform insert into operation. This ensures the data read from source table is cached and is not read again from the source there by ensuring consistency between dictionary generation and writing to CarbonData format steps. By default this value is 
 false as concurrent loading into source table is not the scenario majority of the times.<strong>NOTE:</strong> This configuration can reduce the insert into execution time as data need not be re read; but increases the memory foot print.</td>
 </tr>
 <tr>
-<td>spark.executor.memory</td>
-<td>1g</td>
-<td>Amount of memory to be used per executor process.</td>
+<td>carbon.insert.storage.level</td>
+<td>MEMORY_AND_DISK</td>
+<td>Storage level to persist dataset of a RDD/dataframe.Applicable when <em><strong>carbon.insert.persist.enable</strong></em> is <strong>true</strong>, if user's executor has less memory, set this parameter to 'MEMORY_AND_DISK_SER' or other storage level to correspond to different environment. <a href="http://spark.apache.org/docs/latest/rdd-programming-guide.html#rdd-persistence" rel="nofollow">See detail</a>.</td>
+</tr>
+<tr>
+<td>carbon.update.persist.enable</td>
+<td>true</td>
+<td>Configuration to enable the dataset of RDD/dataframe to persist data. Enabling this will reduce the execution time of UPDATE operation.</td>
+</tr>
+<tr>
+<td>carbon.update.storage.level</td>
+<td>MEMORY_AND_DISK</td>
+<td>Storage level to persist dataset of a RDD/dataframe.Applicable when <em><strong>carbon.update.persist.enable</strong></em> is <strong>true</strong>, if user's executor has less memory, set this parameter to 'MEMORY_AND_DISK_SER' or other storage level to correspond to different environment. <a href="http://spark.apache.org/docs/latest/rdd-programming-guide.html#rdd-persistence" rel="nofollow">See detail</a>.</td>
 </tr>
 </tbody>
 </table>
@@ -767,6 +821,10 @@
 <tbody>
 <tr>
 <td>carbon.options.bad.records.logger.enable</td>
+<td>CarbonData can identify the records that are not conformant to schema and isolate them as bad records.Enabling this configuration will make CarbonData to log such bad records.<strong>NOTE:</strong> If the input data contains many bad records, logging them will slow down the over all data loading throughput.The data load operation status would depend on the configuration in <em><strong>carbon.bad.records.action</strong></em>.</td>
+</tr>
+<tr>
+<td>carbon.options.bad.records.logger.enable</td>
 <td>To enable or disable bad record logger.</td>
 </tr>
 <tr>
@@ -783,7 +841,7 @@
 </tr>
 <tr>
 <td>carbon.options.single.pass</td>
-<td>Single Pass Loading enables single job to finish data loading with dictionary generation on the fly. It enhances performance in the scenarios where the subsequent data loading after initial load involves fewer incremental updates on the dictionary. This option specifies whether to use single pass for loading data or not. By default this option is set to FALSE.</td>
+<td>Single Pass Loading enables single job to finish data loading with dictionary generation on the fly. It enhances performance in the scenarios where the subsequent data loading after initial load involves fewer incremental updates on the dictionary. This option specifies whether to use single pass for loading data or not. By default this option is set to FALSE.<strong>NOTE:</strong> Enabling this starts a new dictionary server to handle dictionary generation requests during data loading. Without this option, the input csv files will have to be read twice: once while dictionary generation and persisting to the dictionary files; second, when the data loading needs to convert the input data into carbondata format. Enabling this optimizes to read the input data only once, thereby reducing IO and hence over all data loading time. If concurrent data loading needs to be supported, consider tuning <em><strong>dictionary.worker.threads</strong></em>. Port on which the dictionary server
  need to listen on can be configured using the configuration <em><strong>carbon.dictionary.server.port</strong></em>.</td>
 </tr>
 <tr>
 <td>carbon.options.bad.record.path</td>
@@ -791,12 +849,36 @@
 </tr>
 <tr>
 <td>carbon.custom.block.distribution</td>
-<td>Specifies whether to use the Spark or Carbon block distribution feature.</td>
+<td>Specifies whether to use the Spark or Carbon block distribution feature.<strong>NOTE:</strong> Refer to <a href="#query-configuration">Query Configuration</a>#carbon.custom.block.distribution for more details on CarbonData scheduler.</td>
 </tr>
 <tr>
 <td>enable.unsafe.sort</td>
 <td>Specifies whether to use unsafe sort during data loading. Unsafe sort reduces the garbage collection during data load operation, resulting in better performance.</td>
 </tr>
+<tr>
+<td>carbon.options.dateformat</td>
+<td>Specifies the date format of the date columns in the data being loaded</td>
+</tr>
+<tr>
+<td>carbon.options.timestampformat</td>
+<td>Specifies the timestamp format of the time stamp columns in the data being loaded</td>
+</tr>
+<tr>
+<td>carbon.options.sort.scope</td>
+<td>Specifies how the current data load should be sorted with.<strong>NOTE:</strong> Refer to <a href="#data-loading-configuration">Data Loading Configuration</a>#carbon.sort.scope for detailed information.</td>
+</tr>
+<tr>
+<td>carbon.options.global.sort.partitions</td>
+<td></td>
+</tr>
+<tr>
+<td>carbon.options.serialization.null.format</td>
+<td>Default Null value representation in the data being loaded.<strong>NOTE:</strong> Refer to <a href="#data-loading-configuration">Data Loading Configuration</a>#carbon.options.serialization.null.format for detailed information.</td>
+</tr>
+<tr>
+<td>carbon.query.directQueryOnDataMap.enabled</td>
+<td>Specifies whether datamap can be queried directly. This is useful for debugging purposes.<strong>NOTE:</strong> Refer to <a href="#query-configuration">Query Configuration</a>#carbon.query.validate.directqueryondatamap for detailed information.</td>
+</tr>
 </tbody>
 </table>
 <p><strong>Examples:</strong></p>
@@ -824,6 +906,17 @@
 <p>Failure will be displayed in the UI.</p>
 </li>
 </ul>
+<script>
+$(function() {
+  // Show selected style on nav item
+  $('.b-nav__docs').addClass('selected');
+
+  // Display docs subnav items
+  if (!$('.b-nav__docs').parent().hasClass('nav__item__with__subs--expanded')) {
+    $('.b-nav__docs').parent().toggleClass('nav__item__with__subs--expanded');
+  }
+});
+</script>
 </div>
 </div>
 </div>
@@ -839,4 +932,4 @@
 </section><!-- End systemblock part -->
 <script src="js/custom.js"></script>
 </body>
-</html>
\ No newline at end of file
+</html>

http://git-wip-us.apache.org/repos/asf/carbondata-site/blob/44eed099/content/css/style.css
----------------------------------------------------------------------
diff --git a/content/css/style.css b/content/css/style.css
index 94b8fbf..88fd05f 100644
--- a/content/css/style.css
+++ b/content/css/style.css
@@ -4,7 +4,7 @@
 
 *{margin: 0; padding: 0;}
 
-body{background:#fdfdfd; color:#000 ; font-family:'Open Sans', arial, helvatica,  sans-serif; font-size:14px; line-height:22px;}
+body{background:#fdfdfd; color:#000 ; font-family:'Helvetica Neue', arial, helvatica,  sans-serif; font-size:14px; line-height:22px;}
 
 a{-webkit-transition: all .1s ease-in-out;
    -moz-transition: all .1s ease-in-out;
@@ -1301,3 +1301,77 @@ box-shadow: 0 3px 2px #aaa }
     white-space: nowrap;
 }
 
+.mdcontent {
+float:right;
+width:80%;
+padding-left:30px;
+}
+
+.verticalnavbar {
+    float: left;
+    text-transform: uppercase;
+    width: 15%;
+    font-family:"Helvetica Neue";
+    padding-top: 90px;
+    position: fixed;
+}
+.nav__item,
+.nav__item__with__subs {
+    color: #000000;
+    border-right: 2px solid #000000;
+    display: block;
+    padding-top: 1.5rem;
+    position: relative;
+}
+.nav__item__with__subs {
+    padding-top: 0;
+}
+.nav__sub__anchor,
+.nav__sub__item {
+    border-right: none;
+}
+.nav__sub__item {
+    display: none;
+    color: #888888;
+    font-size: 1.2rem;
+    text-transform: capitalize;
+}
+.nav__item__with__subs--expanded .nav__sub__item {
+    display: block;
+}
+.nav__item:first-of-type {
+    padding-top: 0;
+}
+.nav__item__with__subs .nav__item:first-of-type {
+    padding-top: 1.5rem;
+}
+.nav__item::after {
+    content: "";
+    display: block;
+    height: 2.1rem;
+    width: 1.1rem;
+    border-radius: 1rem;
+    -moz-border-radius: 1rem;
+    -webkit-border-radius: 1rem;
+    border: 2px solid #000000;
+    background: #FFFFFF;
+    position: absolute;
+    right: -.7rem;
+    top: 1.7rem;
+    opacity: 0;
+    transition: opacity .2s ease-out;
+}
+.nav__item.selected::after {
+    opacity: 1;
+}
+.nav__item.selected:first-of-type::after {
+    top: .4rem;
+}
+.nav__item__with__subs .nav__item:first-of-type::after {
+    top: 1.7rem;
+}
+.verticalnavbar .btn {
+    display: block;
+    margin-top: 4rem;
+}
+

http://git-wip-us.apache.org/repos/asf/carbondata-site/blob/44eed099/content/data-management-on-carbondata.html
----------------------------------------------------------------------
diff --git a/content/data-management-on-carbondata.html b/content/data-management-on-carbondata.html
index 566bb8e..bb5ae78 100644
--- a/content/data-management-on-carbondata.html
+++ b/content/data-management-on-carbondata.html
@@ -1318,4 +1318,4 @@ Future {
 </section><!-- End systemblock part -->
 <script src="js/custom.js"></script>
 </body>
-</html>
\ No newline at end of file
+</html>

http://git-wip-us.apache.org/repos/asf/carbondata-site/blob/44eed099/content/data-management.html
----------------------------------------------------------------------
diff --git a/content/data-management.html b/content/data-management.html
index 93528d8..a9086ca 100644
--- a/content/data-management.html
+++ b/content/data-management.html
@@ -410,4 +410,4 @@ on the basis of column expression and optional filter conditions.</p>
 </section><!-- End systemblock part -->
 <script src="js/custom.js"></script>
 </body>
-</html>
\ No newline at end of file
+</html>


Mime
View raw message