apex-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From t..@apache.org
Subject [04/15] apex-site git commit: Adding malhar- documentation
Date Sun, 27 Nov 2016 01:15:18 GMT
http://git-wip-us.apache.org/repos/asf/apex-site/blob/357a5a07/docs/malhar-3.6/operators/enricher/index.html
----------------------------------------------------------------------
diff --git a/docs/malhar-3.6/operators/enricher/index.html b/docs/malhar-3.6/operators/enricher/index.html
new file mode 100644
index 0000000..1e2e907
--- /dev/null
+++ b/docs/malhar-3.6/operators/enricher/index.html
@@ -0,0 +1,570 @@
+<!DOCTYPE html>
+<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
+<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
+<head>
+  <meta charset="utf-8">
+  <meta http-equiv="X-UA-Compatible" content="IE=edge">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  
+  
+  
+  <title>Enricher - Apache Apex Malhar Documentation</title>
+  
+
+  <link rel="shortcut icon" href="../../favicon.ico">
+  
+
+  
+  <link href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700' rel='stylesheet' type='text/css'>
+
+  <link rel="stylesheet" href="../../css/theme.css" type="text/css" />
+  <link rel="stylesheet" href="../../css/theme_extra.css" type="text/css" />
+  <link rel="stylesheet" href="../../css/highlight.css">
+
+  
+  <script>
+    // Current page data
+    var mkdocs_page_name = "Enricher";
+    var mkdocs_page_input_path = "operators/enricher.md";
+    var mkdocs_page_url = "/operators/enricher/";
+  </script>
+  
+  <script src="../../js/jquery-2.1.1.min.js"></script>
+  <script src="../../js/modernizr-2.8.3.min.js"></script>
+  <script type="text/javascript" src="../../js/highlight.pack.js"></script>
+  <script src="../../js/theme.js"></script> 
+
+  
+</head>
+
+<body class="wy-body-for-nav" role="document">
+
+  <div class="wy-grid-for-nav">
+
+    
+    <nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
+      <div class="wy-side-nav-search">
+        <a href="../.." class="icon icon-home"> Apache Apex Malhar Documentation</a>
+        <div role="search">
+  <form id ="rtd-search-form" class="wy-form" action="../../search.html" method="get">
+    <input type="text" name="q" placeholder="Search docs" />
+  </form>
+</div>
+      </div>
+
+      <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
+        <ul class="current">
+          
+            <li>
+    <li class="toctree-l1 ">
+        <a class="" href="../..">Apache Apex Malhar</a>
+        
+    </li>
+<li>
+          
+            <li>
+    <ul class="subnav">
+    <li><span>Operators</span></li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../kafkaInputOperator/">Kafka Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jmsInputOperator/">JMS Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../file_splitter/">File Splitter</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../block_reader/">Block Reader</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../fsInputOperator/">File Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../csvParserOperator/">Csv Parser</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../file_output/">File Output</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 current">
+        <a class="current" href="./">Enricher</a>
+        
+            <ul>
+            
+                <li class="toctree-l3"><a href="#pojo-enricher">POJO Enricher</a></li>
+                
+                    <li><a class="toctree-l4" href="#operator-objective">Operator Objective</a></li>
+                
+                    <li><a class="toctree-l4" href="#operator-usecase">Operator Usecase</a></li>
+                
+                    <li><a class="toctree-l4" href="#operator-information">Operator Information</a></li>
+                
+                    <li><a class="toctree-l4" href="#properties-attributes-and-ports">Properties, Attributes and Ports</a></li>
+                
+                    <li><a class="toctree-l4" href="#platform-attributes-that-influences-operator-behavior">Platform Attributes that influences operator behavior</a></li>
+                
+                    <li><a class="toctree-l4" href="#ports">Ports</a></li>
+                
+            
+                <li class="toctree-l3"><a href="#limitations">Limitations</a></li>
+                
+            
+                <li class="toctree-l3"><a href="#example">Example</a></li>
+                
+            
+                <li class="toctree-l3"><a href="#advanced">Advanced</a></li>
+                
+                    <li><a class="toctree-l4" href="#caching-mechanism-in-pojoenricher">Caching mechanism in POJOEnricher</a></li>
+                
+                    <li><a class="toctree-l4" href="#partitioning-of-pojoenricher">Partitioning of POJOEnricher</a></li>
+                
+            
+            </ul>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../filter/">Filter</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../deduper/">Deduper</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../windowedOperator/">Windowed Operator</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jsonParser/">Json Parser</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jsonFormatter/">Json Formatter</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../transform/">Transform Operator</a>
+        
+    </li>
+
+        
+    </ul>
+<li>
+          
+        </ul>
+      </div>
+      &nbsp;
+    </nav>
+
+    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
+
+      
+      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
+        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
+        <a href="../..">Apache Apex Malhar Documentation</a>
+      </nav>
+
+      
+      <div class="wy-nav-content">
+        <div class="rst-content">
+          <div role="navigation" aria-label="breadcrumbs navigation">
+  <ul class="wy-breadcrumbs">
+    <li><a href="../..">Docs</a> &raquo;</li>
+    
+      
+        
+          <li>Operators &raquo;</li>
+        
+      
+    
+    <li>Enricher</li>
+    <li class="wy-breadcrumbs-aside">
+      
+    </li>
+  </ul>
+  <hr/>
+</div>
+          <div role="main">
+            <div class="section">
+              
+                <h1 id="pojo-enricher">POJO Enricher</h1>
+<h2 id="operator-objective">Operator Objective</h2>
+<p>This operator receives a POJO (<a href="https://en.wikipedia.org/wiki/Plain_Old_Java_Object">Plain Old Java Object</a>) as an incoming tuple and uses an external source to enrich the data in 
+the incoming tuple and finally emits the enriched data as a new enriched POJO.</p>
+<p>POJOEnricher supports enrichment from following external sources:</p>
+<ol>
+<li><strong>JSON File Based</strong> - Reads the file in memory having content stored in JSON format and use that to enrich the data. This can be done using FSLoader implementation.</li>
+<li><strong>JDBC Based</strong> - Any JDBC store can act as an external entity to which enricher can request data for enriching incoming tuples. This can be done using JDBCLoader implementation.</li>
+</ol>
+<p>POJO Enricher does not hold any state and is <strong>idempotent</strong>, <strong>fault-tolerant</strong> and <strong>statically/dynamically partitionable</strong>.</p>
+<h2 id="operator-usecase">Operator Usecase</h2>
+<ol>
+<li>Bank <strong><em>transaction records</em></strong> usually contain a customerId. For further analysis of a transaction one wants the customer name and other customer related information. 
+Such information is present in another database. One could enrich the transaction record with customer information using POJOEnricher.</li>
+<li><strong><em>Call Data Record (CDR)</em></strong> contains only mobile/telephone numbers of the customer. Customer information is missing in CDR. POJO Enricher can be used to enrich 
+CDR with customer data for further analysis.</li>
+</ol>
+<h2 id="operator-information">Operator Information</h2>
+<ol>
+<li>Operator location: <strong><em>malhar-contrib</em></strong></li>
+<li>Available since: <strong><em>3.4.0</em></strong></li>
+<li>Operator state: <strong><em>Evolving</em></strong></li>
+<li>Java Packages:<ul>
+<li>Operator: <strong><em><a href="https://www.datatorrent.com/docs/apidocs/com/datatorrent/contrib/enrich/POJOEnricher.html">com.datatorrent.contrib.enrich.POJOEnricher</a></em></strong></li>
+<li>FSLoader: <strong><em><a href="https://www.datatorrent.com/docs/apidocs/com/datatorrent/contrib/enrich/FSLoader.html">com.datatorrent.contrib.enrich.FSLoader</a></em></strong></li>
+<li>JDBCLoader: <strong><em><a href="https://www.datatorrent.com/docs/apidocs/com/datatorrent/contrib/enrich/JDBCLoader.html">com.datatorrent.contrib.enrich.JDBCLoader</a></em></strong></li>
+</ul>
+</li>
+</ol>
+<h2 id="properties-attributes-and-ports">Properties, Attributes and Ports</h2>
+<h3 id="properties-of-pojoenricher"><a name="props"></a>Properties of POJOEnricher</h3>
+<table>
+<thead>
+<tr>
+<th><strong>Property</strong></th>
+<th><strong>Description</strong></th>
+<th><strong>Type</strong></th>
+<th><strong>Mandatory</strong></th>
+<th><strong>Default Value</strong></th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><em>includeFields</em></td>
+<td>List of fields from database that needs to be added to output POJO.</td>
+<td>List&lt;String></td>
+<td>Yes</td>
+<td>N/A</td>
+</tr>
+<tr>
+<td><em>lookupFields</em></td>
+<td>List of fields from input POJO which will form a <em>unique composite</em> key for querying to store</td>
+<td>List&lt;String></td>
+<td>Yes</td>
+<td>N/A</td>
+</tr>
+<tr>
+<td><em>store</em></td>
+<td>Backend Store from which data should be queried for enrichment</td>
+<td><a href="#backendStore">BackendStore</a></td>
+<td>Yes</td>
+<td>N/A</td>
+</tr>
+<tr>
+<td><em>cacheExpirationInterval</em></td>
+<td>Cache entry expiry in ms. After this time, the lookup to store will be done again for given key</td>
+<td>int</td>
+<td>No</td>
+<td>1 * 60 * 60 * 1000 (1 hour)</td>
+</tr>
+<tr>
+<td><em>cacheCleanupInterval</em></td>
+<td>Interval in ms after which stale entries will be removed from the cache.</td>
+<td>int</td>
+<td>No</td>
+<td>1 * 60 * 60 * 1000 (1 hour)</td>
+</tr>
+<tr>
+<td><em>cacheSize</em></td>
+<td>Number of entries in cache after which eviction will start on each addition, based on LRU</td>
+<td>int</td>
+<td>No</td>
+<td>1000</td>
+</tr>
+</tbody>
+</table>
+<h4 id="properties-of-fsloader-backendstore"><a name="backendStore"></a>Properties of FSLoader (BackendStore)</h4>
+<table>
+<thead>
+<tr>
+<th><strong>Property</strong></th>
+<th><strong>Description</strong></th>
+<th><strong>Type</strong></th>
+<th><strong>Mandatory</strong></th>
+<th><strong>Default Value</strong></th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><em>fileName</em></td>
+<td>Path of the file, the data from which will be used for enrichment. See <a href="#JSONFileFormat">here</a> for JSON File format.</td>
+<td>String</td>
+<td>Yes</td>
+<td>N/A</td>
+</tr>
+</tbody>
+</table>
+<h4 id="properties-of-jdbcloader-backendstore">Properties of JDBCLoader (BackendStore)</h4>
+<table>
+<thead>
+<tr>
+<th><strong>Property</strong></th>
+<th><strong>Description</strong></th>
+<th><strong>Type</strong></th>
+<th><strong>Mandatory</strong></th>
+<th><strong>Default Value</strong></th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><em>databaseUrl</em></td>
+<td>Connection string for connecting to JDBC</td>
+<td>String</td>
+<td>Yes</td>
+<td>N/A</td>
+</tr>
+<tr>
+<td><em>databaseDriver</em></td>
+<td>JDBC Driver class for connection to JDBC Store. This driver should be there in classpath</td>
+<td>String</td>
+<td>Yes</td>
+<td>N/A</td>
+</tr>
+<tr>
+<td><em>tableName</em></td>
+<td>Name of the table from which data needs to be retrieved</td>
+<td>String</td>
+<td>Yes</td>
+<td>N/A</td>
+</tr>
+<tr>
+<td><em>connectionProperties</em></td>
+<td>Comma-separated list of advanced connection properties that need to be passed to the JDBC Driver. For example, <em>prop1:val1,prop2:val2</em></td>
+<td>String</td>
+<td>No</td>
+<td>null</td>
+</tr>
+<tr>
+<td><em>queryStmt</em></td>
+<td>Select statement which will be used to query the data. This is optional parameter in case of advanced query.</td>
+<td>String</td>
+<td>No</td>
+<td>null</td>
+</tr>
+</tbody>
+</table>
+<h3 id="platform-attributes-that-influences-operator-behavior">Platform Attributes that influences operator behavior</h3>
+<table>
+<thead>
+<tr>
+<th><strong>Attribute</strong></th>
+<th><strong>Description</strong></th>
+<th><strong>Type</strong></th>
+<th><strong>Mandatory</strong></th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><em>input.TUPLE_CLASS</em></td>
+<td>TUPLE_CLASS attribute on input port which tells operator the class of POJO which will be incoming</td>
+<td>Class or FQCN</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td><em>output.TUPLE_CLASS</em></td>
+<td>TUPLE_CLASS attribute on output port which tells operator the class of POJO which need to be emitted</td>
+<td>Class or FQCN</td>
+<td>Yes</td>
+</tr>
+</tbody>
+</table>
+<h3 id="ports">Ports</h3>
+<table>
+<thead>
+<tr>
+<th><strong>Port</strong></th>
+<th><strong>Description</strong></th>
+<th><strong>Type</strong></th>
+<th><strong>Mandatory</strong></th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><em>input</em></td>
+<td>Tuple which needs to be enriched are received on this port</td>
+<td>Object (POJO)</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td><em>output</em></td>
+<td>Tuples that are enriched from the external source are emitted on this port</td>
+<td>Object (POJO)</td>
+<td>No</td>
+</tr>
+</tbody>
+</table>
+<h2 id="limitations">Limitations</h2>
+<p>The current POJOEnricher has the following limitations:</p>
+<ol>
+<li>FSLoader loads the file content in memory. Though it loads only the composite key and composite value in memory, a very large amount of data would bloat the memory and make the operator go OOM. In case the filesize is large, allocate sufficient memory to the POJOEnricher.</li>
+<li>Incoming POJO should be a subset of outgoing POJO.</li>
+<li><a href="#props">includeFields</a> property should contain fields having the same name in the database column as well as the outgoing POJO. For example, if the name of the database column is "customerName", then the outgoing POJO should contain a field with the same name and the same should be added to includeFields.</li>
+<li><a href="#props">lookupFields</a> property should contain fields having the same name in the database column as well as the incoming POJO. For example, if the name of the database column is "customerId", then the incoming POJO should contain a field with the same name and the same should be added to lookupFields.</li>
+</ol>
+<h2 id="example">Example</h2>
+<p>Example for POJOEnricher can be found at: <a href="https://github.com/DataTorrent/examples/tree/master/tutorials/enricher">https://github.com/DataTorrent/examples/tree/master/tutorials/enricher</a></p>
+<h2 id="advanced">Advanced</h2>
+<h3 id="file-format-for-json-based-fsloader"><a name="JSONFileFormat"></a> File format for JSON based FSLoader</h3>
+<p>FSLoader expects file to be in specific format:</p>
+<ol>
+<li>Each line makes one record which becomes part of the store</li>
+<li>Each line is a valid JSON Object where <em>key</em> is name of the field name and <em>value</em> is the field value.</li>
+</ol>
+<p>Example for the format look like following:</p>
+<pre><code class="json">{&quot;circleId&quot;:0, &quot;circleName&quot;:&quot;A&quot;}
+{&quot;circleId&quot;:1, &quot;circleName&quot;:&quot;B&quot;}
+{&quot;circleId&quot;:2, &quot;circleName&quot;:&quot;C&quot;}
+{&quot;circleId&quot;:3, &quot;circleName&quot;:&quot;D&quot;}
+{&quot;circleId&quot;:4, &quot;circleName&quot;:&quot;E&quot;}
+{&quot;circleId&quot;:5, &quot;circleName&quot;:&quot;F&quot;}
+{&quot;circleId&quot;:6, &quot;circleName&quot;:&quot;G&quot;}
+{&quot;circleId&quot;:7, &quot;circleName&quot;:&quot;H&quot;}
+{&quot;circleId&quot;:8, &quot;circleName&quot;:&quot;I&quot;}
+{&quot;circleId&quot;:9, &quot;circleName&quot;:&quot;J&quot;}
+</code></pre>
+
+<h3 id="caching-mechanism-in-pojoenricher">Caching mechanism in POJOEnricher</h3>
+<p>POJOEnricher contains a cache which makes the lookup for keys more efficient. This is especially useful when data in the external store is not changing much. 
+However, one should carefully tune the <a href="#props">cacheExpirationInterval</a> property for desirable results.</p>
+<p>On every incoming tuple, POJOEnricher first queries the cache. If the cache contains desired record and is within expiration interval, then it uses that to
+enrich the tuple, otherwise does a lookup to configured store and the return value is used to enrich the tuple. The return value is then cached for composite key and composite value.</p>
+<p>POJOEnricher only caches the required fields for enrichment mechanism and not all fields returned by external store. This ensures optimal use of memory.</p>
+<h3 id="partitioning-of-pojoenricher">Partitioning of POJOEnricher</h3>
+<p>Being a stateless operator, POJOEnricher ensures that the built-in partitioners present in the Malhar library can be used directly, simply by setting a few properties as follows:</p>
+<h4 id="stateless-partioning-of-pojoenricher">Stateless partitioning of POJOEnricher</h4>
+<p>Stateless partitioning will ensure that POJOEnricher will be partitioned right at the start of the application and will remain partitioned throughout the lifetime of the DAG.
+POJOEnricher can be stateless partitioned by adding following lines to properties.xml:</p>
+<pre><code class="xml">  &lt;property&gt;
+    &lt;name&gt;dt.operator.{OperatorName}.attr.PARTITIONER&lt;/name&gt;
+    &lt;value&gt;com.datatorrent.common.partitioner.StatelessPartitioner:2&lt;/value&gt;
+  &lt;/property&gt;
+</code></pre>
+
+<p>where {OperatorName} is the name of the POJOEnricher operator.
+Above lines will partition POJOEnricher statically 2 times. Above value can be changed accordingly to change the number of static partitions.</p>
+<h4 id="dynamic-partitioning-of-pojoenricher">Dynamic Partitioning of POJOEnricher</h4>
+<p>Dynamic partitioning is a feature of Apex platform which changes the partition of the operator based on certain condition.
+POJOEnricher can be dynamically partitioned using 2 out-of-the-box partitioners:</p>
+<h5 id="throughput-based">Throughput based</h5>
+<p>The following code can be added to the populateDAG method of the application to dynamically partition POJOEnricher:</p>
+<pre><code class="java">    StatelessThroughputBasedPartitioner&lt;POJOEnricher&gt; partitioner = new StatelessThroughputBasedPartitioner&lt;&gt;();
+    partitioner.setCooldownMillis(conf.getLong(COOL_DOWN_MILLIS, 10000));
+    partitioner.setMaximumEvents(conf.getLong(MAX_THROUGHPUT, 30000));
+    partitioner.setMinimumEvents(conf.getLong(MIN_THROUGHPUT, 10000));
+    dag.setAttribute(pojoEnricherObj, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[]{partitioner}));
+    dag.setAttribute(pojoEnricherObj, OperatorContext.PARTITIONER, partitioner);
+</code></pre>
+
+<p>Above code will dynamically partition POJOEnricher when the throughput changes.
+If the overall throughput of POJOEnricher goes beyond 30000 or less than 10000, the platform will repartition POJOEnricher 
+to balance throughput of a single partition to be between 10000 and 30000.
+CooldownMillis of 10000 will be used as the threshold time for which the throughput change is observed.</p>
+<h5 id="latency-based">Latency based</h5>
+<p>The following code can be added to the populateDAG method of the application to dynamically partition POJOEnricher:</p>
+<pre><code class="java">    StatelessLatencyBasedPartitioner&lt;POJOEnricher&gt; partitioner = new StatelessLatencyBasedPartitioner&lt;&gt;();
+    partitioner.setCooldownMillis(conf.getLong(COOL_DOWN_MILLIS, 10000));
+    partitioner.setMaximumLatency(conf.getLong(MAX_THROUGHPUT, 10));
+    partitioner.setMinimumLatency(conf.getLong(MIN_THROUGHPUT, 3));
+    dag.setAttribute(pojoEnricherObj, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[]{partitioner}));
+    dag.setAttribute(pojoEnricherObj, OperatorContext.PARTITIONER, partitioner);
+</code></pre>
+
+<p>Above code will dynamically partition POJOEnricher when the overall latency of POJOEnricher changes.
+If the overall latency of POJOEnricher goes beyond 10 ms or less than 3 ms, the platform will repartition POJOEnricher 
+to balance latency of a single partition to be between 3 ms and 10 ms.
+CooldownMillis of 10000 will be used as the threshold time for which the latency change is observed.</p>
+              
+            </div>
+          </div>
+          <footer>
+  
+    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
+      
+        <a href="../filter/" class="btn btn-neutral float-right" title="Filter">Next <span class="icon icon-circle-arrow-right"></span></a>
+      
+      
+        <a href="../file_output/" class="btn btn-neutral" title="File Output"><span class="icon icon-circle-arrow-left"></span> Previous</a>
+      
+    </div>
+  
+
+  <hr/>
+
+  <div role="contentinfo">
+    <!-- Copyright etc -->
+    
+  </div>
+
+  Built with <a href="http://www.mkdocs.org">MkDocs</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
+</footer>
+	  
+        </div>
+      </div>
+
+    </section>
+
+  </div>
+
+<div class="rst-versions" role="note" style="cursor: pointer">
+    <span class="rst-current-version" data-toggle="rst-current-version">
+      
+      
+        <span><a href="../file_output/" style="color: #fcfcfc;">&laquo; Previous</a></span>
+      
+      
+        <span style="margin-left: 15px"><a href="../filter/" style="color: #fcfcfc">Next &raquo;</a></span>
+      
+    </span>
+</div>
+
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/apex-site/blob/357a5a07/docs/malhar-3.6/operators/file_output/index.html
----------------------------------------------------------------------
diff --git a/docs/malhar-3.6/operators/file_output/index.html b/docs/malhar-3.6/operators/file_output/index.html
new file mode 100644
index 0000000..edae340
--- /dev/null
+++ b/docs/malhar-3.6/operators/file_output/index.html
@@ -0,0 +1,405 @@
+<!DOCTYPE html>
+<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
+<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
+<head>
+  <meta charset="utf-8">
+  <meta http-equiv="X-UA-Compatible" content="IE=edge">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  
+  
+  
+  <title>File Output - Apache Apex Malhar Documentation</title>
+  
+
+  <link rel="shortcut icon" href="../../favicon.ico">
+  
+
+  
+  <link href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700' rel='stylesheet' type='text/css'>
+
+  <link rel="stylesheet" href="../../css/theme.css" type="text/css" />
+  <link rel="stylesheet" href="../../css/theme_extra.css" type="text/css" />
+  <link rel="stylesheet" href="../../css/highlight.css">
+
+  
+  <script>
+    // Current page data
+    var mkdocs_page_name = "File Output";
+    var mkdocs_page_input_path = "operators/file_output.md";
+    var mkdocs_page_url = "/operators/file_output/";
+  </script>
+  
+  <script src="../../js/jquery-2.1.1.min.js"></script>
+  <script src="../../js/modernizr-2.8.3.min.js"></script>
+  <script type="text/javascript" src="../../js/highlight.pack.js"></script>
+  <script src="../../js/theme.js"></script> 
+
+  
+</head>
+
+<body class="wy-body-for-nav" role="document">
+
+  <div class="wy-grid-for-nav">
+
+    
+    <nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
+      <div class="wy-side-nav-search">
+        <a href="../.." class="icon icon-home"> Apache Apex Malhar Documentation</a>
+        <div role="search">
+  <form id ="rtd-search-form" class="wy-form" action="../../search.html" method="get">
+    <input type="text" name="q" placeholder="Search docs" />
+  </form>
+</div>
+      </div>
+
+      <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
+        <ul class="current">
+          
+            <li>
+    <li class="toctree-l1 ">
+        <a class="" href="../..">Apache Apex Malhar</a>
+        
+    </li>
+<li>
+          
+            <li>
+    <ul class="subnav">
+    <li><span>Operators</span></li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../kafkaInputOperator/">Kafka Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jmsInputOperator/">JMS Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../file_splitter/">File Splitter</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../block_reader/">Block Reader</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../fsInputOperator/">File Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../csvParserOperator/">Csv Parser</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 current">
+        <a class="current" href="./">File Output</a>
+        
+            <ul>
+            
+                <li class="toctree-l3"><a href="#abstractfileoutputoperator">AbstractFileOutputOperator</a></li>
+                
+                    <li><a class="toctree-l4" href="#persisting-data-to-files">Persisting data to files</a></li>
+                
+                    <li><a class="toctree-l4" href="#automatic-rotation">Automatic rotation</a></li>
+                
+                    <li><a class="toctree-l4" href="#fault-tolerance">Fault-tolerance</a></li>
+                
+            
+            </ul>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../enricher/">Enricher</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../filter/">Filter</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../deduper/">Deduper</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../windowedOperator/">Windowed Operator</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jsonParser/">Json Parser</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jsonFormatter/">Json Formatter</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../transform/">Transform Operator</a>
+        
+    </li>
+
+        
+    </ul>
+<li>
+          
+        </ul>
+      </div>
+      &nbsp;
+    </nav>
+
+    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
+
+      
+      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
+        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
+        <a href="../..">Apache Apex Malhar Documentation</a>
+      </nav>
+
+      
+      <div class="wy-nav-content">
+        <div class="rst-content">
+          <div role="navigation" aria-label="breadcrumbs navigation">
+  <ul class="wy-breadcrumbs">
+    <li><a href="../..">Docs</a> &raquo;</li>
+    
+      
+        
+          <li>Operators &raquo;</li>
+        
+      
+    
+    <li>File Output</li>
+    <li class="wy-breadcrumbs-aside">
+      
+    </li>
+  </ul>
+  <hr/>
+</div>
+          <div role="main">
+            <div class="section">
+              
+                <h1 id="abstractfileoutputoperator">AbstractFileOutputOperator</h1>
+<p>The abstract file output operator in Apache Apex Malhar library &mdash; <a href="https://github.com/apache/incubator-apex-malhar/blob/master/library/src/main/java/com/datatorrent/lib/io/fs/AbstractFileOutputOperator.java"><code>AbstractFileOutputOperator</code></a> writes streaming data to files. The main features of this operator are:</p>
+<ol>
+<li>Persisting data to files.</li>
+<li>Automatic rotation of files based on:<br />
+  a. maximum length of a file.<br />
+  b. time-based rotation where time is specified using a count of application windows.</li>
+<li>Fault-tolerance.</li>
+<li>Compression and encryption of data before it is persisted.</li>
+</ol>
+<p>In this tutorial we will cover the details of the basic structure and implementation of all the above features in <code>AbstractFileOutputOperator</code>. Configuration items related to each feature are discussed as they are introduced in the section of that feature.</p>
+<h2 id="persisting-data-to-files">Persisting data to files</h2>
+<p>The principal function of this operator is to persist tuples to files efficiently. These files are created under a specific directory on the file system. The relevant configuration item is:</p>
+<p><strong>filePath</strong>: path specifying the directory where files are written.</p>
+<p>Different types of file system that are implementations of <code>org.apache.hadoop.fs.FileSystem</code> are supported. The file system instance which is used for creating streams is constructed from the <code>filePath</code> URI.</p>
+<pre><code class="java">FileSystem.newInstance(new Path(filePath).toUri(), new Configuration())
+</code></pre>
+
+<p>Tuples may belong to different files therefore expensive IO operations like creating multiple output streams, flushing of data to disk, and closing streams are handled carefully.</p>
+<h3 id="ports">Ports</h3>
+<ul>
+<li><code>input</code>: the input port on which tuples to be persisted are received.</li>
+</ul>
+<h3 id="streamscache"><code>streamsCache</code></h3>
+<p>This transient state caches output streams per file in memory. The file to which the data is appended may change with incoming tuples. It will be highly inefficient to keep re-opening streams for a file just because tuples for that file are interleaved with tuples for another file. Therefore, the operator maintains a cache of limited size with open output streams.</p>
+<p><code>streamsCache</code> is of type <code>com.google.common.cache.LoadingCache</code>. A <code>LoadingCache</code> has an attached <code>CacheLoader</code> which is responsible to load value of a key when the key is not present in the cache. Details are explained here- <a href="https://github.com/google/guava/wiki/CachesExplained">CachesExplained</a>.</p>
+<p>The operator constructs this cache in <code>setup(...)</code>. It is built with the following configuration items:</p>
+<ul>
+<li><strong>maxOpenFiles</strong>: maximum size of the cache. The cache evicts entries that haven't been used recently when the cache size is approaching this limit. <em>Default</em>: 100</li>
+<li><strong>expireStreamAfterAcessMillis</strong>: expires streams after the specified duration has passed since the stream was last accessed. <em>Default</em>: value of attribute- <code>OperatorContext.SPIN_MILLIS</code>.</li>
+</ul>
+<p>An important point to note here is that the guava cache does not perform cleanup and evict values asynchronously, that is, instantly after a value expires. Instead, it performs small amounts of maintenance during write operations, or during occasional read operations if writes are rare.</p>
+<h4 id="cacheloader">CacheLoader</h4>
+<p><code>streamsCache</code> is created with a <code>CacheLoader</code> that opens an <code>FSDataOutputStream</code> for a file which is not in the cache. The output stream is opened in either <code>append</code> or <code>create</code> mode and the basic logic to determine this is explained by the simple diagram below.</p>
+<p><img alt="Opening an output stream" src="../images/fileoutput/diagram1.png" /></p>
+<p>This process gets complicated when fault-tolerance (writing to temporary files)  and rotation is added.</p>
+<p>Following are a few configuration items used for opening the streams:</p>
+<ul>
+<li><strong>replication</strong>: specifies the replication factor of the output files. <em>Default</em>: <code>fs.getDefaultReplication(new Path(filePath))</code></li>
+<li><strong>filePermission</strong>: specifies the permission of the output files. The permission is an octal number similar to that used by the Unix chmod command. <em>Default</em>: 0777</li>
+</ul>
+<h4 id="removallistener">RemovalListener</h4>
+<p>A <code>Guava</code> cache also allows specification of removal listener which can perform some operation when an entry is removed from the cache. Since <code>streamsCache</code> is of limited size and also has time-based expiry enabled, it is imperative that when a stream is evicted from the cache it is closed properly. Therefore, we attach a removal listener to <code>streamsCache</code> which closes the stream when it is evicted.</p>
+<h3 id="setupoperatorcontext-context"><code>setup(OperatorContext context)</code></h3>
+<p>During setup the following main tasks are performed:</p>
+<ol>
+<li>FileSystem instance is created.</li>
+<li>The cache of streams is created.</li>
+<li>Files are recovered (see Fault-tolerance section).</li>
+<li>Stray part files are cleaned (see Automatic rotation section).</li>
+</ol>
+<h3 id="processtupleinput-tuple"><a name="processTuple"></a><code>processTuple(INPUT tuple)</code></h3>
+<p>The code snippet below highlights the basic steps of processing a tuple.</p>
+<pre><code class="java">protected void processTuple(INPUT tuple)
+{  
+  //which file to write to is derived from the tuple.
+  String fileName = getFileName(tuple);  
+
+  //streamsCache is queried for the output stream. If the stream is already opened then it is returned immediately otherwise the cache loader creates one.
+  FilterOutputStream fsOutput = streamsCache.get(fileName).getFilterStream();
+
+  byte[] tupleBytes = getBytesForTuple(tuple);
+
+  fsOutput.write(tupleBytes);
+}
+</code></pre>
+
+<h3 id="endwindow"><a name="endWindow"></a>endWindow()</h3>
+<p>It should be noted that while processing a tuple we do not flush the stream after every write. Since flushing is expensive it is done periodically for all the open streams in the operator's <code>endWindow()</code>.</p>
+<pre><code class="java">Map&lt;String, FSFilterStreamContext&gt; openStreams = streamsCache.asMap();
+for (FSFilterStreamContext streamContext: openStreams.values()) {
+  ...
+  //this flushes the stream
+  streamContext.finalizeContext();
+  ...
+}
+</code></pre>
+
+<p><code>FSFilterStreamContext</code> will be explained with compression and encryption.</p>
+<h3 id="teardown"><a name="teardown"></a>teardown()</h3>
+<p>When any operator in a DAG fails then the application master invokes <code>teardown()</code> for that operator and its downstream operators. In <code>AbstractFileOutputOperator</code> we have a bunch of open streams in the cache and the operator (acting as HDFS client) holds leases for all the corresponding files. It is important to release these leases for clean re-deployment. Therefore, we try to close all the open streams in <code>teardown()</code>.</p>
+<h2 id="automatic-rotation">Automatic rotation</h2>
+<p>In a streaming application where data is being continuously processed, when this output operator is used, data will be continuously written to an output file. The users may want to be able to take the data from time to time to use it, copy it out of Hadoop or do some other processing. Having all the data in a single file makes it difficult as the user needs to keep track of how much data has been read from the file each time so that the same data is not read again. Also users may already have processes and scripts in place that work with full files and not partial data from a file.</p>
+<p>To help solve these problems the operator supports creating many smaller files instead of writing to just one big file. Data is written to a file and when some condition is met the file is finalized and data is written to a new file. This is called file rotation. The user can determine when the file gets rotated. Each of these files is called a part file as they contain portion of the data.</p>
+<h3 id="part-filename">Part filename</h3>
+<p>The filename for a part file is formed by using the original file name and the part number. The part number starts from 0 and is incremented each time a new part file is created. The default filename has the format, assuming origfile represents the original filename and partnum represents the part number,</p>
+<p><code>origfile.partnum</code></p>
+<p>This naming scheme can be changed by the user. It can be done so by overriding the following method</p>
+<pre><code class="java">protected String getPartFileName(String fileName, int part)
+</code></pre>
+
+<p>This method is passed the original filename and part number as arguments and should return the part filename.</p>
+<h3 id="mechanisms">Mechanisms</h3>
+<p>The user has a couple of ways to specify when a file gets rotated. First is based on size and second on time. In the first case the files are limited by size and in the second they are rotated by time.</p>
+<h4 id="size-based">Size Based</h4>
+<p>With size based rotation the user specifies a size limit. Once the size of the current file reaches this limit the file is rotated. The size limit can be specified by setting the following property</p>
+<p><code>maxLength</code></p>
+<p>Like any other property this can be set in Java application code or in the property file.</p>
+<h4 id="time-based">Time Based</h4>
+<p>In time based rotation the user specifies a time interval. This interval is specified as a number of application windows. The files are rotated periodically once the specified number of application windows have elapsed. Since the interval is application window based it is not always exactly constant time. The interval can be specified using the following property</p>
+<p><code>rotationWindows</code></p>
+<h3 id="setupoperatorcontext-context_1"><code>setup(OperatorContext context)</code></h3>
+<p>When an operator is being started there may be stray part files and they need to be cleaned up. One common scenario, when these could be present, is in the case of failure, where a node running the operator failed and a previous instance of the operator was killed. This cleanup and other initial processing for the part files happens in the operator setup. The following diagram describes this process</p>
+<p><img alt="Rotation setup" src="../images/fileoutput/FileRotation.png" /></p>
+<h2 id="fault-tolerance">Fault-tolerance</h2>
+<p>There are two issues that should be addressed in order to make the operator fault-tolerant:</p>
+<ol>
+<li>
+<p>The operator flushes data to the filesystem every application window. This implies that after a failure when the operator is re-deployed and tuples of a window are replayed, then duplicate data will be saved to the files. This is handled by recording how much the operator has written to each file every window in a state that is checkpointed and truncating files back to the recovery checkpoint after re-deployment.</p>
+</li>
+<li>
+<p>While writing to HDFS, if the operator gets killed and didn't have the opportunity to close a file, then later when it is redeployed it will attempt to truncate/restore that file. Restoring a file may fail because the lease that the previous process (operator instance before failure) had acquired from namenode to write to a file may still linger and therefore there can be exceptions in acquiring the lease again by the new process (operator instance after failure). This is handled by always writing data to temporary files and renaming these files to actual files when a file is finalized (closed) for writing, that is, we are sure that no more data will be written to it. The relevant configuration item is:  </p>
+</li>
+<li><strong>alwaysWriteToTmp</strong>: enables/disables writing to a temporary file. <em>Default</em>: true.</li>
+</ol>
+<p>Most of the complexity in the code comes from making this operator fault-tolerant.</p>
+<h3 id="checkpointed-states-needed-for-fault-tolerance">Checkpointed states needed for fault-tolerance</h3>
+<ul>
+<li>
+<p><code>endOffsets</code>: contains the size of each file as it is being updated by the operator. It helps the operator to restore a file during recovery in operator <code>setup(...)</code> and is also used while loading a stream to find out if the operator has seen a file before.</p>
+</li>
+<li>
+<p><code>fileNameToTmpName</code>: contains the name of the temporary file per actual file. It is needed because the name of a temporary file is random. They are named based on the timestamp when the stream is created. During recovery the operator needs to know the temp file which it was writing to and if it needs restoration then it creates a new temp file and updates this mapping.</p>
+</li>
+<li>
+<p><code>finalizedFiles</code>: contains set of files which were requested to be finalized per window id.</p>
+</li>
+<li>
+<p><code>finalizedPart</code>: contains the latest <code>part</code> of each file which was requested to be finalized.</p>
+</li>
+</ul>
+<p>The use of <code>finalizedFiles</code> and <code>finalizedPart</code> are explained in detail under <a href="#requestFinalize"><code>requestFinalize(...)</code></a> method.</p>
+<h3 id="recovering-files">Recovering files</h3>
+<p>When the operator is re-deployed, it checks in its <code>setup(...)</code> method if the state of a file which it has seen before the failure is consistent with the file's state on the file system, that is, the size of the file on the file system should match the size in the <code>endOffsets</code>. When it doesn't the operator truncates the file.</p>
+<p>For example, let's say the operator wrote 100 bytes to test1.txt by the end of window 10. It wrote another 20 bytes by the end of window 12 but failed in window 13. When the operator gets re-deployed it is restored with window 10 (recovery checkpoint) state. In the previous run, by the end of window 10, the size of file on the filesystem was 100 bytes but now it is 120 bytes. Tuples for windows 11 and 12 are going to be replayed. Therefore, in order to avoid writing duplicates to test1.txt, the operator truncates the file to 100 bytes (size at the end of window 10) discarding the last 20 bytes.</p>
+<h3 id="requestfinalizestring-filename"><a name="requestFinalize"></a><code>requestFinalize(String fileName)</code></h3>
+<p>When the operator is always writing to temporary files (in order to avoid HDFS Lease exceptions), then it is necessary to rename the temporary files to the actual files once it has been determined that the files are closed. This is referred to as <em>finalization</em> of files and the method allows the user code to specify when a file is ready for finalization.</p>
+<p>In this method, the requested file (or in the case of rotation &mdash; all the file parts including the latest open part which have not yet been requested for finalization) are registered for finalization. Registration is basically adding the file names to <code>finalizedFiles</code> state and updating <code>finalizedPart</code>.</p>
+<p>The process of <em>finalization</em> of all the files which were requested till the window <em>w</em> is deferred till window <em>w</em> is committed. This is because until a window is committed it can be replayed after a failure which means that a file can be open for writing even after it was requested for finalization.</p>
+<p>When rotation is enabled, part files are requested for finalization as and when they get completed. However, when rotation is not enabled, user code needs to invoke this method because this abstract operator has no way of knowing when a file is closed.</p>
+              
+            </div>
+          </div>
+          <footer>
+  
+    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
+      
+        <a href="../enricher/" class="btn btn-neutral float-right" title="Enricher">Next <span class="icon icon-circle-arrow-right"></span></a>
+      
+      
+        <a href="../csvParserOperator/" class="btn btn-neutral" title="Csv Parser"><span class="icon icon-circle-arrow-left"></span> Previous</a>
+      
+    </div>
+  
+
+  <hr/>
+
+  <div role="contentinfo">
+    <!-- Copyright etc -->
+    
+  </div>
+
+  Built with <a href="http://www.mkdocs.org">MkDocs</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
+</footer>
+	  
+        </div>
+      </div>
+
+    </section>
+
+  </div>
+
+<div class="rst-versions" role="note" style="cursor: pointer">
+    <span class="rst-current-version" data-toggle="rst-current-version">
+      
+      
+        <span><a href="../csvParserOperator/" style="color: #fcfcfc;">&laquo; Previous</a></span>
+      
+      
+        <span style="margin-left: 15px"><a href="../enricher/" style="color: #fcfcfc">Next &raquo;</a></span>
+      
+    </span>
+</div>
+
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/apex-site/blob/357a5a07/docs/malhar-3.6/operators/file_splitter/index.html
----------------------------------------------------------------------
diff --git a/docs/malhar-3.6/operators/file_splitter/index.html b/docs/malhar-3.6/operators/file_splitter/index.html
new file mode 100644
index 0000000..8ab4b13
--- /dev/null
+++ b/docs/malhar-3.6/operators/file_splitter/index.html
@@ -0,0 +1,422 @@
+<!DOCTYPE html>
+<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
+<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
+<head>
+  <meta charset="utf-8">
+  <meta http-equiv="X-UA-Compatible" content="IE=edge">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  
+  
+  
+  <title>File Splitter - Apache Apex Malhar Documentation</title>
+  
+
+  <link rel="shortcut icon" href="../../favicon.ico">
+  
+
+  
+  <link href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700' rel='stylesheet' type='text/css'>
+
+  <link rel="stylesheet" href="../../css/theme.css" type="text/css" />
+  <link rel="stylesheet" href="../../css/theme_extra.css" type="text/css" />
+  <link rel="stylesheet" href="../../css/highlight.css">
+
+  
+  <script>
+    // Current page data
+    var mkdocs_page_name = "File Splitter";
+    var mkdocs_page_input_path = "operators/file_splitter.md";
+    var mkdocs_page_url = "/operators/file_splitter/";
+  </script>
+  
+  <script src="../../js/jquery-2.1.1.min.js"></script>
+  <script src="../../js/modernizr-2.8.3.min.js"></script>
+  <script type="text/javascript" src="../../js/highlight.pack.js"></script>
+  <script src="../../js/theme.js"></script> 
+
+  
+</head>
+
+<body class="wy-body-for-nav" role="document">
+
+  <div class="wy-grid-for-nav">
+
+    
+    <nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
+      <div class="wy-side-nav-search">
+        <a href="../.." class="icon icon-home"> Apache Apex Malhar Documentation</a>
+        <div role="search">
+  <form id ="rtd-search-form" class="wy-form" action="../../search.html" method="get">
+    <input type="text" name="q" placeholder="Search docs" />
+  </form>
+</div>
+      </div>
+
+      <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
+        <ul class="current">
+          
+            <li>
+    <li class="toctree-l1 ">
+        <a class="" href="../..">Apache Apex Malhar</a>
+        
+    </li>
+</li>
+          
+            <li>
+    <ul class="subnav">
+    <li><span>Operators</span></li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../kafkaInputOperator/">Kafka Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jmsInputOperator/">JMS Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 current">
+        <a class="current" href="./">File Splitter</a>
+        
+            <ul>
+            
+                <li class="toctree-l3"><a href="#file-splitter">File Splitter</a></li>
+                
+                    <li><a class="toctree-l4" href="#why-is-it-needed">Why is it needed?</a></li>
+                
+                    <li><a class="toctree-l4" href="#class-diagram">Class Diagram</a></li>
+                
+                    <li><a class="toctree-l4" href="#abstractfilesplitter">AbstractFileSplitter</a></li>
+                
+                    <li><a class="toctree-l4" href="#filesplitterbase">FileSplitterBase</a></li>
+                
+                    <li><a class="toctree-l4" href="#filesplitterinput">FileSplitterInput</a></li>
+                
+                    <li><a class="toctree-l4" href="#handling-of-split-records">Handling of split records</a></li>
+                
+            
+            </ul>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../block_reader/">Block Reader</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../fsInputOperator/">File Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../csvParserOperator/">Csv Parser</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../file_output/">File Output</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../enricher/">Enricher</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../filter/">Filter</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../deduper/">Deduper</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../windowedOperator/">Windowed Operator</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jsonParser/">Json Parser</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jsonFormatter/">Json Formatter</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../transform/">Transform Operator</a>
+        
+    </li>
+
+        
+    </ul>
+</li>
+          
+        </ul>
+      </div>
+      &nbsp;
+    </nav>
+
+    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
+
+      
+      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
+        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
+        <a href="../..">Apache Apex Malhar Documentation</a>
+      </nav>
+
+      
+      <div class="wy-nav-content">
+        <div class="rst-content">
+          <div role="navigation" aria-label="breadcrumbs navigation">
+  <ul class="wy-breadcrumbs">
+    <li><a href="../..">Docs</a> &raquo;</li>
+    
+      
+        
+          <li>Operators &raquo;</li>
+        
+      
+    
+    <li>File Splitter</li>
+    <li class="wy-breadcrumbs-aside">
+      
+    </li>
+  </ul>
+  <hr/>
+</div>
+          <div role="main">
+            <div class="section">
+              
+                <h1 id="file-splitter">File Splitter</h1>
+<p>This is a simple operator whose main function is to split a file virtually and create metadata describing the files and the splits. </p>
+<h2 id="why-is-it-needed">Why is it needed?</h2>
+<p>It is a common operation to read a file and parse it. This operation can be parallelized by having multiple partitions of such operators and each partition operating on different files. However, at times when a file is large, a single partition reading it can become a bottleneck.
+In these cases, throughput can be increased if instances of the partitioned operator can read and parse non-overlapping sets of file blocks. This is where file splitter comes in handy. It creates metadata of blocks of file which serves as tasks handed out to downstream operator partitions. 
+The downstream partitions can read/parse the block without the need of interacting with other partitions.</p>
+<h2 id="class-diagram">Class Diagram</h2>
+<p><img alt="FileSplitter class hierarchy" src="../images/filesplitter/classdiagram.png" /></p>
+<h2 id="abstractfilesplitter">AbstractFileSplitter</h2>
+<p>The abstract implementation defines the logic of processing <code>FileInfo</code>. This comprises the following tasks -  </p>
+<ul>
+<li>
+<p>building <code>FileMetadata</code> per file and emitting it. This metadata contains the file information such as filepath, no. of blocks in it, length of the file, all the block ids, etc.</p>
+</li>
+<li>
+<p>creating <code>BlockMetadataIterator</code> from <code>FileMetadata</code>. The iterator lazy-loads the block metadata when needed. We use an iterator because the no. of blocks in a file can be huge if the block size is small and loading all of them at once in memory may cause out of memory errors.</p>
+</li>
+<li>
+<p>retrieving <code>BlockMetadata.FileBlockMetadata</code> from the block metadata iterator and emitting it. The FileBlockMetadata contains the block id, start offset of the block, length of file in the block, etc. The number of block metadata emitted per window are controlled by <code>blocksThreshold</code> setting which by default is 1.  </p>
+</li>
+</ul>
+<p>The main utility method that performs all the above tasks is the <a href="#process_method"><code>process()</code></a> method. Concrete implementations can invoke this method whenever they have data to process.</p>
+<h3 id="ports">Ports</h3>
+<p>Declares only output ports on which file metadata and block metadata are emitted.</p>
+<ul>
+<li>filesMetadataOutput: metadata for each file is emitted on this port. </li>
+<li>blocksMetadataOutput: metadata for each block is emitted on this port. </li>
+</ul>
+<h3 id="process-method"><a name="process_method"></a><code>process()</code> method</h3>
+<p>When process() is invoked, any pending blocks from the current file are emitted on the 'blocksMetadataOutput' port. If the threshold for blocks per window is still not met then a new input file is processed - corresponding metadata is emitted on 'filesMetadataOutput' and more of its blocks are emitted. This operation is repeated until the <code>blocksThreshold</code> is reached or there are no more new files.</p>
+<pre><code class="java">  protected void process()
+  {
+    if (blockMetadataIterator != null &amp;&amp; blockCount &lt; blocksThreshold) {
+      emitBlockMetadata();
+    }
+
+    FileInfo fileInfo;
+    while (blockCount &lt; blocksThreshold &amp;&amp; (fileInfo = getFileInfo()) != null) {
+      if (!processFileInfo(fileInfo)) {
+        break;
+      }
+    }
+  }
+</code></pre>
+
+<h3 id="abstract-methods">Abstract methods</h3>
+<ul>
+<li>
+<p><code>FileInfo getFileInfo()</code>: called from within the <code>process()</code> and provides the next file to process.</p>
+</li>
+<li>
+<p><code>long getDefaultBlockSize()</code>: provides the block size which is used when user hasn't configured the size.</p>
+</li>
+<li>
+<p><code>FileStatus getFileStatus(Path path)</code>: provides the <code>org.apache.hadoop.fs.FileStatus</code> instance for a path.   </p>
+</li>
+</ul>
+<h3 id="configuration">Configuration</h3>
+<ol>
+<li><strong>blockSize</strong>: size of a block.</li>
+<li><strong>blocksThreshold</strong><a name="blocksThreshold"></a>: threshold on the number of blocks emitted by file splitter every window. This setting is used for throttling the work for downstream operators.</li>
+</ol>
+<h2 id="filesplitterbase">FileSplitterBase</h2>
+<p>Simple operator that receives tuples of type <code>FileInfo</code> on its <code>input</code> port. <code>FileInfo</code> contains the information (currently just the file path) about the file which this operator uses to create file metadata and block metadata.</p>
+<h3 id="example-application">Example application</h3>
+<p>This is a simple sub-dag that demonstrates how FileSplitterBase can be plugged into an application.
+<img alt="Application with FileSplitterBase" src="../images/filesplitter/baseexample.png" /></p>
+<p>The upstream operator emits tuples of type <code>FileInfo</code> on its output port which is connected to splitter input port. The downstream receives tuples of type <code>BlockMetadata.FileBlockMetadata</code> from the splitter's block metadata output port.</p>
+<pre><code class="java">public class ApplicationWithBaseSplitter implements StreamingApplication
+{
+  @Override
+  public void populateDAG(DAG dag, Configuration configuration)
+  {
+    JMSInput input = dag.addOperator(&quot;Input&quot;, new JMSInput());
+    FileSplitterBase splitter = dag.addOperator(&quot;Splitter&quot;, new FileSplitterBase());
+    FSSliceReader blockReader = dag.addOperator(&quot;BlockReader&quot;, new FSSliceReader());
+    ...
+    dag.addStream(&quot;file-info&quot;, input.output, splitter.input);
+    dag.addStream(&quot;block-metadata&quot;, splitter.blocksMetadataOutput, blockReader.blocksMetadataInput);
+    ...
+  }
+
+  public static class JMSInput extends AbstractJMSInputOperator&lt;AbstractFileSplitter.FileInfo&gt;
+  {
+
+    public final transient DefaultOutputPort&lt;AbstractFileSplitter.FileInfo&gt; output = new DefaultOutputPort&lt;&gt;();
+
+    @Override
+    protected AbstractFileSplitter.FileInfo convert(Message message) throws JMSException
+    {
+      //assuming the message is a text message containing the absolute path of the file.
+      return new AbstractFileSplitter.FileInfo(null, ((TextMessage)message).getText());
+    }
+
+    @Override
+    protected void emit(AbstractFileSplitter.FileInfo payload)
+    {
+      output.emit(payload);
+    }
+  }
+}
+</code></pre>
+
+<h3 id="ports_1">Ports</h3>
+<p>Declares an input port on which it receives tuples from the upstream operator. Output ports are inherited from AbstractFileSplitter.</p>
+<ul>
+<li>input: non optional port on which tuples of type <code>FileInfo</code> are received.</li>
+</ul>
+<h3 id="configuration_1">Configuration</h3>
+<ol>
+<li><strong>file</strong>: path of the file from which the filesystem is inferred. FileSplitter creates an instance of <code>org.apache.hadoop.fs.FileSystem</code> which is why this path is needed.  </li>
+</ol>
+<pre><code>FileSystem.newInstance(new Path(file).toUri(), new Configuration());
+</code></pre>
+
+<p>The fs instance is then used to fetch the default block size and <code>org.apache.hadoop.fs.FileStatus</code> for each file path.</p>
+<h2 id="filesplitterinput">FileSplitterInput</h2>
+<p>This is an input operator that discovers files itself. The scanning of the directories for new files is asynchronous which is handled by <code>TimeBasedDirectoryScanner</code>. The function of TimeBasedDirectoryScanner is to periodically scan specified directories and find files which were newly added or modified. The interaction between the operator and the scanner is depicted in the diagram below.</p>
+<p><img alt="Interaction between operator and scanner" src="../images/filesplitter/sequence.png" /></p>
+<h3 id="example-application_1">Example application</h3>
+<p>This is a simple sub-dag that demonstrates how FileSplitterInput can be plugged into an application.</p>
+<p><img alt="Application with FileSplitterInput" src="../images/filesplitter/inputexample.png" /></p>
+<p>Splitter is the input operator here that sends block metadata to the downstream BlockReader.</p>
+<pre><code class="java">  @Override
+  public void populateDAG(DAG dag, Configuration configuration)
+  {
+    FileSplitterInput input = dag.addOperator(&quot;Input&quot;, new FileSplitterInput());
+    FSSliceReader reader = dag.addOperator(&quot;Block Reader&quot;, new FSSliceReader());
+    ...
+    dag.addStream(&quot;block-metadata&quot;, input.blocksMetadataOutput, reader.blocksMetadataInput);
+    ...
+  }
+
+</code></pre>
+
+<h3 id="ports_2">Ports</h3>
+<p>Since it is an input operator there are no input ports and output ports are inherited from AbstractFileSplitter.</p>
+<h3 id="configuration_2">Configuration</h3>
+<ol>
+<li><strong>scanner</strong>: the component that scans directories asynchronously. It is of type <code>com.datatorrent.lib.io.fs.FileSplitter.TimeBasedDirectoryScanner</code>. The basic implementation of TimeBasedDirectoryScanner can be customized by users.  </li>
+</ol>
+<p>a. <strong>files</strong>: comma separated list of directories to scan.  </p>
+<p>b. <strong>recursive</strong>: flag that controls whether the directories should be scanned recursively.  </p>
+<p>c. <strong>scanIntervalMillis</strong>: interval specified in milliseconds after which another scan iteration is triggered.  </p>
+<p>d. <strong>filePatternRegularExp</strong>: regular expression for accepted file names.  </p>
+<p>e. <strong>trigger</strong>: a flag that triggers a scan iteration instantly. If the scanner thread is idling then it will initiate a scan immediately; otherwise, if a scan is in progress, then the new iteration will be triggered immediately after the completion of the current one.
+2. <strong>idempotentStorageManager</strong>: by default FileSplitterInput is idempotent. 
+Idempotency ensures that the operator will process the same set of files/blocks in a window if it has seen that window previously, i.e., before a failure. For example, let's say the operator completed window 10 and failed somewhere between window 11. If the operator gets restored at window 10 then it will process the same file/block again in window 10 which it did in the previous run before the failure. Idempotency is important but comes with higher cost because at the end of each window the operator needs to persist some state with respect to that window. Therefore, if one doesn't care about idempotency then they can set this property to be an instance of <code>com.datatorrent.lib.io.IdempotentStorageManager.NoopIdempotentStorageManager</code>.</p>
+<h2 id="handling-of-split-records">Handling of split records</h2>
+<p>Splitting of files to create tasks for downstream operator needs to be a simple operation that doesn't consume a lot of resources and is fast. This is why the file splitter doesn't open files to read. The downside of that is if the file contains records then a record may split across adjacent blocks. Handling of this is left to the downstream operator.</p>
+<p>We have created Block readers in Apex-malhar library that handle line splits efficiently. The 2 line readers- <code>AbstractFSLineReader</code> and <code>AbstractFSReadAheadLineReader</code> can be found here <a href="https://github.com/apache/incubator-apex-malhar/blob/master/library/src/main/java/com/datatorrent/lib/io/block/AbstractFSBlockReader.java">AbstractFSBlockReader</a>.</p>
+              
+            </div>
+          </div>
+          <footer>
+  
+    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
+      
+        <a href="../block_reader/" class="btn btn-neutral float-right" title="Block Reader">Next <span class="icon icon-circle-arrow-right"></span></a>
+      
+      
+        <a href="../jmsInputOperator/" class="btn btn-neutral" title="JMS Input"><span class="icon icon-circle-arrow-left"></span> Previous</a>
+      
+    </div>
+  
+
+  <hr/>
+
+  <div role="contentinfo">
+    <!-- Copyright etc -->
+    
+  </div>
+
+  Built with <a href="http://www.mkdocs.org">MkDocs</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
+</footer>
+	  
+        </div>
+      </div>
+
+    </section>
+
+  </div>
+
+<div class="rst-versions" role="note" style="cursor: pointer">
+    <span class="rst-current-version" data-toggle="rst-current-version">
+      
+      
+        <span><a href="../jmsInputOperator/" style="color: #fcfcfc;">&laquo; Previous</a></span>
+      
+      
+        <span style="margin-left: 15px"><a href="../block_reader/" style="color: #fcfcfc">Next &raquo;</a></span>
+      
+    </span>
+</div>
+
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/apex-site/blob/357a5a07/docs/malhar-3.6/operators/filter/index.html
----------------------------------------------------------------------
diff --git a/docs/malhar-3.6/operators/filter/index.html b/docs/malhar-3.6/operators/filter/index.html
new file mode 100644
index 0000000..e1531d7
--- /dev/null
+++ b/docs/malhar-3.6/operators/filter/index.html
@@ -0,0 +1,378 @@
+<!DOCTYPE html>
+<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
+<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
+<head>
+  <meta charset="utf-8">
+  <meta http-equiv="X-UA-Compatible" content="IE=edge">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  
+  
+  
+  <title>Filter - Apache Apex Malhar Documentation</title>
+  
+
+  <link rel="shortcut icon" href="../../favicon.ico">
+  
+
+  
+  <link href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700' rel='stylesheet' type='text/css'>
+
+  <link rel="stylesheet" href="../../css/theme.css" type="text/css" />
+  <link rel="stylesheet" href="../../css/theme_extra.css" type="text/css" />
+  <link rel="stylesheet" href="../../css/highlight.css">
+
+  
+  <script>
+    // Current page data
+    var mkdocs_page_name = "Filter";
+    var mkdocs_page_input_path = "operators/filter.md";
+    var mkdocs_page_url = "/operators/filter/";
+  </script>
+  
+  <script src="../../js/jquery-2.1.1.min.js"></script>
+  <script src="../../js/modernizr-2.8.3.min.js"></script>
+  <script type="text/javascript" src="../../js/highlight.pack.js"></script>
+  <script src="../../js/theme.js"></script> 
+
+  
+</head>
+
+<body class="wy-body-for-nav" role="document">
+
+  <div class="wy-grid-for-nav">
+
+    
+    <nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
+      <div class="wy-side-nav-search">
+        <a href="../.." class="icon icon-home"> Apache Apex Malhar Documentation</a>
+        <div role="search">
+  <form id ="rtd-search-form" class="wy-form" action="../../search.html" method="get">
+    <input type="text" name="q" placeholder="Search docs" />
+  </form>
+</div>
+      </div>
+
+      <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
+        <ul class="current">
+          
+            <li>
+    <li class="toctree-l1 ">
+        <a class="" href="../..">Apache Apex Malhar</a>
+        
+    </li>
+<li>
+          
+            <li>
+    <ul class="subnav">
+    <li><span>Operators</span></li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../kafkaInputOperator/">Kafka Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jmsInputOperator/">JMS Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../file_splitter/">File Splitter</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../block_reader/">Block Reader</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../fsInputOperator/">File Input</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../csvParserOperator/">Csv Parser</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../file_output/">File Output</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../enricher/">Enricher</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 current">
+        <a class="current" href="./">Filter</a>
+        
+            <ul>
+            
+                <li class="toctree-l3"><a href="#filter">Filter</a></li>
+                
+                    <li><a class="toctree-l4" href="#operator-objective">Operator Objective</a></li>
+                
+                    <li><a class="toctree-l4" href="#operator-usecase">Operator Usecase</a></li>
+                
+                    <li><a class="toctree-l4" href="#operator-information">Operator Information</a></li>
+                
+                    <li><a class="toctree-l4" href="#properties-attributes-and-ports">Properties, Attributes and Ports</a></li>
+                
+                    <li><a class="toctree-l4" href="#limitations">Limitations</a></li>
+                
+                    <li><a class="toctree-l4" href="#example">Example</a></li>
+                
+            
+            </ul>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../deduper/">Deduper</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../windowedOperator/">Windowed Operator</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jsonParser/">Json Parser</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../jsonFormatter/">Json Formatter</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../transform/">Transform Operator</a>
+        
+    </li>
+
+        
+    </ul>
+<li>
+          
+        </ul>
+      </div>
+      &nbsp;
+    </nav>
+
+    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
+
+      
+      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
+        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
+        <a href="../..">Apache Apex Malhar Documentation</a>
+      </nav>
+
+      
+      <div class="wy-nav-content">
+        <div class="rst-content">
+          <div role="navigation" aria-label="breadcrumbs navigation">
+  <ul class="wy-breadcrumbs">
+    <li><a href="../..">Docs</a> &raquo;</li>
+    
+      
+        
+          <li>Operators &raquo;</li>
+        
+      
+    
+    <li>Filter</li>
+    <li class="wy-breadcrumbs-aside">
+      
+    </li>
+  </ul>
+  <hr/>
+</div>
+          <div role="main">
+            <div class="section">
+              
+                <h1 id="filter">Filter</h1>
+<h2 id="operator-objective">Operator Objective</h2>
+<p>This operator receives a POJO (<a href="https://en.wikipedia.org/wiki/Plain_Old_Java_Object">Plain Old Java Object</a>) as an incoming tuple
+and based on the filter condition it emits filtered tuples on one output port and the rest on another output port.</p>
+<p>Filter operator supports quasi Java expressions to specify filter rule.</p>
+<p>Filter operator does not hold any state and is <strong>idempotent</strong>, <strong>fault-tolerant</strong> and <strong>statically/dynamically partitionable</strong>.</p>
+<h2 id="operator-usecase">Operator Usecase</h2>
+<ol>
+<li><strong><em>Customer data</em></strong> usually contains a field customer category/segment. One wants some analysis to be done for specific customer segment. One could use this filter operator to filter the records based on segment for some analysis for specific customer segment. </li>
+<li><strong><em>Log data</em></strong> processing pipeline may want to filter logs from specific machine/router/switch.</li>
+</ol>
+<h2 id="operator-information">Operator Information</h2>
+<ol>
+<li>Operator location: <strong><em><a href="https://github.com/apache/apex-malhar/tree/master/library">malhar-library</a></em></strong></li>
+<li>Available since: <strong><em>3.5.0</em></strong></li>
+<li>Operator state: <strong><em>Evolving</em></strong></li>
+<li>Java Packages:<ul>
+<li>Operator: <strong><em><a href="https://www.datatorrent.com/docs/apidocs/com/datatorrent/lib/filter/FilterOperator.html">com.datatorrent.lib.filter.FilterOperator</a></em></strong></li>
+</ul>
+</li>
+</ol>
+<h2 id="properties-attributes-and-ports">Properties, Attributes and Ports</h2>
+<h3 id="properties-of-filteroperator"><a name="props"></a>Properties of FilterOperator</h3>
+<table>
+<thead>
+<tr>
+<th><strong>Property</strong></th>
+<th><strong>Description</strong></th>
+<th><strong>Type</strong></th>
+<th><strong>Mandatory</strong></th>
+<th><strong>Default Value</strong></th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><em>condition</em></td>
+<td>condition/expression with which Filtering is done.</td>
+<td>String</td>
+<td>Yes</td>
+<td>N/A</td>
+</tr>
+<tr>
+<td><em>additionalExpressionFunctions</em></td>
+<td>List of imported classes/methods that should be made statically available for the expression to use.</td>
+<td><code>List&lt;String&gt;</code></td>
+<td>No</td>
+<td>Empty List</td>
+</tr>
+</tbody>
+</table>
+<h3 id="platform-attributes-that-influences-operator-behavior">Platform Attributes that influences operator behavior</h3>
+<table>
+<thead>
+<tr>
+<th><strong>Attribute</strong></th>
+<th><strong>Description</strong></th>
+<th><strong>Type</strong></th>
+<th><strong>Mandatory</strong></th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><em>port.input.attr.TUPLE_CLASS</em></td>
+<td>TUPLE_CLASS attribute on the input port indicates the class of the POJO to which incoming tuples belong</td>
+<td>Class or FQCN</td>
+<td>Yes</td>
+</tr>
+</tbody>
+</table>
+<h3 id="ports">Ports</h3>
+<table>
+<thead>
+<tr>
+<th><strong>Port</strong></th>
+<th><strong>Description</strong></th>
+<th><strong>Type</strong></th>
+<th><strong>Connection Required</strong></th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><em>input</em></td>
+<td>Tuples which need to be filtered are received on this port</td>
+<td>Object (POJO)</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td><em>truePort</em></td>
+<td>Tuples which satisfy the <a href="#props">condition</a> are emitted on this port</td>
+<td>Object (POJO)</td>
+<td>No</td>
+</tr>
+<tr>
+<td><em>falsePort</em></td>
+<td>Tuples which do not satisfy the <a href="#props">condition</a> are emitted on this port</td>
+<td>Object (POJO)</td>
+<td>No</td>
+</tr>
+</tbody>
+</table>
+<h2 id="limitations">Limitations</h2>
+<p>The current <code>FilterOperator</code> has the following limitation:</p>
+<ol>
+<li><a href="https://issues.apache.org/jira/browse/APEXMALHAR-2175">APEXMALHAR-2175</a> : Filter condition is not able to correctly handle java reserved words.</li>
+</ol>
+<h2 id="example">Example</h2>
+<p>Example for <code>FilterOperator</code> can be found at: <a href="https://github.com/DataTorrent/examples/tree/master/tutorials/filter">https://github.com/DataTorrent/examples/tree/master/tutorials/filter</a></p>
+              
+            </div>
+          </div>
+          <footer>
+  
+    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
+      
+        <a href="../deduper/" class="btn btn-neutral float-right" title="Deduper">Next <span class="icon icon-circle-arrow-right"></span></a>
+      
+      
+        <a href="../enricher/" class="btn btn-neutral" title="Enricher"><span class="icon icon-circle-arrow-left"></span> Previous</a>
+      
+    </div>
+  
+
+  <hr/>
+
+  <div role="contentinfo">
+    <!-- Copyright etc -->
+    
+  </div>
+
+  Built with <a href="http://www.mkdocs.org">MkDocs</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
+</footer>
+	  
+        </div>
+      </div>
+
+    </section>
+
+  </div>
+
+<div class="rst-versions" role="note" style="cursor: pointer">
+    <span class="rst-current-version" data-toggle="rst-current-version">
+      
+      
+        <span><a href="../enricher/" style="color: #fcfcfc;">&laquo; Previous</a></span>
+      
+      
+        <span style="margin-left: 15px"><a href="../deduper/" style="color: #fcfcfc">Next &raquo;</a></span>
+      
+    </span>
+</div>
+
+</body>
+</html>


Mime
View raw message