accumulo-commits mailing list archives

From mwa...@apache.org
Subject [1/2] accumulo-website git commit: Several minor updates to documentation
Date Fri, 02 Jun 2017 18:03:05 GMT
Repository: accumulo-website
Updated Branches:
  refs/heads/asf-site db2398b4b -> c1dde8007
  refs/heads/master 883f56620 -> 26f0eb301


Several minor updates to documentation

* Fixed java example code whitespace
* Improved linking in clients.md
* Fixed xml example whitespace


Project: http://git-wip-us.apache.org/repos/asf/accumulo-website/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo-website/commit/26f0eb30
Tree: http://git-wip-us.apache.org/repos/asf/accumulo-website/tree/26f0eb30
Diff: http://git-wip-us.apache.org/repos/asf/accumulo-website/diff/26f0eb30

Branch: refs/heads/master
Commit: 26f0eb301b066bf7ec1d927c3fb11aa8c9568957
Parents: 883f566
Author: Mike Walch <mwalch@apache.org>
Authored: Fri Jun 2 13:59:01 2017 -0400
Committer: Mike Walch <mwalch@apache.org>
Committed: Fri Jun 2 13:59:01 2017 -0400

----------------------------------------------------------------------
 _docs-unreleased/administration/tracing.md      | 48 ++++++++---------
 _docs-unreleased/development/iterators.md       | 56 ++++++++++----------
 _docs-unreleased/getting-started/clients.md     | 38 +++++++------
 .../getting-started/table_configuration.md      |  3 +-
 .../getting-started/table_design.md             |  8 +--
 5 files changed, 74 insertions(+), 79 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo-website/blob/26f0eb30/_docs-unreleased/administration/tracing.md
----------------------------------------------------------------------
diff --git a/_docs-unreleased/administration/tracing.md b/_docs-unreleased/administration/tracing.md
index bce8d0b..1fbc2ac 100644
--- a/_docs-unreleased/administration/tracing.md
+++ b/_docs-unreleased/administration/tracing.md
@@ -85,22 +85,22 @@ zookeeper path defaults to /tracers.  An example of configuring
 Hadoop to send traces to ZooTraceClient is
 
 ```xml
-  <property>
-    <name>hadoop.htrace.spanreceiver.classes</name>
-    <value>org.apache.accumulo.core.trace.ZooTraceClient</value>
-  </property>
-  <property>
-    <name>hadoop.htrace.tracer.zookeeper.host</name>
-    <value>zookeeperHost:2181</value>
-  </property>
-  <property>
-    <name>hadoop.htrace.tracer.zookeeper.path</name>
-    <value>/tracers</value>
-  </property>
-  <property>
-    <name>hadoop.htrace.tracer.span.min.ms</name>
-    <value>1</value>
-  </property>
+<property>
+  <name>hadoop.htrace.spanreceiver.classes</name>
+  <value>org.apache.accumulo.core.trace.ZooTraceClient</value>
+</property>
+<property>
+  <name>hadoop.htrace.tracer.zookeeper.host</name>
+  <value>zookeeperHost:2181</value>
+</property>
+<property>
+  <name>hadoop.htrace.tracer.zookeeper.path</name>
+  <value>/tracers</value>
+</property>
+<property>
+  <name>hadoop.htrace.tracer.span.min.ms</name>
+  <value>1</value>
+</property>
 ```
 
 The accumulo-core, accumulo-tracer, accumulo-fate and libthrift
@@ -151,14 +151,14 @@ be placed in the ClientConfiguration (if applicable) and Accumulo's `accumulo-si
 Two such properties for ZipkinSpanReceiver, listed with their default values, are
 
 ```xml
-  <property>
-    <name>trace.span.receiver.zipkin.collector-hostname</name>
-    <value>localhost</value>
-  </property>
-  <property>
-    <name>trace.span.receiver.zipkin.collector-port</name>
-    <value>9410</value>
-  </property>
+<property>
+  <name>trace.span.receiver.zipkin.collector-hostname</name>
+  <value>localhost</value>
+</property>
+<property>
+  <name>trace.span.receiver.zipkin.collector-port</name>
+  <value>9410</value>
+</property>
 ```
 
 ### Instrumenting a Client
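The "Instrumenting a Client" section that follows in tracing.md is not shown in this diff. As a rough, hedged sketch of what client-side instrumentation looks like, assuming the HTrace classes bundled with Accumulo and using placeholder application, span, and table names:

```java
import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.trace.DistributedTrace;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;

public class TracedScan {
  // 'conn' is an existing Connector; "myApplication", "client read" and "mytable" are placeholders
  static void scanWithTracing(Connector conn) throws Exception {
    DistributedTrace.enable("myApplication");   // register this client with the Accumulo tracers
    TraceScope scope = Trace.startSpan("client read", Sampler.ALWAYS);
    try {
      Scanner scanner = conn.createScanner("mytable", Authorizations.EMPTY);
      for (Entry<Key,Value> entry : scanner) {
        System.out.println(entry.getValue());
      }
    } finally {
      scope.close();              // the span is delivered to the span receiver when the scope closes
      DistributedTrace.disable();
    }
  }
}
```

Closing the scope in a finally block keeps the trace intact even when the scan throws.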

http://git-wip-us.apache.org/repos/asf/accumulo-website/blob/26f0eb30/_docs-unreleased/development/iterators.md
----------------------------------------------------------------------
diff --git a/_docs-unreleased/development/iterators.md b/_docs-unreleased/development/iterators.md
index ff9d56d..11e30b5 100644
--- a/_docs-unreleased/development/iterators.md
+++ b/_docs-unreleased/development/iterators.md
@@ -177,32 +177,32 @@ The following code is a general outline for how TabletServers invoke Iterators.
 ```java
 List<KeyValue> batch;
 Range range = getRangeFromClient();
-while(!overSizeLimit(batch)){
- SortedKeyValueIterator source = getSystemIterator();
-
- for(String clzName : getUserIterators()){
-  Class<?> clz = Class.forName(clzName);
-  SortedKeyValueIterator iter = (SortedKeyValueIterator) clz.newInstance();
-  iter.init(source, opts, env);
-  source = iter;
- }
-
- // read a batch of data to return to client
- // the last iterator, the "top"
- SortedKeyValueIterator topIter = source;
- topIter.seek(getRangeFromUser(), ...)
-
- while(topIter.hasTop() && !overSizeLimit(batch)){
-   key = topIter.getTopKey()
-   val = topIter.getTopValue()
-   batch.add(new KeyValue(key, val)
-   if(systemDataSourcesChanged()){
-     // code does not show isolation case, which will
-     // keep using same data sources until a row boundry is hit 
-     range = new Range(key, false, range.endKey(), range.endKeyInclusive());
-     break;
-   }
- }
+while (!overSizeLimit(batch)) {
+    SortedKeyValueIterator source = getSystemIterator();
+
+    for (String clzName : getUserIterators()) {
+        Class<?> clz = Class.forName(clzName);
+        SortedKeyValueIterator iter = (SortedKeyValueIterator) clz.newInstance();
+        iter.init(source, opts, env);
+        source = iter;
+    }
+
+    // read a batch of data to return to client
+    // the last iterator, the "top"
+    SortedKeyValueIterator topIter = source;
+    topIter.seek(getRangeFromUser(), ...)
+
+    while (topIter.hasTop() && !overSizeLimit(batch)) {
+        key = topIter.getTopKey();
+        val = topIter.getTopValue();
+        batch.add(new KeyValue(key, val));
+        if (systemDataSourcesChanged()) {
+            // code does not show isolation case, which will
+            // keep using same data sources until a row boundary is hit
+            range = new Range(key, false, range.endKey(), range.endKeyInclusive());
+            break;
+        }
+    }
 }
 //return batch of key values to client
 ```
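For context on the user iterators that the loop above instantiates by class name, a minimal sketch (not part of this commit) of one built on Accumulo's Filter base class, with a made-up class name, could look like:

```java
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Filter;

/**
 * A trivial user iterator: Filter implements init/seek/next and asks
 * accept() whether each key/value pair should be passed up the stack.
 */
public class NonEmptyValueFilter extends Filter {
  @Override
  public boolean accept(Key k, Value v) {
    return v.getSize() > 0;   // drop entries whose value is empty
  }
}
```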
@@ -220,10 +220,8 @@ lastKeyReturned = batch.get(batch.size() - 1).getKey();
 
 // Eventually client comes back
 // Setup as before...
-
 Range userRange = getRangeFromUser();
-Range actualRange = new Range(lastKeyReturned, false
-    userRange.getEndKey(), userRange.isEndKeyInclusive());
+Range actualRange = new Range(lastKeyReturned, false, userRange.getEndKey(), userRange.isEndKeyInclusive());
 
 // Use the actualRange, not the user provided one
 topIter.seek(actualRange);

http://git-wip-us.apache.org/repos/asf/accumulo-website/blob/26f0eb30/_docs-unreleased/getting-started/clients.md
----------------------------------------------------------------------
diff --git a/_docs-unreleased/getting-started/clients.md b/_docs-unreleased/getting-started/clients.md
index ff4fdbd..8596c2e 100644
--- a/_docs-unreleased/getting-started/clients.md
+++ b/_docs-unreleased/getting-started/clients.md
@@ -43,7 +43,8 @@ the accumulo command.
 ### Using the 'accumulo-util hadoop-jar' command
 
 If you are writing a map reduce job that accesses Accumulo, then you can use
-`accumulo-util hadoop-jar` to run those jobs. See the map reduce example.
+`accumulo-util hadoop-jar` to run those jobs. See the [MapReduce example][mapred-example]
+for more information.
 
 ## Connecting
 
@@ -116,13 +117,11 @@ BatchWriterConfig config = new BatchWriterConfig();
 config.setMaxMemory(10000000L); // bytes available to batchwriter for buffering mutations
 
 BatchWriter writer = conn.createBatchWriter("table", config)
-
 writer.addMutation(mutation);
-
 writer.close();
 ```
 
-For more example code, see the [batch writing and scanning example](https://github.com/apache/accumulo-examples/blob/master/docs/batch.md).
+For more example code, see the [batch writing and scanning example][batch].
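The mutation handed to writer.addMutation() above is assumed to exist already; as a reminder sketch (not part of this diff), building one with placeholder row, column, and visibility values looks roughly like:

```java
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.io.Text;

class MutationExample {
  static Mutation buildMutation() {
    Text rowID = new Text("row1");                              // placeholder row
    ColumnVisibility colVis = new ColumnVisibility("public");
    Value value = new Value("myValue".getBytes());

    Mutation mutation = new Mutation(rowID);
    mutation.put(new Text("myColFam"), new Text("myColQual"), colVis,
        System.currentTimeMillis(), value);
    return mutation;
  }
}
```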
 
 ### ConditionalWriter
 
@@ -141,14 +140,13 @@ mutation when a column is less than 5.
 
 In the case when a tablet server dies after a client sent a conditional
 mutation, it's not known if the mutation was applied or not.  When this happens
-the ConditionalWriter reports a status of UNKNOWN for the ConditionalMutation.
+the [ConditionalWriter] reports a status of UNKNOWN for the ConditionalMutation.
 In many cases this situation can be dealt with by simply reading the row again
 and possibly sending another conditional mutation.  If this is not sufficient,
 then a higher level of abstraction can be built by storing transactional
 information within a row.
 
-See the [reservations example](https://github.com/apache/accumulo-examples/blob/master/docs/reservations.md)
-for example code that uses the conditional writer.
+See the [reservations example][reservations] for example code that uses the [ConditionalWriter].
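As a hedged sketch of the check-and-set pattern described above (not part of this commit), with made-up table, row, and column names:

```java
import org.apache.accumulo.core.client.ConditionalWriter;
import org.apache.accumulo.core.client.ConditionalWriter.Status;
import org.apache.accumulo.core.client.ConditionalWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Condition;
import org.apache.accumulo.core.data.ConditionalMutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;

class ConditionalWriteExample {
  // 'conn' is an existing Connector; table, row, and column names are placeholders
  static void checkAndSet(Connector conn) throws Exception {
    ConditionalWriter cw = conn.createConditionalWriter("mytable", new ConditionalWriterConfig());
    try {
      // only apply the update if meta:seq currently holds the value "5"
      ConditionalMutation cm = new ConditionalMutation("row1",
          new Condition("meta", "seq").setValue("5"));
      cm.put(new Text("meta"), new Text("seq"), new Value("6".getBytes()));

      Status status = cw.write(cm).getStatus();
      if (status == Status.UNKNOWN) {
        // tserver died mid-write: re-read the row to find out whether the
        // mutation was applied, then retry or move on accordingly
      }
    } finally {
      cw.close();
    }
  }
}
```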
 
 ### Durability
 
@@ -196,15 +194,13 @@ to return a subset of the columns available.
 // specify which visibilities we are allowed to see
 Authorizations auths = new Authorizations("public");
 
-Scanner scan =
-    conn.createScanner("table", auths);
-
+Scanner scan = conn.createScanner("table", auths);
 scan.setRange(new Range("harry","john"));
 scan.fetchColumnFamily(new Text("attributes"));
 
-for(Entry<Key,Value> entry : scan) {
-    Text row = entry.getKey().getRow();
-    Value value = entry.getValue();
+for (Entry<Key,Value> entry : scan) {
+  Text row = entry.getKey().getRow();
+  Value value = entry.getValue();
 }
 ```
 
@@ -229,8 +225,7 @@ crash a tablet server. By default rows are buffered in memory, but the user
 can easily supply their own buffer if they wish to buffer to disk when rows are
 large.
 
-See the [isolation example](https://github.com/apache/accumulo-examples/blob/master/docs/isolation.md)
-for example code that uses the IsolatedScanner.
+See the [isolation example][isolation] for example code that uses the [IsolatedScanner].
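A minimal IsolatedScanner sketch (not part of this commit), assuming an existing Connector and a placeholder table name:

```java
import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IsolatedScanner;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

class IsolatedScanExample {
  static void scanIsolated(Connector conn) throws Exception {
    Scanner scanner = new IsolatedScanner(conn.createScanner("mytable", new Authorizations("public")));
    for (Entry<Key,Value> entry : scanner) {
      // each row seen here reflects a single point in time, never a partially applied update
      System.out.println(entry.getKey().getRow() + " " + entry.getValue());
    }
  }
}
```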
 
 ### BatchScanner
 
@@ -248,17 +243,16 @@ TabletServers in parallel.
 ArrayList<Range> ranges = new ArrayList<Range>();
 // populate list of ranges ...
 
-BatchScanner bscan =
-    conn.createBatchScanner("table", auths, 10);
+BatchScanner bscan = conn.createBatchScanner("table", auths, 10);
 bscan.setRanges(ranges);
 bscan.fetchColumnFamily("attributes");
 
-for(Entry<Key,Value> entry : bscan) {
-    System.out.println(entry.getValue());
+for (Entry<Key,Value> entry : bscan) {
+  System.out.println(entry.getValue());
 }
 ```
 
-For more example code, see the [batch writing and scanning example](https://github.com/apache/accumulo-examples/blob/master/docs/batch.md).
+For more example code, see the [batch writing and scanning example][batch].
 
 At this time, there is no client side isolation support for the [BatchScanner].
 You may consider using the [WholeRowIterator] with the BatchScanner to achieve
@@ -288,3 +282,7 @@ This page covers Accumulo client basics.  Below are links to additional document
 [Iterators]: {{ page.docs_baseurl }}/development/iterators
 [Proxy]: {{ page.docs_baseurl }}/development/proxy
 [MapReduce]: {{ page.docs_baseurl }}/development/mapreduce
+[mapred-example]: https://github.com/apache/accumulo-examples/blob/master/docs/mapred.md
+[batch]: https://github.com/apache/accumulo-examples/blob/master/docs/batch.md
+[reservations]: https://github.com/apache/accumulo-examples/blob/master/docs/reservations.md
+[isolation]: https://github.com/apache/accumulo-examples/blob/master/docs/isolation.md
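The hunk above also mentions pairing the [WholeRowIterator] with a BatchScanner to get row isolation; a hedged sketch of that approach (not part of this commit), with a placeholder table name and iterator priority:

```java
import java.util.Collections;
import java.util.Map.Entry;
import java.util.SortedMap;

import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.user.WholeRowIterator;
import org.apache.accumulo.core.security.Authorizations;

class WholeRowScanExample {
  static void scanWholeRows(Connector conn) throws Exception {
    BatchScanner bscan = conn.createBatchScanner("mytable", new Authorizations("public"), 10);
    try {
      bscan.setRanges(Collections.singleton(new Range()));
      bscan.addScanIterator(new IteratorSetting(25, WholeRowIterator.class));

      for (Entry<Key,Value> entry : bscan) {
        // each entry is an entire row encoded into one key/value; decode it back into its columns
        SortedMap<Key,Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue());
        System.out.println(row.firstKey().getRow() + " has " + row.size() + " columns");
      }
    } finally {
      bscan.close();
    }
  }
}
```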

http://git-wip-us.apache.org/repos/asf/accumulo-website/blob/26f0eb30/_docs-unreleased/getting-started/table_configuration.md
----------------------------------------------------------------------
diff --git a/_docs-unreleased/getting-started/table_configuration.md b/_docs-unreleased/getting-started/table_configuration.md
index 554804b..4bb7de6 100644
--- a/_docs-unreleased/getting-started/table_configuration.md
+++ b/_docs-unreleased/getting-started/table_configuration.md
@@ -52,8 +52,7 @@ localityGroups.put("content", contentColumns);
 conn.tableOperations().setLocalityGroups("mytable", localityGroups);
 
 // existing locality groups can be obtained as follows
-Map<String, Set<Text>> groups =
-    conn.tableOperations().getLocalityGroups("mytable");
+Map<String, Set<Text>> groups = conn.tableOperations().getLocalityGroups("mytable");
 ```
 
 The assignment of Column Families to Locality Groups can be changed at any time. The

http://git-wip-us.apache.org/repos/asf/accumulo-website/blob/26f0eb30/_docs-unreleased/getting-started/table_design.md
----------------------------------------------------------------------
diff --git a/_docs-unreleased/getting-started/table_design.md b/_docs-unreleased/getting-started/table_design.md
index 4f340b5..af5b17f 100644
--- a/_docs-unreleased/getting-started/table_design.md
+++ b/_docs-unreleased/getting-started/table_design.md
@@ -38,7 +38,7 @@ Scanner s = conn.createScanner("userdata", auths);
 s.setRange(r);
 s.fetchColumnFamily(new Text("age"));
 
-for(Entry<Key,Value> entry : s) {
+for (Entry<Key,Value> entry : s) {
   System.out.println(entry.getValue().toString());
 }
 ```
@@ -160,7 +160,7 @@ Scanner indexScanner = createScanner("index", auths);
 indexScanner.setRange(new Range(term, term));
 
 // we retrieve the matching rowIDs and create a set of ranges
-for(Entry<Key,Value> entry : indexScanner) {
+for (Entry<Key,Value> entry : indexScanner) {
     matchingRows.add(new Range(entry.getKey().getColumnQualifier()));
 }
 
@@ -169,7 +169,7 @@ BatchScanner bscan = conn.createBatchScanner("table", auths, 10);
 bscan.setRanges(matchingRows);
 bscan.fetchColumnFamily(new Text("attributes"));
 
-for(Entry<Key,Value> entry : bscan) {
+for (Entry<Key,Value> entry : bscan) {
     System.out.println(entry.getValue());
 }
 ```
@@ -285,7 +285,7 @@ IntersectingIterator.setColumnFamilies(iter, terms);
 bscan.addScanIterator(iter);
 bscan.setRanges(Collections.singleton(new Range()));
 
-for(Entry<Key,Value> entry : bscan) {
+for (Entry<Key,Value> entry : bscan) {
     System.out.println(" " + entry.getKey().getColumnQualifier());
 }
 ```

