From commits-return-22641-archive-asf-public=cust-asf.ponee.io@accumulo.apache.org Mon Feb 25 22:22:31 2019
Return-Path: yarn
command.
AccumuloInputFormat has optional settings.
List<Range> ranges = new ArrayList<Range>();
- List<Pair<Text,Text>> columns = new ArrayList<Pair<Text,Text>>();
+ Collection<IteratorSetting.Column> columns = new ArrayList<IteratorSetting.Column>();
// populate ranges & columns
IteratorSetting is = new IteratorSetting(30, RegExFilter.class);
RegExFilter.setRegexs(is, ".*suffix", null, null, null, true);
diff --git a/docs/2.x/getting-started/clients.html b/docs/2.x/getting-started/clients.html
index 6e2f7ff..7cdff39 100644
--- a/docs/2.x/getting-started/clients.html
+++ b/docs/2.x/getting-started/clients.html
@@ -681,7 +681,7 @@ to return a subset of the columns available.
try (Scanner scan = client.createScanner("table", auths)) {
scan.setRange(new Range("harry","john"));
- scan.fetchColumnFamily(new Text("attributes"));
+ scan.fetchColumnFamily("attributes");
for (Entry<Key,Value> entry : scan) {
Text row = entry.getKey().getRow();
diff --git a/docs/2.x/getting-started/table_design.html b/docs/2.x/getting-started/table_design.html
index fb2cab6..9151de9 100644
--- a/docs/2.x/getting-started/table_design.html
+++ b/docs/2.x/getting-started/table_design.html
@@ -435,11 +435,9 @@ if we have the following data in a comma-separated file:
name in the column family, and a blank column qualifier:
Mutation m = new Mutation(userid);
-final String column_qualifier = "";
-m.put("age", column_qualifier, age);
-m.put("address", column_qualifier, address);
-m.put("balance", column_qualifier, account_balance);
-
+m.at().family("age").put(age);
+m.at().family("address").put(address);
+m.at().family("balance").put(account_balance);
writer.add(m);
@@ -451,7 +449,7 @@ userid as the range of a scanner and fetching specific columns:
Range r = new Range(userid, userid); // single row
Scanner s = client.createScanner("userdata", auths);
s.setRange(r);
-s.fetchColumnFamily(new Text("age"));
+s.fetchColumnFamily("age");
for (Entry<Key,Value> entry : s) {
System.out.println(entry.getValue().toString());
@@ -517,7 +515,7 @@ of a lexicoder that encodes a java Date object so that it sorts lexicographicall
// encode the rowId so that it is sorted lexicographically
Mutation mutation = new Mutation(dateEncoder.encode(hour));
-mutation.put(new Text("colf"), new Text("colq"), new Value(new byte[]{}));
+mutation.at().family("colf").qualifier("colq").put(new byte[]{});
If we want to return the most recent date first, we can reverse the sort order
@@ -533,7 +531,7 @@ with the reverse lexicoder:
// encode the rowId so that it sorts in reverse lexicographic order
Mutation mutation = new Mutation(reverseEncoder.encode(hour));
-mutation.put(new Text("colf"), new Text("colq"), new Value(new byte[]{}));
+mutation.at().family("colf").qualifier("colq").put(new byte[]{});
// first we scan the index for IDs of rows matching our query
-Text term = new Text("mySearchTerm");
-
-HashSet<Range> matchingRows = new HashSet<Range>();
+HashSet<Range> matchingRows = new HashSet<Range>();
-Scanner indexScanner = createScanner("index", auths);
-indexScanner.setRange(new Range(term, term));
+// first we scan the index for IDs of rows matching our query
+try (Scanner indexScanner = client.createScanner("index", auths)) {
+  indexScanner.setRange(Range.exact("mySearchTerm"));
-// we retrieve the matching rowIDs and create a set of ranges
-for (Entry<Key,Value> entry : indexScanner) {
+ // we retrieve the matching rowIDs and create a set of ranges
+ for (Entry<Key,Value> entry : indexScanner) {
matchingRows.add(new Range(entry.getKey().getColumnQualifier()));
+ }
}
// now we pass the set of rowIDs to the batch scanner to retrieve them
-BatchScanner bscan = client.createBatchScanner("table", auths, 10);
-bscan.setRanges(matchingRows);
-bscan.fetchColumnFamily(new Text("attributes"));
+try (BatchScanner bscan = client.createBatchScanner("table", auths, 10)) {
+ bscan.setRanges(matchingRows);
+ bscan.fetchColumnFamily("attributes");
-for (Entry<Key,Value> entry : bscan) {
+ for (Entry<Key,Value> entry : bscan) {
System.out.println(entry.getValue());
+ }
}
@@ -856,16 +854,17 @@ BatchScanner within user query code as follows:
Text[] terms = {new Text("the"), new Text("white"), new Text("house")};
-BatchScanner bscan = client.createBatchScanner(table, auths, 20);
+try (BatchScanner bscan = client.createBatchScanner(table, auths, 20)) {
-IteratorSetting iter = new IteratorSetting(20, "ii", IntersectingIterator.class);
-IntersectingIterator.setColumnFamilies(iter, terms);
+ IteratorSetting iter = new IteratorSetting(20, "ii", IntersectingIterator.class);
+ IntersectingIterator.setColumnFamilies(iter, terms);
-bscan.addScanIterator(iter);
-bscan.setRanges(Collections.singleton(new Range()));
+ bscan.addScanIterator(iter);
+ bscan.setRanges(Collections.singleton(new Range()));
-for (Entry<Key,Value> entry : bscan) {
+ for (Entry<Key,Value> entry : bscan) {
System.out.println(" " + entry.getKey().getColumnQualifier());
+ }
}
diff --git a/feed.xml b/feed.xml
index d13fd10..c68e987 100644
--- a/feed.xml
+++ b/feed.xml
@@ -6,8 +6,8 @@
https://accumulo.apache.org/
- Mon, 25 Feb 2019 10:59:35 -0500
- Mon, 25 Feb 2019 10:59:35 -0500
+ Mon, 25 Feb 2019 17:21:39 -0500
+ Mon, 25 Feb 2019 17:21:39 -0500
Jekyll v3.7.3
diff --git a/search_data.json b/search_data.json
index 0385f83..4354ad4 100644
--- a/search_data.json
+++ b/search_data.json
@@ -107,7 +107,7 @@
"docs-2-x-development-mapreduce": {
"title": "MapReduce",
- "content" : "Accumulo tables can be used as the source and destination of MapReduce jobs.General MapReduce configurationAdd Accumulo’s MapReduce API to your dependenciesIf you are using Maven, add the following dependency to your pom.xml to use Accumulo’s MapReduce API:<dependency> <groupId>org.apache.accumulo</groupId> <artifactId>accumulo-hadoop-mapreduce</artifactId> <version>2.0.0-alpha-2&am [...]
+ "content" : "Accumulo tables can be used as the source and destination of MapReduce jobs.General MapReduce configurationAdd Accumulo’s MapReduce API to your dependenciesIf you are using Maven, add the following dependency to your pom.xml to use Accumulo’s MapReduce API:<dependency> <groupId>org.apache.accumulo</groupId> <artifactId>accumulo-hadoop-mapreduce</artifactId> <version>2.0.0-alpha-2&am [...]
"url": " /docs/2.x/development/mapreduce",
"categories": "development"
},
@@ -135,7 +135,7 @@
"docs-2-x-getting-started-clients": {
"title": "Accumulo Clients",
- "content" : "Creating Client CodeIf you are using Maven to create Accumulo client code, add the following dependency to your pom:<dependency> <groupId>org.apache.accumulo</groupId> <artifactId>accumulo-core</artifactId> <version>2.0.0-alpha-2</version></dependency>When writing code that uses Accumulo, only use the Accumulo Public API.The accumulo-core artifact include [...]
+ "content" : "Creating Client CodeIf you are using Maven to create Accumulo client code, add the following dependency to your pom:<dependency> <groupId>org.apache.accumulo</groupId> <artifactId>accumulo-core</artifactId> <version>2.0.0-alpha-2</version></dependency>When writing code that uses Accumulo, only use the Accumulo Public API.The accumulo-core artifact include [...]
"url": " /docs/2.x/getting-started/clients",
"categories": "getting-started"
},
@@ -184,7 +184,7 @@
"docs-2-x-getting-started-table-design": {
"title": "Table Design",
- "content" : "Basic TableSince Accumulo tables are sorted by row ID, each table can be thought of as beingindexed by the row ID. Lookups performed by row ID can be executed quickly, by doinga binary search, first across the tablets, and then within a tablet. Clients shouldchoose a row ID carefully in order to support their desired application. A simple ruleis to select a unique identifier as the row ID for each entity to be stored and assignall the other attributes to be tracked to [...]
+ "content" : "Basic TableSince Accumulo tables are sorted by row ID, each table can be thought of as beingindexed by the row ID. Lookups performed by row ID can be executed quickly, by doinga binary search, first across the tablets, and then within a tablet. Clients shouldchoose a row ID carefully in order to support their desired application. A simple ruleis to select a unique identifier as the row ID for each entity to be stored and assignall the other attributes to be tracked to [...]
"url": " /docs/2.x/getting-started/table_design",
"categories": "getting-started"
},