accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject svn commit: r1483461 - in /accumulo/trunk: ./ assemble/ core/ core/src/main/java/org/apache/accumulo/core/client/mapreduce/ core/src/main/java/org/apache/accumulo/core/conf/ examples/ fate/src/main/java/org/apache/accumulo/fate/ fate/src/main/java/org/...
Date Thu, 16 May 2013 17:21:02 GMT
Author: ctubbsii
Date: Thu May 16 17:21:02 2013
New Revision: 1483461

URL: http://svn.apache.org/r1483461
Log:
ACCUMULO-1421, ACCUMULO-904 merge to trunk

Modified:
    accumulo/trunk/   (props changed)
    accumulo/trunk/assemble/   (props changed)
    accumulo/trunk/core/   (props changed)
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/conf/Property.java
    accumulo/trunk/examples/   (props changed)
    accumulo/trunk/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java   (props changed)
    accumulo/trunk/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java 
 (props changed)
    accumulo/trunk/pom.xml   (props changed)
    accumulo/trunk/proxy/README   (props changed)
    accumulo/trunk/server/   (props changed)
    accumulo/trunk/server/src/main/java/org/apache/accumulo/server/Accumulo.java
    accumulo/trunk/src/   (props changed)

Propchange: accumulo/trunk/
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.5:r1483424-1483451

Propchange: accumulo/trunk/assemble/
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.5/assemble:r1483424-1483451

Propchange: accumulo/trunk/core/
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.5/core:r1483424-1483451

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java?rev=1483461&r1=1483460&r2=1483461&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
(original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
Thu May 16 17:21:02 2013
@@ -785,7 +785,8 @@ public abstract class InputFormatBase<K,
         // its possible that the cache could contain complete, but old information about
a tables tablets... so clear it
         tl.invalidateCache();
         while (!tl.binRanges(ranges, binnedRanges,
-            new TCredentials(getPrincipal(context), getTokenClass(context), ByteBuffer.wrap(getToken(context)),
getInstance(context).getInstanceID())).isEmpty()) {
+            new TCredentials(getPrincipal(context), getTokenClass(context), ByteBuffer.wrap(getToken(context)),
getInstance(context).getInstanceID()))
+            .isEmpty()) {
           if (!(instance instanceof MockInstance)) {
             if (tableId == null)
               tableId = Tables.getTableId(instance, tableName);
@@ -1342,14 +1343,14 @@ public abstract class InputFormatBase<K,
     }
     
   }
-
+  
   // use reflection to pull the Configuration out of the JobContext for Hadoop 1 and Hadoop
2 compatibility
   public static Configuration getConfiguration(JobContext context) {
     try {
-      Class c = InputFormatBase.class.getClassLoader().loadClass("org.apache.hadoop.mapreduce.JobContext");
+      Class<?> c = InputFormatBase.class.getClassLoader().loadClass("org.apache.hadoop.mapreduce.JobContext");
       Method m = c.getMethod("getConfiguration");
       Object o = m.invoke(context, new Object[0]);
-      return (Configuration)o;
+      return (Configuration) o;
     } catch (Exception e) {
       throw new RuntimeException(e);
     }

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/conf/Property.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/conf/Property.java?rev=1483461&r1=1483460&r2=1483461&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/conf/Property.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/conf/Property.java Thu May
16 17:21:02 2013
@@ -34,18 +34,12 @@ public enum Property {
   // Crypto-related properties
   CRYPTO_PREFIX("crypto.", null, PropertyType.PREFIX, "Properties in this category related
to the configuration of both default and custom crypto modules.",
       true),
-  CRYPTO_MODULE_CLASS(
-      "crypto.module.class",
-      "NullCryptoModule",
-      PropertyType.STRING,
-      "Fully qualified class name of the class that implements the CryptoModule interface,
to be used in setting up encryption at rest for the WAL and (future) other parts of the code.",
-      true),
+  CRYPTO_MODULE_CLASS("crypto.module.class", "NullCryptoModule", PropertyType.STRING,
+      "Fully qualified class name of the class that implements the CryptoModule interface,
to be used in setting up encryption at rest for the WAL and "
+          + "(future) other parts of the code.", true),
   CRYPTO_CIPHER_SUITE("crypto.cipher.suite", "NullCipher", PropertyType.STRING, "Describes
the cipher suite to use for the write-ahead log", true),
-  CRYPTO_CIPHER_ALGORITHM_NAME(
-      "crypto.cipher.algorithm.name",
-      "NullCipher",
-      PropertyType.STRING,
-      "States the name of the algorithm used in the corresponding cipher suite.  Do not make
these different, unless you enjoy mysterious exceptions and bugs.",
+  CRYPTO_CIPHER_ALGORITHM_NAME("crypto.cipher.algorithm.name", "NullCipher", PropertyType.STRING,
+      "States the name of the algorithm used in the corresponding cipher suite. Do not make
these different, unless you enjoy mysterious exceptions and bugs.",
       true),
   CRYPTO_CIPHER_KEY_LENGTH("crypto.cipher.key.length", "128", PropertyType.STRING,
       "Specifies the key length *in bits* to use for the symmetric key, should probably be
128 or 256 unless you really know what you're doing", true),
@@ -138,16 +132,15 @@ public enum Property {
   TSERV_MAJC_THREAD_MAXOPEN("tserver.compaction.major.thread.files.open.max", "10", PropertyType.COUNT,
       "Max number of files a major compaction thread can open at once. "),
   TSERV_SCAN_MAX_OPENFILES("tserver.scan.files.open.max", "100", PropertyType.COUNT,
-      "Maximum total map files that all tablets in a tablet server can open for scans. "),
-  TSERV_MAX_IDLE("tserver.files.open.idle", "1m", PropertyType.TIMEDURATION, "Tablet servers
leave previously used map files open for future queries. "
-      + "This setting determines how much time an unused map file should be kept open until
it is closed."),
+      "Maximum total files that all tablets in a tablet server can open for scans. "),
+  TSERV_MAX_IDLE("tserver.files.open.idle", "1m", PropertyType.TIMEDURATION, "Tablet servers
leave previously used files open for future queries. "
+      + "This setting determines how much time an unused file should be kept open until it
is closed."),
   TSERV_NATIVEMAP_ENABLED("tserver.memory.maps.native.enabled", "true", PropertyType.BOOLEAN,
-      "An in-memory data store for accumulo implemented in c++ that increases the amount
of data " + "accumulo can hold in memory and avoids Java GC pauses."),
-  TSERV_MAXMEM(
-      "tserver.memory.maps.max",
-      "1G",
-      PropertyType.MEMORY,
-      "Maximum amount of memory that can be used to buffer data written to a tablet server.
 There are two other properties that can effectively limit memory usage table.compaction.minor.logs.threshold
and tserver.walog.max.size.  Ensure that table.compaction.minor.logs.threshold * tserver.walog.max.size
>= this property."),
+      "An in-memory data store for accumulo implemented in c++ that increases the amount
of data accumulo can hold in memory and avoids Java GC pauses."),
+  TSERV_MAXMEM("tserver.memory.maps.max", "1G", PropertyType.MEMORY,
+      "Maximum amount of memory that can be used to buffer data written to a tablet server.
There are two other properties that can effectively limit memory"
+          + " usage table.compaction.minor.logs.threshold and tserver.walog.max.size. Ensure
that table.compaction.minor.logs.threshold *"
+          + " tserver.walog.max.size >= this property."),
   TSERV_MEM_MGMT("tserver.memory.manager", "org.apache.accumulo.server.tabletserver.LargestFirstMemoryManager",
PropertyType.CLASSNAME,
       "An implementation of MemoryManger that accumulo will use."),
   TSERV_SESSION_MAXIDLE("tserver.session.idle.max", "1m", PropertyType.TIMEDURATION, "maximum
idle time for a session"),
@@ -193,15 +186,17 @@ public enum Property {
       "The maximum time for a tablet server to be in the \"memory full\" state.  If the tablet
server cannot write out memory"
           + " in this much time, it will assume there is some failure local to its node,
and quit.  A value of zero is equivalent to forever."),
   TSERV_WAL_BLOCKSIZE("tserver.wal.blocksize", "0", PropertyType.MEMORY,
-      "The size of the HDFS blocks used to write to the Write-Ahead log.  If zero, it will
be 110% of tserver.walog.max.size (that is, try to use just one block)"),
+      "The size of the HDFS blocks used to write to the Write-Ahead log.  If zero, it will
be 110% of tserver.walog.max.size (that is, try to use just one"
+          + " block)"),
   TSERV_WAL_REPLICATION("tserver.wal.replication", "0", PropertyType.COUNT,
       "The replication to use when writing the Write-Ahead log to HDFS. If zero, it will
use the HDFS default replication setting."),
-  TSERV_RECOVERY_MAX_CONCURRENT("tserver.recovery.concurrent.max", "2", PropertyType.COUNT,
"The maximum number of threads to use to sort logs during recovery"),
+  TSERV_RECOVERY_MAX_CONCURRENT("tserver.recovery.concurrent.max", "2", PropertyType.COUNT,
"The maximum number of threads to use to sort logs during"
+      + " recovery"),
   TSERV_SORT_BUFFER_SIZE("tserver.sort.buffer.size", "200M", PropertyType.MEMORY, "The amount
of memory to use when sorting logs during recovery."),
   TSERV_ARCHIVE_WALOGS("tserver.archive.walogs", "false", PropertyType.BOOLEAN, "Keep copies
of the WALOGs for debugging purposes"),
   TSERV_WORKQ_THREADS("tserver.workq.threads", "2", PropertyType.COUNT,
       "The number of threads for the distributed workq.  These threads are used for copying
failed bulk files."),
-  TSERV_WAL_SYNC("tserver.wal.sync", "true", PropertyType.BOOLEAN, 
+  TSERV_WAL_SYNC("tserver.wal.sync", "true", PropertyType.BOOLEAN,
       "Use the SYNC_BLOCK create flag to sync WAL writes to disk. Prevents problems recovering
from sudden system resets."),
   
   // properties that are specific to logger server behavior
@@ -253,9 +248,9 @@ public enum Property {
           + "adjust table.file.max.  Want to avoid the situation where only merging minor
compactions occur."),
   TABLE_MAJC_COMPACTALL_IDLETIME("table.compaction.major.everything.idle", "1h", PropertyType.TIMEDURATION,
       "After a tablet has been idle (no mutations) for this time period it may have all "
-          + "of its map file compacted into one.  There is no guarantee an idle tablet will
be compacted. "
+          + "of its files compacted into one.  There is no guarantee an idle tablet will
be compacted. "
           + "Compactions of idle tablets are only started when regular compactions are not
running. Idle "
-          + "compactions only take place for tablets that have one or more map files."),
+          + "compactions only take place for tablets that have one or more files."),
   TABLE_SPLIT_THRESHOLD("table.split.threshold", "1G", PropertyType.MEMORY, "When combined
size of files exceeds this amount a tablet is split."),
   TABLE_MINC_LOGS_MAX("table.compaction.minor.logs.threshold", "3", PropertyType.COUNT,
       "When there are more than this many write-ahead logs against a tablet, it will be minor
compacted.  See comment for property tserver.memory.maps.max"),
@@ -270,33 +265,30 @@ public enum Property {
       "This property can be set to allow the LoadBalanceByTable load balancer to change the
called Load Balancer for this table"),
   TABLE_FILE_COMPRESSION_TYPE("table.file.compress.type", "gz", PropertyType.STRING, "One
of gz,lzo,none"),
   TABLE_FILE_COMPRESSED_BLOCK_SIZE("table.file.compress.blocksize", "100K", PropertyType.MEMORY,
-      "Overrides the hadoop io.seqfile.compress.blocksize setting so that map files have
better query performance. The maximum value for this is "
-          + Integer.MAX_VALUE),
+      "Similar to the hadoop io.seqfile.compress.blocksize setting, so that files have better
query performance. The maximum value for this is "
          + Integer.MAX_VALUE + ". (This setting is the size threshold prior to compression,
and applies even if compression is disabled.)"),
   TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX("table.file.compress.blocksize.index", "128K", PropertyType.MEMORY,
-      "Determines how large index blocks can be in files that support multilevel indexes.
The maximum value for this is " + Integer.MAX_VALUE),
+      "Determines how large index blocks can be in files that support multilevel indexes.
The maximum value for this is " + Integer.MAX_VALUE + "."
          + " (This setting is the size threshold prior to compression, and applies even if
compression is disabled.)"),
   TABLE_FILE_BLOCK_SIZE("table.file.blocksize", "0B", PropertyType.MEMORY,
-      "Overrides the hadoop dfs.block.size setting so that map files have better query performance.
The maximum value for this is " + Integer.MAX_VALUE),
-  TABLE_FILE_REPLICATION("table.file.replication", "0", PropertyType.COUNT, "Determines how
many replicas to keep of a tables map files in HDFS. "
+      "Overrides the hadoop dfs.block.size setting so that files have better query performance.
The maximum value for this is " + Integer.MAX_VALUE),
+  TABLE_FILE_REPLICATION("table.file.replication", "0", PropertyType.COUNT, "Determines how
many replicas to keep of a tables' files in HDFS. "
       + "When this value is LTE 0, HDFS defaults are used."),
-  TABLE_FILE_MAX(
-      "table.file.max",
-      "15",
-      PropertyType.COUNT,
-      "Determines the max # of files each tablet in a table can have. When adjusting this
property you may want to consider adjusting table.compaction.major.ratio also.  Setting this
property to 0 will make it default to tserver.scan.files.open.max-1, this will prevent a tablet
from having more files than can be opened.  Setting this property low may throttle ingest
and increase query performance."),
+  TABLE_FILE_MAX("table.file.max", "15", PropertyType.COUNT,
+      "Determines the max # of files each tablet in a table can have. When adjusting this
property you may want to consider adjusting"
+          + " table.compaction.major.ratio also.  Setting this property to 0 will make it
default to tserver.scan.files.open.max-1, this will prevent a"
+          + " tablet from having more files than can be opened.  Setting this property low
may throttle ingest and increase query performance."),
   TABLE_WALOG_ENABLED("table.walog.enabled", "true", PropertyType.BOOLEAN, "Use the write-ahead
log to prevent the loss of data."),
   TABLE_BLOOM_ENABLED("table.bloom.enabled", "false", PropertyType.BOOLEAN, "Use bloom filters
on this table."),
   TABLE_BLOOM_LOAD_THRESHOLD("table.bloom.load.threshold", "1", PropertyType.COUNT,
-      "This number of seeks that would actually use a bloom filter must occur before a "
-          + "map files bloom filter is loaded. Set this to zero to initiate loading of bloom
" + "filters when a map file opened."),
+      "This number of seeks that would actually use a bloom filter must occur before a file's
bloom filter is loaded."
+          + " Set this to zero to initiate loading of bloom filters when a file is opened."),
   TABLE_BLOOM_SIZE("table.bloom.size", "1048576", PropertyType.COUNT, "Bloom filter size,
as number of keys."),
   TABLE_BLOOM_ERRORRATE("table.bloom.error.rate", "0.5%", PropertyType.FRACTION, "Bloom filter
error rate."),
-  TABLE_BLOOM_KEY_FUNCTOR(
-      "table.bloom.key.functor",
-      "org.apache.accumulo.core.file.keyfunctor.RowFunctor",
-      PropertyType.CLASSNAME,
+  TABLE_BLOOM_KEY_FUNCTOR("table.bloom.key.functor", "org.apache.accumulo.core.file.keyfunctor.RowFunctor",
PropertyType.CLASSNAME,
       "A function that can transform the key prior to insertion and check of bloom filter.
 org.apache.accumulo.core.file.keyfunctor.RowFunctor,"
-          + ",org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor, and org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor
are allowable values."
-          + " One can extend any of the above mentioned classes to perform specialized parsing
of the key. "),
+          + ",org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor, and org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor
are"
+          + " allowable values. One can extend any of the above mentioned classes to perform
specialized parsing of the key. "),
   TABLE_BLOOM_HASHTYPE("table.bloom.hash.type", "murmur", PropertyType.STRING, "The bloom
filter hash type"),
   TABLE_FAILURES_IGNORE("table.failures.ignore", "false", PropertyType.BOOLEAN,
       "If you want queries for your table to hang or fail when data is missing from the system,
"
@@ -341,28 +333,23 @@ public enum Property {
   // VFS ClassLoader properties
   VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY(AccumuloVFSClassLoader.VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY,
"", PropertyType.STRING,
       "Configuration for a system level vfs classloader.  Accumulo jar can be configured
here and loaded out of HDFS."),
-  VFS_CONTEXT_CLASSPATH_PROPERTY(
-      AccumuloVFSClassLoader.VFS_CONTEXT_CLASSPATH_PROPERTY,
-      null,
-      PropertyType.PREFIX,
+  VFS_CONTEXT_CLASSPATH_PROPERTY(AccumuloVFSClassLoader.VFS_CONTEXT_CLASSPATH_PROPERTY, null,
PropertyType.PREFIX,
       "Properties in this category define a classpath. These properties start with the
category prefix, followed by a context name.  "
-          + "The value is a comma seperated list of URIs. Supports full regex on filename
alone. For example general.vfs.context.classpath.cx1=hdfs://nn1:9902/mylibdir/*.jar.  "
+          + "The value is a comma separated list of URIs. Supports full regex on filename
alone. For example, "
+          + "general.vfs.context.classpath.cx1=hdfs://nn1:9902/mylibdir/*.jar.  "
           + "You can enable post delegation for a context, which will load classes from the
context first instead of the parent first.  "
           + "Do this by setting general.vfs.context.classpath.&lt;name&gt;.delegation=post,
where &lt;name&gt; is your context name.  "
           + "If delegation is not specified, it defaults to loading from parent classloader
first."),
-  VFS_CLASSLOADER_CACHE_DIR(
-      AccumuloVFSClassLoader.VFS_CACHE_DIR,
-      new File(System.getProperty("java.io.tmpdir"), "accumulo-vfs-cache-" + System.getProperty("user.name",
"nouser")).getAbsolutePath(),
-      PropertyType.ABSOLUTEPATH,
-      "Directory to use for the vfs cache. The cache will keep a soft reference to all of
the classes loaded in the VM. This should be on local disk on each node with sufficient space.
It defaults to /tmp",
-      false);
+  VFS_CLASSLOADER_CACHE_DIR(AccumuloVFSClassLoader.VFS_CACHE_DIR, new File(System.getProperty("java.io.tmpdir"),
"accumulo-vfs-cache-"
+      + System.getProperty("user.name", "nouser")).getAbsolutePath(), PropertyType.ABSOLUTEPATH,
+      "Directory to use for the vfs cache. The cache will keep a soft reference to all of
the classes loaded in the VM."
+          + " This should be on local disk on each node with sufficient space. It defaults
to /tmp", false);
   
   private String key, defaultValue, description;
   private PropertyType type;
   private boolean experimental;
   static Logger log = Logger.getLogger(Property.class);
   
-
   private Property(String name, String defaultValue, PropertyType type, String description,
boolean experimental) {
     this.key = name;
     this.defaultValue = defaultValue;
@@ -430,7 +417,6 @@ public enum Property {
     return validProperties.contains(key) || isKeyValidlyPrefixed(key);
   }
   
-  // Is this method still needed?
   public synchronized static boolean isValidTablePropertyKey(String key) {
     if (validTableProperties == null) {
       validTableProperties = new HashSet<String>();
@@ -471,8 +457,6 @@ public enum Property {
   }
   
   /**
-   * 
-   * @param key
    * @return true if this is a property whose value is expected to be a java class
    */
   public static boolean isClassProperty(String key) {

Propchange: accumulo/trunk/examples/
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.5/examples:r1483424-1483451

Propchange: accumulo/trunk/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.5/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java:r1483424-1483451

Propchange: accumulo/trunk/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.5/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java:r1483424-1483451

Propchange: accumulo/trunk/pom.xml
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.5/pom.xml:r1483424-1483451

Propchange: accumulo/trunk/proxy/README
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.5/proxy/README:r1483424-1483451

Propchange: accumulo/trunk/server/
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.5/server:r1483424-1483451

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/Accumulo.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/Accumulo.java?rev=1483461&r1=1483460&r2=1483461&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/Accumulo.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/Accumulo.java Thu May 16
17:21:02 2013
@@ -40,7 +40,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.log4j.Logger;
 import org.apache.log4j.helpers.LogLog;
 import org.apache.log4j.xml.DOMConfigurator;
@@ -212,12 +211,12 @@ public class Accumulo {
     }
     log.info("Connected to HDFS");
   }
-
+  
   private static boolean isInSafeMode(FileSystem fs) throws IOException {
     if (!(fs instanceof DistributedFileSystem))
       return false;
     DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(CachedConfiguration.getInstance());
-    // So this:  if (!dfs.setSafeMode(SafeModeAction.SAFEMODE_GET))
+    // So this: if (!dfs.setSafeMode(SafeModeAction.SAFEMODE_GET))
     // Becomes this:
     Class<?> constantClass;
     try {
@@ -252,7 +251,7 @@ public class Accumulo {
     }
     try {
       Method setSafeMode = dfs.getClass().getMethod("setSafeMode", safeModeAction);
-      return (Boolean)setSafeMode.invoke(dfs, get);
+      return (Boolean) setSafeMode.invoke(dfs, get);
     } catch (Exception ex) {
       throw new RuntimeException("cannot find method setSafeMode");
     }

Propchange: accumulo/trunk/src/
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.5/src:r1483424-1483451



Mime
View raw message