accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject [34/61] [abbrv] [partial] accumulo git commit: ACCUMULO-722 put trunk in my sandbox
Date Thu, 03 Mar 2016 21:59:59 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/conf/Property.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/1.5/core/src/main/java/org/apache/accumulo/core/conf/Property.java
new file mode 100644
index 0000000..e25ce5f
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -0,0 +1,371 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.conf;
+
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.start.classloader.AccumuloClassLoader;
+
+/**
+ * Enumeration of all Accumulo configuration properties. Each constant carries the property
+ * key string, its default value, its {@link PropertyType} (used for validation), and a
+ * human-readable description used to generate configuration documentation.
+ */
+public enum Property {
+  // instance properties (must be the same for every node in an instance)
+  INSTANCE_PREFIX("instance.", null, PropertyType.PREFIX,
+      "Properties in this category must be consistent throughout a cloud. This is enforced and servers won't be able to communicate if these differ."),
+  INSTANCE_ZK_HOST("instance.zookeeper.host", "localhost:2181", PropertyType.HOSTLIST, "Comma separated list of zookeeper servers"),
+  INSTANCE_ZK_TIMEOUT("instance.zookeeper.timeout", "30s", PropertyType.TIMEDURATION,
+      "Zookeeper session timeout; max value when represented as milliseconds should be no larger than " + Integer.MAX_VALUE),
+  INSTANCE_DFS_URI("instance.dfs.uri", "", PropertyType.URI,
+      "The url accumulo should use to connect to DFS.  If this is empty, accumulo will obtain this information from the hadoop configuration."),
+  INSTANCE_DFS_DIR("instance.dfs.dir", "/accumulo", PropertyType.ABSOLUTEPATH,
+      "HDFS directory in which accumulo instance will run.  Do not change after accumulo is initialized."),
+  INSTANCE_SECRET("instance.secret", "DEFAULT", PropertyType.STRING,
+      "A secret unique to a given instance that all servers must know in order to communicate with one another."
+          + " Change it before initialization. To change it later use ./bin/accumulo accumulo.server.util.ChangeSecret [oldpasswd] [newpasswd], "
+          + " and then update conf/accumulo-site.xml everywhere."),
+  
+  // general properties
+  GENERAL_PREFIX("general.", null, PropertyType.PREFIX,
+      "Properties in this category affect the behavior of accumulo overall, but do not have to be consistent throughout a cloud."),
+  GENERAL_CLASSPATHS(AccumuloClassLoader.CLASSPATH_PROPERTY_NAME, AccumuloClassLoader.DEFAULT_CLASSPATH_VALUE, PropertyType.STRING,
+      "A list of all of the places to look for a class. Order does matter, as it will look for the jar "
+          + "starting in the first location to the last. Please note, hadoop conf and hadoop lib directories NEED to be here, "
+          + "along with accumulo lib and zookeeper directory. Supports full regex on filename alone."), // needs special treatment in accumulo start jar
+  GENERAL_DYNAMIC_CLASSPATHS(AccumuloClassLoader.DYNAMIC_CLASSPATH_PROPERTY_NAME, AccumuloClassLoader.DEFAULT_DYNAMIC_CLASSPATH_VALUE, PropertyType.STRING,
+      "A list of all of the places where changes in jars or classes will force a reload of the classloader."),
+  GENERAL_RPC_TIMEOUT("general.rpc.timeout", "120s", PropertyType.TIMEDURATION, "Time to wait on I/O for simple, short RPC calls"),
+  GENERAL_KERBEROS_KEYTAB("general.kerberos.keytab", "", PropertyType.PATH, "Path to the kerberos keytab to use. Leave blank if not using kerberoized hdfs"),
+  GENERAL_KERBEROS_PRINCIPAL("general.kerberos.principal", "", PropertyType.STRING, "Name of the kerberos principal to use. _HOST will automatically be "
+      + "replaced by the machines hostname in the hostname portion of the principal. Leave blank if not using kerberoized hdfs"),
+  
+  // properties that are specific to master server behavior
+  MASTER_PREFIX("master.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the master server"),
+  MASTER_CLIENTPORT("master.port.client", "9999", PropertyType.PORT, "The port used for handling client connections on the master"),
+  MASTER_TABLET_BALANCER("master.tablet.balancer", "org.apache.accumulo.server.master.balancer.TableLoadBalancer", PropertyType.CLASSNAME,
+      "The balancer class that accumulo will use to make tablet assignment and migration decisions."),
+  MASTER_RECOVERY_MAXAGE("master.recovery.max.age", "60m", PropertyType.TIMEDURATION, "Recovery files older than this age will be removed."),
+  MASTER_RECOVERY_MAXTIME("master.recovery.time.max", "30m", PropertyType.TIMEDURATION, "The maximum time to attempt recovery before giving up"),
+  MASTER_BULK_RETRIES("master.bulk.retries", "3", PropertyType.COUNT, "The number of attempts to bulk-load a file before giving up."),
+  MASTER_BULK_THREADPOOL_SIZE("master.bulk.threadpool.size", "5", PropertyType.COUNT, "The number of threads to use when coordinating a bulk-import."),
+  MASTER_MINTHREADS("master.server.threads.minimum", "2", PropertyType.COUNT, "The minimum number of threads to use to handle incoming requests."),
+  MASTER_THREADCHECK("master.server.threadcheck.time", "1s", PropertyType.TIMEDURATION, "The time between adjustments of the server thread pool."),
+  MASTER_RECOVERY_DELAY("master.recovery.delay", "10s", PropertyType.TIMEDURATION,
+      "When a tablet server's lock is deleted, it takes time for it to completely quit. This delay gives it time before log recoveries begin."),
+  
+  // properties that are specific to tablet server behavior
+  TSERV_PREFIX("tserver.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the tablet servers"),
+  TSERV_CLIENT_TIMEOUT("tserver.client.timeout", "3s", PropertyType.TIMEDURATION, "Time to wait for clients to continue scans before closing a session."),
+  TSERV_DEFAULT_BLOCKSIZE("tserver.default.blocksize", "1M", PropertyType.MEMORY, "Specifies a default blocksize for the tserver caches"),
+  TSERV_DATACACHE_SIZE("tserver.cache.data.size", "100M", PropertyType.MEMORY, "Specifies the size of the cache for file data blocks."),
+  TSERV_INDEXCACHE_SIZE("tserver.cache.index.size", "512M", PropertyType.MEMORY, "Specifies the size of the cache for file indices."),
+  TSERV_PORTSEARCH("tserver.port.search", "false", PropertyType.BOOLEAN, "if the ports above are in use, search higher ports until one is available"),
+  TSERV_CLIENTPORT("tserver.port.client", "9997", PropertyType.PORT, "The port used for handling client connections on the tablet servers"),
+  TSERV_MUTATION_QUEUE_MAX("tserver.mutation.queue.max", "256K", PropertyType.MEMORY,
+      "The amount of memory to use to store write-ahead-log mutations-per-session before flushing them."),
+  TSERV_TABLET_SPLIT_FINDMIDPOINT_MAXOPEN("tserver.tablet.split.midpoint.files.max", "30", PropertyType.COUNT,
+      "To find a tablets split points, all index files are opened. This setting determines how many index "
+          + "files can be opened at once. When there are more index files than this setting multiple passes "
+          + "must be made, which is slower. However opening too many files at once can cause problems."),
+  TSERV_WALOG_MAX_SIZE("tserver.walog.max.size", "1G", PropertyType.MEMORY,
+      "The maximum size for each write-ahead log.  See comment for property tserver.memory.maps.max"),
+  TSERV_MAJC_DELAY("tserver.compaction.major.delay", "30s", PropertyType.TIMEDURATION,
+      "Time a tablet server will sleep between checking which tablets need compaction."),
+  TSERV_MAJC_THREAD_MAXOPEN("tserver.compaction.major.thread.files.open.max", "10", PropertyType.COUNT,
+      "Max number of files a major compaction thread can open at once. "),
+  TSERV_SCAN_MAX_OPENFILES("tserver.scan.files.open.max", "100", PropertyType.COUNT,
+      "Maximum total map files that all tablets in a tablet server can open for scans. "),
+  TSERV_MAX_IDLE("tserver.files.open.idle", "1m", PropertyType.TIMEDURATION, "Tablet servers leave previously used map files open for future queries. "
+      + "This setting determines how much time an unused map file should be kept open until it is closed."),
+  TSERV_NATIVEMAP_ENABLED("tserver.memory.maps.native.enabled", "true", PropertyType.BOOLEAN,
+      "An in-memory data store for accumulo implemented in c++ that increases the amount of data " + "accumulo can hold in memory and avoids Java GC pauses."),
+  TSERV_MAXMEM(
+      "tserver.memory.maps.max",
+      "1G",
+      PropertyType.MEMORY,
+      "Maximum amount of memory that can be used to buffer data written to a tablet server.  There are two other properties that can effectively limit memory usage table.compaction.minor.logs.threshold and tserver.walog.max.size.  Ensure that table.compaction.minor.logs.threshold * tserver.walog.max.size >= this property."),
+  TSERV_MEM_MGMT("tserver.memory.manager", "org.apache.accumulo.server.tabletserver.LargestFirstMemoryManager", PropertyType.CLASSNAME,
+      "An implementation of MemoryManger that accumulo will use."),
+  TSERV_SESSION_MAXIDLE("tserver.session.idle.max", "1m", PropertyType.TIMEDURATION, "maximum idle time for a session"),
+  TSERV_READ_AHEAD_MAXCONCURRENT("tserver.readahead.concurrent.max", "16", PropertyType.COUNT,
+      "The maximum number of concurrent read ahead that will execute.  This effectively"
+          + " limits the number of long running scans that can run concurrently per tserver."),
+  TSERV_METADATA_READ_AHEAD_MAXCONCURRENT("tserver.metadata.readahead.concurrent.max", "8", PropertyType.COUNT,
+      "The maximum number of concurrent metadata read ahead that will execute."),
+  TSERV_MIGRATE_MAXCONCURRENT("tserver.migrations.concurrent.max", "1", PropertyType.COUNT,
+      "The maximum number of concurrent tablet migrations for a tablet server"),
+  TSERV_MAJC_MAXCONCURRENT("tserver.compaction.major.concurrent.max", "3", PropertyType.COUNT,
+      "The maximum number of concurrent major compactions for a tablet server"),
+  TSERV_MINC_MAXCONCURRENT("tserver.compaction.minor.concurrent.max", "4", PropertyType.COUNT,
+      "The maximum number of concurrent minor compactions for a tablet server"),
+  TSERV_BLOOM_LOAD_MAXCONCURRENT("tserver.bloom.load.concurrent.max", "4", PropertyType.COUNT,
+      "The number of concurrent threads that will load bloom filters in the background. "
+          + "Setting this to zero will make bloom filters load in the foreground."),
+  TSERV_MONITOR_FS(
+      "tserver.monitor.fs",
+      "true",
+      PropertyType.BOOLEAN,
+      "When enabled the tserver will monitor file systems and kill itself when one switches from rw to ro.  This is usually and indication that Linux has detected a bad disk."),
+  TSERV_MEMDUMP_DIR(
+      "tserver.dir.memdump",
+      "/tmp",
+      PropertyType.PATH,
+      "A long running scan could possibly hold memory that has been minor compacted.  To prevent this, the in memory map is dumped to a local file and the scan is switched to that local file.  We can not switch to the minor compacted file because it may have been modified by iterators.  The file dumped to the local dir is an exact copy of what was in memory."),
+  TSERV_LOCK_MEMORY("tserver.memory.lock", "false", PropertyType.BOOLEAN,
+      "The tablet server must communicate with zookeeper frequently to maintain its locks.  If the tablet server's memory is swapped out"
+          + " the java garbage collector can stop all processing for long periods.  Change this property to true and the tablet server will "
+          + " attempt to lock all of its memory to RAM, which may reduce delays during java garbage collection.  You will have to modify the "
+          + " system limit for \"max locked memory\". This feature is only available when running on Linux.  Alternatively you may also "
+          + " want to set /proc/sys/vm/swappiness to zero (again, this is Linux-specific)."),
+  TSERV_BULK_PROCESS_THREADS("tserver.bulk.process.threads", "1", PropertyType.COUNT,
+      "The master will task a tablet server with pre-processing a bulk file prior to assigning it to the appropriate tablet servers.  This configuration"
+          + " value controls the number of threads used to process the files."),
+  TSERV_BULK_ASSIGNMENT_THREADS("tserver.bulk.assign.threads", "1", PropertyType.COUNT,
+      "The master delegates bulk file processing and assignment to tablet servers. After the bulk file has been processed, the tablet server will assign"
+          + " the file to the appropriate tablets on all servers.  This property controls the number of threads used to communicate to the other servers."),
+  TSERV_BULK_RETRY("tserver.bulk.retry.max", "3", PropertyType.COUNT,
+      "The number of times the tablet server will attempt to assign a file to a tablet as it migrates and splits."),
+  TSERV_MINTHREADS("tserver.server.threads.minimum", "2", PropertyType.COUNT, "The minimum number of threads to use to handle incoming requests."),
+  TSERV_THREADCHECK("tserver.server.threadcheck.time", "1s", PropertyType.TIMEDURATION, "The time between adjustments of the server thread pool."),
+  TSERV_HOLD_TIME_SUICIDE("tserver.hold.time.max", "5m", PropertyType.TIMEDURATION,
+      "The maximum time for a tablet server to be in the \"memory full\" state.  If the tablet server cannot write out memory"
+          + " in this much time, it will assume there is some failure local to its node, and quit.  A value of zero is equivalent to forever."),
+  TSERV_WAL_BLOCKSIZE("tserver.wal.blocksize", "0", PropertyType.MEMORY,
+      "The size of the HDFS blocks used to write to the Write-Ahead log.  If zero, it will be 110% of tserver.walog.max.size (that is, try to use just one block)"),
+  TSERV_WAL_REPLICATION("tserver.wal.replication", "0", PropertyType.COUNT,
+      "The replication to use when writing the Write-Ahead log to HDFS. If zero, it will use the HDFS default replication setting."),
+  TSERV_RECOVERY_MAX_CONCURRENT("tserver.recovery.concurrent.max", "2", PropertyType.COUNT, "The maximum number of threads to use to sort logs during recovery"),
+  TSERV_SORT_BUFFER_SIZE("tserver.sort.buffer.size", "200M", PropertyType.MEMORY, "The amount of memory to use when sorting logs during recovery."),
+  TSERV_ARCHIVE_WALOGS("tserver.archive.walogs", "false", PropertyType.BOOLEAN, "Keep copies of the WALOGs for debugging purposes"),
+  TSERV_WORKQ_THREADS("tserver.workq.threads", "2", PropertyType.COUNT,
+      "The number of threads for the distributed workq.  These threads are used for copying failed bulk files."),
+
+  // properties that are specific to logger server behavior
+  LOGGER_PREFIX("logger.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the write-ahead logger servers"),
+  LOGGER_DIR("logger.dir.walog", "walogs", PropertyType.PATH,
+      "The directory used to store write-ahead logs on the local filesystem. It is possible to specify a comma-separated list of directories."),
+  
+  // accumulo garbage collector properties
+  GC_PREFIX("gc.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the accumulo garbage collector."),
+  GC_CYCLE_START("gc.cycle.start", "30s", PropertyType.TIMEDURATION, "Time to wait before attempting to garbage collect any old files."),
+  GC_CYCLE_DELAY("gc.cycle.delay", "5m", PropertyType.TIMEDURATION, "Time between garbage collection cycles. In each cycle, old files "
+      + "no longer in use are removed from the filesystem."),
+  GC_PORT("gc.port.client", "50091", PropertyType.PORT, "The listening port for the garbage collector's monitor service"),
+  GC_DELETE_THREADS("gc.threads.delete", "16", PropertyType.COUNT, "The number of threads used to delete files"),
+  
+  // properties that are specific to the monitor server behavior
+  MONITOR_PREFIX("monitor.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the monitor web server."),
+  MONITOR_PORT("monitor.port.client", "50095", PropertyType.PORT, "The listening port for the monitor's http service"),
+  MONITOR_LOG4J_PORT("monitor.port.log4j", "4560", PropertyType.PORT, "The listening port for the monitor's log4j logging collection."),
+  MONITOR_BANNER_TEXT("monitor.banner.text", "", PropertyType.STRING, "The banner text displayed on the monitor page."),
+  MONITOR_BANNER_COLOR("monitor.banner.color", "#c4c4c4", PropertyType.STRING, "The color of the banner text displayed on the monitor page."),
+  MONITOR_BANNER_BACKGROUND("monitor.banner.background", "#304065", PropertyType.STRING,
+      "The background color of the banner text displayed on the monitor page."),
+  MONITOR_SSL_KEYSTORE("monitor.ssl.keyStore", "", PropertyType.PATH, "The keystore for enabling monitor SSL."),
+  MONITOR_SSL_KEYSTOREPASS("monitor.ssl.keyStorePassword", "", PropertyType.STRING, "The keystore password for enabling monitor SSL."),
+  MONITOR_SSL_TRUSTSTORE("monitor.ssl.trustStore", "", PropertyType.PATH, "The truststore for enabling monitor SSL."),
+  MONITOR_SSL_TRUSTSTOREPASS("monitor.ssl.trustStorePassword", "", PropertyType.STRING, "The truststore password for enabling monitor SSL."),
+  
+  TRACE_PREFIX("trace.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of distributed tracing."),
+  TRACE_PORT("trace.port.client", "12234", PropertyType.PORT, "The listening port for the trace server"),
+  TRACE_TABLE("trace.table", "trace", PropertyType.STRING, "The name of the table to store distributed traces"),
+  TRACE_USER("trace.user", "root", PropertyType.STRING, "The name of the user to store distributed traces"),
+  TRACE_PASSWORD("trace.password", "secret", PropertyType.STRING, "The password for the user used to store distributed traces"),
+  
+  // per table properties
+  TABLE_PREFIX("table.", null, PropertyType.PREFIX, "Properties in this category affect tablet server treatment of tablets, but can be configured "
+      + "on a per-table basis. Setting these properties in the site file will override the default globally "
+      + "for all tables and not any specific table. However, both the default and the global setting can be "
+      + "overridden per table using the table operations API or in the shell, which sets the overridden value "
+      + "in zookeeper. Restarting accumulo tablet servers after setting these properties in the site file "
+      + "will cause the global setting to take effect. However, you must use the API or the shell to change "
+      + "properties in zookeeper that are set on a table."),
+  TABLE_MAJC_RATIO(
+      "table.compaction.major.ratio",
+      "3",
+      PropertyType.FRACTION,
+      "minimum ratio of total input size to maximum input file size for running a major compaction.   When adjusting this property you may want to also adjust table.file.max.  Want to avoid the situation where only merging minor compactions occur."),
+  TABLE_MAJC_COMPACTALL_IDLETIME("table.compaction.major.everything.idle", "1h", PropertyType.TIMEDURATION,
+      "After a tablet has been idle (no mutations) for this time period it may have all "
+          + "of its map file compacted into one.  There is no guarantee an idle tablet will be compacted. "
+          + "Compactions of idle tablets are only started when regular compactions are not running. Idle "
+          + "compactions only take place for tablets that have one or more map files."),
+  TABLE_SPLIT_THRESHOLD("table.split.threshold", "1G", PropertyType.MEMORY, "When combined size of files exceeds this amount a tablet is split."),
+  TABLE_MINC_LOGS_MAX("table.compaction.minor.logs.threshold", "3", PropertyType.COUNT,
+      "When there are more than this many write-ahead logs against a tablet, it will be minor compacted.  See comment for property tserver.memory.maps.max"),
+  TABLE_MINC_COMPACT_IDLETIME("table.compaction.minor.idle", "5m", PropertyType.TIMEDURATION,
+      "After a tablet has been idle (no mutations) for this time period it may have its "
+          + "in-memory map flushed to disk in a minor compaction.  There is no guarantee an idle " + "tablet will be compacted."),
+  TABLE_SCAN_MAXMEM("table.scan.max.memory", "512K", PropertyType.MEMORY,
+      "The maximum amount of memory that will be used to cache results of a client query/scan. "
+          + "Once this limit is reached, the buffered data is sent to the client."),
+  TABLE_FILE_TYPE("table.file.type", RFile.EXTENSION, PropertyType.STRING, "Change the type of file a table writes"),
+  TABLE_LOAD_BALANCER("table.balancer", "org.apache.accumulo.server.master.balancer.DefaultLoadBalancer", PropertyType.STRING,
+      "This property can be set to allow the LoadBalanceByTable load balancer to change the called Load Balancer for this table"),
+  TABLE_FILE_COMPRESSION_TYPE("table.file.compress.type", "gz", PropertyType.STRING, "One of gz,lzo,none"),
+  TABLE_FILE_COMPRESSED_BLOCK_SIZE("table.file.compress.blocksize", "100K", PropertyType.MEMORY,
+      "Overrides the hadoop io.seqfile.compress.blocksize setting so that map files have better query performance. " + "The maximum value for this is "
+          + Integer.MAX_VALUE),
+  TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX("table.file.compress.blocksize.index", "128K", PropertyType.MEMORY,
+      "Determines how large index blocks can be in files that support multilevel indexes. The maximum value for this is " + Integer.MAX_VALUE),
+  TABLE_FILE_BLOCK_SIZE("table.file.blocksize", "0B", PropertyType.MEMORY,
+      "Overrides the hadoop dfs.block.size setting so that map files have better query performance. " + "The maximum value for this is " + Integer.MAX_VALUE),
+  TABLE_FILE_REPLICATION("table.file.replication", "0", PropertyType.COUNT, "Determines how many replicas to keep of a tables map files in HDFS. "
+      + "When this value is LTE 0, HDFS defaults are used."),
+  TABLE_FILE_MAX(
+      "table.file.max",
+      "15",
+      PropertyType.COUNT,
+      "Determines the max # of files each tablet in a table can have. When adjusting this property you may want to consider adjusting table.compaction.major.ratio also.  Setting this property to 0 will make it default to tserver.scan.files.open.max-1, this will prevent a tablet from having more files than can be opened.  Setting this property low may throttle ingest and increase query performance."),
+  TABLE_WALOG_ENABLED("table.walog.enabled", "true", PropertyType.BOOLEAN, "Use the write-ahead log to prevent the loss of data."),
+  TABLE_BLOOM_ENABLED("table.bloom.enabled", "false", PropertyType.BOOLEAN, "Use bloom filters on this table."),
+  TABLE_BLOOM_LOAD_THRESHOLD("table.bloom.load.threshold", "1", PropertyType.COUNT,
+      "This number of seeks that would actually use a bloom filter must occur before a "
+          + "map files bloom filter is loaded. Set this to zero to initiate loading of bloom " + "filters when a map file opened."),
+  TABLE_BLOOM_SIZE("table.bloom.size", "1048576", PropertyType.COUNT, "Bloom filter size, as number of keys."),
+  TABLE_BLOOM_ERRORRATE("table.bloom.error.rate", "0.5%", PropertyType.FRACTION, "Bloom filter error rate."),
+  TABLE_BLOOM_KEY_FUNCTOR(
+      "table.bloom.key.functor",
+      "org.apache.accumulo.core.file.keyfunctor.RowFunctor",
+      PropertyType.CLASSNAME,
+      "A function that can transform the key prior to insertion and check of bloom filter.  org.apache.accumulo.core.file.keyfunctor.RowFunctor,"
+          + ",org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor, and org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor are allowable values."
+          + " One can extend any of the above mentioned classes to perform specialized parsing of the key. "),
+  TABLE_BLOOM_HASHTYPE("table.bloom.hash.type", "murmur", PropertyType.STRING, "The bloom filter hash type"),
+  TABLE_FAILURES_IGNORE("table.failures.ignore", "false", PropertyType.BOOLEAN,
+      "If you want queries for your table to hang or fail when data is missing from the system, "
+          + "then set this to false. When this set to true missing data will be reported but queries "
+          + "will still run possibly returning a subset of the data."),
+  TABLE_DEFAULT_SCANTIME_VISIBILITY("table.security.scan.visibility.default", "", PropertyType.STRING,
+      "The security label that will be assumed at scan time if an entry does not have a visibility set.<br />"
+          + "Note: An empty security label is displayed as []. The scan results will show an empty visibility even if "
+          + "the visibility from this setting is applied to the entry.<br />"
+          + "CAUTION: If a particular key has an empty security label AND its table's default visibility is also empty, "
+          + "access will ALWAYS be granted for users with permission to that table. Additionally, if this field is changed, "
+          + "all existing data with an empty visibility label will be interpreted with the new label on the next scan."),
+  TABLE_LOCALITY_GROUPS("table.groups.enabled", "", PropertyType.STRING, "A comma separated list of locality group names to enable for this table."),
+  TABLE_CONSTRAINT_PREFIX("table.constraint.", null, PropertyType.PREFIX,
+      "Properties in this category are per-table properties that add constraints to a table. "
+          + "These properties start with the category prefix, followed by a number, and their values "
+          + "correspond to a fully qualified Java class that implements the Constraint interface.<br />"
+          + "For example, table.constraint.1 = org.apache.accumulo.core.constraints.MyCustomConstraint "
+          + "and table.constraint.2 = my.package.constraints.MySecondConstraint"),
+  TABLE_INDEXCACHE_ENABLED("table.cache.index.enable", "true", PropertyType.BOOLEAN, "Determines whether index cache is enabled."),
+  TABLE_BLOCKCACHE_ENABLED("table.cache.block.enable", "false", PropertyType.BOOLEAN, "Determines whether file block cache is enabled."),
+  TABLE_ITERATOR_PREFIX("table.iterator.", null, PropertyType.PREFIX,
+      "Properties in this category specify iterators that are applied at various stages (scopes) of interaction "
+          + "with a table. These properties start with the category prefix, followed by a scope (minc, majc, scan, etc.), "
+          + "followed by a period, followed by a name, as in table.iterator.scan.vers, or table.iterator.scan.custom. "
+          + "The values for these properties are a number indicating the ordering in which it is applied, and a class name "
+          + "such as table.iterator.scan.vers = 10,org.apache.accumulo.core.iterators.VersioningIterator<br /> "
+          + "These iterators can take options if additional properties are set that look like this property, "
+          + "but are suffixed with a period, followed by 'opt' followed by another period, and a property name.<br />"
+          + "For example, table.iterator.minc.vers.opt.maxVersions = 3"),
+  TABLE_LOCALITY_GROUP_PREFIX("table.group.", null, PropertyType.PREFIX,
+      "Properties in this category are per-table properties that define locality groups in a table. These properties start "
+          + "with the category prefix, followed by a name, followed by a period, and followed by a property for that group.<br />"
+          + "For example table.group.group1=x,y,z sets the column families for a group called group1. Once configured, "
+          + "group1 can be enabled by adding it to the list of groups in the " + TABLE_LOCALITY_GROUPS.getKey() + " property.<br />"
+          + "Additional group options may be specified for a named group by setting table.group.&lt;name&gt;.opt.&lt;key&gt;=&lt;value&gt;."),
+  TABLE_FORMATTER_CLASS("table.formatter", "org.apache.accumulo.core.util.format.DefaultFormatter", PropertyType.STRING,
+      "The Formatter class to apply on results in the shell");
+  
+  // The property key string, the default value used when unset, and the documentation text.
+  private String key, defaultValue, description;
+  // Expected value format for this property (duration, memory, port, etc.).
+  private PropertyType type;
+  
+  /**
+   * @param name full property key, e.g. "tserver.port.client"
+   * @param defaultValue default value used when the property is not set (null for prefix entries)
+   * @param type expected value format for this property
+   * @param description human-readable documentation for this property
+   */
+  private Property(String name, String defaultValue, PropertyType type, String description) {
+    this.key = name;
+    this.defaultValue = defaultValue;
+    this.description = description;
+    this.type = type;
+  }
+  
+  /** Returns the property key string (same value as {@link #getKey()}). */
+  @Override
+  public String toString() {
+    return this.key;
+  }
+  
+  /** Returns the full property key, e.g. "table.split.threshold". */
+  public String getKey() {
+    return this.key;
+  }
+  
+  /** Returns the default value for this property, or null for prefix entries. */
+  public String getDefaultValue() {
+    return this.defaultValue;
+  }
+  
+  /** Returns the expected value format for this property. */
+  public PropertyType getType() {
+    return this.type;
+  }
+  
+  /** Returns the human-readable documentation for this property. */
+  public String getDescription() {
+    return this.description;
+  }
+  
+  // Lazily-built cache of all non-prefix keys under the "table." prefix. Declared volatile so the
+  // double-checked locking in isValidTablePropertyKey() is safe under the Java Memory Model:
+  // without volatile, another thread could observe a non-null reference to a HashSet whose
+  // contents have not yet been published.
+  private static volatile HashSet<String> validTableProperties = null;
+  
+  /**
+   * Determines whether the given key names a per-table property: either one of the concrete
+   * "table." properties defined by this enum, or any key under the constraint, iterator, or
+   * locality-group prefixes.
+   *
+   * @param key property key to check
+   * @return true if the key is a valid per-table property key
+   */
+  public static boolean isValidTablePropertyKey(String key) {
+    if (validTableProperties == null) {
+      synchronized (Property.class) {
+        if (validTableProperties == null) {
+          // Build into a local set first, then publish via the volatile field, so readers never
+          // see a partially-filled set.
+          HashSet<String> tmp = new HashSet<String>();
+          for (Property p : Property.values())
+            if (!p.getType().equals(PropertyType.PREFIX) && p.getKey().startsWith(Property.TABLE_PREFIX.getKey()))
+              tmp.add(p.getKey());
+          validTableProperties = tmp;
+        }
+      }
+    }
+    
+    return validTableProperties.contains(key) || key.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey())
+        || key.startsWith(Property.TABLE_ITERATOR_PREFIX.getKey()) || key.startsWith(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey());
+  }
+  
+  // Properties whose values may not be changed in zookeeper after initialization.
+  private static final EnumSet<Property> fixedProperties = EnumSet.of(Property.TSERV_CLIENTPORT, Property.TSERV_NATIVEMAP_ENABLED,
+      Property.TSERV_SCAN_MAX_OPENFILES, Property.MASTER_CLIENTPORT, Property.GC_PORT);
+  
+  /**
+   * @param key property to check
+   * @return true if the property's zookeeper value is fixed and may not be changed at runtime
+   */
+  public static boolean isFixedZooPropertyKey(Property key) {
+    return fixedProperties.contains(key);
+  }
+  
+  /**
+   * Returns the set of properties whose zookeeper values are fixed. Note: this is the internal
+   * set, not a defensive copy — callers must not modify it.
+   */
+  public static Set<Property> getFixedProperties() {
+    return fixedProperties;
+  }
+  
+  /**
+   * Determines whether a property key may be stored in zookeeper, by checking it against a
+   * white list of permitted prefixes.
+   *
+   * @param key property key to check
+   * @return true if the key falls under a zookeeper-settable prefix
+   */
+  public static boolean isValidZooPropertyKey(String key) {
+    // white list prefixes
+    return key.startsWith(Property.TABLE_PREFIX.getKey()) || key.startsWith(Property.TSERV_PREFIX.getKey()) || key.startsWith(Property.LOGGER_PREFIX.getKey())
+        || key.startsWith(Property.MASTER_PREFIX.getKey()) || key.startsWith(Property.GC_PREFIX.getKey())
+        || key.startsWith(Property.MONITOR_PREFIX.getKey() + "banner.");
+  }
+  
+  /**
+   * Looks up the enum constant whose key exactly matches the given string.
+   *
+   * @param key property key to look up
+   * @return the matching Property, or null if no constant has that key
+   */
+  public static Property getPropertyByKey(String key) {
+    for (Property prop : Property.values())
+      if (prop.getKey().equals(key))
+        return prop;
+    return null;
+  }
+  
+  /**
+   * Determines whether the given key names a property whose value is expected to be a fully
+   * qualified Java class name: a direct table constraint (table.constraint.N), a direct table
+   * iterator (table.iterator.scope.name — but not its .opt. sub-keys), or the table load
+   * balancer.
+   *
+   * @param key property key to check
+   * @return true if this is a property whose value is expected to be a java class
+   */
+  public static boolean isClassProperty(String key) {
+    return (key.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey()) && key.substring(Property.TABLE_CONSTRAINT_PREFIX.getKey().length()).split("\\.").length == 1)
+        || (key.startsWith(Property.TABLE_ITERATOR_PREFIX.getKey()) && key.substring(Property.TABLE_ITERATOR_PREFIX.getKey().length()).split("\\.").length == 2)
+        || key.equals(Property.TABLE_LOAD_BALANCER.getKey());
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java b/1.5/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
new file mode 100644
index 0000000..67bd6c4
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.conf;
+
+import java.util.regex.Pattern;
+
/**
 * Types of values accepted for Accumulo configuration properties. Each type carries a
 * short display name, a validation regex, and a human-readable description of the
 * expected format. PREFIX is a marker type with no value format of its own.
 */
public enum PropertyType {
  PREFIX(null, null, null),
  
  TIMEDURATION("duration", "\\d{1," + Long.toString(Long.MAX_VALUE).length() + "}(?:ms|s|m|h|d)?",
      "A non-negative integer optionally followed by a unit of time (whitespace disallowed), as in 30s.<br />"
          + "If no unit of time is specified, seconds are assumed. Valid units are 'ms', 's', 'm', 'h' for milliseconds, seconds, minutes, and hours.<br />"
          + "Examples of valid durations are '600', '30s', '45m', '30000ms', '3d', and '1h'.<br />"
          + "Examples of invalid durations are '1w', '1h30m', '1s 200ms', 'ms', '', and 'a'.<br />"
          + "Unless otherwise stated, the max value for the duration represented in milliseconds is " + Long.MAX_VALUE),
  DATETIME("date/time", "(?:19|20)\\d{12}[A-Z]{3}", "A date/time string in the format: YYYYMMDDhhmmssTTT where TTT is the 3 character time zone"),
  MEMORY("memory", "\\d{1," + Long.toString(Long.MAX_VALUE).length() + "}(?:B|K|M|G)?",
      "A positive integer optionally followed by a unit of memory (whitespace disallowed), as in 2G.<br />"
          + "If no unit is specified, bytes are assumed. Valid units are 'B', 'K', 'M', 'G', for bytes, kilobytes, megabytes, and gigabytes.<br />"
          + "Examples of valid memories are '1024', '20B', '100K', '1500M', '2G'.<br />"
          // fixed broken markup: was "<br .>"
          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G', '1,024K', '', and 'a'.<br />"
          + "Unless otherwise stated, the max value for the memory represented in bytes is " + Long.MAX_VALUE),
  
  HOSTLIST("host list", "[\\w-]+(?:\\.[\\w-]+)*(?:\\:\\d{1,5})?(?:,[\\w-]+(?:\\.[\\w-]+)*(?:\\:\\d{1,5})?)*",
      "A comma-separated list of hostnames or ip addresses, with optional port numbers.<br />"
          + "Examples of valid host lists are 'localhost:2000,www.example.com,10.10.1.1:500' and 'localhost'.<br />"
          + "Examples of invalid host lists are '', ':1000', and 'localhost:80000'"),
  
  PORT("port", "\\d{1,5}", "A positive integer in the range 1024-65535, not already in use or specified elsewhere in the configuration"),
  COUNT("count", "\\d{1,10}", "A non-negative integer in the range of 0-" + Integer.MAX_VALUE),
  FRACTION("fraction/percentage", "\\d*(?:\\.\\d+)?%?",
      "A floating point number that represents either a fraction or, if suffixed with the '%' character, a percentage.<br />"
          + "Examples of valid fractions/percentages are '10', '1000%', '0.05', '5%', '0.2%', '0.0005'.<br />"
          + "Examples of invalid fractions/percentages are '', '10 percent', 'Hulk Hogan'"),
  
  PATH("path", ".*",
      "A string that represents a filesystem path, which can be either relative or absolute to some directory. The filesystem depends on the property."),
  ABSOLUTEPATH("absolute path", "[/].*",
      "An absolute filesystem path. The filesystem depends on the property. This is the same as path, but enforces that its root is explicitly specified."),
  
  CLASSNAME("java class", "[\\w$.]*", "A fully qualified java class name representing a class on the classpath.<br />"
      + "An example is 'java.lang.String', rather than 'String'"),
  
  STRING("string", ".*",
      "An arbitrary string of characters whose format is unspecified and interpreted based on the context of the property to which it applies."),
  BOOLEAN("boolean", "(?:true|false)", "Has a value of either 'true' or 'false'"),
  URI("uri", ".*", "A valid URI");
  
  // Display name and format description; regex is null only for PREFIX.
  private final String shortname, format;
  private final Pattern regex;
  
  private PropertyType(String shortname, String regex, String formatDescription) {
    this.shortname = shortname;
    this.format = formatDescription;
    this.regex = regex == null ? null : Pattern.compile(regex, Pattern.DOTALL);
  }
  
  /** Returns the short display name, e.g. "duration"; null for PREFIX. */
  @Override
  public String toString() {
    return shortname;
  }
  
  /** Returns the human-readable description of this type's expected format. */
  String getFormatDescription() {
    return format;
  }
  
  /**
   * Checks whether a value matches this type's expected format.
   *
   * Previously this threw a NullPointerException when exactly one of the regex or the
   * value was null (e.g. PREFIX with a non-null value, or a typed property with a null
   * value); those cases now return false.
   *
   * @param value the candidate property value (may be null)
   * @return true if the value is acceptable for this type
   */
  public boolean isValidFormat(String value) {
    if (regex == null)
      return value == null; // PREFIX defines no value format
    return value != null && regex.matcher(value).matches();
  }
}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java b/1.5/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
new file mode 100644
index 0000000..a1ad300
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.conf;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.log4j.Logger;
+
+public class SiteConfiguration extends AccumuloConfiguration {
+  private static final Logger log = Logger.getLogger(SiteConfiguration.class);
+  
+  private static AccumuloConfiguration parent = null;
+  private static SiteConfiguration instance = null;
+  
+  private static Configuration xmlConfig;
+  
+  private SiteConfiguration(AccumuloConfiguration parent) {
+    SiteConfiguration.parent = parent;
+  }
+  
+  public static SiteConfiguration getInstance(AccumuloConfiguration parent) {
+    if (instance == null) {
+      instance = new SiteConfiguration(parent);
+      ConfigSanityCheck.validate(instance);
+    }
+    return instance;
+  }
+  
+  private static Configuration getXmlConfig() {
+    String configFile = System.getProperty("org.apache.accumulo.config.file", "accumulo-site.xml");
+    if (xmlConfig == null) {
+      xmlConfig = new Configuration(false);
+      
+      if (SiteConfiguration.class.getClassLoader().getResource(configFile) == null)
+        log.warn(configFile + " not found on classpath");
+      else
+        xmlConfig.addResource(configFile);
+    }
+    return xmlConfig;
+  }
+  
+  @Override
+  public String get(Property property) {
+    String key = property.getKey();
+    
+    String value = getXmlConfig().get(key);
+    
+    if (value == null || !property.getType().isValidFormat(value)) {
+      if (value != null)
+        log.error("Using default value for " + key + " due to improperly formatted " + property.getType() + ": " + value);
+      value = parent.get(property);
+    }
+    return value;
+  }
+  
+  @Override
+  public Iterator<Entry<String,String>> iterator() {
+    TreeMap<String,String> entries = new TreeMap<String,String>();
+    
+    for (Entry<String,String> parentEntry : parent)
+      entries.put(parentEntry.getKey(), parentEntry.getValue());
+    
+    for (Entry<String,String> siteEntry : getXmlConfig())
+      entries.put(siteEntry.getKey(), siteEntry.getValue());
+    
+    return entries.entrySet().iterator();
+  }
+  
+  /**
+   * method here to support testing, do not call
+   */
+  public void clear() {
+    getXmlConfig().clear();
+  }
+  
+  /**
+   * method here to support testing, do not call
+   */
+  public void set(Property property, String value) {
+    set(property.getKey(), value);
+  }
+  
+  /**
+   * method here to support testing, do not call
+   */
+  public void set(String key, String value) {
+    getXmlConfig().set(key, value);
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/constraints/Constraint.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/constraints/Constraint.java b/1.5/core/src/main/java/org/apache/accumulo/core/constraints/Constraint.java
new file mode 100644
index 0000000..c7fb070
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/constraints/Constraint.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.constraints;
+
+import java.util.List;
+
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.security.Authorizations;
+
/**
 * Accumulo uses Constraint objects to determine if mutations will be applied to a table.
 * 
 * This interface expects implementers to return violation codes. The reason codes are returned instead of arbitrary strings is to encourage conciseness.
 * Conciseness is needed because violations are aggregated. If a user sends a batch of 10,000 mutations to accumulo, only aggregated counts about which
 * violations occurred are returned.
 * 
 * If the Constraint implementer were allowed to return arbitrary violation strings like the following:
 * 
 * Value "abc" is not a number Value "vbg" is not a number
 * 
 * then the violations would not aggregate well, because the same violation would be represented with two different strings.
 */

public interface Constraint {
  
  /**
   * The context in which a mutation is evaluated: the tablet extent it falls in and the
   * identity and authorizations of the user who submitted it.
   */
  public interface Environment {
    KeyExtent getExtent();
    
    String getUser();
    
    Authorizations getAuthorizations();
  }
  
  /**
   * Implementers of this method should return a short one sentence description of what a given violation code means.
   * 
   * @param violationCode a code previously returned by {@link #check}
   * @return a short, human-readable description of the violation
   */
  
  String getViolationDescription(short violationCode);
  
  /**
   * Checks a mutation for constraint violations. If the mutation contains no violations, then the implementation should return null. Otherwise it should return
   * a list of violation codes.
   * 
   * Violation codes must be non negative. Negative violation codes are reserved for system use.
   * 
   * @param env the server-side environment in which the mutation is being applied
   * @param mutation the mutation to check
   * @return null if there are no violations, otherwise a list of violation codes
   */
  
  List<Short> check(Environment env, Mutation mutation);
}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/constraints/Violations.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/constraints/Violations.java b/1.5/core/src/main/java/org/apache/accumulo/core/constraints/Violations.java
new file mode 100644
index 0000000..fe41d54
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/constraints/Violations.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.constraints;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.data.ConstraintViolationSummary;
+
+public class Violations {
+  
+  private static class CVSKey {
+    private String className;
+    private short vcode;
+    
+    CVSKey(ConstraintViolationSummary cvs) {
+      this.className = cvs.constrainClass;
+      this.vcode = cvs.violationCode;
+    }
+    
+    @Override
+    public int hashCode() {
+      return className.hashCode() + vcode;
+    }
+    
+    @Override
+    public boolean equals(Object o) {
+      if (o instanceof CVSKey)
+        return equals((CVSKey) o);
+      return false;
+    }
+    
+    public boolean equals(CVSKey ocvsk) {
+      return className.equals(ocvsk.className) && vcode == ocvsk.vcode;
+    }
+  }
+  
+  private HashMap<CVSKey,ConstraintViolationSummary> cvsmap;
+  
+  public Violations() {
+    cvsmap = new HashMap<CVSKey,ConstraintViolationSummary>();
+  }
+  
+  public boolean isEmpty() {
+    return cvsmap.isEmpty();
+  }
+  
+  private void add(CVSKey cvsk, ConstraintViolationSummary cvs) {
+    ConstraintViolationSummary existingCvs = cvsmap.get(cvsk);
+    
+    if (existingCvs == null) {
+      cvsmap.put(cvsk, cvs);
+    } else {
+      existingCvs.numberOfViolatingMutations += cvs.numberOfViolatingMutations;
+    }
+  }
+  
+  public void add(ConstraintViolationSummary cvs) {
+    CVSKey cvsk = new CVSKey(cvs);
+    add(cvsk, cvs);
+  }
+  
+  public void add(Violations violations) {
+    Set<Entry<CVSKey,ConstraintViolationSummary>> es = violations.cvsmap.entrySet();
+    
+    for (Entry<CVSKey,ConstraintViolationSummary> entry : es) {
+      add(entry.getKey(), entry.getValue());
+    }
+    
+  }
+  
+  public void add(List<ConstraintViolationSummary> cvsList) {
+    for (ConstraintViolationSummary constraintViolationSummary : cvsList) {
+      add(constraintViolationSummary);
+    }
+    
+  }
+  
+  public List<ConstraintViolationSummary> asList() {
+    return new ArrayList<ConstraintViolationSummary>(cvsmap.values());
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java b/1.5/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java
new file mode 100644
index 0000000..d44a7a6
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.data;
+
+import java.io.Serializable;
+
+public class ArrayByteSequence extends ByteSequence implements Serializable {
+  
+  private static final long serialVersionUID = 1L;
+
+  protected byte data[];
+  protected int offset;
+  protected int length;
+  
+  public ArrayByteSequence(byte data[]) {
+    this.data = data;
+    this.offset = 0;
+    this.length = data.length;
+  }
+  
+  public ArrayByteSequence(byte data[], int offset, int length) {
+    
+    if (offset < 0 || offset > data.length || length < 0 || (offset + length) > data.length) {
+      throw new IllegalArgumentException(" Bad offset and/or length data.length = " + data.length + " offset = " + offset + " length = " + length);
+    }
+    
+    this.data = data;
+    this.offset = offset;
+    this.length = length;
+    
+  }
+  
+  public ArrayByteSequence(String s) {
+    this(s.getBytes());
+  }
+  
+  @Override
+  public byte byteAt(int i) {
+    
+    if (i < 0) {
+      throw new IllegalArgumentException("i < 0, " + i);
+    }
+    
+    if (i >= length) {
+      throw new IllegalArgumentException("i >= length, " + i + " >= " + length);
+    }
+    
+    return data[offset + i];
+  }
+  
+  @Override
+  public byte[] getBackingArray() {
+    return data;
+  }
+  
+  @Override
+  public boolean isBackedByArray() {
+    return true;
+  }
+  
+  @Override
+  public int length() {
+    return length;
+  }
+  
+  @Override
+  public int offset() {
+    return offset;
+  }
+  
+  @Override
+  public ByteSequence subSequence(int start, int end) {
+    
+    if (start > end || start < 0 || end > length) {
+      throw new IllegalArgumentException("Bad start and/end start = " + start + " end=" + end + " offset=" + offset + " length=" + length);
+    }
+    
+    return new ArrayByteSequence(data, offset + start, end - start);
+  }
+  
+  @Override
+  public byte[] toArray() {
+    if (offset == 0 && length == data.length)
+      return data;
+    
+    byte[] copy = new byte[length];
+    System.arraycopy(data, offset, copy, 0, length);
+    return copy;
+  }
+  
+  public String toString() {
+    return new String(data, offset, length);
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/data/ByteSequence.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/data/ByteSequence.java b/1.5/core/src/main/java/org/apache/accumulo/core/data/ByteSequence.java
new file mode 100644
index 0000000..4dc921c
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/data/ByteSequence.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.data;
+
+import org.apache.hadoop.io.WritableComparator;
+
+public abstract class ByteSequence implements Comparable<ByteSequence> {
+  
+  public abstract byte byteAt(int i);
+  
+  public abstract int length();
+  
+  public abstract ByteSequence subSequence(int start, int end);
+  
+  // may copy data
+  public abstract byte[] toArray();
+  
+  public abstract boolean isBackedByArray();
+  
+  public abstract byte[] getBackingArray();
+  
+  public abstract int offset();
+  
+  public static int compareBytes(ByteSequence bs1, ByteSequence bs2) {
+    
+    int minLen = Math.min(bs1.length(), bs2.length());
+    
+    for (int i = 0; i < minLen; i++) {
+      int a = (bs1.byteAt(i) & 0xff);
+      int b = (bs2.byteAt(i) & 0xff);
+      
+      if (a != b) {
+        return a - b;
+      }
+    }
+    
+    return bs1.length() - bs2.length();
+  }
+  
+  public int compareTo(ByteSequence obs) {
+    if (isBackedByArray() && obs.isBackedByArray()) {
+      return WritableComparator.compareBytes(getBackingArray(), offset(), length(), obs.getBackingArray(), obs.offset(), obs.length());
+    }
+    
+    return compareBytes(this, obs);
+  }
+  
+  @Override
+  public boolean equals(Object o) {
+    if (o instanceof ByteSequence) {
+      ByteSequence obs = (ByteSequence) o;
+      
+      if (this == o)
+        return true;
+      
+      if (length() != obs.length())
+        return false;
+      
+      return compareTo(obs) == 0;
+    }
+    
+    return false;
+    
+  }
+  
+  @Override
+  public int hashCode() {
+    int hash = 1;
+    if (isBackedByArray()) {
+      byte[] data = getBackingArray();
+      int end = offset() + length();
+      for (int i = offset(); i < end; i++)
+        hash = (31 * hash) + data[i];
+    } else {
+      for (int i = 0; i < length(); i++)
+        hash = (31 * hash) + byteAt(i);
+    }
+    return hash;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/data/Column.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/data/Column.java b/1.5/core/src/main/java/org/apache/accumulo/core/data/Column.java
new file mode 100644
index 0000000..46de01f
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/data/Column.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Autogenerated by Thrift
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ */
+package org.apache.accumulo.core.data;
+
+import static org.apache.accumulo.core.util.ByteBufferUtil.toBytes;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.accumulo.core.data.thrift.TColumn;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.WritableComparator;
+
/**
 * A (family, qualifier, visibility) triple identifying a column, without row or
 * timestamp. Any of the three components may be null. Implements Hadoop's
 * WritableComparable so instances can be serialized and ordered.
 */
public class Column implements WritableComparable<Column> {
  
  // Null-safe lexicographic comparison; null sorts before any non-null array.
  static private int compareBytes(byte[] a, byte[] b) {
    if (a == null && b == null)
      return 0;
    if (a == null)
      return -1;
    if (b == null)
      return 1;
    return WritableComparator.compareBytes(a, 0, a.length, b, 0, b.length);
  }
  
  // Orders by family, then qualifier, then visibility (each null-first).
  public int compareTo(Column that) {
    int result;
    result = compareBytes(this.columnFamily, that.columnFamily);
    if (result != 0)
      return result;
    result = compareBytes(this.columnQualifier, that.columnQualifier);
    if (result != 0)
      return result;
    return compareBytes(this.columnVisibility, that.columnVisibility);
  }
  
  // Deserializes the three fields. Each field is a presence boolean followed, when
  // present, by an int length and that many bytes. Must mirror write() exactly.
  public void readFields(DataInput in) throws IOException {
    if (in.readBoolean()) {
      int len = in.readInt();
      columnFamily = new byte[len];
      in.readFully(columnFamily);
    } else {
      columnFamily = null;
    }
    
    if (in.readBoolean()) {
      int len = in.readInt();
      columnQualifier = new byte[len];
      in.readFully(columnQualifier);
    } else {
      columnQualifier = null;
    }
    
    if (in.readBoolean()) {
      int len = in.readInt();
      columnVisibility = new byte[len];
      in.readFully(columnVisibility);
    } else {
      columnVisibility = null;
    }
  }
  
  // Serializes the three fields in the format described on readFields().
  @Override
  public void write(DataOutput out) throws IOException {
    if (columnFamily == null) {
      out.writeBoolean(false);
    } else {
      out.writeBoolean(true);
      out.writeInt(columnFamily.length);
      out.write(columnFamily);
    }
    
    if (columnQualifier == null) {
      out.writeBoolean(false);
    } else {
      out.writeBoolean(true);
      out.writeInt(columnQualifier.length);
      out.write(columnQualifier);
    }
    
    if (columnVisibility == null) {
      out.writeBoolean(false);
    } else {
      out.writeBoolean(true);
      out.writeInt(columnVisibility.length);
      out.write(columnVisibility);
    }
  }
  
  // Public mutable fields; readFields() reassigns them during deserialization.
  public byte[] columnFamily;
  public byte[] columnQualifier;
  public byte[] columnVisibility;
  
  public Column() {}
  
  public Column(byte[] columnFamily, byte[] columnQualifier, byte[] columnVisibility) {
    this();
    this.columnFamily = columnFamily;
    this.columnQualifier = columnQualifier;
    this.columnVisibility = columnVisibility;
  }
  
  // Converts from the Thrift representation; null ByteBuffers become null arrays.
  public Column(TColumn tcol) {
    this(toBytes(tcol.columnFamily), toBytes(tcol.columnQualifier), toBytes(tcol.columnVisibility));
  }
  
  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof Column)
      return this.equals((Column) that);
    return false;
  }
  
  // Assumes a non-null argument; the equals(Object) overload guards the null case.
  public boolean equals(Column that) {
    return this.compareTo(that) == 0;
  }
  
  private static int hash(byte[] b) {
    if (b == null)
      return 0;
    
    return WritableComparator.hashBytes(b, b.length);
  }
  
  // Sum of per-field hashes so that equal columns (per compareTo) hash equally.
  @Override
  public int hashCode() {
    return hash(columnFamily) + hash(columnQualifier) + hash(columnVisibility);
  }
  
  public byte[] getColumnFamily() {
    return columnFamily;
  }
  
  public byte[] getColumnQualifier() {
    return columnQualifier;
  }
  
  public byte[] getColumnVisibility() {
    return columnVisibility;
  }
  
  // Renders as family:qualifier:visibility, with null fields shown as empty.
  // NOTE(review): new String(byte[]) uses the platform default charset — confirm UTF-8
  // is acceptable here if this output is ever parsed.
  public String toString() {
    return new String(columnFamily == null ? new byte[0] : columnFamily) + ":" + new String(columnQualifier == null ? new byte[0] : columnQualifier) + ":"
        + new String(columnVisibility == null ? new byte[0] : columnVisibility);
  }
  
  // Converts to the Thrift representation; null arrays become null ByteBuffers.
  public TColumn toThrift() {
    return new TColumn(columnFamily == null ? null : ByteBuffer.wrap(columnFamily), columnQualifier == null ? null : ByteBuffer.wrap(columnQualifier),
        columnVisibility == null ? null : ByteBuffer.wrap(columnVisibility));
  }
  
}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java b/1.5/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java
new file mode 100644
index 0000000..ffe0b0f
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.data;
+
+import java.util.Arrays;
+
/**
 * A single column and value pair within a mutation. Holds the column coordinates, the
 * value, the delete flag, and the (possibly user-supplied) timestamp, plus a reference
 * into the mutation's serialized buffer so the system can stamp a server-assigned
 * timestamp in place.
 */
public class ColumnUpdate {
  
  private byte[] columnFamily;
  private byte[] columnQualifier;
  private byte[] columnVisibility;
  private long timestamp;
  // True when the user supplied the timestamp explicitly; such timestamps must not be
  // overwritten by the system.
  private boolean hasTimestamp;
  private byte[] val;
  // Serialized mutation buffer this update came from; the timestamp lives at tsOffset.
  private byte[] data;
  private int tsOffset;
  private boolean deleted;
  
  /**
   * @param cf column family
   * @param cq column qualifier
   * @param cv column visibility
   * @param hasts true if the timestamp was set explicitly by the user
   * @param ts the timestamp (meaningful when hasts is true, or after setSystemTimestamp)
   * @param deleted true if this update is a delete marker
   * @param val the value
   * @param data the serialized mutation buffer backing this update
   * @param tsOffset offset of the 8-byte timestamp within data
   */
  public ColumnUpdate(byte[] cf, byte[] cq, byte[] cv, boolean hasts, long ts, boolean deleted, byte[] val, byte[] data, int tsOffset) {
    this.columnFamily = cf;
    this.columnQualifier = cq;
    this.columnVisibility = cv;
    this.hasTimestamp = hasts;
    this.timestamp = ts;
    this.deleted = deleted;
    this.val = val;
    this.data = data;
    this.tsOffset = tsOffset;
  }
  
  /**
   * Assigns a server-side timestamp, writing it big-endian into the serialized mutation
   * buffer at tsOffset and caching it in this object.
   * 
   * @param v the system timestamp to assign
   * @throws IllegalStateException if the user already supplied a timestamp
   */
  public void setSystemTimestamp(long v) {
    if (hasTimestamp)
      throw new IllegalStateException("Cannot set system timestamp when user set a timestamp");
    
    // Write the long big-endian, most significant byte first.
    int tso = this.tsOffset;
    data[tso++] = (byte) (v >>> 56);
    data[tso++] = (byte) (v >>> 48);
    data[tso++] = (byte) (v >>> 40);
    data[tso++] = (byte) (v >>> 32);
    data[tso++] = (byte) (v >>> 24);
    data[tso++] = (byte) (v >>> 16);
    data[tso++] = (byte) (v >>> 8);
    data[tso++] = (byte) (v >>> 0);
    
    this.timestamp = v;
  }
  
  /** @return true if the user supplied this update's timestamp explicitly */
  public boolean hasTimestamp() {
    return hasTimestamp;
  }
  
  /** @return the column family */
  public byte[] getColumnFamily() {
    return columnFamily;
  }
  
  /** @return the column qualifier */
  public byte[] getColumnQualifier() {
    return columnQualifier;
  }
  
  /** @return the column visibility */
  public byte[] getColumnVisibility() {
    return columnVisibility;
  }
  
  /** @return the timestamp (meaningful when hasTimestamp() or after setSystemTimestamp) */
  public long getTimestamp() {
    return this.timestamp;
  }
  
  /** @return true if this update is a delete marker */
  public boolean isDeleted() {
    return this.deleted;
  }
  
  /** @return the value */
  public byte[] getValue() {
    return this.val;
  }
  
  // The redundant new String(...) wrappers around Arrays.toString were removed; output
  // is unchanged.
  @Override
  public String toString() {
    return Arrays.toString(columnFamily) + ":" + Arrays.toString(columnQualifier) + " ["
        + Arrays.toString(columnVisibility) + "] " + (hasTimestamp ? timestamp : "NO_TIME_STAMP") + " " + Arrays.toString(val) + " " + deleted;
  }
}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/data/ComparableBytes.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/data/ComparableBytes.java b/1.5/core/src/main/java/org/apache/accumulo/core/data/ComparableBytes.java
new file mode 100644
index 0000000..ce61844
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/data/ComparableBytes.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.data;
+
+import org.apache.hadoop.io.BinaryComparable;
+
+public class ComparableBytes extends BinaryComparable {
+  
+  public byte[] data;
+  
+  public ComparableBytes(byte[] b) {
+    this.data = b;
+  }
+  
+  public byte[] getBytes() {
+    return data;
+  }
+  
+  @Override
+  public int getLength() {
+    return data.length;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/data/ConstraintViolationSummary.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/data/ConstraintViolationSummary.java b/1.5/core/src/main/java/org/apache/accumulo/core/data/ConstraintViolationSummary.java
new file mode 100644
index 0000000..2929bc6
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/data/ConstraintViolationSummary.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.data;
+
+import org.apache.accumulo.core.data.thrift.TConstraintViolationSummary;
+
+public class ConstraintViolationSummary {
+  
+  public String constrainClass;
+  public short violationCode;
+  public String violationDescription;
+  public long numberOfViolatingMutations;
+  
+  public ConstraintViolationSummary(String constrainClass, short violationCode, String violationDescription, long numberOfViolatingMutations) {
+    this.constrainClass = constrainClass;
+    this.violationCode = violationCode;
+    this.violationDescription = violationDescription;
+    this.numberOfViolatingMutations = numberOfViolatingMutations;
+  }
+  
+  public ConstraintViolationSummary(TConstraintViolationSummary tcvs) {
+    this(tcvs.constrainClass, tcvs.violationCode, tcvs.violationDescription, tcvs.numberOfViolatingMutations);
+  }
+  
+  public String getConstrainClass() {
+    return this.constrainClass;
+  }
+  
+  public short getViolationCode() {
+    return this.violationCode;
+  }
+  
+  public String getViolationDescription() {
+    return this.violationDescription;
+  }
+  
+  public long getNumberOfViolatingMutations() {
+    return this.numberOfViolatingMutations;
+  }
+  
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ConstraintViolationSummary(");
+    boolean first = true;
+    
+    sb.append("constrainClass:");
+    if (this.constrainClass == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.constrainClass);
+    }
+    first = false;
+    if (!first)
+      sb.append(", ");
+    sb.append("violationCode:");
+    sb.append(this.violationCode);
+    first = false;
+    if (!first)
+      sb.append(", ");
+    sb.append("violationDescription:");
+    if (this.violationDescription == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.violationDescription);
+    }
+    first = false;
+    if (!first)
+      sb.append(", ");
+    sb.append("numberOfViolatingMutations:");
+    sb.append(this.numberOfViolatingMutations);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+  
+  public TConstraintViolationSummary toThrift() {
+    return new TConstraintViolationSummary(this.constrainClass, violationCode, violationDescription, numberOfViolatingMutations);
+  }
+  
+}


Mime
View raw message