accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [19/50] [abbrv] Merge branch '1.5' into 1.6
Date Sat, 01 Nov 2014 04:57:13 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
index cf1f0ea,0000000..da83386
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
@@@ -1,407 -1,0 +1,409 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce.lib.impl;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.io.IOException;
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.ClientConfiguration;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.ZooKeeperInstance;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 +import org.apache.accumulo.core.security.Credentials;
 +import org.apache.accumulo.core.util.ArgumentChecker;
 +import org.apache.accumulo.core.util.Base64;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FSDataInputStream;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.util.StringUtils;
 +import org.apache.log4j.Level;
 +import org.apache.log4j.Logger;
 +
 +/**
 + * @since 1.6.0
 + */
 +public class ConfiguratorBase {
 +
 +  /**
 +   * Configuration keys for {@link Instance#getConnector(String, AuthenticationToken)}.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum ConnectorInfo {
 +    IS_CONFIGURED, PRINCIPAL, TOKEN,
 +  }
 +
 +  public static enum TokenSource {
 +    FILE, INLINE;
 +
 +    private String prefix;
 +
 +    private TokenSource() {
 +      prefix = name().toLowerCase() + ":";
 +    }
 +
 +    public String prefix() {
 +      return prefix;
 +    }
 +  }
 +
 +  /**
 +   * Configuration keys for {@link Instance}, {@link ZooKeeperInstance}, and {@link MockInstance}.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum InstanceOpts {
 +    TYPE, NAME, ZOO_KEEPERS, CLIENT_CONFIG;
 +  }
 +
 +  /**
 +   * Configuration keys for general configuration options.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum GeneralOpts {
 +    LOG_LEVEL,
 +    VISIBILITY_CACHE_SIZE
 +  }
 +
 +  /**
 +   * Provides a configuration key for a given feature enum, prefixed by the implementingClass.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param e
 +   *          the enum used to provide the unique part of the configuration key
 +   * @return the configuration key
 +   * @since 1.6.0
 +   */
 +  protected static String enumToConfKey(Class<?> implementingClass, Enum<?> e) {
 +    return implementingClass.getSimpleName() + "." + e.getDeclaringClass().getSimpleName() + "." + StringUtils.camelize(e.name().toLowerCase());
 +  }
 +
 +  /**
 +   * Provides a configuration key for a given feature enum.
 +   * 
 +   * @param e
 +   *          the enum used to provide the unique part of the configuration key
 +   * @return the configuration key
 +   */
 +  protected static String enumToConfKey(Enum<?> e) {
 +    return e.getDeclaringClass().getSimpleName() + "." + StringUtils.camelize(e.name().toLowerCase());
 +  }
 +
 +  /**
 +   * Sets the connector information needed to communicate with Accumulo in this job.
 +   * 
 +   * <p>
 +   * <b>WARNING:</b> The serialized token is stored in the configuration and shared with all MapReduce tasks. It is Base64 encoded to provide a charset-safe
 +   * conversion to a string, and is not intended to be secure.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param principal
 +   *          a valid Accumulo user name
 +   * @param token
 +   *          the user's authentication token
 +   * @since 1.6.0
 +   */
 +  public static void setConnectorInfo(Class<?> implementingClass, Configuration conf, String principal, AuthenticationToken token)
 +      throws AccumuloSecurityException {
 +    if (isConnectorInfoSet(implementingClass, conf))
 +      throw new IllegalStateException("Connector info for " + implementingClass.getSimpleName() + " can only be set once per job");
 +
 +    ArgumentChecker.notNull(principal, token);
 +    conf.setBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), true);
 +    conf.set(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL), principal);
 +    conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN),
 +        TokenSource.INLINE.prefix() + token.getClass().getName() + ":" + Base64.encodeBase64String(AuthenticationTokenSerializer.serialize(token)));
 +  }
 +
 +  /**
 +   * Sets the connector information needed to communicate with Accumulo in this job.
 +   * 
 +   * <p>
 +   * Pulls a token file, which contains the authentication token, into the Distributed Cache in an attempt to be more secure than storing the password in the
 +   * Configuration. The token file is created with "bin/accumulo create-token".
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param principal
 +   *          a valid Accumulo user name
 +   * @param tokenFile
 +   *          the path to the token file in DFS
 +   * @since 1.6.0
 +   */
 +  public static void setConnectorInfo(Class<?> implementingClass, Configuration conf, String principal, String tokenFile) throws AccumuloSecurityException {
 +    if (isConnectorInfoSet(implementingClass, conf))
 +      throw new IllegalStateException("Connector info for " + implementingClass.getSimpleName() + " can only be set once per job");
 +
 +    ArgumentChecker.notNull(principal, tokenFile);
 +
 +    try {
 +      DistributedCacheHelper.addCacheFile(new URI(tokenFile), conf);
 +    } catch (URISyntaxException e) {
 +      throw new IllegalStateException("Unable to add tokenFile \"" + tokenFile + "\" to distributed cache.");
 +    }
 +
 +    conf.setBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), true);
 +    conf.set(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL), principal);
 +    conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN), TokenSource.FILE.prefix() + tokenFile);
 +  }
 +
 +  /**
 +   * Determines if the connector info has already been set for this instance.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return true if the connector info has already been set, false otherwise
 +   * @since 1.6.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   */
 +  public static Boolean isConnectorInfoSet(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), false);
 +  }
 +
 +  /**
 +   * Gets the user name from the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the principal
 +   * @since 1.6.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   */
 +  public static String getPrincipal(Class<?> implementingClass, Configuration conf) {
 +    return conf.get(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL));
 +  }
 +
 +  /**
 +   * Gets the authentication token from either the specified token file or directly from the configuration, whichever was used when the job was configured.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the principal's authentication token
 +   * @since 1.6.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   * @see #setConnectorInfo(Class, Configuration, String, String)
 +   */
 +  public static AuthenticationToken getAuthenticationToken(Class<?> implementingClass, Configuration conf) {
 +    String token = conf.get(enumToConfKey(implementingClass, ConnectorInfo.TOKEN));
 +    if (token == null || token.isEmpty())
 +      return null;
 +    if (token.startsWith(TokenSource.INLINE.prefix())) {
 +      String[] args = token.substring(TokenSource.INLINE.prefix().length()).split(":", 2);
 +      if (args.length == 2)
-         return AuthenticationTokenSerializer.deserialize(args[0], Base64.decodeBase64(args[1].getBytes(Constants.UTF8)));
++        return AuthenticationTokenSerializer.deserialize(args[0], Base64.decodeBase64(args[1].getBytes(UTF_8)));
 +    } else if (token.startsWith(TokenSource.FILE.prefix())) {
 +      String tokenFileName = token.substring(TokenSource.FILE.prefix().length());
 +      return getTokenFromFile(conf, getPrincipal(implementingClass, conf), tokenFileName);
 +    }
 +
 +    throw new IllegalStateException("Token was not properly serialized into the configuration");
 +  }
 +
 +  /**
 +   * Reads from the token file in the distributed cache. Currently, the token file stores data separated by colons, e.g. principal:token_class:token
 +   * 
 +   * @param conf
 +   *          the Hadoop context for the configured job
 +   * @return the principal's authentication token read from the token file
 +   * @since 1.6.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   */
 +  public static AuthenticationToken getTokenFromFile(Configuration conf, String principal, String tokenFile) {
 +    FSDataInputStream in = null;
 +    try {
 +      URI[] uris = DistributedCacheHelper.getCacheFiles(conf);
 +      Path path = null;
 +      for (URI u : uris) {
 +        if (u.toString().equals(tokenFile)) {
 +          path = new Path(u);
 +        }
 +      }
 +      if (path == null) {
 +        throw new IllegalArgumentException("Couldn't find password file called \"" + tokenFile + "\" in cache.");
 +      }
 +      FileSystem fs = FileSystem.get(conf);
 +      in = fs.open(path);
 +    } catch (IOException e) {
 +      throw new IllegalArgumentException("Couldn't open password file called \"" + tokenFile + "\".");
 +    }
 +    java.util.Scanner fileScanner = new java.util.Scanner(in);
 +    try {
 +      while (fileScanner.hasNextLine()) {
 +        Credentials creds = Credentials.deserialize(fileScanner.nextLine());
 +        if (principal.equals(creds.getPrincipal())) {
 +          return creds.getToken();
 +        }
 +      }
 +      throw new IllegalArgumentException("Couldn't find token for user \"" + principal + "\" in file \"" + tokenFile + "\"");
 +    } finally {
 +      if (fileScanner != null && fileScanner.ioException() == null)
 +        fileScanner.close();
 +      else if (fileScanner.ioException() != null)
 +        throw new RuntimeException(fileScanner.ioException());
 +    }
 +  }
 +
 +  /**
 +   * Configures a {@link ZooKeeperInstance} for this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param clientConfig
 +   *          client configuration for specifying connection timeouts, SSL connection options, etc.
 +   * @since 1.6.0
 +   */
 +  public static void setZooKeeperInstance(Class<?> implementingClass, Configuration conf, ClientConfiguration clientConfig) {
 +    String key = enumToConfKey(implementingClass, InstanceOpts.TYPE);
 +    if (!conf.get(key, "").isEmpty())
 +      throw new IllegalStateException("Instance info can only be set once per job; it has already been configured with " + conf.get(key));
 +    conf.set(key, "ZooKeeperInstance");
 +    if (clientConfig != null) {
 +      conf.set(enumToConfKey(implementingClass, InstanceOpts.CLIENT_CONFIG), clientConfig.serialize());
 +    }
 +  }
 +
 +  /**
 +   * Configures a {@link MockInstance} for this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param instanceName
 +   *          the Accumulo instance name
 +   * @since 1.6.0
 +   */
 +  public static void setMockInstance(Class<?> implementingClass, Configuration conf, String instanceName) {
 +    String key = enumToConfKey(implementingClass, InstanceOpts.TYPE);
 +    if (!conf.get(key, "").isEmpty())
 +      throw new IllegalStateException("Instance info can only be set once per job; it has already been configured with " + conf.get(key));
 +    conf.set(key, "MockInstance");
 +
 +    ArgumentChecker.notNull(instanceName);
 +    conf.set(enumToConfKey(implementingClass, InstanceOpts.NAME), instanceName);
 +  }
 +
 +  /**
 +   * Initializes an Accumulo {@link Instance} based on the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return an Accumulo instance
 +   * @since 1.6.0
 +   * @see #setZooKeeperInstance(Class, Configuration, ClientConfiguration)
 +   * @see #setMockInstance(Class, Configuration, String)
 +   */
 +  public static Instance getInstance(Class<?> implementingClass, Configuration conf) {
 +    String instanceType = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE), "");
 +    if ("MockInstance".equals(instanceType))
 +      return new MockInstance(conf.get(enumToConfKey(implementingClass, InstanceOpts.NAME)));
 +    else if ("ZooKeeperInstance".equals(instanceType)) {
 +      String clientConfigString = conf.get(enumToConfKey(implementingClass, InstanceOpts.CLIENT_CONFIG));
 +      if (clientConfigString == null) {
 +        String instanceName = conf.get(enumToConfKey(implementingClass, InstanceOpts.NAME));
 +        String zookeepers = conf.get(enumToConfKey(implementingClass, InstanceOpts.ZOO_KEEPERS));
 +        return new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zookeepers));
 +      } else {
 +        return new ZooKeeperInstance(ClientConfiguration.deserialize(clientConfigString));
 +      }
 +    } else if (instanceType.isEmpty())
 +      throw new IllegalStateException("Instance has not been configured for " + implementingClass.getSimpleName());
 +    else
 +      throw new IllegalStateException("Unrecognized instance type " + instanceType);
 +  }
 +
 +  /**
 +   * Sets the log level for this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param level
 +   *          the logging level
 +   * @since 1.6.0
 +   */
 +  public static void setLogLevel(Class<?> implementingClass, Configuration conf, Level level) {
 +    ArgumentChecker.notNull(level);
 +    Logger.getLogger(implementingClass).setLevel(level);
 +    conf.setInt(enumToConfKey(implementingClass, GeneralOpts.LOG_LEVEL), level.toInt());
 +  }
 +
 +  /**
 +   * Gets the log level from this configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the log level
 +   * @since 1.6.0
 +   * @see #setLogLevel(Class, Configuration, Level)
 +   */
 +  public static Level getLogLevel(Class<?> implementingClass, Configuration conf) {
 +    return Level.toLevel(conf.getInt(enumToConfKey(implementingClass, GeneralOpts.LOG_LEVEL), Level.INFO.toInt()));
 +  }
 +
 +  /**
 +   * Sets the visibility cache size for this job.
 +   * 
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param visibilityCacheSize
 +   *          the LRU cache size
 +   */
 +  public static void setVisibilityCacheSize(Configuration conf, int visibilityCacheSize) {
 +    conf.setInt(enumToConfKey(GeneralOpts.VISIBILITY_CACHE_SIZE), visibilityCacheSize);
 +  }
 +
 +  /**
 +   * Gets the visibility cache size for this job.
 +   * 
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the visibility cache size
 +   */
 +  public static int getVisibilityCacheSize(Configuration conf) {
 +    return conf.getInt(enumToConfKey(GeneralOpts.VISIBILITY_CACHE_SIZE), Constants.DEFAULT_VISIBILITY_CACHE_SIZE);
 +  }
 +
 +}
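
For context, a minimal usage sketch of the static helpers above (not part of the diff). In practice AccumuloInputFormat/AccumuloOutputFormat make these calls for you; the class name, principal, password, and instance name below are placeholders, and a MockInstance is used only to keep the sketch self-contained:

import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.hadoop.conf.Configuration;

public class ConfiguratorBaseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Store the principal and a Base64-serialized token under keys prefixed by the calling class.
    ConfiguratorBase.setConnectorInfo(ConfiguratorBaseSketch.class, conf, "someUser", new PasswordToken("somePass"));

    // A MockInstance keeps the sketch self-contained; a real job would instead call
    // setZooKeeperInstance(..., ClientConfiguration.loadDefault().withInstance(...).withZkHosts(...)).
    ConfiguratorBase.setMockInstance(ConfiguratorBaseSketch.class, conf, "someInstance");

    // Later (e.g. in a task), the same class token recovers what was stored.
    String principal = ConfiguratorBase.getPrincipal(ConfiguratorBaseSketch.class, conf);
    Instance instance = ConfiguratorBase.getInstance(ConfiguratorBaseSketch.class, conf);
    System.out.println(principal + " @ " + instance.getInstanceName());
  }
}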

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
index 1ed23e3,0000000..f8a5d03
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
@@@ -1,795 -1,0 +1,795 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce.lib.impl;
 +
++import static com.google.common.base.Charsets.UTF_8;
 +import static org.apache.accumulo.core.util.ArgumentChecker.notNull;
 +
 +import java.io.ByteArrayInputStream;
 +import java.io.ByteArrayOutputStream;
 +import java.io.DataInputStream;
 +import java.io.DataOutputStream;
 +import java.io.IOException;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +import java.util.StringTokenizer;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.ClientSideIteratorScanner;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.IsolatedScanner;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.RowIterator;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.impl.Tables;
 +import org.apache.accumulo.core.client.impl.TabletLocator;
 +import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
 +import org.apache.accumulo.core.client.mock.impl.MockTabletLocator;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.PartialKey;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.TablePermission;
 +import org.apache.accumulo.core.util.Base64;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.core.util.TextUtil;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.io.MapWritable;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.io.Writable;
 +import org.apache.hadoop.util.StringUtils;
 +
 +import com.google.common.collect.Maps;
 +
 +/**
 + * @since 1.6.0
 + */
 +public class InputConfigurator extends ConfiguratorBase {
 +
 +  /**
 +   * Configuration keys for {@link Scanner}.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum ScanOpts {
 +    TABLE_NAME, AUTHORIZATIONS, RANGES, COLUMNS, ITERATORS, TABLE_CONFIGS
 +  }
 +
 +  /**
 +   * Configuration keys for various features.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum Features {
 +    AUTO_ADJUST_RANGES, SCAN_ISOLATION, USE_LOCAL_ITERATORS, SCAN_OFFLINE
 +  }
 +
 +  /**
 +   * Sets the name of the input table over which this job will scan.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param tableName
 +   *          the name of the input table to scan over
 +   * @since 1.6.0
 +   */
 +  public static void setInputTableName(Class<?> implementingClass, Configuration conf, String tableName) {
 +    notNull(tableName);
 +    conf.set(enumToConfKey(implementingClass, ScanOpts.TABLE_NAME), tableName);
 +  }
 +
 +  /**
 +   * Gets the name of the input table over which this job will scan.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @since 1.6.0
 +   */
 +  public static String getInputTableName(Class<?> implementingClass, Configuration conf) {
 +    return conf.get(enumToConfKey(implementingClass, ScanOpts.TABLE_NAME));
 +  }
 +
 +  /**
 +   * Sets the {@link Authorizations} used to scan. Must be a subset of the user's authorizations. Defaults to the empty set.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param auths
 +   *          the user's authorizations
 +   * @since 1.6.0
 +   */
 +  public static void setScanAuthorizations(Class<?> implementingClass, Configuration conf, Authorizations auths) {
 +    if (auths != null && !auths.isEmpty())
 +      conf.set(enumToConfKey(implementingClass, ScanOpts.AUTHORIZATIONS), auths.serialize());
 +  }
 +
 +  /**
 +   * Gets the authorizations to set for the scans from the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the Accumulo scan authorizations
 +   * @since 1.6.0
 +   * @see #setScanAuthorizations(Class, Configuration, Authorizations)
 +   */
 +  public static Authorizations getScanAuthorizations(Class<?> implementingClass, Configuration conf) {
 +    String authString = conf.get(enumToConfKey(implementingClass, ScanOpts.AUTHORIZATIONS));
-     return authString == null ? Authorizations.EMPTY : new Authorizations(authString.getBytes(Constants.UTF8));
++    return authString == null ? Authorizations.EMPTY : new Authorizations(authString.getBytes(UTF_8));
 +  }
 +
 +  /**
 +   * Sets the input ranges to scan on all input tables for this job. If not set, the entire table will be scanned.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param ranges
 +   *          the ranges that will be mapped over
 +   * @throws IllegalArgumentException
 +   *           if the ranges cannot be encoded into base 64
 +   * @since 1.6.0
 +   */
 +  public static void setRanges(Class<?> implementingClass, Configuration conf, Collection<Range> ranges) {
 +    notNull(ranges);
 +
 +    ArrayList<String> rangeStrings = new ArrayList<String>(ranges.size());
 +    try {
 +      for (Range r : ranges) {
 +        ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +        r.write(new DataOutputStream(baos));
 +        rangeStrings.add(Base64.encodeBase64String(baos.toByteArray()));
 +      }
 +      conf.setStrings(enumToConfKey(implementingClass, ScanOpts.RANGES), rangeStrings.toArray(new String[0]));
 +    } catch (IOException ex) {
 +      throw new IllegalArgumentException("Unable to encode ranges to Base64", ex);
 +    }
 +  }
 +
 +  /**
 +   * Gets the ranges to scan over from a job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the ranges
 +   * @throws IOException
 +   *           if the ranges have been encoded improperly
 +   * @since 1.6.0
 +   * @see #setRanges(Class, Configuration, Collection)
 +   */
 +  public static List<Range> getRanges(Class<?> implementingClass, Configuration conf) throws IOException {
 +
 +    Collection<String> encodedRanges = conf.getStringCollection(enumToConfKey(implementingClass, ScanOpts.RANGES));
 +    List<Range> ranges = new ArrayList<Range>();
 +    for (String rangeString : encodedRanges) {
 +      ByteArrayInputStream bais = new ByteArrayInputStream(Base64.decodeBase64(rangeString.getBytes()));
 +      Range range = new Range();
 +      range.readFields(new DataInputStream(bais));
 +      ranges.add(range);
 +    }
 +    return ranges;
 +  }
 +
 +  /**
 +   * Gets a list of the iterator settings (for iterators to apply to a scanner) from this configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return a list of iterators
 +   * @since 1.6.0
 +   * @see #addIterator(Class, Configuration, IteratorSetting)
 +   */
 +  public static List<IteratorSetting> getIterators(Class<?> implementingClass, Configuration conf) {
 +    String iterators = conf.get(enumToConfKey(implementingClass, ScanOpts.ITERATORS));
 +
 +    // If no iterators are present, return an empty list
 +    if (iterators == null || iterators.isEmpty())
 +      return new ArrayList<IteratorSetting>();
 +
 +    // Compose the set of iterators encoded in the job configuration
 +    StringTokenizer tokens = new StringTokenizer(iterators, StringUtils.COMMA_STR);
 +    List<IteratorSetting> list = new ArrayList<IteratorSetting>();
 +    try {
 +      while (tokens.hasMoreTokens()) {
 +        String itstring = tokens.nextToken();
 +        ByteArrayInputStream bais = new ByteArrayInputStream(Base64.decodeBase64(itstring.getBytes()));
 +        list.add(new IteratorSetting(new DataInputStream(bais)));
 +        bais.close();
 +      }
 +    } catch (IOException e) {
 +      throw new IllegalArgumentException("couldn't decode iterator settings");
 +    }
 +    return list;
 +  }
 +
 +  /**
 +   * Restricts the columns that will be mapped over for the single input table on this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param columnFamilyColumnQualifierPairs
 +   *          a pair of {@link Text} objects corresponding to column family and column qualifier. If the column qualifier is null, the entire column family is
 +   *          selected. An empty set is the default and is equivalent to scanning all the columns.
 +   * @throws IllegalArgumentException
 +   *           if the column family is null
 +   * @since 1.6.0
 +   */
 +  public static void fetchColumns(Class<?> implementingClass, Configuration conf, Collection<Pair<Text,Text>> columnFamilyColumnQualifierPairs) {
 +    notNull(columnFamilyColumnQualifierPairs);
 +    String[] columnStrings = serializeColumns(columnFamilyColumnQualifierPairs);
 +    conf.setStrings(enumToConfKey(implementingClass, ScanOpts.COLUMNS), columnStrings);
 +  }
 +
 +  public static String[] serializeColumns(Collection<Pair<Text,Text>> columnFamilyColumnQualifierPairs) {
 +    notNull(columnFamilyColumnQualifierPairs);
 +    ArrayList<String> columnStrings = new ArrayList<String>(columnFamilyColumnQualifierPairs.size());
 +    for (Pair<Text,Text> column : columnFamilyColumnQualifierPairs) {
 +
 +      if (column.getFirst() == null)
 +        throw new IllegalArgumentException("Column family can not be null");
 +
 +      String col = Base64.encodeBase64String(TextUtil.getBytes(column.getFirst()));
 +      if (column.getSecond() != null)
 +        col += ":" + Base64.encodeBase64String(TextUtil.getBytes(column.getSecond()));
 +      columnStrings.add(col);
 +    }
 +
 +    return columnStrings.toArray(new String[0]);
 +  }
 +
 +  /**
 +   * Gets the columns to be mapped over from this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return a set of columns
 +   * @since 1.6.0
 +   * @see #fetchColumns(Class, Configuration, Collection)
 +   */
 +  public static Set<Pair<Text,Text>> getFetchedColumns(Class<?> implementingClass, Configuration conf) {
 +    notNull(conf);
 +    String confValue = conf.get(enumToConfKey(implementingClass, ScanOpts.COLUMNS));
 +    List<String> serialized = new ArrayList<String>();
 +    if (confValue != null) {
 +      // Split and include any trailing empty strings to allow empty column families
 +      for (String val : confValue.split(",", -1)) {
 +        serialized.add(val);
 +      }
 +    }
 +    return deserializeFetchedColumns(serialized);
 +  }
 +
 +  public static Set<Pair<Text,Text>> deserializeFetchedColumns(Collection<String> serialized) {
 +    Set<Pair<Text,Text>> columns = new HashSet<Pair<Text,Text>>();
 +
 +    if (null == serialized) {
 +      return columns;
 +    }
 +
 +    for (String col : serialized) {
 +      int idx = col.indexOf(":");
-       Text cf = new Text(idx < 0 ? Base64.decodeBase64(col.getBytes(Constants.UTF8)) : Base64.decodeBase64(col.substring(0, idx).getBytes(Constants.UTF8)));
-       Text cq = idx < 0 ? null : new Text(Base64.decodeBase64(col.substring(idx + 1).getBytes(Constants.UTF8)));
++      Text cf = new Text(idx < 0 ? Base64.decodeBase64(col.getBytes(UTF_8)) : Base64.decodeBase64(col.substring(0, idx).getBytes(UTF_8)));
++      Text cq = idx < 0 ? null : new Text(Base64.decodeBase64(col.substring(idx + 1).getBytes(UTF_8)));
 +      columns.add(new Pair<Text,Text>(cf, cq));
 +    }
 +    return columns;
 +  }
 +
 +  /**
 +   * Encode an iterator on the input for the single input table associated with this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param cfg
 +   *          the configuration of the iterator
 +   * @throws IllegalArgumentException
 +   *           if the iterator can't be serialized into the configuration
 +   * @since 1.6.0
 +   */
 +  public static void addIterator(Class<?> implementingClass, Configuration conf, IteratorSetting cfg) {
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    String newIter;
 +    try {
 +      cfg.write(new DataOutputStream(baos));
 +      newIter = Base64.encodeBase64String(baos.toByteArray());
 +      baos.close();
 +    } catch (IOException e) {
 +      throw new IllegalArgumentException("unable to serialize IteratorSetting");
 +    }
 +
 +    String confKey = enumToConfKey(implementingClass, ScanOpts.ITERATORS);
 +    String iterators = conf.get(confKey);
 +    // No iterators specified yet, create a new string
 +    if (iterators == null || iterators.isEmpty()) {
 +      iterators = newIter;
 +    } else {
 +      // append the next iterator & reset
 +      iterators = iterators.concat(StringUtils.COMMA_STR + newIter);
 +    }
 +    // Store the iterators w/ the job
 +    conf.set(confKey, iterators);
 +  }
 +
 +  /**
 +   * Controls the automatic adjustment of ranges for this job. This feature merges overlapping ranges, then splits them to align with tablet boundaries.
 +   * Disabling this feature will cause exactly one Map task to be created for each specified range.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>enabled</b>.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @see #setRanges(Class, Configuration, Collection)
 +   * @since 1.6.0
 +   */
 +  public static void setAutoAdjustRanges(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
 +    conf.setBoolean(enumToConfKey(implementingClass, Features.AUTO_ADJUST_RANGES), enableFeature);
 +  }
 +
 +  /**
 +   * Determines whether a configuration has auto-adjust ranges enabled.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return false if the feature is disabled, true otherwise
 +   * @since 1.6.0
 +   * @see #setAutoAdjustRanges(Class, Configuration, boolean)
 +   */
 +  public static Boolean getAutoAdjustRanges(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, Features.AUTO_ADJUST_RANGES), true);
 +  }
 +
 +  /**
 +   * Controls the use of the {@link IsolatedScanner} in this job.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>disabled</b>.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @since 1.6.0
 +   */
 +  public static void setScanIsolation(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
 +    conf.setBoolean(enumToConfKey(implementingClass, Features.SCAN_ISOLATION), enableFeature);
 +  }
 +
 +  /**
 +   * Determines whether a configuration has isolation enabled.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return true if the feature is enabled, false otherwise
 +   * @since 1.6.0
 +   * @see #setScanIsolation(Class, Configuration, boolean)
 +   */
 +  public static Boolean isIsolated(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, Features.SCAN_ISOLATION), false);
 +  }
 +
 +  /**
 +   * Controls the use of the {@link ClientSideIteratorScanner} in this job. Enabling this feature will cause the iterator stack to be constructed within the Map
 +   * task, rather than within the Accumulo TServer. To use this feature, all classes needed for those iterators must be available on the classpath for the task.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>disabled</b>.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @since 1.6.0
 +   */
 +  public static void setLocalIterators(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
 +    conf.setBoolean(enumToConfKey(implementingClass, Features.USE_LOCAL_ITERATORS), enableFeature);
 +  }
 +
 +  /**
 +   * Determines whether a configuration uses local iterators.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return true if the feature is enabled, false otherwise
 +   * @since 1.6.0
 +   * @see #setLocalIterators(Class, Configuration, boolean)
 +   */
 +  public static Boolean usesLocalIterators(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, Features.USE_LOCAL_ITERATORS), false);
 +  }
 +
 +  /**
 +   * <p>
 +   * Enable reading offline tables. By default, this feature is disabled and only online tables are scanned. This will make the map reduce job directly read the
 +   * table's files. If the table is not offline, then the job will fail. If the table comes online during the map reduce job, it is likely that the job will
 +   * fail.
 +   * 
 +   * <p>
 +   * To use this option, the map reduce user will need access to read the Accumulo directory in HDFS.
 +   * 
 +   * <p>
 +   * Reading the offline table will create the scan time iterator stack in the map process. So any iterators that are configured for the table will need to be
 +   * on the mapper's classpath.
 +   * 
 +   * <p>
 +   * One way to use this feature is to clone a table, take the clone offline, and use the clone as the input table for a map reduce job. If you plan to map
 +   * reduce over the data many times, it may be better to compact the table, clone it, take it offline, and use the clone for all map reduce jobs. The
 +   * reason to do this is that compaction will reduce each tablet in the table to one file, and it is faster to read from one file.
 +   * 
 +   * <p>
 +   * There are two possible advantages to reading a table's files directly out of HDFS. First, you may see better read performance. Second, it will support
 +   * speculative execution better. When reading an online table, speculative execution can put more load on an already slow tablet server.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>disabled</b>.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @since 1.6.0
 +   */
 +  public static void setOfflineTableScan(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
 +    conf.setBoolean(enumToConfKey(implementingClass, Features.SCAN_OFFLINE), enableFeature);
 +  }
 +
 +  /**
 +   * Determines whether a configuration has the offline table scan feature enabled.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return true if the feature is enabled, false otherwise
 +   * @since 1.6.0
 +   * @see #setOfflineTableScan(Class, Configuration, boolean)
 +   */
 +  public static Boolean isOfflineScan(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, Features.SCAN_OFFLINE), false);
 +  }
 +
 +  /**
 +   * Sets configurations for multiple tables at a time.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param configs
 +   *          an array of {@link InputTableConfig} objects to associate with the job
 +   * @since 1.6.0
 +   */
 +  public static void setInputTableConfigs(Class<?> implementingClass, Configuration conf, Map<String,InputTableConfig> configs) {
 +    MapWritable mapWritable = new MapWritable();
 +    for (Map.Entry<String,InputTableConfig> tableConfig : configs.entrySet())
 +      mapWritable.put(new Text(tableConfig.getKey()), tableConfig.getValue());
 +
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    try {
 +      mapWritable.write(new DataOutputStream(baos));
 +    } catch (IOException e) {
 +      throw new IllegalStateException("Table configuration could not be serialized.");
 +    }
 +
 +    String confKey = enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS);
 +    conf.set(confKey, Base64.encodeBase64String(baos.toByteArray()));
 +  }
 +
 +  /**
 +   * Returns all {@link InputTableConfig} objects associated with this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return all of the table query configs for the job
 +   * @since 1.6.0
 +   */
 +  public static Map<String,InputTableConfig> getInputTableConfigs(Class<?> implementingClass, Configuration conf) {
 +    Map<String,InputTableConfig> configs = new HashMap<String,InputTableConfig>();
 +    Map.Entry<String,InputTableConfig> defaultConfig = getDefaultInputTableConfig(implementingClass, conf);
 +    if (defaultConfig != null)
 +      configs.put(defaultConfig.getKey(), defaultConfig.getValue());
 +    String configString = conf.get(enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS));
 +    MapWritable mapWritable = new MapWritable();
 +    if (configString != null) {
 +      try {
 +        byte[] bytes = Base64.decodeBase64(configString.getBytes());
 +        ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
 +        mapWritable.readFields(new DataInputStream(bais));
 +        bais.close();
 +      } catch (IOException e) {
 +        throw new IllegalStateException("The table query configurations could not be deserialized from the given configuration");
 +      }
 +    }
 +    for (Map.Entry<Writable,Writable> entry : mapWritable.entrySet())
 +      configs.put(((Text) entry.getKey()).toString(), (InputTableConfig) entry.getValue());
 +
 +    return configs;
 +  }
 +
 +  /**
 +   * Returns the {@link InputTableConfig} for the given table
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param tableName
 +   *          the table name for which to fetch the table query config
 +   * @return the table query config for the given table name (if it exists) and null if it does not
 +   * @since 1.6.0
 +   */
 +  public static InputTableConfig getInputTableConfig(Class<?> implementingClass, Configuration conf, String tableName) {
 +    Map<String,InputTableConfig> queryConfigs = getInputTableConfigs(implementingClass, conf);
 +    return queryConfigs.get(tableName);
 +  }
 +
 +  /**
 +   * Initializes an Accumulo {@link TabletLocator} based on the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param tableId
 +   *          The table id for which to initialize the {@link TabletLocator}
 +   * @return an Accumulo tablet locator
 +   * @throws TableNotFoundException
 +   *           if the table name set on the configuration doesn't exist
 +   * @since 1.6.0
 +   */
 +  public static TabletLocator getTabletLocator(Class<?> implementingClass, Configuration conf, String tableId) throws TableNotFoundException {
 +    String instanceType = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE));
 +    if ("MockInstance".equals(instanceType))
 +      return new MockTabletLocator();
 +    Instance instance = getInstance(implementingClass, conf);
 +    return TabletLocator.getLocator(instance, new Text(tableId));
 +  }
 +
 +  // InputFormat doesn't have the equivalent of OutputFormat's checkOutputSpecs(JobContext job)
 +  /**
 +   * Check whether a configuration is fully configured to be used with an Accumulo {@link org.apache.hadoop.mapreduce.InputFormat}.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @throws IOException
 +   *           if the context is improperly configured
 +   * @since 1.6.0
 +   */
 +  public static void validateOptions(Class<?> implementingClass, Configuration conf) throws IOException {
 +
 +    Map<String,InputTableConfig> inputTableConfigs = getInputTableConfigs(implementingClass, conf);
 +    if (!isConnectorInfoSet(implementingClass, conf))
 +      throw new IOException("Input info has not been set.");
 +    String instanceKey = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE));
 +    if (!"MockInstance".equals(instanceKey) && !"ZooKeeperInstance".equals(instanceKey))
 +      throw new IOException("Instance info has not been set.");
 +    // validate that we can connect as configured
 +    try {
 +      String principal = getPrincipal(implementingClass, conf);
 +      AuthenticationToken token = getAuthenticationToken(implementingClass, conf);
 +      Connector c = getInstance(implementingClass, conf).getConnector(principal, token);
 +      if (!c.securityOperations().authenticateUser(principal, token))
 +        throw new IOException("Unable to authenticate user");
 +
 +      if (getInputTableConfigs(implementingClass, conf).size() == 0)
 +        throw new IOException("No table set.");
 +
 +      for (Map.Entry<String,InputTableConfig> tableConfig : inputTableConfigs.entrySet()) {
 +        if (!c.securityOperations().hasTablePermission(getPrincipal(implementingClass, conf), tableConfig.getKey(), TablePermission.READ))
 +          throw new IOException("Unable to access table");
 +      }
 +      for (Map.Entry<String,InputTableConfig> tableConfigEntry : inputTableConfigs.entrySet()) {
 +        InputTableConfig tableConfig = tableConfigEntry.getValue();
 +        if (!tableConfig.shouldUseLocalIterators()) {
 +          if (tableConfig.getIterators() != null) {
 +            for (IteratorSetting iter : tableConfig.getIterators()) {
 +              if (!c.tableOperations().testClassLoad(tableConfigEntry.getKey(), iter.getIteratorClass(), SortedKeyValueIterator.class.getName()))
 +                throw new AccumuloException("Servers are unable to load " + iter.getIteratorClass() + " as a " + SortedKeyValueIterator.class.getName());
 +            }
 +          }
 +        }
 +      }
 +    } catch (AccumuloException e) {
 +      throw new IOException(e);
 +    } catch (AccumuloSecurityException e) {
 +      throw new IOException(e);
 +    } catch (TableNotFoundException e) {
 +      throw new IOException(e);
 +    }
 +  }
 +
 +  /**
 +   * Returns the {@link org.apache.accumulo.core.client.mapreduce.InputTableConfig} for the configuration based on the properties set using the single-table
 +   * input methods.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop instance for which to retrieve the configuration
 +   * @return the config object built from the single input table properties set on the job
 +   * @since 1.6.0
 +   */
 +  protected static Map.Entry<String,InputTableConfig> getDefaultInputTableConfig(Class<?> implementingClass, Configuration conf) {
 +    String tableName = getInputTableName(implementingClass, conf);
 +    if (tableName != null) {
 +      InputTableConfig queryConfig = new InputTableConfig();
 +      List<IteratorSetting> itrs = getIterators(implementingClass, conf);
 +      if (itrs != null)
 +        queryConfig.setIterators(itrs);
 +      Set<Pair<Text,Text>> columns = getFetchedColumns(implementingClass, conf);
 +      if (columns != null)
 +        queryConfig.fetchColumns(columns);
 +      List<Range> ranges = null;
 +      try {
 +        ranges = getRanges(implementingClass, conf);
 +      } catch (IOException e) {
 +        throw new RuntimeException(e);
 +      }
 +      if (ranges != null)
 +        queryConfig.setRanges(ranges);
 +
 +      queryConfig.setAutoAdjustRanges(getAutoAdjustRanges(implementingClass, conf)).setUseIsolatedScanners(isIsolated(implementingClass, conf))
 +          .setUseLocalIterators(usesLocalIterators(implementingClass, conf)).setOfflineScan(isOfflineScan(implementingClass, conf));
 +      return Maps.immutableEntry(tableName, queryConfig);
 +    }
 +    return null;
 +  }
 +
 +  public static Map<String,Map<KeyExtent,List<Range>>> binOffline(String tableId, List<Range> ranges, Instance instance, Connector conn)
 +      throws AccumuloException, TableNotFoundException {
 +    Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<String,Map<KeyExtent,List<Range>>>();
 +
 +    if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
 +      Tables.clearCache(instance);
 +      if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
 +        throw new AccumuloException("Table is online tableId:" + tableId + " cannot scan table in offline mode ");
 +      }
 +    }
 +
 +    for (Range range : ranges) {
 +      Text startRow;
 +
 +      if (range.getStartKey() != null)
 +        startRow = range.getStartKey().getRow();
 +      else
 +        startRow = new Text();
 +
 +      Range metadataRange = new Range(new KeyExtent(new Text(tableId), startRow, null).getMetadataEntry(), true, null, false);
 +      Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
 +      MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
 +      scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LastLocationColumnFamily.NAME);
 +      scanner.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
 +      scanner.fetchColumnFamily(MetadataSchema.TabletsSection.FutureLocationColumnFamily.NAME);
 +      scanner.setRange(metadataRange);
 +
 +      RowIterator rowIter = new RowIterator(scanner);
 +      KeyExtent lastExtent = null;
 +      while (rowIter.hasNext()) {
 +        Iterator<Map.Entry<Key,Value>> row = rowIter.next();
 +        String last = "";
 +        KeyExtent extent = null;
 +        String location = null;
 +
 +        while (row.hasNext()) {
 +          Map.Entry<Key,Value> entry = row.next();
 +          Key key = entry.getKey();
 +
 +          if (key.getColumnFamily().equals(MetadataSchema.TabletsSection.LastLocationColumnFamily.NAME)) {
 +            last = entry.getValue().toString();
 +          }
 +
 +          if (key.getColumnFamily().equals(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME)
 +              || key.getColumnFamily().equals(MetadataSchema.TabletsSection.FutureLocationColumnFamily.NAME)) {
 +            location = entry.getValue().toString();
 +          }
 +
 +          if (MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
 +            extent = new KeyExtent(key.getRow(), entry.getValue());
 +          }
 +
 +        }
 +
 +        if (location != null)
 +          return null;
 +
 +        if (!extent.getTableId().toString().equals(tableId)) {
 +          throw new AccumuloException("Saw unexpected table Id " + tableId + " " + extent);
 +        }
 +
 +        if (lastExtent != null && !extent.isPreviousExtent(lastExtent)) {
 +          throw new AccumuloException(" " + lastExtent + " is not previous extent " + extent);
 +        }
 +
 +        Map<KeyExtent,List<Range>> tabletRanges = binnedRanges.get(last);
 +        if (tabletRanges == null) {
 +          tabletRanges = new HashMap<KeyExtent,List<Range>>();
 +          binnedRanges.put(last, tabletRanges);
 +        }
 +
 +        List<Range> rangeList = tabletRanges.get(extent);
 +        if (rangeList == null) {
 +          rangeList = new ArrayList<Range>();
 +          tabletRanges.put(extent, rangeList);
 +        }
 +
 +        rangeList.add(range);
 +
 +        if (extent.getEndRow() == null || range.afterEndKey(new Key(extent.getEndRow()).followingKey(PartialKey.ROW))) {
 +          break;
 +        }
 +
 +        lastExtent = extent;
 +      }
 +
 +    }
 +    return binnedRanges;
 +  }
 +}
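
Similarly, a rough sketch of the input-side helpers above (again outside the diff). The table name, range, column family, and iterator are placeholders; normally AccumuloInputFormat's static setters drive these calls, but the methods only read and write the Hadoop Configuration, so they can be exercised directly:

import java.util.Collections;

import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;

public class InputConfiguratorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    InputConfigurator.setInputTableName(InputConfiguratorSketch.class, conf, "someTable");
    InputConfigurator.setScanAuthorizations(InputConfiguratorSketch.class, conf, new Authorizations("vis1"));

    // Ranges and fetched columns are Base64-encoded into the Configuration.
    InputConfigurator.setRanges(InputConfiguratorSketch.class, conf, Collections.singleton(new Range("a", "m")));
    InputConfigurator.fetchColumns(InputConfiguratorSketch.class, conf,
        Collections.singleton(new Pair<Text,Text>(new Text("cf"), null)));

    // Iterators accumulate in a comma-separated, Base64-encoded list.
    InputConfigurator.addIterator(InputConfiguratorSketch.class, conf,
        new IteratorSetting(50, "vers", "org.apache.accumulo.core.iterators.user.VersioningIterator"));

    // Read the settings back the way a RecordReader would.
    System.out.println(InputConfigurator.getRanges(InputConfiguratorSketch.class, conf));
    System.out.println(InputConfigurator.getIterators(InputConfiguratorSketch.class, conf));
  }
}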

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/OutputConfigurator.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/OutputConfigurator.java
index 0ba42cd,0000000..26680dc
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/OutputConfigurator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/OutputConfigurator.java
@@@ -1,204 -1,0 +1,205 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce.lib.impl;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.io.ByteArrayInputStream;
 +import java.io.ByteArrayOutputStream;
 +import java.io.DataInputStream;
 +import java.io.DataOutputStream;
 +import java.io.IOException;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.hadoop.conf.Configuration;
 +
 +/**
 + * @since 1.6.0
 + */
 +public class OutputConfigurator extends ConfiguratorBase {
 +
 +  /**
 +   * Configuration keys for {@link BatchWriter}.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum WriteOpts {
 +    DEFAULT_TABLE_NAME, BATCH_WRITER_CONFIG
 +  }
 +
 +  /**
 +   * Configuration keys for various features.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum Features {
 +    CAN_CREATE_TABLES, SIMULATION_MODE
 +  }
 +
 +  /**
 +   * Sets the default table name to use when a null table name is given for a mutation. Table names can only contain alphanumeric characters and
 +   * underscores.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param tableName
 +   *          the table to use when the table name given in the write call is null
 +   * @since 1.6.0
 +   */
 +  public static void setDefaultTableName(Class<?> implementingClass, Configuration conf, String tableName) {
 +    if (tableName != null)
 +      conf.set(enumToConfKey(implementingClass, WriteOpts.DEFAULT_TABLE_NAME), tableName);
 +  }
 +
 +  /**
 +   * Gets the default table name from the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the default table name
 +   * @since 1.6.0
 +   * @see #setDefaultTableName(Class, Configuration, String)
 +   */
 +  public static String getDefaultTableName(Class<?> implementingClass, Configuration conf) {
 +    return conf.get(enumToConfKey(implementingClass, WriteOpts.DEFAULT_TABLE_NAME));
 +  }
 +
 +  /**
 +   * Sets the configuration for the job's {@link BatchWriter} instances. If not set, a new {@link BatchWriterConfig}, with sensible built-in defaults, is
 +   * used. Setting the configuration multiple times overwrites any previous configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param bwConfig
 +   *          the configuration for the {@link BatchWriter}
 +   * @since 1.6.0
 +   */
 +  public static void setBatchWriterOptions(Class<?> implementingClass, Configuration conf, BatchWriterConfig bwConfig) {
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    String serialized;
 +    try {
 +      bwConfig.write(new DataOutputStream(baos));
-       serialized = new String(baos.toByteArray(), Constants.UTF8);
++      serialized = new String(baos.toByteArray(), UTF_8);
 +      baos.close();
 +    } catch (IOException e) {
 +      throw new IllegalArgumentException("unable to serialize " + BatchWriterConfig.class.getName());
 +    }
 +    conf.set(enumToConfKey(implementingClass, WriteOpts.BATCH_WRITER_CONFIG), serialized);
 +  }
 +
 +  /**
 +   * Gets the {@link BatchWriterConfig} settings.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the configuration object
 +   * @since 1.6.0
 +   * @see #setBatchWriterOptions(Class, Configuration, BatchWriterConfig)
 +   */
 +  public static BatchWriterConfig getBatchWriterOptions(Class<?> implementingClass, Configuration conf) {
 +    String serialized = conf.get(enumToConfKey(implementingClass, WriteOpts.BATCH_WRITER_CONFIG));
 +    BatchWriterConfig bwConfig = new BatchWriterConfig();
 +    if (serialized == null || serialized.isEmpty()) {
 +      return bwConfig;
 +    } else {
 +      try {
-         ByteArrayInputStream bais = new ByteArrayInputStream(serialized.getBytes(Constants.UTF8));
++        ByteArrayInputStream bais = new ByteArrayInputStream(serialized.getBytes(UTF_8));
 +        bwConfig.readFields(new DataInputStream(bais));
 +        bais.close();
 +        return bwConfig;
 +      } catch (IOException e) {
 +        throw new IllegalArgumentException("unable to deserialize " + BatchWriterConfig.class.getName());
 +      }
 +    }
 +  }
 +
 +  /**
 +   * Sets the directive to create new tables as necessary. Table names can only contain alphanumeric characters and underscores.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>disabled</b>.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @since 1.6.0
 +   */
 +  public static void setCreateTables(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
 +    conf.setBoolean(enumToConfKey(implementingClass, Features.CAN_CREATE_TABLES), enableFeature);
 +  }
 +
 +  /**
 +   * Determines whether tables are permitted to be created as needed.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return true if the feature is enabled, false otherwise
 +   * @since 1.6.0
 +   * @see #setCreateTables(Class, Configuration, boolean)
 +   */
 +  public static Boolean canCreateTables(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, Features.CAN_CREATE_TABLES), false);
 +  }
 +
 +  /**
 +   * Sets the directive to use simulation mode for this job. In simulation mode, no output is produced. This is useful for testing.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>disabled</b>.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @since 1.6.0
 +   */
 +  public static void setSimulationMode(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
 +    conf.setBoolean(enumToConfKey(implementingClass, Features.SIMULATION_MODE), enableFeature);
 +  }
 +
 +  /**
 +   * Determines whether simulation mode is enabled for this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return true if the feature is enabled, false otherwise
 +   * @since 1.6.0
 +   * @see #setSimulationMode(Class, Configuration, boolean)
 +   */
 +  public static Boolean getSimulationMode(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, Features.SIMULATION_MODE), false);
 +  }
 +
 +}
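
For reference, a minimal usage sketch of the configurator above: the setters are called while building the job's Configuration, and the getters are read back later. MyOutputFormat is a hypothetical caller class; any class works, since its name is only used to prefix the configuration keys.

    Configuration conf = new Configuration();
    BatchWriterConfig bwConfig = new BatchWriterConfig();
    bwConfig.setMaxMemory(10 * 1024 * 1024); // buffer roughly 10 MB of mutations before flushing

    OutputConfigurator.setDefaultTableName(MyOutputFormat.class, conf, "results");
    OutputConfigurator.setBatchWriterOptions(MyOutputFormat.class, conf, bwConfig);
    OutputConfigurator.setCreateTables(MyOutputFormat.class, conf, true);
    OutputConfigurator.setSimulationMode(MyOutputFormat.class, conf, false);

    // later, typically from the output format or its record writer
    String defaultTable = OutputConfigurator.getDefaultTableName(MyOutputFormat.class, conf);
    BatchWriterConfig restored = OutputConfigurator.getBatchWriterOptions(MyOutputFormat.class, conf);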

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
index 1541fae,0753282..9e26672
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
@@@ -26,11 -28,10 +28,10 @@@ import java.util.Arrays
  import java.util.Scanner;
  import java.util.TreeSet;
  
- import org.apache.accumulo.core.Constants;
 -import org.apache.commons.codec.binary.Base64;
 +import org.apache.accumulo.core.client.mapreduce.lib.impl.DistributedCacheHelper;
 +import org.apache.accumulo.core.util.Base64;
  import org.apache.hadoop.conf.Configurable;
  import org.apache.hadoop.conf.Configuration;
 -import org.apache.hadoop.filecache.DistributedCache;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.io.Writable;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
index 0d3aca2,d95a647..95fcf53
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
@@@ -16,20 -16,18 +16,21 @@@
   */
  package org.apache.accumulo.core.client.mock;
  
+ import static com.google.common.base.Charsets.UTF_8;
+ 
  import java.io.ByteArrayInputStream;
 -import java.io.File;
  import java.io.IOException;
  import java.io.InputStream;
 +import java.io.OutputStream;
  import java.io.Writer;
  
 -import jline.ConsoleReader;
 +import org.apache.commons.io.output.WriterOutputStream;
  
 -import org.apache.accumulo.core.util.shell.Shell;
  import org.apache.commons.cli.CommandLine;
 +import jline.console.ConsoleReader;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.util.shell.Shell;
 +import org.apache.accumulo.core.util.shell.ShellOptionsJC;
  
  /**
   * An Accumulo Shell implementation that allows a developer to attach an InputStream and Writer to the Shell for testing purposes.
@@@ -38,29 -36,12 +39,29 @@@ public class MockShell extends Shell 
    private static final String NEWLINE = "\n";
    
    protected InputStream in;
 -  protected Writer writer;
 -  
 -  public MockShell(InputStream in, Writer writer) throws IOException {
 +  protected OutputStream out;
 +
 +  /**
 +   * Will only be set if you use either the Writer constructor or the setConsoleWriter(Writer) method
 +   * @deprecated since 1.6.0; use out
 +   */
 +  @Deprecated
 +  protected Writer writer = null;
 +
 +  public MockShell(InputStream in, OutputStream out) throws IOException {
      super();
      this.in = in;
 -    this.writer = writer;
 +    this.out = out;
 +    // we presume they don't use the writer field unless they use the other constructor.
 +  }
 +
 +  /**
 +   * @deprecated since 1.6.0; use OutputStream version
 +   */
 +  @Deprecated
 +  public MockShell(InputStream in, Writer out) throws IOException {
-     this(in, new WriterOutputStream(out, Constants.UTF8.name()));
++    this(in, new WriterOutputStream(out, UTF_8.name()));
 +    this.writer = out;
    }
    
    public boolean config(String... args) {
@@@ -107,7 -79,7 +108,7 @@@
        printInfo();
      
      if (execFile != null) {
-       java.util.Scanner scanner = new java.util.Scanner(execFile, Constants.UTF8.name());
 -      java.util.Scanner scanner = new java.util.Scanner(new File(execFile), UTF_8.name());
++      java.util.Scanner scanner = new java.util.Scanner(execFile, UTF_8.name());
        try {
          while (scanner.hasNextLine() && !hasExited()) {
            execCommand(scanner.nextLine(), true, isVerbose());
@@@ -146,20 -118,11 +147,20 @@@
    }
    
    /**
 -   * @param writer
 -   *          the writer to set
 +   * @param out
 +   *          the output stream to set
 +   */
 +  public void setConsoleWriter(OutputStream out) {
 +    this.out = out;
 +  }
 +
 +  /**
 +   * @deprecated since 1.6.0; use the OutputStream version
     */
 -  public void setConsoleWriter(Writer writer) {
 -    this.writer = writer;
 +  @Deprecated
 +  public void setConsoleWriter(Writer out) {
-     setConsoleWriter(new WriterOutputStream(out, Constants.UTF8.name()));
++    setConsoleWriter(new WriterOutputStream(out, UTF_8.name()));
 +    this.writer = out;
    }
    
    /**

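A minimal test-style sketch of the OutputStream-based MockShell introduced above; the shell option names (--fake, -u, -p) and the use of start() to drain the queued commands are assumptions made for illustration.

    // assumes java.io.* imports, and that the surrounding test method may throw Exception
    public void exerciseMockShell() throws Exception {
      InputStream in = new ByteArrayInputStream("tables\nexit\n".getBytes(UTF_8));
      ByteArrayOutputStream captured = new ByteArrayOutputStream();
      MockShell shell = new MockShell(in, captured);            // OutputStream-based constructor
      shell.config("--fake", "-u", "testuser", "-p", "secret"); // run against a mock instance
      shell.start();                                            // executes the commands read from 'in'
      String output = captured.toString(UTF_8.name());          // everything the shell printed
      System.out.println(output);
    }
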
http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java
index bd9feba,05aa0c3..4d73979
--- a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java
@@@ -124,14 -123,10 +125,14 @@@ public class PasswordToken implements A
        throw new RuntimeException(e);
      }
    }
 -  
 -  private void setPassword(CharBuffer charBuffer) {
 +
 +  protected void setPassword(byte[] password) {
 +    this.password = Arrays.copyOf(password, password.length);
 +  }
 +
 +  protected void setPassword(CharBuffer charBuffer) {
      // encode() kicks back a C-string, which is not compatible with the old password system
-     ByteBuffer bb = Constants.UTF8.encode(charBuffer);
+     ByteBuffer bb = UTF_8.encode(charBuffer);
      // create array using byte buffer length
      this.password = new byte[bb.remaining()];
      bb.get(this.password);

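Because setPassword is now protected rather than private, token subclasses can reuse the copy-on-set behavior. A hypothetical subclass is sketched below; the class name and the environment variable are assumptions made for illustration.

    // assumes: import static com.google.common.base.Charsets.UTF_8;
    public class EnvPasswordToken extends PasswordToken {
      public EnvPasswordToken() {
        String fromEnv = System.getenv("ACCUMULO_PASSWORD"); // assumed variable name
        if (fromEnv != null)
          setPassword(fromEnv.getBytes(UTF_8));              // PasswordToken stores a defensive copy
      }
    }
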
http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java
index 41a1f3b,0000000..618ff7e
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java
@@@ -1,356 -1,0 +1,357 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.conf;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.io.FileNotFoundException;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.PrintStream;
 +import java.io.UnsupportedEncodingException;
 +import java.util.ArrayList;
 +import java.util.TreeMap;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.log4j.Logger;
 +
 +/**
 + * This class generates documentation to inform users of the available configuration properties in a presentable form.
 + */
 +class ConfigurationDocGen {
 +  private abstract class Format {
 +
 +    abstract void beginSection(String section);
 +
 +    void endSection() {}
 +
 +    void generate() {
 +      pageHeader();
 +
 +      beginSection("Available Properties");
 +      for (Property prefix : prefixes) {
 +        if (!prefix.isExperimental()) {
 +          prefixSection(prefix);
 +          for (Property prop : sortedProps.values()) {
 +            if (!prop.isExperimental()) {
 +              property(prefix, prop);
 +            }
 +          }
 +        }
 +      }
 +      endSection();
 +
 +      beginSection("Property Types");
 +      propertyTypeDescriptions();
 +      endSection();
 +
 +      pageFooter();
 +      doc.close();
 +    }
 +
 +    abstract String getExt();
 +
 +    void pageFooter() {}
 +
 +    // read static header content from resources and output
 +    void pageHeader() {
 +      appendResource("config-header." + getExt());
 +      doc.println();
 +    }
 +
 +    abstract void prefixSection(Property prefix);
 +
 +    abstract void property(Property prefix, Property prop);
 +
 +    abstract void propertyTypeDescriptions();
 +
 +    abstract String sanitize(String str);
 +
 +  }
 +
 +  private class HTML extends Format {
 +    private boolean highlight;
 +
 +    private void beginRow() {
 +      doc.println(startTag("tr", highlight ? "class='highlight'" : null));
 +      highlight = !highlight;
 +    }
 +
 +    @Override
 +    void beginSection(String section) {
 +      doc.println(t("h1", section, null));
 +      highlight = true;
 +      doc.println("<table>");
 +    }
 +
 +    private String t(String tag, String cell, String options) {
 +      return startTag(tag, options) + cell + "</" + tag + ">";
 +    }
 +
 +    private void cellData(String cell, String options) {
 +      doc.println(t("td", cell, options));
 +    }
 +
 +    private void columnNames(String... names) {
 +      beginRow();
 +      for (String s : names)
 +        doc.println(t("th", s, null));
 +      endRow();
 +    }
 +
 +    private void endRow() {
 +      doc.println("</tr>");
 +    }
 +
 +    @Override
 +    void endSection() {
 +      doc.println("</table>");
 +    }
 +
 +    @Override
 +    String getExt() {
 +      return "html";
 +    }
 +
 +    @Override
 +    void pageFooter() {
 +      doc.println("</body>");
 +      doc.println("</html>");
 +    }
 +
 +    @Override
 +    void pageHeader() {
 +      super.pageHeader();
 +      doc.println("<p>Jump to: ");
 +      String delimiter = "";
 +      for (Property prefix : prefixes) {
 +        if (!prefix.isExperimental()) {
 +          doc.print(delimiter + "<a href='#" + prefix.name() + "'>" + prefix.getKey() + "*</a>");
 +          delimiter = "&nbsp;|&nbsp;";
 +        }
 +      }
 +      doc.println("</p>");
 +    }
 +
 +    @Override
 +    void prefixSection(Property prefix) {
 +      beginRow();
 +      doc.println(t("td", t("span", prefix.getKey() + '*', "id='" + prefix.name() + "' class='large'"), "colspan='5'"
 +          + (prefix.isDeprecated() ? " class='deprecated'" : "")));
 +      endRow();
 +      beginRow();
 +      doc.println(t("td", (prefix.isDeprecated() ? t("b", t("i", "Deprecated. ", null), null) : "") + sanitize(prefix.getDescription()), "colspan='5'"
 +          + (prefix.isDeprecated() ? " class='deprecated'" : "")));
 +      endRow();
 +
 +      switch (prefix) {
 +        case TABLE_CONSTRAINT_PREFIX:
 +          break;
 +        case TABLE_ITERATOR_PREFIX:
 +          break;
 +        case TABLE_LOCALITY_GROUP_PREFIX:
 +          break;
 +        case TABLE_COMPACTION_STRATEGY_PREFIX:
 +          break;
 +        default:
 +          columnNames("Property", "Type", "ZooKeeper Mutable", "Default Value", "Description");
 +          break;
 +      }
 +    }
 +
 +    @Override
 +    void property(Property prefix, Property prop) {
 +      boolean isDeprecated = prefix.isDeprecated() || prop.isDeprecated();
 +      if (prop.getKey().startsWith(prefix.getKey())) {
 +        beginRow();
 +        cellData(prop.getKey(), isDeprecated ? "class='deprecated'" : null);
 +        cellData("<b><a href='#" + prop.getType().name() + "'>" + prop.getType().toString().replaceAll(" ", "&nbsp;") + "</a></b>",
 +            isDeprecated ? "class='deprecated'" : null);
 +        cellData(isZooKeeperMutable(prop), isDeprecated ? "class='deprecated'" : null);
 +        cellData("<pre>" + (prop.getRawDefaultValue().isEmpty() ? "&nbsp;" : sanitize(prop.getRawDefaultValue().replaceAll(" ", "&nbsp;"))) + "</pre>",
 +            isDeprecated ? "class='deprecated'" : null);
 +        cellData((isDeprecated ? "<b><i>Deprecated.</i></b> " : "") + sanitize(prop.getDescription()), isDeprecated ? "class='deprecated'" : null);
 +        endRow();
 +      }
 +
 +    }
 +
 +    @Override
 +    void propertyTypeDescriptions() {
 +      columnNames("Property Type", "Description");
 +      for (PropertyType type : PropertyType.values()) {
 +        if (type == PropertyType.PREFIX)
 +          continue;
 +        beginRow();
 +        cellData("<h3 id='" + type.name() + "'>" + type + "</h3>", null);
 +        cellData(type.getFormatDescription(), null);
 +        endRow();
 +      }
 +    }
 +
 +    @Override
 +    String sanitize(String str) {
 +      return str.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replaceAll("(?:\r\n|\r|\n)", "<br />");
 +    }
 +
 +    private String startTag(String tag, String options) {
 +      return "<" + tag + (options != null ? " " + options : "") + ">";
 +    }
 +
 +  }
 +
 +  private class LaTeX extends Format {
 +    @Override
 +    void beginSection(String section) {
 +      doc.println("\\section{" + section + "}");
 +    }
 +
 +    @Override
 +    String getExt() {
 +      return "tex";
 +    }
 +
 +    @Override
 +    void prefixSection(Property prefix) {
 +      boolean depr = prefix.isDeprecated();
 +      doc.println("\\subsection{" + prefix.getKey() + "*}" + (depr ? " (Deprecated)" : ""));
 +      doc.println((depr ? "\\sout{\\textit{Deprecated.} " : "") + sanitize(prefix.getDescription()) + (depr ? "}" : ""));
 +      doc.println();
 +    }
 +
 +    @Override
 +    void property(Property prefix, Property prop) {
 +      boolean depr = prefix.isDeprecated() || prop.isDeprecated();
 +      if (prop.getKey().startsWith(prefix.getKey())) {
 +        doc.println("\\subsubsection{" + prop.getKey() + "}");
 +        doc.println(strike((depr ? "\\textit{Deprecated.} " : "") + sanitize(prop.getDescription()), depr));
 +        doc.println();
 +        doc.println(strike("\\textit{Type:} " + prop.getType().name() + "\\\\", depr));
 +        doc.println(strike("\\textit{ZooKeeper Mutable:} " + isZooKeeperMutable(prop) + "\\\\", depr));
 +        doc.println(strike("\\textit{Default Value:} " + sanitize(prop.getRawDefaultValue()), depr));
 +        doc.println();
 +      }
 +    }
 +
 +    private String strike(String s, boolean isDeprecated) {
 +      return (isDeprecated ? "\\sout{" : "") + s + (isDeprecated ? "}" : "");
 +    }
 +
 +    @Override
 +    void propertyTypeDescriptions() {
 +      for (PropertyType type : PropertyType.values()) {
 +        if (type == PropertyType.PREFIX)
 +          continue;
 +        doc.println("\\subsection{" + sanitize(type.toString()) + "}");
 +        doc.println(sanitize(type.getFormatDescription()));
 +        doc.println();
 +      }
 +    }
 +
 +    @Override
 +    String sanitize(String str) {
 +      str = str.replace("\\", "\\textbackslash{}");
 +      str = str.replace("~", "\\textasciitilde{}");
 +      str = str.replace("^", "\\textasciicircum");
 +
 +      str = str.replace("&", "\\&");
 +      str = str.replace("%", "\\%");
 +      str = str.replace("$", "\\$");
 +      str = str.replace("#", "\\#");
 +      str = str.replace("_", "\\_");
 +      str = str.replace("{", "\\{");
 +      str = str.replace("}", "\\}");
 +
 +      str = str.replaceAll("(?:\r\n|\r|\n)", "\\\\\\\\\n");
 +      return str;
 +    }
 +  }
 +
 +  private static Logger log = Logger.getLogger(ConfigurationDocGen.class);
 +
 +  private PrintStream doc;
 +
 +  private final ArrayList<Property> prefixes;
 +
 +  private final TreeMap<String,Property> sortedProps;
 +
 +  ConfigurationDocGen(PrintStream doc) {
 +    this.doc = doc;
 +    this.prefixes = new ArrayList<Property>();
 +    this.sortedProps = new TreeMap<String,Property>();
 +
 +    for (Property prop : Property.values()) {
 +      if (prop.isExperimental())
 +        continue;
 +
 +      if (prop.getType() == PropertyType.PREFIX)
 +        this.prefixes.add(prop);
 +      else
 +        this.sortedProps.put(prop.getKey(), prop);
 +    }
 +  }
 +
 +  private void appendResource(String resourceName) {
 +    InputStream data = ConfigurationDocGen.class.getResourceAsStream(resourceName);
 +    if (data != null) {
 +      byte[] buffer = new byte[1024];
 +      int n;
 +      try {
 +        while ((n = data.read(buffer)) > 0)
-           doc.print(new String(buffer, 0, n, Constants.UTF8));
++          doc.print(new String(buffer, 0, n, UTF_8));
 +      } catch (IOException e) {
 +        e.printStackTrace();
 +        return;
 +      } finally {
 +        try {
 +          data.close();
 +        } catch (IOException ex) {
 +          log.error(ex, ex);
 +        }
 +      }
 +    }
 +  }
 +
 +  private String isZooKeeperMutable(Property prop) {
 +    if (!Property.isValidZooPropertyKey(prop.getKey()))
 +      return "no";
 +    if (Property.isFixedZooPropertyKey(prop))
 +      return "yes but requires restart of the " + prop.getKey().split("[.]")[0];
 +    return "yes";
 +  }
 +
 +  void generateHtml() {
 +    new HTML().generate();
 +  }
 +
 +  void generateLaTeX() {
 +    new LaTeX().generate();
 +  }
 +
 +  /*
 +   * Generate documentation for conf/accumulo-site.xml file usage
 +   */
 +  public static void main(String[] args) throws FileNotFoundException, UnsupportedEncodingException {
 +    if (args.length == 2 && args[0].equals("--generate-html")) {
-       new ConfigurationDocGen(new PrintStream(args[1], Constants.UTF8.name())).generateHtml();
++      new ConfigurationDocGen(new PrintStream(args[1], UTF_8.name())).generateHtml();
 +    } else if (args.length == 2 && args[0].equals("--generate-latex")) {
-       new ConfigurationDocGen(new PrintStream(args[1], Constants.UTF8.name())).generateLaTeX();
++      new ConfigurationDocGen(new PrintStream(args[1], UTF_8.name())).generateLaTeX();
 +    } else {
 +      throw new IllegalArgumentException("Usage: " + ConfigurationDocGen.class.getName() + " --generate-html <filename> | --generate-latex <filename>");
 +    }
 +  }
 +
 +}
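
A minimal sketch of driving the generator from code. It assumes the caller lives in the org.apache.accumulo.core.conf package, since the class and its constructor are package-private; the output file names are illustrative.

    package org.apache.accumulo.core.conf;

    public class DocGenExample {
      public static void main(String[] args) throws Exception {
        // HTML flavor of the property documentation
        ConfigurationDocGen.main(new String[] {"--generate-html", "accumulo-properties.html"});
        // LaTeX flavor
        ConfigurationDocGen.main(new String[] {"--generate-latex", "accumulo-properties.tex"});
      }
    }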

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java
index ca769cb,7c4f66e..437590f
--- a/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java
@@@ -16,11 -16,10 +16,11 @@@
   */
  package org.apache.accumulo.core.data;
  
+ import static com.google.common.base.Charsets.UTF_8;
+ 
  import java.io.Serializable;
 +import java.nio.ByteBuffer;
  
- import org.apache.accumulo.core.Constants;
- 
  public class ArrayByteSequence extends ByteSequence implements Serializable {
    
    private static final long serialVersionUID = 1L;
@@@ -48,22 -47,9 +48,22 @@@
    }
    
    public ArrayByteSequence(String s) {
-     this(s.getBytes(Constants.UTF8));
+     this(s.getBytes(UTF_8));
    }
    
 +  public ArrayByteSequence(ByteBuffer buffer) {
 +    this.length = buffer.remaining();
 +
 +    if (buffer.hasArray()) {
 +      this.data = buffer.array();
 +      this.offset = buffer.position();
 +    } else {
 +      this.data = new byte[length];
 +      this.offset = 0;
 +      buffer.get(data);
 +    }
 +  }
 +
    @Override
    public byte byteAt(int i) {
      

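The new ByteBuffer constructor above sits alongside the existing String and byte[] constructors. A small sketch of the two construction paths; the values are illustrative.

    // assumes java.nio.ByteBuffer and the UTF_8 static import used elsewhere in this class
    ArrayByteSequence fromString = new ArrayByteSequence("row1");                 // wraps the UTF-8 bytes of the string
    ArrayByteSequence fromBuffer = new ArrayByteSequence(ByteBuffer.wrap("row1".getBytes(UTF_8)));
    System.out.println(fromString.length() + " " + fromBuffer.length());          // both print 4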
