accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [6/7] Merge branch '1.6.0-SNAPSHOT'
Date Fri, 11 Apr 2014 20:33:15 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/bf102d07/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
index 1a0ec69,68cd389..0000000
deleted file mode 100644,100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
+++ /dev/null
@@@ -1,397 -1,277 +1,0 @@@
--/*
-- * Licensed to the Apache Software Foundation (ASF) under one or more
-- * contributor license agreements.  See the NOTICE file distributed with
-- * this work for additional information regarding copyright ownership.
-- * The ASF licenses this file to You under the Apache License, Version 2.0
-- * (the "License"); you may not use this file except in compliance with
-- * the License.  You may obtain a copy of the License at
-- *
-- *     http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing, software
-- * distributed under the License is distributed on an "AS IS" BASIS,
-- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- * See the License for the specific language governing permissions and
-- * limitations under the License.
-- */
--package org.apache.accumulo.core.client.mapreduce.lib.util;
--
- import static com.google.common.base.Preconditions.checkArgument;
- 
- import java.io.IOException;
- import java.net.URI;
- import java.net.URISyntaxException;
- import java.nio.charset.StandardCharsets;
- 
--import org.apache.accumulo.core.client.AccumuloSecurityException;
--import org.apache.accumulo.core.client.ClientConfiguration;
--import org.apache.accumulo.core.client.Instance;
--import org.apache.accumulo.core.client.ZooKeeperInstance;
--import org.apache.accumulo.core.client.mock.MockInstance;
--import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
--import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
- import org.apache.accumulo.core.security.Credentials;
- import org.apache.commons.codec.binary.Base64;
--import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.fs.FSDataInputStream;
- import org.apache.hadoop.fs.FileSystem;
- import org.apache.hadoop.fs.Path;
--import org.apache.hadoop.util.StringUtils;
--import org.apache.log4j.Level;
- import org.apache.log4j.Logger;
--
--/**
-  * @since 1.5.0
-  */
- public class ConfiguratorBase {
- 
-   /**
-    * Configuration keys for {@link Instance#getConnector(String, AuthenticationToken)}.
-    * 
-    * @since 1.5.0
-    */
-   public static enum ConnectorInfo {
-     IS_CONFIGURED, PRINCIPAL, TOKEN,
-   }
- 
-   public static enum TokenSource {
-     FILE, INLINE;
- 
-     private String prefix;
- 
-     private TokenSource() {
-       prefix = name().toLowerCase() + ":";
-     }
- 
-     public String prefix() {
-       return prefix;
-     }
-   }
- 
-   /**
-    * Configuration keys for {@link Instance}, {@link ZooKeeperInstance}, and {@link MockInstance}.
-    * 
-    * @since 1.5.0
-    */
-   public static enum InstanceOpts {
-     TYPE, NAME, ZOO_KEEPERS, CLIENT_CONFIG;
-   }
- 
-   /**
-    * Configuration keys for general configuration options.
-    * 
-    * @since 1.5.0
-    */
-   public static enum GeneralOpts {
-     LOG_LEVEL
-   }
- 
-   /**
-    * Provides a configuration key for a given feature enum, prefixed by the implementingClass
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param e
-    *          the enum used to provide the unique part of the configuration key
-    * @return the configuration key
-    * @since 1.5.0
-    */
-   protected static String enumToConfKey(Class<?> implementingClass, Enum<?> e) {
-     return implementingClass.getSimpleName() + "." + e.getDeclaringClass().getSimpleName() + "." + StringUtils.camelize(e.name().toLowerCase());
-   }
- 
-   /**
-    * Sets the connector information needed to communicate with Accumulo in this job.
-    * 
-    * <p>
-    * <b>WARNING:</b> The serialized token is stored in the configuration and shared with all MapReduce tasks. It is BASE64 encoded to provide a charset safe
-    * conversion to a string, and is not intended to be secure.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param principal
-    *          a valid Accumulo user name
-    * @param token
-    *          the user's password
-    * @since 1.5.0
-    */
-   public static void setConnectorInfo(Class<?> implementingClass, Configuration conf, String principal, AuthenticationToken token)
-       throws AccumuloSecurityException {
-     if (isConnectorInfoSet(implementingClass, conf))
-       throw new IllegalStateException("Connector info for " + implementingClass.getSimpleName()
+ " can only be set once per job");
- 
-     checkArgument(principal != null, "principal is null");
-     checkArgument(token != null, "token is null");
-     conf.setBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), true);
-     conf.set(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL), principal);
-     conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN),
-         TokenSource.INLINE.prefix() + token.getClass().getName() + ":" + Base64.encodeBase64String(AuthenticationTokenSerializer.serialize(token)));
-   }
- 
-   /**
-    * Sets the connector information needed to communicate with Accumulo in this job.
-    * 
-    * <p>
-    * Pulls a token file into the Distributed Cache that contains the authentication token in an attempt to be more secure than storing the password in the
-    * Configuration. Token file created with "bin/accumulo create-token".
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param principal
-    *          a valid Accumulo user name
-    * @param tokenFile
-    *          the path to the token file in DFS
-    * @since 1.6.0
-    */
-   public static void setConnectorInfo(Class<?> implementingClass, Configuration conf, String principal, String tokenFile) throws AccumuloSecurityException {
-     if (isConnectorInfoSet(implementingClass, conf))
-       throw new IllegalStateException("Connector info for " + implementingClass.getSimpleName()
+ " can only be set once per job");
- 
-     checkArgument(principal != null, "principal is null");
-     checkArgument(tokenFile != null, "tokenFile is null");
- 
-     try {
-       DistributedCacheHelper.addCacheFile(new URI(tokenFile), conf);
-     } catch (URISyntaxException e) {
-       throw new IllegalStateException("Unable to add tokenFile \"" + tokenFile + "\" to
distributed cache.");
-     }
- 
-     conf.setBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), true);
-     conf.set(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL), principal);
-     conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN), TokenSource.FILE.prefix() + tokenFile);
-   }
- 
-   /**
-    * Determines if the connector info has already been set for this instance.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @return true if the connector info has already been set, false otherwise
-    * @since 1.5.0
-    * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
-    */
-   public static Boolean isConnectorInfoSet(Class<?> implementingClass, Configuration conf) {
-     return conf.getBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), false);
-   }
- 
-   /**
-    * Gets the user name from the configuration.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @return the principal
-    * @since 1.5.0
-    * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
-    */
-   public static String getPrincipal(Class<?> implementingClass, Configuration conf) {
-     return conf.get(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL));
-   }
- 
-   /**
-    * Gets the authenticated token from either the specified token file or directly from the configuration, whichever was used when the job was configured.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @return the principal's authentication token
-    * @since 1.6.0
-    * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
-    * @see #setConnectorInfo(Class, Configuration, String, String)
-    */
-   public static AuthenticationToken getAuthenticationToken(Class<?> implementingClass, Configuration conf) {
-     String token = conf.get(enumToConfKey(implementingClass, ConnectorInfo.TOKEN));
-     if (token == null || token.isEmpty())
-       return null;
-     if (token.startsWith(TokenSource.INLINE.prefix())) {
-       String[] args = token.substring(TokenSource.INLINE.prefix().length()).split(":", 2);
-       if (args.length == 2)
-         return AuthenticationTokenSerializer.deserialize(args[0], Base64.decodeBase64(args[1].getBytes(StandardCharsets.UTF_8)));
-     } else if (token.startsWith(TokenSource.FILE.prefix())) {
-       String tokenFileName = token.substring(TokenSource.FILE.prefix().length());
-       return getTokenFromFile(conf, getPrincipal(implementingClass, conf), tokenFileName);
-     }
- 
-     throw new IllegalStateException("Token was not properly serialized into the configuration");
-   }
- 
-   /**
-    * Reads from the token file in distributed cache. Currently, the token file stores data separated by colons e.g. principal:token_class:token
-    * 
-    * @param conf
-    *          the Hadoop context for the configured job
-    * @return path to the token file as a String
-    * @since 1.6.0
-    * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
-    */
-   public static AuthenticationToken getTokenFromFile(Configuration conf, String principal, String tokenFile) {
-     FSDataInputStream in = null;
-     try {
-       URI[] uris = DistributedCacheHelper.getCacheFiles(conf);
-       Path path = null;
-       for (URI u : uris) {
-         if (u.toString().equals(tokenFile)) {
-           path = new Path(u);
-         }
-       }
-       if (path == null) {
-         throw new IllegalArgumentException("Couldn't find password file called \"" + tokenFile
+ "\" in cache.");
-       }
-       FileSystem fs = FileSystem.get(conf);
-       in = fs.open(path);
-     } catch (IOException e) {
-       throw new IllegalArgumentException("Couldn't open password file called \"" + tokenFile
+ "\".");
-     }
-     java.util.Scanner fileScanner = new java.util.Scanner(in);
-     try {
-       while (fileScanner.hasNextLine()) {
-         Credentials creds = Credentials.deserialize(fileScanner.nextLine());
-         if (principal.equals(creds.getPrincipal())) {
-           return creds.getToken();
-         }
-       }
-       throw new IllegalArgumentException("Couldn't find token for user \"" + principal +
"\" in file \"" + tokenFile + "\"");
-     } finally {
-       if (fileScanner != null && fileScanner.ioException() == null)
-         fileScanner.close();
-       else if (fileScanner.ioException() != null)
-         throw new RuntimeException(fileScanner.ioException());
-     }
-   }
- 
-   /**
-    * Configures a {@link ZooKeeperInstance} for this job.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param instanceName
-    *          the Accumulo instance name
-    * @param zooKeepers
-    *          a comma-separated list of zookeeper servers
-    * @since 1.5.0
-    * @deprecated since 1.6.0; Use {@link #setZooKeeperInstance(Class, Configuration, ClientConfiguration)} instead.
-    */
- 
-   @Deprecated
-   public static void setZooKeeperInstance(Class<?> implementingClass, Configuration conf, String instanceName, String zooKeepers) {
-     checkArgument(instanceName != null, "instanceName is null");
-     checkArgument(zooKeepers != null, "zooKeepers is null");
-     setZooKeeperInstance(implementingClass, conf, new ClientConfiguration().withInstance(instanceName).withZkHosts(zooKeepers));
-   }
- 
-   /**
-    * Configures a {@link ZooKeeperInstance} for this job.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param clientConfig
-    *          client configuration for specifying connection timeouts, SSL connection options, etc.
-    * @since 1.5.0
-    */
-   public static void setZooKeeperInstance(Class<?> implementingClass, Configuration conf, ClientConfiguration clientConfig) {
-     String key = enumToConfKey(implementingClass, InstanceOpts.TYPE);
-     if (!conf.get(key, "").isEmpty())
-       throw new IllegalStateException("Instance info can only be set once per job; it has
already been configured with " + conf.get(key));
-     conf.set(key, "ZooKeeperInstance");
-     if (clientConfig != null) {
-       conf.set(enumToConfKey(implementingClass, InstanceOpts.CLIENT_CONFIG), clientConfig.serialize());
-     }
-   }
- 
-   /**
-    * Configures a {@link MockInstance} for this job.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param instanceName
-    *          the Accumulo instance name
-    * @since 1.5.0
-    */
-   public static void setMockInstance(Class<?> implementingClass, Configuration conf, String instanceName) {
-     String key = enumToConfKey(implementingClass, InstanceOpts.TYPE);
-     if (!conf.get(key, "").isEmpty())
-       throw new IllegalStateException("Instance info can only be set once per job; it has
already been configured with " + conf.get(key));
-     conf.set(key, "MockInstance");
- 
-     checkArgument(instanceName != null, "instanceName is null");
-     conf.set(enumToConfKey(implementingClass, InstanceOpts.NAME), instanceName);
-   }
- 
-   /**
-    * Initializes an Accumulo {@link Instance} based on the configuration.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @return an Accumulo instance
-    * @since 1.5.0
-    * @see #setZooKeeperInstance(Class, Configuration, ClientConfiguration)
-    * @see #setMockInstance(Class, Configuration, String)
-    */
-   public static Instance getInstance(Class<?> implementingClass, Configuration conf) {
-     String instanceType = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE), "");
-     if ("MockInstance".equals(instanceType))
-       return new MockInstance(conf.get(enumToConfKey(implementingClass, InstanceOpts.NAME)));
-     else if ("ZooKeeperInstance".equals(instanceType)) {
-       String clientConfigString = conf.get(enumToConfKey(implementingClass, InstanceOpts.CLIENT_CONFIG));
-       if (clientConfigString == null) {
-         String instanceName = conf.get(enumToConfKey(implementingClass, InstanceOpts.NAME));
-         String zookeepers = conf.get(enumToConfKey(implementingClass, InstanceOpts.ZOO_KEEPERS));
-         return new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zookeepers));
-       } else {
-         return new ZooKeeperInstance(ClientConfiguration.deserialize(clientConfigString));
-       }
-     } else if (instanceType.isEmpty())
-       throw new IllegalStateException("Instance has not been configured for " + implementingClass.getSimpleName());
-     else
-       throw new IllegalStateException("Unrecognized instance type " + instanceType);
-   }
- 
-   /**
-    * Sets the log level for this job.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param level
-    *          the logging level
-    * @since 1.5.0
-    */
-   public static void setLogLevel(Class<?> implementingClass, Configuration conf, Level level) {
-     checkArgument(level != null, "level is null");
-     Logger.getLogger(implementingClass).setLevel(level);
-     conf.setInt(enumToConfKey(implementingClass, GeneralOpts.LOG_LEVEL), level.toInt());
-   }
- 
-   /**
-    * Gets the log level from this configuration.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @return the log level
-    * @since 1.5.0
-    * @see #setLogLevel(Class, Configuration, Level)
-    */
-   public static Level getLogLevel(Class<?> implementingClass, Configuration conf) {
-     return Level.toLevel(conf.getInt(enumToConfKey(implementingClass, GeneralOpts.LOG_LEVEL), Level.INFO.toInt()));
-   }
- 
- }
 - * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 - * @since 1.5.0
 - */
 -@Deprecated
 -public class ConfiguratorBase {
 -
 -  /**
 -   * Configuration keys for {@link Instance#getConnector(String, AuthenticationToken)}.
 -   * 
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static enum ConnectorInfo {
 -    IS_CONFIGURED, PRINCIPAL, TOKEN, TOKEN_CLASS
 -  }
 -
 -  /**
 -   * Configuration keys for {@link Instance}, {@link ZooKeeperInstance}, and {@link MockInstance}.
 -   * 
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  protected static enum InstanceOpts {
 -    TYPE, NAME, ZOO_KEEPERS;
 -  }
 -
 -  /**
 -   * Configuration keys for general configuration options.
 -   * 
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  protected static enum GeneralOpts {
 -    LOG_LEVEL
 -  }
 -
 -  /**
 -   * Provides a configuration key for a given feature enum, prefixed by the implementingClass
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param e
 -   *          the enum used to provide the unique part of the configuration key
 -   * @return the configuration key
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  protected static String enumToConfKey(Class<?> implementingClass, Enum<?> e) {
 -    return implementingClass.getSimpleName() + "." + e.getDeclaringClass().getSimpleName() + "." + StringUtils.camelize(e.name().toLowerCase());
 -  }
 -
 -  /**
 -   * Sets the connector information needed to communicate with Accumulo in this job.
 -   * 
 -   * <p>
 -   * <b>WARNING:</b> The serialized token is stored in the configuration and shared with all MapReduce tasks. It is BASE64 encoded to provide a charset safe
 -   * conversion to a string, and is not intended to be secure.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @param principal
 -   *          a valid Accumulo user name
 -   * @param token
 -   *          the user's password
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static void setConnectorInfo(Class<?> implementingClass, Configuration conf, String principal, AuthenticationToken token)
 -      throws AccumuloSecurityException {
 -    org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.setConnectorInfo(implementingClass, conf, principal, token);
 -  }
 -
 -  /**
 -   * Determines if the connector info has already been set for this instance.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @return true if the connector info has already been set, false otherwise
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 -   */
 -  @Deprecated
 -  public static Boolean isConnectorInfoSet(Class<?> implementingClass, Configuration conf) {
 -    return org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.isConnectorInfoSet(implementingClass, conf);
 -  }
 -
 -  /**
 -   * Gets the user name from the configuration.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @return the principal
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 -   */
 -  @Deprecated
 -  public static String getPrincipal(Class<?> implementingClass, Configuration conf) {
 -    return org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.getPrincipal(implementingClass, conf);
 -  }
 -
 -  /**
 -   * DON'T USE THIS. No, really, don't use this. You already have an {@link AuthenticationToken} with
 -   * {@link org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase#getAuthenticationToken(Class, Configuration)}. You don't need to construct it
 -   * yourself.
 -   * <p>
 -   * Gets the serialized token class from the configuration.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @return the principal
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 -   */
 -  @Deprecated
 -  public static String getTokenClass(Class<?> implementingClass, Configuration conf) {
 -    return org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.getAuthenticationToken(implementingClass, conf).getClass().getName();
 -  }
 -
 -  /**
 -   * DON'T USE THIS. No, really, don't use this. You already have an {@link AuthenticationToken} with
 -   * {@link org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase#getAuthenticationToken(Class, Configuration)}. You don't need to construct it
 -   * yourself.
 -   * <p>
 -   * Gets the password from the configuration. WARNING: The password is stored in the Configuration and shared with all MapReduce tasks; It is BASE64 encoded to
 -   * provide a charset safe conversion to a string, and is not intended to be secure.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @return the decoded principal's authentication token
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 -   */
 -  @Deprecated
 -  public static byte[] getToken(Class<?> implementingClass, Configuration conf) {
 -    return AuthenticationTokenSerializer.serialize(org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.getAuthenticationToken(
 -        implementingClass, conf));
 -  }
 -
 -  /**
 -   * Configures a {@link ZooKeeperInstance} for this job.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @param instanceName
 -   *          the Accumulo instance name
 -   * @param zooKeepers
 -   *          a comma-separated list of zookeeper servers
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static void setZooKeeperInstance(Class<?> implementingClass, Configuration conf, String instanceName, String zooKeepers) {
 -    org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.setZooKeeperInstance(implementingClass, conf,
 -        new ClientConfiguration().withInstance(instanceName).withZkHosts(zooKeepers));
 -  }
 -
 -  /**
 -   * Configures a {@link MockInstance} for this job.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @param instanceName
 -   *          the Accumulo instance name
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static void setMockInstance(Class<?> implementingClass, Configuration conf, String instanceName) {
 -    org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.setMockInstance(implementingClass, conf, instanceName);
 -  }
 -
 -  /**
 -   * Initializes an Accumulo {@link Instance} based on the configuration.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @return an Accumulo instance
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   * @see #setZooKeeperInstance(Class, Configuration, String, String)
 -   * @see #setMockInstance(Class, Configuration, String)
 -   */
 -  @Deprecated
 -  public static Instance getInstance(Class<?> implementingClass, Configuration conf) {
 -    return org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.getInstance(implementingClass, conf);
 -  }
 -
 -  /**
 -   * Sets the log level for this job.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @param level
 -   *          the logging level
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static void setLogLevel(Class<?> implementingClass, Configuration conf, Level level) {
 -    org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.setLogLevel(implementingClass, conf, level);
 -  }
 -
 -  /**
 -   * Gets the log level from this configuration.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @return the log level
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   * @see #setLogLevel(Class, Configuration, Level)
 -   */
 -  @Deprecated
 -  public static Level getLogLevel(Class<?> implementingClass, Configuration conf) {
 -    return org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.getLogLevel(implementingClass, conf);
 -  }
 -
 -}
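
One parent's copy of the deleted lib.util ConfiguratorBase was already a deprecated shim that delegates to org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase, which keeps the same static API. A minimal sketch of the round trip these helpers implement (write connector and instance info into a Hadoop Configuration under class-scoped keys, then read it back on the task side), using only calls visible in the diff above; the principal, password, instance name, and ZooKeeper host are hypothetical placeholder values:

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.hadoop.conf.Configuration;

public class ConfiguratorBaseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Job-setup side: each value lands under a key built by enumToConfKey(),
    // e.g. "ConfiguratorBaseSketch.ConnectorInfo.Principal". The token is
    // Base64-encoded, not encrypted, so it is not secure at rest.
    ConfiguratorBase.setConnectorInfo(ConfiguratorBaseSketch.class, conf,
        "root", new PasswordToken("secret")); // hypothetical credentials
    ConfiguratorBase.setZooKeeperInstance(ConfiguratorBaseSketch.class, conf,
        new ClientConfiguration().withInstance("test").withZkHosts("zkhost:2181"));

    // Task side: everything is reconstructed from the same Configuration.
    String principal = ConfiguratorBase.getPrincipal(ConfiguratorBaseSketch.class, conf);
    AuthenticationToken token =
        ConfiguratorBase.getAuthenticationToken(ConfiguratorBaseSketch.class, conf);
    Instance instance = ConfiguratorBase.getInstance(ConfiguratorBaseSketch.class, conf);
    System.out.println(principal + " @ " + instance.getInstanceName());
  }
}

Scoping every key by the calling class is what lets several input/output formats share one Configuration without their settings colliding.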

http://git-wip-us.apache.org/repos/asf/accumulo/blob/bf102d07/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java
index 7c49f79,5b431c3..0000000
deleted file mode 100644,100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java
+++ /dev/null
@@@ -1,187 -1,170 +1,0 @@@
--/*
-- * Licensed to the Apache Software Foundation (ASF) under one or more
-- * contributor license agreements.  See the NOTICE file distributed with
-- * this work for additional information regarding copyright ownership.
-- * The ASF licenses this file to You under the Apache License, Version 2.0
-- * (the "License"); you may not use this file except in compliance with
-- * the License.  You may obtain a copy of the License at
-- *
-- *     http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing, software
-- * distributed under the License is distributed on an "AS IS" BASIS,
-- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- * See the License for the specific language governing permissions and
-- * limitations under the License.
-- */
--package org.apache.accumulo.core.client.mapreduce.lib.util;
--
- import java.util.Arrays;
- import java.util.Map.Entry;
- 
--import org.apache.accumulo.core.conf.AccumuloConfiguration;
- import org.apache.accumulo.core.conf.ConfigurationCopy;
--import org.apache.accumulo.core.conf.Property;
--import org.apache.hadoop.conf.Configuration;
--
--/**
-  * @since 1.5.0
-  */
- public class FileOutputConfigurator extends ConfiguratorBase {
-   
-   /**
-    * Configuration keys for {@link AccumuloConfiguration}.
-    * 
-    * @since 1.5.0
-    */
-   public static enum Opts {
-     ACCUMULO_PROPERTIES;
-   }
-   
-   /**
-    * The supported Accumulo properties we set in this OutputFormat, that change the behavior of the RecordWriter.<br />
-    * These properties correspond to the supported public static setter methods available to this class.
-    * 
-    * @param property
-    *          the Accumulo property to check
-    * @since 1.5.0
-    */
-   protected static Boolean isSupportedAccumuloProperty(Property property) {
-     switch (property) {
-       case TABLE_FILE_COMPRESSION_TYPE:
-       case TABLE_FILE_COMPRESSED_BLOCK_SIZE:
-       case TABLE_FILE_BLOCK_SIZE:
-       case TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX:
-       case TABLE_FILE_REPLICATION:
-         return true;
-       default:
-         return false;
-     }
-   }
-   
-   /**
-    * Helper for transforming Accumulo configuration properties into something that can be stored safely inside the Hadoop Job configuration.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param property
-    *          the supported Accumulo property
-    * @param value
-    *          the value of the property to set
-    * @since 1.5.0
-    */
-   private static <T> void setAccumuloProperty(Class<?> implementingClass, Configuration conf, Property property, T value) {
-     if (isSupportedAccumuloProperty(property)) {
-       String val = String.valueOf(value);
-       if (property.getType().isValidFormat(val))
-         conf.set(enumToConfKey(implementingClass, Opts.ACCUMULO_PROPERTIES) + "." + property.getKey(), val);
-       else
-         throw new IllegalArgumentException("Value is not appropriate for property type '"
+ property.getType() + "'");
-     } else
-       throw new IllegalArgumentException("Unsupported configuration property " + property.getKey());
-   }
-   
-   /**
-    * This helper method provides an AccumuloConfiguration object constructed from the Accumulo defaults, and overridden with Accumulo properties that have been
-    * stored in the Job's configuration.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @since 1.5.0
-    */
-   public static AccumuloConfiguration getAccumuloConfiguration(Class<?> implementingClass, Configuration conf) {
-     String prefix = enumToConfKey(implementingClass, Opts.ACCUMULO_PROPERTIES) + ".";
-     ConfigurationCopy acuConf = new ConfigurationCopy(AccumuloConfiguration.getDefaultConfiguration());
-     for (Entry<String,String> entry : conf)
-       if (entry.getKey().startsWith(prefix))
-         acuConf.set(Property.getPropertyByKey(entry.getKey().substring(prefix.length())), entry.getValue());
-     return acuConf;
-   }
-   
-   /**
-    * Sets the compression type to use for data blocks. Specifying a compression may require additional libraries to be available to your Job.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param compressionType
-    *          one of "none", "gz", "lzo", or "snappy"
-    * @since 1.5.0
-    */
-   public static void setCompressionType(Class<?> implementingClass, Configuration conf, String compressionType) {
-     if (compressionType == null || !Arrays.asList("none", "gz", "lzo", "snappy").contains(compressionType))
-       throw new IllegalArgumentException("Compression type must be one of: none, gz, lzo, snappy");
-     setAccumuloProperty(implementingClass, conf, Property.TABLE_FILE_COMPRESSION_TYPE, compressionType);
-   }
-   
-   /**
-    * Sets the size for data blocks within each file.<br />
-    * Data blocks are a span of key/value pairs stored in the file that are compressed and indexed as a group.
-    * 
-    * <p>
-    * Making this value smaller may increase seek performance, but at the cost of increasing the size of the indexes (which can also affect seek performance).
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param dataBlockSize
-    *          the block size, in bytes
-    * @since 1.5.0
-    */
-   public static void setDataBlockSize(Class<?> implementingClass, Configuration conf, long dataBlockSize) {
-     setAccumuloProperty(implementingClass, conf, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE, dataBlockSize);
-   }
-   
-   /**
-    * Sets the size for file blocks in the file system; file blocks are managed, and replicated, by the underlying file system.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param fileBlockSize
-    *          the block size, in bytes
-    * @since 1.5.0
-    */
-   public static void setFileBlockSize(Class<?> implementingClass, Configuration conf, long fileBlockSize) {
-     setAccumuloProperty(implementingClass, conf, Property.TABLE_FILE_BLOCK_SIZE, fileBlockSize);
-   }
-   
-   /**
-    * Sets the size for index blocks within each file; smaller blocks means a deeper index hierarchy within the file, while larger blocks mean a more shallow
-    * index hierarchy within the file. This can affect the performance of queries.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param indexBlockSize
-    *          the block size, in bytes
-    * @since 1.5.0
-    */
-   public static void setIndexBlockSize(Class<?> implementingClass, Configuration conf, long indexBlockSize) {
-     setAccumuloProperty(implementingClass, conf, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX, indexBlockSize);
-   }
-   
-   /**
-    * Sets the file system replication factor for the resulting file, overriding the file system default.
-    * 
-    * @param implementingClass
-    *          the class whose name will be used as a prefix for the property configuration key
-    * @param conf
-    *          the Hadoop configuration object to configure
-    * @param replication
-    *          the number of replicas for produced files
-    * @since 1.5.0
-    */
-   public static void setReplication(Class<?> implementingClass, Configuration conf, int replication) {
-     setAccumuloProperty(implementingClass, conf, Property.TABLE_FILE_REPLICATION, replication);
-   }
-   
- }
 - * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 - * @since 1.5.0
 - */
 -@Deprecated
 -public class FileOutputConfigurator extends ConfiguratorBase {
 -
 -  /**
 -   * Configuration keys for {@link AccumuloConfiguration}.
 -   * 
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static enum Opts {
 -    ACCUMULO_PROPERTIES;
 -  }
 -
 -  /**
 -   * The supported Accumulo properties we set in this OutputFormat, that change the behavior of the RecordWriter.<br />
 -   * These properties correspond to the supported public static setter methods available to this class.
 -   * 
 -   * @param property
 -   *          the Accumulo property to check
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  protected static Boolean isSupportedAccumuloProperty(Property property) {
 -    switch (property) {
 -      case TABLE_FILE_COMPRESSION_TYPE:
 -      case TABLE_FILE_COMPRESSED_BLOCK_SIZE:
 -      case TABLE_FILE_BLOCK_SIZE:
 -      case TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX:
 -      case TABLE_FILE_REPLICATION:
 -        return true;
 -      default:
 -        return false;
 -    }
 -  }
 -
 -  /**
 -   * This helper method provides an AccumuloConfiguration object constructed from the Accumulo defaults, and overridden with Accumulo properties that have been
 -   * stored in the Job's configuration.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static AccumuloConfiguration getAccumuloConfiguration(Class<?> implementingClass, Configuration conf) {
 -    return org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.getAccumuloConfiguration(implementingClass, conf);
 -  }
 -
 -  /**
 -   * Sets the compression type to use for data blocks. Specifying a compression may require additional libraries to be available to your Job.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @param compressionType
 -   *          one of "none", "gz", "lzo", or "snappy"
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static void setCompressionType(Class<?> implementingClass, Configuration conf, String compressionType) {
 -    org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.setCompressionType(implementingClass, conf, compressionType);
 -  }
 -
 -  /**
 -   * Sets the size for data blocks within each file.<br />
 -   * Data blocks are a span of key/value pairs stored in the file that are compressed and indexed as a group.
 -   * 
 -   * <p>
 -   * Making this value smaller may increase seek performance, but at the cost of increasing the size of the indexes (which can also affect seek performance).
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @param dataBlockSize
 -   *          the block size, in bytes
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static void setDataBlockSize(Class<?> implementingClass, Configuration conf, long dataBlockSize) {
 -    org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.setDataBlockSize(implementingClass, conf, dataBlockSize);
 -  }
 -
 -  /**
 -   * Sets the size for file blocks in the file system; file blocks are managed, and replicated, by the underlying file system.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @param fileBlockSize
 -   *          the block size, in bytes
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static void setFileBlockSize(Class<?> implementingClass, Configuration conf, long fileBlockSize) {
 -    org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.setFileBlockSize(implementingClass, conf, fileBlockSize);
 -  }
 -
 -  /**
 -   * Sets the size for index blocks within each file; smaller blocks means a deeper index hierarchy within the file, while larger blocks mean a more shallow
 -   * index hierarchy within the file. This can affect the performance of queries.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @param indexBlockSize
 -   *          the block size, in bytes
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static void setIndexBlockSize(Class<?> implementingClass, Configuration conf, long indexBlockSize) {
 -    org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.setIndexBlockSize(implementingClass, conf, indexBlockSize);
 -  }
 -
 -  /**
 -   * Sets the file system replication factor for the resulting file, overriding the file system default.
 -   * 
 -   * @param implementingClass
 -   *          the class whose name will be used as a prefix for the property configuration key
 -   * @param conf
 -   *          the Hadoop configuration object to configure
 -   * @param replication
 -   *          the number of replicas for produced files
 -   * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
 -   * @since 1.5.0
 -   */
 -  @Deprecated
 -  public static void setReplication(Class<?> implementingClass, Configuration conf, int replication) {
 -    org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.setReplication(implementingClass, conf, replication);
 -  }
 -
 -}
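
The deleted lib.util FileOutputConfigurator is likewise a shim: every method forwards to org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator. A minimal sketch of the property round trip, assuming the impl class keeps the signatures shown in the diff above; the compression type, block size, and replication count are arbitrary example values:

import org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.hadoop.conf.Configuration;

public class FileOutputSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Setters validate each value and store it under a class-scoped key ending
    // in the Accumulo property name; only the five whitelisted table.file.*
    // properties are accepted, anything else throws IllegalArgumentException.
    FileOutputConfigurator.setCompressionType(FileOutputSketch.class, conf, "gz");
    FileOutputConfigurator.setDataBlockSize(FileOutputSketch.class, conf, 64 * 1024); // 64 KB blocks
    FileOutputConfigurator.setReplication(FileOutputSketch.class, conf, 3);

    // RecordWriter side: Accumulo defaults overlaid with the stored overrides.
    AccumuloConfiguration acuConf =
        FileOutputConfigurator.getAccumuloConfiguration(FileOutputSketch.class, conf);
    System.out.println(acuConf.get(Property.TABLE_FILE_COMPRESSION_TYPE));
  }
}

Restricting the setters to a small whitelist keeps the RecordWriter's behavior predictable; unsupported properties fail fast at job-configuration time instead of at write time.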

