From: elserj@apache.org
To: commits@accumulo.apache.org
Date: Mon, 09 Jun 2014 14:42:56 -0000
Message-Id: <7492262fdb9246508da55cf1455b4302@git.apache.org>
In-Reply-To: <2a322267f5604c808e4f1bd910031543@git.apache.org>
References: <2a322267f5604c808e4f1bd910031543@git.apache.org>
X-Mailer: ASF-Git Admin Mailer
Subject: [6/6] git commit: Merge branch '1.6.1-SNAPSHOT'

Merge branch '1.6.1-SNAPSHOT'

Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/b06799a3
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/b06799a3
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/b06799a3

Branch: refs/heads/master
Commit: b06799a3d11ab23b5187b2d3bdc69cb8b37a5da2
Parents: 0e097ac ae1682d
Author: Josh Elser <elserj@apache.org>
Authored: Mon Jun 9 10:42:14 2014 -0400
Committer: Josh Elser <elserj@apache.org>
Committed: Mon Jun 9 10:42:14 2014 -0400

----------------------------------------------------------------------
 .../org/apache/accumulo/core/Constants.java      |  2 ++
 .../client/mapred/AccumuloFileOutputFormat.java  |  3 +-
 .../mapreduce/lib/impl/ConfiguratorBase.java     | 37 +++++++++++++++++++-
 .../mapred/AccumuloFileOutputFormatTest.java     |  3 ++
 .../lib/impl/ConfiguratorBaseTest.java           |  8 +++++
 .../system/continuous/continuous-env.sh.example  | 30 +++++++++++-----
 test/system/continuous/start-agitator.sh         | 12 +++---
 7 files changed, 78 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/b06799a3/core/src/main/java/org/apache/accumulo/core/Constants.java
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/b06799a3/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java
----------------------------------------------------------------------
diff --cc mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java
index 8a1d6df,0000000..cfaaa58
mode 100644,000000..100644
--- a/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java
+++ b/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java
@@@ -1,178 -1,0 +1,179 @@@
+/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.accumulo.core.client.mapred; + +import java.io.IOException; +import java.util.Arrays; + ++import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase; +import org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator; +import org.apache.accumulo.core.conf.AccumuloConfiguration; +import org.apache.accumulo.core.conf.Property; +import org.apache.accumulo.core.data.ArrayByteSequence; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.file.FileOperations; +import org.apache.accumulo.core.file.FileSKVWriter; +import org.apache.accumulo.core.security.ColumnVisibility; +import org.apache.commons.collections.map.LRUMap; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordWriter; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.Progressable; +import org.apache.log4j.Logger; + +/** + * This class allows MapReduce jobs to write output in the Accumulo data file format.
+ * Care should be taken to write only sorted data (sorted by {@link Key}), as this is an important requirement of Accumulo data files. + * + *
<p>
+ * The output path to be created must be specified via {@link AccumuloFileOutputFormat#setOutputPath(JobConf, Path)}. This is inherited from + * {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Other methods from {@link FileOutputFormat} are not supported and may be ignored or cause failures. + * Using other Hadoop configuration options that affect the behavior of the underlying files directly in the Job's configuration may work, but are not directly + * supported at this time. + */ +public class AccumuloFileOutputFormat extends FileOutputFormat { + + private static final Class CLASS = AccumuloFileOutputFormat.class; + protected static final Logger log = Logger.getLogger(CLASS); + + /** + * This helper method provides an AccumuloConfiguration object constructed from the Accumulo defaults, and overridden with Accumulo properties that have been + * stored in the Job's configuration. + * + * @param job + * the Hadoop context for the configured job + * @since 1.5.0 + */ + protected static AccumuloConfiguration getAccumuloConfiguration(JobConf job) { + return FileOutputConfigurator.getAccumuloConfiguration(CLASS, job); + } + + /** + * Sets the compression type to use for data blocks. Specifying a compression may require additional libraries to be available to your Job. + * + * @param job + * the Hadoop job instance to be configured + * @param compressionType + * one of "none", "gz", "lzo", or "snappy" + * @since 1.5.0 + */ + public static void setCompressionType(JobConf job, String compressionType) { + FileOutputConfigurator.setCompressionType(CLASS, job, compressionType); + } + + /** + * Sets the size for data blocks within each file.
+ * Data blocks are a span of key/value pairs stored in the file that are compressed and indexed as a group. + * + *
<p>
+ * Making this value smaller may increase seek performance, but at the cost of increasing the size of the indexes (which can also affect seek performance). + * + * @param job + * the Hadoop job instance to be configured + * @param dataBlockSize + * the block size, in bytes + * @since 1.5.0 + */ + public static void setDataBlockSize(JobConf job, long dataBlockSize) { + FileOutputConfigurator.setDataBlockSize(CLASS, job, dataBlockSize); + } + + /** + * Sets the size for file blocks in the file system; file blocks are managed, and replicated, by the underlying file system. + * + * @param job + * the Hadoop job instance to be configured + * @param fileBlockSize + * the block size, in bytes + * @since 1.5.0 + */ + public static void setFileBlockSize(JobConf job, long fileBlockSize) { + FileOutputConfigurator.setFileBlockSize(CLASS, job, fileBlockSize); + } + + /** + * Sets the size for index blocks within each file; smaller blocks means a deeper index hierarchy within the file, while larger blocks mean a more shallow + * index hierarchy within the file. This can affect the performance of queries. + * + * @param job + * the Hadoop job instance to be configured + * @param indexBlockSize + * the block size, in bytes + * @since 1.5.0 + */ + public static void setIndexBlockSize(JobConf job, long indexBlockSize) { + FileOutputConfigurator.setIndexBlockSize(CLASS, job, indexBlockSize); + } + + /** + * Sets the file system replication factor for the resulting file, overriding the file system default. + * + * @param job + * the Hadoop job instance to be configured + * @param replication + * the number of replicas for produced files + * @since 1.5.0 + */ + public static void setReplication(JobConf job, int replication) { + FileOutputConfigurator.setReplication(CLASS, job, replication); + } + + @Override + public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException { + // get the path of the temporary output file + final Configuration conf = job; + final AccumuloConfiguration acuConf = getAccumuloConfiguration(job); + + final String extension = acuConf.get(Property.TABLE_FILE_TYPE); + final Path file = new Path(getWorkOutputPath(job), getUniqueName(job, "part") + "." 
+ extension); + - final LRUMap validVisibilities = new LRUMap(1000); ++ final LRUMap validVisibilities = new LRUMap(ConfiguratorBase.getVisibilityCacheSize(conf)); + + return new RecordWriter() { + FileSKVWriter out = null; + + @Override + public void close(Reporter reporter) throws IOException { + if (out != null) + out.close(); + } + + @Override + public void write(Key key, Value value) throws IOException { + + Boolean wasChecked = (Boolean) validVisibilities.get(key.getColumnVisibilityData()); + if (wasChecked == null) { + byte[] cv = key.getColumnVisibilityData().toArray(); + new ColumnVisibility(cv); + validVisibilities.put(new ArrayByteSequence(Arrays.copyOf(cv, cv.length)), Boolean.TRUE); + } + + if (out == null) { + out = FileOperations.getInstance().openWriter(file.toString(), file.getFileSystem(conf), conf, acuConf); + out.startDefaultLocalityGroup(); + } + out.append(key, value); + } + }; + } + +} http://git-wip-us.apache.org/repos/asf/accumulo/blob/b06799a3/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java ---------------------------------------------------------------------- diff --cc mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java index cf131bd,0000000..bd7c6bf mode 100644,000000..100644 --- a/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java +++ b/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java @@@ -1,369 -1,0 +1,404 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.accumulo.core.client.mapreduce.lib.impl; + +import static com.google.common.base.Preconditions.checkArgument; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; + +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.ClientConfiguration; +import org.apache.accumulo.core.client.Instance; +import org.apache.accumulo.core.client.ZooKeeperInstance; +import org.apache.accumulo.core.client.mock.MockInstance; +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken; +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer; +import org.apache.accumulo.core.security.Credentials; +import org.apache.accumulo.core.util.Base64; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.StringUtils; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +/** + * @since 1.6.0 + */ +public class ConfiguratorBase { + + /** + * Configuration keys for {@link Instance#getConnector(String, AuthenticationToken)}. + * + * @since 1.6.0 + */ + public static enum ConnectorInfo { + IS_CONFIGURED, PRINCIPAL, TOKEN, + } + + public static enum TokenSource { + FILE, INLINE; + + private String prefix; + + private TokenSource() { + prefix = name().toLowerCase() + ":"; + } + + public String prefix() { + return prefix; + } + } + + /** + * Configuration keys for {@link Instance}, {@link ZooKeeperInstance}, and {@link MockInstance}. + * + * @since 1.6.0 + */ + public static enum InstanceOpts { + TYPE, NAME, ZOO_KEEPERS, CLIENT_CONFIG; + } + + /** + * Configuration keys for general configuration options. + * + * @since 1.6.0 + */ + public static enum GeneralOpts { - LOG_LEVEL ++ LOG_LEVEL, ++ VISIBILITY_CACHE_SIZE + } + + /** + * Provides a configuration key for a given feature enum, prefixed by the implementingClass + * + * @param implementingClass + * the class whose name will be used as a prefix for the property configuration key + * @param e + * the enum used to provide the unique part of the configuration key + * @return the configuration key + * @since 1.6.0 + */ + protected static String enumToConfKey(Class implementingClass, Enum e) { + return implementingClass.getSimpleName() + "." + e.getDeclaringClass().getSimpleName() + "." + StringUtils.camelize(e.name().toLowerCase()); + } + + /** ++ * Provides a configuration key for a given feature enum. ++ * ++ * @param e ++ * the enum used to provide the unique part of the configuration key ++ * @return the configuration key ++ */ ++ protected static String enumToConfKey(Enum e) { ++ return e.getDeclaringClass().getSimpleName() + "." + StringUtils.camelize(e.name().toLowerCase()); ++ } ++ ++ /** + * Sets the connector information needed to communicate with Accumulo in this job. + * + *
<p>
+ * WARNING: The serialized token is stored in the configuration and shared with all MapReduce tasks. It is BASE64 encoded to provide a charset safe + * conversion to a string, and is not intended to be secure. + * + * @param implementingClass + * the class whose name will be used as a prefix for the property configuration key + * @param conf + * the Hadoop configuration object to configure + * @param principal + * a valid Accumulo user name + * @param token + * the user's password + * @since 1.6.0 + */ + public static void setConnectorInfo(Class implementingClass, Configuration conf, String principal, AuthenticationToken token) + throws AccumuloSecurityException { + if (isConnectorInfoSet(implementingClass, conf)) + throw new IllegalStateException("Connector info for " + implementingClass.getSimpleName() + " can only be set once per job"); + + checkArgument(principal != null, "principal is null"); + checkArgument(token != null, "token is null"); + conf.setBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), true); + conf.set(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL), principal); + conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN), + TokenSource.INLINE.prefix() + token.getClass().getName() + ":" + Base64.encodeBase64String(AuthenticationTokenSerializer.serialize(token))); + } + + /** + * Sets the connector information needed to communicate with Accumulo in this job. + * + *
<p>
+ * Pulls a token file into the Distributed Cache that contains the authentication token in an attempt to be more secure than storing the password in the + * Configuration. Token file created with "bin/accumulo create-token". + * + * @param implementingClass + * the class whose name will be used as a prefix for the property configuration key + * @param conf + * the Hadoop configuration object to configure + * @param principal + * a valid Accumulo user name + * @param tokenFile + * the path to the token file in DFS + * @since 1.6.0 + */ + public static void setConnectorInfo(Class implementingClass, Configuration conf, String principal, String tokenFile) throws AccumuloSecurityException { + if (isConnectorInfoSet(implementingClass, conf)) + throw new IllegalStateException("Connector info for " + implementingClass.getSimpleName() + " can only be set once per job"); + + checkArgument(principal != null, "principal is null"); + checkArgument(tokenFile != null, "tokenFile is null"); + + try { + DistributedCacheHelper.addCacheFile(new URI(tokenFile), conf); + } catch (URISyntaxException e) { + throw new IllegalStateException("Unable to add tokenFile \"" + tokenFile + "\" to distributed cache."); + } + + conf.setBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), true); + conf.set(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL), principal); + conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN), TokenSource.FILE.prefix() + tokenFile); + } + + /** + * Determines if the connector info has already been set for this instance. + * + * @param implementingClass + * the class whose name will be used as a prefix for the property configuration key + * @param conf + * the Hadoop configuration object to configure + * @return true if the connector info has already been set, false otherwise + * @since 1.6.0 + * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken) + */ + public static Boolean isConnectorInfoSet(Class implementingClass, Configuration conf) { + return conf.getBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), false); + } + + /** + * Gets the user name from the configuration. + * + * @param implementingClass + * the class whose name will be used as a prefix for the property configuration key + * @param conf + * the Hadoop configuration object to configure + * @return the principal + * @since 1.6.0 + * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken) + */ + public static String getPrincipal(Class implementingClass, Configuration conf) { + return conf.get(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL)); + } + + /** + * Gets the authenticated token from either the specified token file or directly from the configuration, whichever was used when the job was configured. 
+ * + * @param implementingClass + * the class whose name will be used as a prefix for the property configuration key + * @param conf + * the Hadoop configuration object to configure + * @return the principal's authentication token + * @since 1.6.0 + * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken) + * @see #setConnectorInfo(Class, Configuration, String, String) + */ + public static AuthenticationToken getAuthenticationToken(Class implementingClass, Configuration conf) { + String token = conf.get(enumToConfKey(implementingClass, ConnectorInfo.TOKEN)); + if (token == null || token.isEmpty()) + return null; + if (token.startsWith(TokenSource.INLINE.prefix())) { + String[] args = token.substring(TokenSource.INLINE.prefix().length()).split(":", 2); + if (args.length == 2) + return AuthenticationTokenSerializer.deserialize(args[0], Base64.decodeBase64(args[1].getBytes(StandardCharsets.UTF_8))); + } else if (token.startsWith(TokenSource.FILE.prefix())) { + String tokenFileName = token.substring(TokenSource.FILE.prefix().length()); + return getTokenFromFile(conf, getPrincipal(implementingClass, conf), tokenFileName); + } + + throw new IllegalStateException("Token was not properly serialized into the configuration"); + } + + /** + * Reads from the token file in distributed cache. Currently, the token file stores data separated by colons e.g. principal:token_class:token + * + * @param conf + * the Hadoop context for the configured job + * @return path to the token file as a String + * @since 1.6.0 + * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken) + */ + public static AuthenticationToken getTokenFromFile(Configuration conf, String principal, String tokenFile) { + FSDataInputStream in = null; + try { + URI[] uris = DistributedCacheHelper.getCacheFiles(conf); + Path path = null; + for (URI u : uris) { + if (u.toString().equals(tokenFile)) { + path = new Path(u); + } + } + if (path == null) { + throw new IllegalArgumentException("Couldn't find password file called \"" + tokenFile + "\" in cache."); + } + FileSystem fs = FileSystem.get(conf); + in = fs.open(path); + } catch (IOException e) { + throw new IllegalArgumentException("Couldn't open password file called \"" + tokenFile + "\"."); + } + try (java.util.Scanner fileScanner = new java.util.Scanner(in)) { + while (fileScanner.hasNextLine()) { + Credentials creds = Credentials.deserialize(fileScanner.nextLine()); + if (principal.equals(creds.getPrincipal())) { + return creds.getToken(); + } + } + throw new IllegalArgumentException("Couldn't find token for user \"" + principal + "\" in file \"" + tokenFile + "\""); + } + } + + /** + * Configures a {@link ZooKeeperInstance} for this job. + * + * @param implementingClass + * the class whose name will be used as a prefix for the property configuration key + * @param conf + * the Hadoop configuration object to configure + * @param clientConfig + * client configuration for specifying connection timeouts, SSL connection options, etc. 
+ * @since 1.6.0 + */ + public static void setZooKeeperInstance(Class implementingClass, Configuration conf, ClientConfiguration clientConfig) { + String key = enumToConfKey(implementingClass, InstanceOpts.TYPE); + if (!conf.get(key, "").isEmpty()) + throw new IllegalStateException("Instance info can only be set once per job; it has already been configured with " + conf.get(key)); + conf.set(key, "ZooKeeperInstance"); + if (clientConfig != null) { + conf.set(enumToConfKey(implementingClass, InstanceOpts.CLIENT_CONFIG), clientConfig.serialize()); + } + } + + /** + * Configures a {@link MockInstance} for this job. + * + * @param implementingClass + * the class whose name will be used as a prefix for the property configuration key + * @param conf + * the Hadoop configuration object to configure + * @param instanceName + * the Accumulo instance name + * @since 1.6.0 + */ + public static void setMockInstance(Class implementingClass, Configuration conf, String instanceName) { + String key = enumToConfKey(implementingClass, InstanceOpts.TYPE); + if (!conf.get(key, "").isEmpty()) + throw new IllegalStateException("Instance info can only be set once per job; it has already been configured with " + conf.get(key)); + conf.set(key, "MockInstance"); + + checkArgument(instanceName != null, "instanceName is null"); + conf.set(enumToConfKey(implementingClass, InstanceOpts.NAME), instanceName); + } + + /** + * Initializes an Accumulo {@link Instance} based on the configuration. + * + * @param implementingClass + * the class whose name will be used as a prefix for the property configuration key + * @param conf + * the Hadoop configuration object to configure + * @return an Accumulo instance + * @since 1.6.0 + * @see #setZooKeeperInstance(Class, Configuration, ClientConfiguration) + * @see #setMockInstance(Class, Configuration, String) + */ + public static Instance getInstance(Class implementingClass, Configuration conf) { + String instanceType = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE), ""); + if ("MockInstance".equals(instanceType)) + return new MockInstance(conf.get(enumToConfKey(implementingClass, InstanceOpts.NAME))); + else if ("ZooKeeperInstance".equals(instanceType)) { + String clientConfigString = conf.get(enumToConfKey(implementingClass, InstanceOpts.CLIENT_CONFIG)); + if (clientConfigString == null) { + String instanceName = conf.get(enumToConfKey(implementingClass, InstanceOpts.NAME)); + String zookeepers = conf.get(enumToConfKey(implementingClass, InstanceOpts.ZOO_KEEPERS)); + return new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zookeepers)); + } else { + return new ZooKeeperInstance(ClientConfiguration.deserialize(clientConfigString)); + } + } else if (instanceType.isEmpty()) + throw new IllegalStateException("Instance has not been configured for " + implementingClass.getSimpleName()); + else + throw new IllegalStateException("Unrecognized instance type " + instanceType); + } + + /** + * Sets the log level for this job. 
+ * + * @param implementingClass + * the class whose name will be used as a prefix for the property configuration key + * @param conf + * the Hadoop configuration object to configure + * @param level + * the logging level + * @since 1.6.0 + */ + public static void setLogLevel(Class implementingClass, Configuration conf, Level level) { + checkArgument(level != null, "level is null"); + Logger.getLogger(implementingClass).setLevel(level); + conf.setInt(enumToConfKey(implementingClass, GeneralOpts.LOG_LEVEL), level.toInt()); + } + + /** + * Gets the log level from this configuration. + * + * @param implementingClass + * the class whose name will be used as a prefix for the property configuration key + * @param conf + * the Hadoop configuration object to configure + * @return the log level + * @since 1.6.0 + * @see #setLogLevel(Class, Configuration, Level) + */ + public static Level getLogLevel(Class implementingClass, Configuration conf) { + return Level.toLevel(conf.getInt(enumToConfKey(implementingClass, GeneralOpts.LOG_LEVEL), Level.INFO.toInt())); + } + ++ /** ++ * Sets the size of the cache used to track validated column visibilities for this job. ++ * ++ * @param conf ++ * the Hadoop configuration object to configure ++ * @param visibilityCacheSize ++ * the LRU cache size ++ */ ++ public static void setVisibilityCacheSize(Configuration conf, int visibilityCacheSize) { ++ conf.setInt(enumToConfKey(GeneralOpts.VISIBILITY_CACHE_SIZE), visibilityCacheSize); ++ } ++ ++ /** ++ * Gets the size of the cache used to track validated column visibilities for this job. ++ * ++ * @param conf ++ * the Hadoop configuration object to configure ++ * @return the visibility cache size ++ */ ++ public static int getVisibilityCacheSize(Configuration conf) { ++ return conf.getInt(enumToConfKey(GeneralOpts.VISIBILITY_CACHE_SIZE), Constants.DEFAULT_VISIBILITY_CACHE_SIZE); ++ } ++ +} http://git-wip-us.apache.org/repos/asf/accumulo/blob/b06799a3/mapreduce/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormatTest.java ---------------------------------------------------------------------- diff --cc mapreduce/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormatTest.java index aad544b,0000000..3a4d641 mode 100644,000000..100644 --- a/mapreduce/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormatTest.java +++ b/mapreduce/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormatTest.java @@@ -1,247 -1,0 +1,250 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.accumulo.core.client.mapred; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileFilter; +import java.io.IOException; + +import org.apache.accumulo.core.client.BatchWriter; +import org.apache.accumulo.core.client.BatchWriterConfig; +import org.apache.accumulo.core.client.Connector; ++import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase; +import org.apache.accumulo.core.client.mock.MockInstance; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.accumulo.core.conf.AccumuloConfiguration; +import org.apache.accumulo.core.conf.Property; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.util.CachedConfiguration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.lib.IdentityMapper; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.apache.log4j.Logger; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +public class AccumuloFileOutputFormatTest { ++ private static final int JOB_VISIBILITY_CACHE_SIZE = 3000; + private static final String PREFIX = AccumuloFileOutputFormatTest.class.getSimpleName(); + private static final String INSTANCE_NAME = PREFIX + "_mapred_instance"; + private static final String BAD_TABLE = PREFIX + "_mapred_bad_table"; + private static final String TEST_TABLE = PREFIX + "_mapred_test_table"; + private static final String EMPTY_TABLE = PREFIX + "_mapred_empty_table"; + + private static AssertionError e1 = null; + private static AssertionError e2 = null; + + @Rule + public TemporaryFolder folder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target")); + + @BeforeClass + public static void setup() throws Exception { + MockInstance mockInstance = new MockInstance(INSTANCE_NAME); + Connector c = mockInstance.getConnector("root", new PasswordToken("")); + c.tableOperations().create(EMPTY_TABLE); + c.tableOperations().create(TEST_TABLE); + c.tableOperations().create(BAD_TABLE); + BatchWriter bw = c.createBatchWriter(TEST_TABLE, new BatchWriterConfig()); + Mutation m = new Mutation("Key"); + m.put("", "", ""); + bw.addMutation(m); + bw.close(); + bw = c.createBatchWriter(BAD_TABLE, new BatchWriterConfig()); + m = new Mutation("r1"); + m.put("cf1", "cq1", "A&B"); + m.put("cf1", "cq1", "A&B"); + m.put("cf1", "cq2", "A&"); + bw.addMutation(m); + bw.close(); + } + + @Test + public void testEmptyWrite() throws Exception { + handleWriteTests(false); + } + + @Test + public void testRealWrite() throws Exception { + handleWriteTests(true); + } + + private static class MRTester extends Configured implements Tool { + private static class BadKeyMapper implements Mapper { + + int index = 0; + + @Override + public void map(Key key, Value value, OutputCollector output, Reporter reporter) throws IOException { + try { + try { + output.collect(key, value); + if (index == 2) + fail(); + } catch (Exception e) { + 
Logger.getLogger(this.getClass()).error(e, e); + assertEquals(2, index); + } + } catch (AssertionError e) { + e1 = e; + } + index++; + } + + @Override + public void configure(JobConf job) {} + + @Override + public void close() throws IOException { + try { + assertEquals(2, index); + } catch (AssertionError e) { + e2 = e; + } + } + + } + + @Override + public int run(String[] args) throws Exception { + + if (args.length != 4) { + throw new IllegalArgumentException("Usage : " + MRTester.class.getName() + " "); + } + + String user = args[0]; + String pass = args[1]; + String table = args[2]; + + JobConf job = new JobConf(getConf()); + job.setJarByClass(this.getClass()); ++ ConfiguratorBase.setVisibilityCacheSize(job, JOB_VISIBILITY_CACHE_SIZE); + + job.setInputFormat(AccumuloInputFormat.class); + + AccumuloInputFormat.setConnectorInfo(job, user, new PasswordToken(pass)); + AccumuloInputFormat.setInputTableName(job, table); + AccumuloInputFormat.setMockInstance(job, INSTANCE_NAME); + AccumuloFileOutputFormat.setOutputPath(job, new Path(args[3])); + + job.setMapperClass(BAD_TABLE.equals(table) ? BadKeyMapper.class : IdentityMapper.class); + job.setMapOutputKeyClass(Key.class); + job.setMapOutputValueClass(Value.class); + job.setOutputFormat(AccumuloFileOutputFormat.class); + + job.setNumReduceTasks(0); + + return JobClient.runJob(job).isSuccessful() ? 0 : 1; + } + + public static void main(String[] args) throws Exception { + assertEquals(0, ToolRunner.run(CachedConfiguration.getInstance(), new MRTester(), args)); + } + } + + public void handleWriteTests(boolean content) throws Exception { + File f = folder.newFile("handleWriteTests"); + f.delete(); + MRTester.main(new String[] {"root", "", content ? TEST_TABLE : EMPTY_TABLE, f.getAbsolutePath()}); + + assertTrue(f.exists()); + File[] files = f.listFiles(new FileFilter() { + @Override + public boolean accept(File file) { + return file.getName().startsWith("part-m-"); + } + }); + if (content) { + assertEquals(1, files.length); + assertTrue(files[0].exists()); + } else { + assertEquals(0, files.length); + } + } + + @Test + public void writeBadVisibility() throws Exception { + File f = folder.newFile("writeBadVisibility"); + f.delete(); + MRTester.main(new String[] {"root", "", BAD_TABLE, f.getAbsolutePath()}); + Logger.getLogger(this.getClass()).error(e1, e1); + assertNull(e1); + assertNull(e2); + } + + @Test + public void validateConfiguration() throws IOException, InterruptedException { + + int a = 7; + long b = 300l; + long c = 50l; + long d = 10l; + String e = "snappy"; + + JobConf job = new JobConf(); + AccumuloFileOutputFormat.setReplication(job, a); + AccumuloFileOutputFormat.setFileBlockSize(job, b); + AccumuloFileOutputFormat.setDataBlockSize(job, c); + AccumuloFileOutputFormat.setIndexBlockSize(job, d); + AccumuloFileOutputFormat.setCompressionType(job, e); + + AccumuloConfiguration acuconf = AccumuloFileOutputFormat.getAccumuloConfiguration(job); + + assertEquals(7, acuconf.getCount(Property.TABLE_FILE_REPLICATION)); + assertEquals(300l, acuconf.getMemoryInBytes(Property.TABLE_FILE_BLOCK_SIZE)); + assertEquals(50l, acuconf.getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE)); + assertEquals(10l, acuconf.getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX)); + assertEquals("snappy", acuconf.get(Property.TABLE_FILE_COMPRESSION_TYPE)); + + a = 17; + b = 1300l; + c = 150l; + d = 110l; + e = "lzo"; + + job = new JobConf(); + AccumuloFileOutputFormat.setReplication(job, a); + AccumuloFileOutputFormat.setFileBlockSize(job, 
b); + AccumuloFileOutputFormat.setDataBlockSize(job, c); + AccumuloFileOutputFormat.setIndexBlockSize(job, d); + AccumuloFileOutputFormat.setCompressionType(job, e); + + acuconf = AccumuloFileOutputFormat.getAccumuloConfiguration(job); + + assertEquals(17, acuconf.getCount(Property.TABLE_FILE_REPLICATION)); + assertEquals(1300l, acuconf.getMemoryInBytes(Property.TABLE_FILE_BLOCK_SIZE)); + assertEquals(150l, acuconf.getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE)); + assertEquals(110l, acuconf.getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX)); + assertEquals("lzo", acuconf.get(Property.TABLE_FILE_COMPRESSION_TYPE)); + + } +} http://git-wip-us.apache.org/repos/asf/accumulo/blob/b06799a3/mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java ---------------------------------------------------------------------- diff --cc mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java index d5ebb22,0000000..7c1f98b mode 100644,000000..100644 --- a/mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java +++ b/mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java @@@ -1,129 -1,0 +1,137 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.accumulo.core.client.mapreduce.lib.impl; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + ++import org.apache.accumulo.core.Constants; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.ClientConfiguration; +import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty; +import org.apache.accumulo.core.client.Instance; +import org.apache.accumulo.core.client.ZooKeeperInstance; +import org.apache.accumulo.core.client.mock.MockInstance; +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken; +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.accumulo.core.util.Base64; +import org.apache.hadoop.conf.Configuration; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.junit.Test; + +/** + * + */ +public class ConfiguratorBaseTest { + + private static enum PrivateTestingEnum { + SOMETHING, SOMETHING_ELSE + } + + @Test + public void testEnumToConfKey() { + assertEquals(this.getClass().getSimpleName() + ".PrivateTestingEnum.Something", + ConfiguratorBase.enumToConfKey(this.getClass(), PrivateTestingEnum.SOMETHING)); + assertEquals(this.getClass().getSimpleName() + ".PrivateTestingEnum.SomethingElse", + ConfiguratorBase.enumToConfKey(this.getClass(), PrivateTestingEnum.SOMETHING_ELSE)); + } + + @Test + public void testSetConnectorInfoClassOfQConfigurationStringAuthenticationToken() throws AccumuloSecurityException { + Configuration conf = new Configuration(); + assertFalse(ConfiguratorBase.isConnectorInfoSet(this.getClass(), conf)); + ConfiguratorBase.setConnectorInfo(this.getClass(), conf, "testUser", new PasswordToken("testPassword")); + assertTrue(ConfiguratorBase.isConnectorInfoSet(this.getClass(), conf)); + assertEquals("testUser", ConfiguratorBase.getPrincipal(this.getClass(), conf)); + AuthenticationToken token = ConfiguratorBase.getAuthenticationToken(this.getClass(), conf); + assertEquals(PasswordToken.class, token.getClass()); + assertEquals(new PasswordToken("testPassword"), token); + assertEquals( + "inline:" + PasswordToken.class.getName() + ":" + Base64.encodeBase64String(AuthenticationTokenSerializer.serialize(new PasswordToken("testPassword"))), + conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.ConnectorInfo.TOKEN))); + } + + @Test + public void testSetConnectorInfoClassOfQConfigurationStringString() throws AccumuloSecurityException { + Configuration conf = new Configuration(); + assertFalse(ConfiguratorBase.isConnectorInfoSet(this.getClass(), conf)); + ConfiguratorBase.setConnectorInfo(this.getClass(), conf, "testUser", "testFile"); + assertTrue(ConfiguratorBase.isConnectorInfoSet(this.getClass(), conf)); + assertEquals("testUser", ConfiguratorBase.getPrincipal(this.getClass(), conf)); + assertEquals("file:testFile", conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.ConnectorInfo.TOKEN))); + } + + @Test + public void testSetZooKeeperInstance() { + Configuration conf = new Configuration(); + ConfiguratorBase.setZooKeeperInstance(this.getClass(), conf, new ClientConfiguration().withInstance("testInstanceName").withZkHosts("testZooKeepers") + .withSsl(true).withZkTimeout(1234)); + ClientConfiguration clientConf = 
ClientConfiguration.deserialize(conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), + ConfiguratorBase.InstanceOpts.CLIENT_CONFIG))); + assertEquals("testInstanceName", clientConf.get(ClientProperty.INSTANCE_NAME)); + assertEquals("testZooKeepers", clientConf.get(ClientProperty.INSTANCE_ZK_HOST)); + assertEquals("true", clientConf.get(ClientProperty.INSTANCE_RPC_SSL_ENABLED)); + assertEquals("1234", clientConf.get(ClientProperty.INSTANCE_ZK_TIMEOUT)); + assertEquals(ZooKeeperInstance.class.getSimpleName(), conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.InstanceOpts.TYPE))); + + Instance instance = ConfiguratorBase.getInstance(this.getClass(), conf); + assertEquals(ZooKeeperInstance.class.getName(), instance.getClass().getName()); + assertEquals("testInstanceName", ((ZooKeeperInstance) instance).getInstanceName()); + assertEquals("testZooKeepers", ((ZooKeeperInstance) instance).getZooKeepers()); + assertEquals(1234000, ((ZooKeeperInstance) instance).getZooKeepersSessionTimeOut()); + } + + @Test + public void testSetMockInstance() { + Configuration conf = new Configuration(); + ConfiguratorBase.setMockInstance(this.getClass(), conf, "testInstanceName"); + assertEquals("testInstanceName", conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.InstanceOpts.NAME))); + assertEquals(null, conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.InstanceOpts.ZOO_KEEPERS))); + assertEquals(MockInstance.class.getSimpleName(), conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.InstanceOpts.TYPE))); + Instance instance = ConfiguratorBase.getInstance(this.getClass(), conf); + assertEquals(MockInstance.class.getName(), instance.getClass().getName()); + } + + @Test + public void testSetLogLevel() { + Configuration conf = new Configuration(); + Level currentLevel = Logger.getLogger(this.getClass()).getLevel(); + + ConfiguratorBase.setLogLevel(this.getClass(), conf, Level.DEBUG); + Logger.getLogger(this.getClass()).setLevel(currentLevel); + assertEquals(Level.DEBUG, ConfiguratorBase.getLogLevel(this.getClass(), conf)); + + ConfiguratorBase.setLogLevel(this.getClass(), conf, Level.INFO); + Logger.getLogger(this.getClass()).setLevel(currentLevel); + assertEquals(Level.INFO, ConfiguratorBase.getLogLevel(this.getClass(), conf)); + + ConfiguratorBase.setLogLevel(this.getClass(), conf, Level.FATAL); + Logger.getLogger(this.getClass()).setLevel(currentLevel); + assertEquals(Level.FATAL, ConfiguratorBase.getLogLevel(this.getClass(), conf)); + } + ++ @Test ++ public void testSetVisibilityCacheSize() { ++ Configuration conf = new Configuration(); ++ assertEquals(Constants.DEFAULT_VISIBILITY_CACHE_SIZE, ConfiguratorBase.getVisibilityCacheSize(conf)); ++ ConfiguratorBase.setVisibilityCacheSize(conf, 2000); ++ assertEquals(2000, ConfiguratorBase.getVisibilityCacheSize(conf)); ++ } +}
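
----------------------------------------------------------------------

For readers tracing the API changes above, a minimal sketch of how the AccumuloFileOutputFormat setters and the new visibility cache size option fit together in a mapred job driver. This is not part of the commit: the output path, block sizes, replication factor, and cache size are illustrative assumptions, and ConfiguratorBase (which lives in the lib.impl package) is called directly only because AccumuloFileOutputFormatTest does the same.

import org.apache.accumulo.core.client.mapred.AccumuloFileOutputFormat;
import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;

public class FileOutputSketch {
  // Configures a JobConf to write Accumulo data files; values are placeholders.
  public static void configure(JobConf job) {
    // Required: the output directory, inherited from FileOutputFormat.
    AccumuloFileOutputFormat.setOutputPath(job, new Path("/tmp/bulk-output"));
    // One of "none", "gz", "lzo", or "snappy"; lzo/snappy may need extra libraries.
    AccumuloFileOutputFormat.setCompressionType(job, "gz");
    // Block sizes are in bytes: smaller data blocks can speed up seeks at the
    // cost of a larger index; file blocks are managed and replicated by HDFS.
    AccumuloFileOutputFormat.setDataBlockSize(job, 100 * 1024);
    AccumuloFileOutputFormat.setIndexBlockSize(job, 128 * 1024);
    AccumuloFileOutputFormat.setFileBlockSize(job, 64 * 1024 * 1024);
    AccumuloFileOutputFormat.setReplication(job, 3);
    // New in this commit: size of the LRU cache of already-validated column
    // visibilities, previously hardcoded to 1000 in getRecordWriter.
    ConfiguratorBase.setVisibilityCacheSize(job, 2000);

    job.setMapOutputKeyClass(Key.class);
    job.setMapOutputValueClass(Value.class);
    job.setOutputFormat(AccumuloFileOutputFormat.class);
    job.setNumReduceTasks(0);
  }
}

As the class javadoc requires, records handed to this output format must already be sorted by Key; the test satisfies this by running an IdentityMapper over AccumuloInputFormat with zero reduce tasks.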
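
----------------------------------------------------------------------

Likewise, a sketch of the two credential paths that ConfiguratorBase.setConnectorInfo supports, exercised through the public mapred AccumuloInputFormat wrappers of the 1.6 line; the principal, password, token file path, instance name, and ZooKeeper hosts are hypothetical placeholders.

import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.mapred.AccumuloInputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.hadoop.mapred.JobConf;

public class ConnectorInfoSketch {
  public static void configure(JobConf job) throws AccumuloSecurityException {
    // Inline token: serialized, Base64 encoded, and stored in the job
    // configuration. Per the WARNING in the javadoc above, this is not secure;
    // anything that can read the configuration can recover the token.
    AccumuloInputFormat.setConnectorInfo(job, "reader", new PasswordToken("secret"));

    // Alternative: ship a token file (created with "bin/accumulo create-token")
    // through the distributed cache instead of embedding the token:
    // AccumuloInputFormat.setConnectorInfo(job, "reader", "/user/reader/token-file");

    // Connector and instance info may each be set only once per job; a second
    // call throws IllegalStateException.
    AccumuloInputFormat.setZooKeeperInstance(job,
        ClientConfiguration.loadDefault().withInstance("myInstance").withZkHosts("zk1:2181,zk2:2181"));
    AccumuloInputFormat.setInputTableName(job, "mytable");
  }
}

The inline form keeps setup simple but only makes the token charset safe, not secret; the token file form keeps the credential out of the job configuration at the cost of creating and distributing the file.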