accumulo-commits mailing list archives

From ctubb...@apache.org
Subject svn commit: r1437818 - in /accumulo/trunk/core/src/main/java/org/apache/accumulo/core: client/mapred/ client/mapreduce/ client/mapreduce/lib/util/ client/mapreduce/util/ util/
Date Thu, 24 Jan 2013 01:09:46 GMT
Author: ctubbsii
Date: Thu Jan 24 01:09:46 2013
New Revision: 1437818

URL: http://svn.apache.org/viewvc?rev=1437818&view=rev
Log:
ACCUMULO-695 Support the old mapreduce API. This is a first cut. Some attempt was made to reuse code, but it may benefit from further consolidation. This will
need unit tests at some point.

Added:
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloRowInputFormat.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/
      - copied from r1437759, accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/util/
Removed:
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/util/
Modified:
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/OutputConfigurator.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/package-info.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/PeekingIterator.java

Added: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java?rev=1437818&view=auto
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java (added)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java Thu Jan 24 01:09:46 2013
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapred;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.accumulo.core.client.mapreduce.lib.util.FileOutputConfigurator;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.ArrayByteSequence;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVWriter;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.commons.collections.map.LRUMap;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.FileOutputFormat;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordWriter;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.util.Progressable;
+import org.apache.log4j.Logger;
+
+/**
+ * This class allows MapReduce jobs to write output in the Accumulo data file format.<br />
+ * Care should be taken to write only sorted data (sorted by {@link Key}), as this is an important requirement of Accumulo data files.
+ * 
+ * <p>
+ * The output path to be created must be specified via {@link AccumuloFileOutputFormat#setOutputPath(JobConf, Path)}. This is inherited from
+ * {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Other methods from {@link FileOutputFormat} are not supported and may be ignored or cause failures.
+ * Other Hadoop configuration options that affect the behavior of the underlying files may work when set directly in the Job's configuration, but they are not
+ * directly supported at this time.
+ */
+public class AccumuloFileOutputFormat extends FileOutputFormat<Key,Value> {
+  
+  private static final Class<?> CLASS = AccumuloFileOutputFormat.class;
+  protected static final Logger log = Logger.getLogger(CLASS);
+  
+  /**
+   * This helper method provides an AccumuloConfiguration object constructed from the Accumulo defaults and overridden by any Accumulo properties that have been
+   * stored in the Job's configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @since 1.5.0
+   */
+  protected static AccumuloConfiguration getAccumuloConfiguration(JobConf job) {
+    return FileOutputConfigurator.getAccumuloConfiguration(CLASS, job);
+  }
+  
+  /**
+   * Sets the compression type to use for data blocks. Specifying a compression type may require additional libraries to be available to your Job.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param compressionType
+   *          one of "none", "gz", "lzo", or "snappy"
+   * @since 1.5.0
+   */
+  public static void setCompressionType(JobConf job, String compressionType) {
+    FileOutputConfigurator.setCompressionType(CLASS, job, compressionType);
+  }
+  
+  /**
+   * Sets the size for data blocks within each file.<br />
+   * Data blocks are a span of key/value pairs stored in the file that are compressed and indexed as a group.
+   * 
+   * <p>
+   * Making this value smaller may increase seek performance, but at the cost of increasing the size of the indexes (which can also affect seek performance).
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param dataBlockSize
+   *          the block size, in bytes
+   * @since 1.5.0
+   */
+  public static void setDataBlockSize(JobConf job, long dataBlockSize) {
+    FileOutputConfigurator.setDataBlockSize(CLASS, job, dataBlockSize);
+  }
+  
+  /**
+   * Sets the size for file blocks in the file system; file blocks are managed, and replicated, by the underlying file system.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param fileBlockSize
+   *          the block size, in bytes
+   * @since 1.5.0
+   */
+  public static void setFileBlockSize(JobConf job, long fileBlockSize) {
+    FileOutputConfigurator.setFileBlockSize(CLASS, job, fileBlockSize);
+  }
+  
+  /**
+   * Sets the size for index blocks within each file; smaller blocks mean a deeper index hierarchy within the file, while larger blocks mean a shallower
+   * index hierarchy within the file. This can affect the performance of queries.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param indexBlockSize
+   *          the block size, in bytes
+   * @since 1.5.0
+   */
+  public static void setIndexBlockSize(JobConf job, long indexBlockSize) {
+    FileOutputConfigurator.setIndexBlockSize(CLASS, job, indexBlockSize);
+  }
+  
+  /**
+   * Sets the file system replication factor for the resulting file, overriding the file system default.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param replication
+   *          the number of replicas for produced files
+   * @since 1.5.0
+   */
+  public static void setReplication(JobConf job, int replication) {
+    FileOutputConfigurator.setReplication(CLASS, job, replication);
+  }
+  
+  @Override
+  public RecordWriter<Key,Value> getRecordWriter(FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException {
+    // get the path of the temporary output file
+    final Configuration conf = job;
+    final AccumuloConfiguration acuConf = getAccumuloConfiguration(job);
+    
+    final String extension = acuConf.get(Property.TABLE_FILE_TYPE);
+    final Path file = new Path(getWorkOutputPath(job), getUniqueName(job, "part") + "." + extension);
+    
+    final LRUMap validVisibilities = new LRUMap(1000);
+    
+    return new RecordWriter<Key,Value>() {
+      FileSKVWriter out = null;
+      
+      @Override
+      public void close(Reporter reporter) throws IOException {
+        if (out != null)
+          out.close();
+      }
+      
+      @Override
+      public void write(Key key, Value value) throws IOException {
+        
+        Boolean wasChecked = (Boolean) validVisibilities.get(key.getColumnVisibilityData());
+        if (wasChecked == null) {
+          byte[] cv = key.getColumnVisibilityData().toArray();
+          new ColumnVisibility(cv);
+          validVisibilities.put(new ArrayByteSequence(Arrays.copyOf(cv, cv.length)), Boolean.TRUE);
+        }
+        
+        if (out == null) {
+          out = FileOperations.getInstance().openWriter(file.toString(), file.getFileSystem(conf), conf, acuConf);
+          out.startDefaultLocalityGroup();
+        }
+        out.append(key, value);
+      }
+    };
+  }
+  
+}
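
For illustration, a minimal mapred driver sketch for the class above; the job name, output path, and block size below are hypothetical values, and the needed imports are noted in comments:

    // assumed imports: org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path,
    // org.apache.accumulo.core.data.Key, org.apache.accumulo.core.data.Value
    JobConf job = new JobConf();
    job.setJobName("bulk-file-example");                  // hypothetical job name
    job.setOutputFormat(AccumuloFileOutputFormat.class);
    job.setOutputKeyClass(Key.class);
    job.setOutputValueClass(Value.class);
    // required: the output directory, inherited from FileOutputFormat
    AccumuloFileOutputFormat.setOutputPath(job, new Path("/tmp/bulk-output"));
    // optional tuning; "gz" assumes the gzip codec is available to the job
    AccumuloFileOutputFormat.setCompressionType(job, "gz");
    AccumuloFileOutputFormat.setDataBlockSize(job, 256 * 1024L);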

Added: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java?rev=1437818&view=auto
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java (added)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java Thu Jan 24 01:09:46 2013
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapred;
+
+import java.io.IOException;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.format.DefaultFormatter;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+
+/**
+ * This class allows MapReduce jobs to use Accumulo as the source of data. This {@link InputFormat} provides keys and values of type {@link Key} and
+ * {@link Value} to the Map function.
+ * 
+ * The user must specify the following via static configurator methods:
+ * 
+ * <ul>
+ * <li>{@link AccumuloInputFormat#setConnectorInfo(JobConf, String, byte[])}
+ * <li>{@link AccumuloInputFormat#setInputTableName(JobConf, String)}
+ * <li>{@link AccumuloInputFormat#setScanAuthorizations(JobConf, Authorizations)}
+ * <li>{@link AccumuloInputFormat#setZooKeeperInstance(JobConf, String, String)} OR {@link AccumuloInputFormat#setMockInstance(JobConf, String)}
+ * </ul>
+ * 
+ * Other static methods are optional.
+ */
+public class AccumuloInputFormat extends InputFormatBase<Key,Value> {
+  
+  @Override
+  public RecordReader<Key,Value> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
+    log.setLevel(getLogLevel(job));
+    RecordReaderBase<Key,Value> recordReader = new RecordReaderBase<Key,Value>() {
+      
+      @Override
+      public boolean next(Key key, Value value) throws IOException {
+        if (scannerIterator.hasNext()) {
+          ++numKeysRead;
+          Entry<Key,Value> entry = scannerIterator.next();
+          key.set(currentKey = entry.getKey());
+          value.set(entry.getValue().get());
+          if (log.isTraceEnabled())
+            log.trace("Processing key/value pair: " + DefaultFormatter.formatEntry(entry, true));
+          return true;
+        }
+        return false;
+      }
+      
+      @Override
+      public Key createKey() {
+        return new Key();
+      }
+      
+      @Override
+      public Value createValue() {
+        return new Value();
+      }
+      
+    };
+    recordReader.initialize(split, job);
+    return recordReader;
+  }
+}
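
For illustration, a minimal configuration sketch for the class above, covering the required configurator calls; the credentials, instance, zookeepers, table, and authorization below are hypothetical values:

    // assumed imports: org.apache.hadoop.mapred.JobConf, org.apache.accumulo.core.security.Authorizations
    JobConf job = new JobConf();
    job.setInputFormat(AccumuloInputFormat.class);
    // required: connector and instance information
    AccumuloInputFormat.setConnectorInfo(job, "reader", "readerPassword".getBytes());
    AccumuloInputFormat.setZooKeeperInstance(job, "myInstance", "zkhost1:2181,zkhost2:2181");
    // required: input table and the authorizations to scan with
    AccumuloInputFormat.setInputTableName(job, "mytable");
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations("public"));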

Added: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java?rev=1437818&view=auto
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java (added)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java Thu Jan 24 01:09:46 2013
@@ -0,0 +1,491 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapred;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.MultiTableBatchWriter;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.mapreduce.lib.util.OutputConfigurator;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.core.data.ColumnUpdate;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.security.thrift.SecurityErrorCode;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hadoop.mapred.RecordWriter;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.util.Progressable;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+/**
+ * This class allows MapReduce jobs to use Accumulo as the sink for data. This {@link OutputFormat} accepts keys and values of type {@link Text} (for a table
+ * name) and {@link Mutation} from the Map and Reduce functions.
+ * 
+ * The user must specify the following via static configurator methods:
+ * 
+ * <ul>
+ * <li>{@link AccumuloOutputFormat#setConnectorInfo(JobConf, String, byte[])}
+ * <li>{@link AccumuloOutputFormat#setZooKeeperInstance(JobConf, String, String)} OR {@link AccumuloOutputFormat#setMockInstance(JobConf, String)}
+ * </ul>
+ * 
+ * Other static methods are optional.
+ */
+public class AccumuloOutputFormat implements OutputFormat<Text,Mutation> {
+  
+  private static final Class<?> CLASS = AccumuloOutputFormat.class;
+  protected static final Logger log = Logger.getLogger(CLASS);
+  
+  /**
+   * Sets the connector information needed to communicate with Accumulo in this job.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param user
+   *          a valid Accumulo user name (user must have Table.CREATE permission if {@link #setCreateTables(JobConf, boolean)} is set to true)
+   * @param passwd
+   *          the user's password
+   * @since 1.5.0
+   */
+  public static void setConnectorInfo(JobConf job, String user, byte[] passwd) {
+    OutputConfigurator.setConnectorInfo(CLASS, job, user, passwd);
+  }
+  
+  /**
+   * Determines if the connector has been configured.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return true if the connector has been configured, false otherwise
+   * @since 1.5.0
+   * @see #setConnectorInfo(JobConf, String, byte[])
+   */
+  protected static Boolean isConnectorInfoSet(JobConf job) {
+    return OutputConfigurator.isConnectorInfoSet(CLASS, job);
+  }
+  
+  /**
+   * Gets the user name from the configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return the user name
+   * @since 1.5.0
+   * @see #setConnectorInfo(JobConf, String, byte[])
+   */
+  protected static String getUsername(JobConf job) {
+    return OutputConfigurator.getUsername(CLASS, job);
+  }
+  
+  /**
+   * Gets the password from the configuration. WARNING: The password is stored in the Configuration and shared with all MapReduce tasks; it is Base64-encoded to
+   * provide a charset-safe conversion to a string, and is not intended to be secure.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return the decoded user password
+   * @since 1.5.0
+   * @see #setConnectorInfo(JobConf, String, byte[])
+   */
+  protected static byte[] getPassword(JobConf job) {
+    return OutputConfigurator.getPassword(CLASS, job);
+  }
+  
+  /**
+   * Configures a {@link ZooKeeperInstance} for this job.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param instanceName
+   *          the Accumulo instance name
+   * @param zooKeepers
+   *          a comma-separated list of zookeeper servers
+   * @since 1.5.0
+   */
+  public static void setZooKeeperInstance(JobConf job, String instanceName, String zooKeepers) {
+    OutputConfigurator.setZooKeeperInstance(CLASS, job, instanceName, zooKeepers);
+  }
+  
+  /**
+   * Configures a {@link MockInstance} for this job.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param instanceName
+   *          the Accumulo instance name
+   * @since 1.5.0
+   */
+  public static void setMockInstance(JobConf job, String instanceName) {
+    OutputConfigurator.setMockInstance(CLASS, job, instanceName);
+  }
+  
+  /**
+   * Initializes an Accumulo {@link Instance} based on the configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return an Accumulo instance
+   * @since 1.5.0
+   * @see #setZooKeeperInstance(JobConf, String, String)
+   * @see #setMockInstance(JobConf, String)
+   */
+  protected static Instance getInstance(JobConf job) {
+    return OutputConfigurator.getInstance(CLASS, job);
+  }
+  
+  /**
+   * Sets the log level for this job.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param level
+   *          the logging level
+   * @since 1.5.0
+   */
+  public static void setLogLevel(JobConf job, Level level) {
+    OutputConfigurator.setLogLevel(CLASS, job, level);
+  }
+  
+  /**
+   * Gets the log level from this configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return the log level
+   * @since 1.5.0
+   * @see #setLogLevel(JobConf, Level)
+   */
+  protected static Level getLogLevel(JobConf job) {
+    return OutputConfigurator.getLogLevel(CLASS, job);
+  }
+  
+  /**
+   * Sets the default table name to use if one emits a null in place of a table name for a given mutation. Table names can only be alpha-numeric and
+   * underscores.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param tableName
+   *          the table to use when the table name is null in the write call
+   * @since 1.5.0
+   */
+  public static void setDefaultTableName(JobConf job, String tableName) {
+    OutputConfigurator.setDefaultTableName(CLASS, job, tableName);
+  }
+  
+  /**
+   * Gets the default table name from the configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return the default table name
+   * @since 1.5.0
+   * @see #setDefaultTableName(JobConf, String)
+   */
+  protected static String getDefaultTableName(JobConf job) {
+    return OutputConfigurator.getDefaultTableName(CLASS, job);
+  }
+  
+  /**
+   * Sets the configuration for the job's {@link BatchWriter} instances. If not set, a new {@link BatchWriterConfig}, with sensible built-in defaults, is
+   * used. Setting the configuration multiple times overwrites any previous configuration.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param bwConfig
+   *          the configuration for the {@link BatchWriter}
+   * @since 1.5.0
+   */
+  public static void setBatchWriterOptions(JobConf job, BatchWriterConfig bwConfig) {
+    OutputConfigurator.setBatchWriterOptions(CLASS, job, bwConfig);
+  }
+  
+  /**
+   * Gets the {@link BatchWriterConfig} settings.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return the configuration object
+   * @since 1.5.0
+   * @see #setBatchWriterOptions(JobConf, BatchWriterConfig)
+   */
+  protected static BatchWriterConfig getBatchWriterOptions(JobConf job) {
+    return OutputConfigurator.getBatchWriterOptions(CLASS, job);
+  }
+  
+  /**
+   * Sets the directive to create new tables, as necessary. Table names can only be alpha-numeric and underscores.
+   * 
+   * <p>
+   * By default, this feature is <b>disabled</b>.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param enableFeature
+   *          the feature is enabled if true, disabled otherwise
+   * @since 1.5.0
+   */
+  public static void setCreateTables(JobConf job, boolean enableFeature) {
+    OutputConfigurator.setCreateTables(CLASS, job, enableFeature);
+  }
+  
+  /**
+   * Determines whether tables are permitted to be created as needed.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return true if the feature is enabled, false otherwise
+   * @since 1.5.0
+   * @see #setCreateTables(JobConf, boolean)
+   */
+  protected static Boolean canCreateTables(JobConf job) {
+    return OutputConfigurator.canCreateTables(CLASS, job);
+  }
+  
+  /**
+   * Sets the directive to use simulation mode for this job. In simulation mode, no output is produced. This is useful for testing.
+   * 
+   * <p>
+   * By default, this feature is <b>disabled</b>.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param enableFeature
+   *          the feature is enabled if true, disabled otherwise
+   * @since 1.5.0
+   */
+  public static void setSimulationMode(JobConf job, boolean enableFeature) {
+    OutputConfigurator.setSimulationMode(CLASS, job, enableFeature);
+  }
+  
+  /**
+   * Determines whether simulation mode is enabled for this job.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return true if the feature is enabled, false otherwise
+   * @since 1.5.0
+   * @see #setSimulationMode(JobConf, boolean)
+   */
+  protected static Boolean getSimulationMode(JobConf job) {
+    return OutputConfigurator.getSimulationMode(CLASS, job);
+  }
+  
+  /**
+   * A base class to be used to create {@link RecordWriter} instances that write to Accumulo.
+   */
+  protected static class AccumuloRecordWriter implements RecordWriter<Text,Mutation> {
+    private MultiTableBatchWriter mtbw = null;
+    private HashMap<Text,BatchWriter> bws = null;
+    private Text defaultTableName = null;
+    
+    private boolean simulate = false;
+    private boolean createTables = false;
+    
+    private long mutCount = 0;
+    private long valCount = 0;
+    
+    private Connector conn;
+    
+    protected AccumuloRecordWriter(JobConf job) throws AccumuloException, AccumuloSecurityException, IOException {
+      Level l = getLogLevel(job);
+      if (l != null)
+        log.setLevel(getLogLevel(job));
+      this.simulate = getSimulationMode(job);
+      this.createTables = canCreateTables(job);
+      
+      if (simulate)
+        log.info("Simulating output only. No writes to tables will occur");
+      
+      this.bws = new HashMap<Text,BatchWriter>();
+      
+      String tname = getDefaultTableName(job);
+      this.defaultTableName = (tname == null) ? null : new Text(tname);
+      
+      if (!simulate) {
+        this.conn = getInstance(job).getConnector(getUsername(job), getPassword(job));
+        mtbw = conn.createMultiTableBatchWriter(getBatchWriterOptions(job));
+      }
+    }
+    
+    /**
+     * Push a mutation into a table. If the table is null, the default table name will be used. If table creation is enabled, the table will be created if it
+     * does not exist. The table name must only contain alphanumerics and underscores.
+     */
+    @Override
+    public void write(Text table, Mutation mutation) throws IOException {
+      if (table == null || table.toString().isEmpty())
+        table = this.defaultTableName;
+      
+      if (!simulate && table == null)
+        throw new IOException("No table or default table specified. Try simulation mode next time");
+      
+      ++mutCount;
+      valCount += mutation.size();
+      printMutation(table, mutation);
+      
+      if (simulate)
+        return;
+      
+      if (!bws.containsKey(table))
+        try {
+          addTable(table);
+        } catch (Exception e) {
+          e.printStackTrace();
+          throw new IOException(e);
+        }
+      
+      try {
+        bws.get(table).addMutation(mutation);
+      } catch (MutationsRejectedException e) {
+        throw new IOException(e);
+      }
+    }
+    
+    public void addTable(Text tableName) throws AccumuloException, AccumuloSecurityException {
+      if (simulate) {
+        log.info("Simulating adding table: " + tableName);
+        return;
+      }
+      
+      log.debug("Adding table: " + tableName);
+      BatchWriter bw = null;
+      String table = tableName.toString();
+      
+      if (createTables && !conn.tableOperations().exists(table)) {
+        try {
+          conn.tableOperations().create(table);
+        } catch (AccumuloSecurityException e) {
+          log.error("Accumulo security violation creating " + table, e);
+          throw e;
+        } catch (TableExistsException e) {
+          // Shouldn't happen
+        }
+      }
+      
+      try {
+        bw = mtbw.getBatchWriter(table);
+      } catch (TableNotFoundException e) {
+        log.error("Accumulo table " + table + " doesn't exist and cannot be created.", e);
+        throw new AccumuloException(e);
+      } catch (AccumuloException e) {
+        throw e;
+      } catch (AccumuloSecurityException e) {
+        throw e;
+      }
+      
+      if (bw != null)
+        bws.put(tableName, bw);
+    }
+    
+    private int printMutation(Text table, Mutation m) {
+      if (log.isTraceEnabled()) {
+        log.trace(String.format("Table %s row key: %s", table, hexDump(m.getRow())));
+        for (ColumnUpdate cu : m.getUpdates()) {
+          log.trace(String.format("Table %s column: %s:%s", table, hexDump(cu.getColumnFamily()), hexDump(cu.getColumnQualifier())));
+          log.trace(String.format("Table %s security: %s", table, new ColumnVisibility(cu.getColumnVisibility()).toString()));
+          log.trace(String.format("Table %s value: %s", table, hexDump(cu.getValue())));
+        }
+      }
+      return m.getUpdates().size();
+    }
+    
+    private String hexDump(byte[] ba) {
+      StringBuilder sb = new StringBuilder();
+      for (byte b : ba) {
+        if ((b > 0x20) && (b < 0x7e))
+          sb.append((char) b);
+        else
+          sb.append(String.format("x%02x", b));
+      }
+      return sb.toString();
+    }
+    
+    @Override
+    public void close(Reporter reporter) throws IOException {
+      log.debug("mutations written: " + mutCount + ", values written: " + valCount);
+      if (simulate)
+        return;
+      
+      try {
+        mtbw.close();
+      } catch (MutationsRejectedException e) {
+        if (e.getAuthorizationFailures().size() > 0) {
+          HashMap<String,Set<SecurityErrorCode>> tables = new HashMap<String,Set<SecurityErrorCode>>();
+          for (Entry<KeyExtent,Set<SecurityErrorCode>> ke : e.getAuthorizationFailures().entrySet()) {
+            Set<SecurityErrorCode> secCodes = tables.get(ke.getKey().getTableId().toString());
+            if (secCodes == null) {
+              secCodes = new HashSet<SecurityErrorCode>();
+              tables.put(ke.getKey().getTableId().toString(), secCodes);
+            }
+            secCodes.addAll(ke.getValue());
+          }
+          
+          log.error("Not authorized to write to tables : " + tables);
+        }
+        
+        if (e.getConstraintViolationSummaries().size() > 0) {
+          log.error("Constraint violations : " + e.getConstraintViolationSummaries().size());
+        }
+      }
+    }
+  }
+  
+  @Override
+  public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException {
+    if (!isConnectorInfoSet(job))
+      throw new IOException("Connector info has not been set.");
+    try {
+      // if the instance isn't configured, it will complain here
+      Connector c = getInstance(job).getConnector(getUsername(job), getPassword(job));
+      if (!c.securityOperations().authenticateUser(getUsername(job), getPassword(job)))
+        throw new IOException("Unable to authenticate user");
+    } catch (AccumuloException e) {
+      throw new IOException(e);
+    } catch (AccumuloSecurityException e) {
+      throw new IOException(e);
+    }
+  }
+  
+  @Override
+  public RecordWriter<Text,Mutation> getRecordWriter(FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException {
+    try {
+      return new AccumuloRecordWriter(job);
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+  }
+  
+}
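
For illustration, a minimal configuration sketch for the class above; the credentials, instance, and table names below are hypothetical values, and a mapper or reducer would then emit (table name, Mutation) pairs to this output format:

    // assumed imports: org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.Text,
    // org.apache.accumulo.core.client.BatchWriterConfig, org.apache.accumulo.core.data.Mutation
    JobConf job = new JobConf();
    job.setOutputFormat(AccumuloOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Mutation.class);
    // required: connector and instance information
    AccumuloOutputFormat.setConnectorInfo(job, "writer", "writerPassword".getBytes());
    AccumuloOutputFormat.setZooKeeperInstance(job, "myInstance", "zkhost1:2181,zkhost2:2181");
    // optional: fall back to this table for mutations emitted with a null table name,
    // and allow missing tables to be created
    AccumuloOutputFormat.setDefaultTableName(job, "output_table");
    AccumuloOutputFormat.setCreateTables(job, true);
    AccumuloOutputFormat.setBatchWriterOptions(job, new BatchWriterConfig());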

Added: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloRowInputFormat.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloRowInputFormat.java?rev=1437818&view=auto
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloRowInputFormat.java (added)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloRowInputFormat.java Thu Jan 24 01:09:46 2013
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapred;
+
+import java.io.IOException;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.RowIterator;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.PeekingIterator;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+
+/**
+ * This class allows MapReduce jobs to use Accumulo as the source of data. This {@link InputFormat} provides row names as {@link Text} as keys, and a
+ * corresponding {@link PeekingIterator} as a value, which in turn makes the {@link Key}/{@link Value} pairs for that row available to the Map function.
+ * 
+ * The user must specify the following via static configurator methods:
+ * 
+ * <ul>
+ * <li>{@link AccumuloRowInputFormat#setConnectorInfo(JobConf, String, byte[])}
+ * <li>{@link AccumuloRowInputFormat#setInputTableName(JobConf, String)}
+ * <li>{@link AccumuloRowInputFormat#setScanAuthorizations(JobConf, Authorizations)}
+ * <li>{@link AccumuloRowInputFormat#setZooKeeperInstance(JobConf, String, String)} OR {@link AccumuloRowInputFormat#setMockInstance(JobConf, String)}
+ * </ul>
+ * 
+ * Other static methods are optional.
+ */
+public class AccumuloRowInputFormat extends InputFormatBase<Text,PeekingIterator<Entry<Key,Value>>> {
+  @Override
+  public RecordReader<Text,PeekingIterator<Entry<Key,Value>>> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
+    log.setLevel(getLogLevel(job));
+    RecordReaderBase<Text,PeekingIterator<Entry<Key,Value>>> recordReader = new RecordReaderBase<Text,PeekingIterator<Entry<Key,Value>>>() {
+      RowIterator rowIterator;
+      
+      @Override
+      public void initialize(InputSplit inSplit, JobConf job) throws IOException {
+        super.initialize(inSplit, job);
+        rowIterator = new RowIterator(scannerIterator);
+      }
+      
+      @Override
+      public boolean next(Text key, PeekingIterator<Entry<Key,Value>> value) throws IOException {
+        if (!rowIterator.hasNext())
+          return false;
+        value.initialize(rowIterator.next());
+        numKeysRead = rowIterator.getKVCount();
+        key.set((currentKey = value.peek().getKey()).getRow());
+        return true;
+      }
+      
+      @Override
+      public Text createKey() {
+        return new Text();
+      }
+      
+      @Override
+      public PeekingIterator<Entry<Key,Value>> createValue() {
+        return new PeekingIterator<Entry<Key,Value>>();
+      }
+    };
+    recordReader.initialize(split, job);
+    return recordReader;
+  }
+}
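
For illustration, a sketch of a mapper consuming rows from the class above via the old mapred Mapper interface; the class name, output types, and counting logic are hypothetical:

    // assumed imports: java.io.IOException, java.util.Map.Entry, org.apache.accumulo.core.data.Key,
    // org.apache.accumulo.core.data.Value, org.apache.accumulo.core.util.PeekingIterator,
    // org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text, org.apache.hadoop.mapred.*
    public class RowColumnCountMapper extends MapReduceBase implements Mapper<Text,PeekingIterator<Entry<Key,Value>>,Text,LongWritable> {
      @Override
      public void map(Text row, PeekingIterator<Entry<Key,Value>> entries, OutputCollector<Text,LongWritable> output, Reporter reporter) throws IOException {
        long count = 0;
        while (entries.hasNext()) {
          entries.next(); // each entry is one Key/Value pair within this row
          count++;
        }
        output.collect(row, new LongWritable(count)); // emit the number of columns seen for this row
      }
    }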

Added: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java?rev=1437818&view=auto
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java (added)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java Thu Jan 24 01:09:46 2013
@@ -0,0 +1,823 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapred;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.ClientSideIteratorScanner;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.IsolatedScanner;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.RowIterator;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableDeletedException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.TableOfflineException;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.impl.OfflineScanner;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.TabletLocator;
+import org.apache.accumulo.core.client.mapreduce.lib.util.InputConfigurator;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.thrift.AuthInfo;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+/**
+ * This abstract {@link InputFormat} class allows MapReduce jobs to use Accumulo as the source of K,V pairs.
+ * <p>
+ * Subclasses must implement a {@link #getRecordReader(InputSplit, JobConf, Reporter)} to provide a {@link RecordReader} for K,V.
+ * <p>
+ * A static base class, RecordReaderBase, is provided to retrieve Accumulo {@link Key}/{@link Value} pairs, but one must implement its
+ * {@link RecordReaderBase#next(Object, Object)} to transform them to the desired generic types K,V.
+ * <p>
+ * See {@link AccumuloInputFormat} for an example implementation.
+ */
+public abstract class InputFormatBase<K,V> implements InputFormat<K,V> {
+  
+  private static final Class<?> CLASS = AccumuloInputFormat.class;
+  protected static final Logger log = Logger.getLogger(CLASS);
+  
+  /**
+   * Sets the connector information needed to communicate with Accumulo in this job.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param user
+   *          a valid Accumulo user name
+   * @param passwd
+   *          the user's password
+   * @since 1.5.0
+   */
+  public static void setConnectorInfo(JobConf job, String user, byte[] passwd) {
+    InputConfigurator.setConnectorInfo(CLASS, job, user, passwd);
+  }
+  
+  /**
+   * Determines if the connector has been configured.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return true if the connector has been configured, false otherwise
+   * @since 1.5.0
+   * @see #setConnectorInfo(JobConf, String, byte[])
+   */
+  protected static Boolean isConnectorInfoSet(JobConf job) {
+    return InputConfigurator.isConnectorInfoSet(CLASS, job);
+  }
+  
+  /**
+   * Gets the user name from the configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return the user name
+   * @since 1.5.0
+   * @see #setConnectorInfo(JobConf, String, byte[])
+   */
+  protected static String getUsername(JobConf job) {
+    return InputConfigurator.getUsername(CLASS, job);
+  }
+  
+  /**
+   * Gets the password from the configuration. WARNING: The password is stored in the Configuration and shared with all MapReduce tasks; it is Base64-encoded to
+   * provide a charset-safe conversion to a string, and is not intended to be secure.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return the decoded user password
+   * @since 1.5.0
+   * @see #setConnectorInfo(JobConf, String, byte[])
+   */
+  protected static byte[] getPassword(JobConf job) {
+    return InputConfigurator.getPassword(CLASS, job);
+  }
+  
+  /**
+   * Configures a {@link ZooKeeperInstance} for this job.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param instanceName
+   *          the Accumulo instance name
+   * @param zooKeepers
+   *          a comma-separated list of zookeeper servers
+   * @since 1.5.0
+   */
+  public static void setZooKeeperInstance(JobConf job, String instanceName, String zooKeepers) {
+    InputConfigurator.setZooKeeperInstance(CLASS, job, instanceName, zooKeepers);
+  }
+  
+  /**
+   * Configures a {@link MockInstance} for this job.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param instanceName
+   *          the Accumulo instance name
+   * @since 1.5.0
+   */
+  public static void setMockInstance(JobConf job, String instanceName) {
+    InputConfigurator.setMockInstance(CLASS, job, instanceName);
+  }
+  
+  /**
+   * Initializes an Accumulo {@link Instance} based on the configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return an Accumulo instance
+   * @since 1.5.0
+   * @see #setZooKeeperInstance(JobConf, String, String)
+   * @see #setMockInstance(JobConf, String)
+   */
+  protected static Instance getInstance(JobConf job) {
+    return InputConfigurator.getInstance(CLASS, job);
+  }
+  
+  /**
+   * Sets the log level for this job.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param level
+   *          the logging level
+   * @since 1.5.0
+   */
+  public static void setLogLevel(JobConf job, Level level) {
+    InputConfigurator.setLogLevel(CLASS, job, level);
+  }
+  
+  /**
+   * Gets the log level from this configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return the log level
+   * @since 1.5.0
+   * @see #setLogLevel(JobConf, Level)
+   */
+  protected static Level getLogLevel(JobConf job) {
+    return InputConfigurator.getLogLevel(CLASS, job);
+  }
+  
+  /**
+   * Sets the name of the input table, over which this job will scan.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param tableName
+   *          the table from which this job will read
+   * @since 1.5.0
+   */
+  public static void setInputTableName(JobConf job, String tableName) {
+    InputConfigurator.setInputTableName(CLASS, job, tableName);
+  }
+  
+  /**
+   * Gets the table name from the configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return the table name
+   * @since 1.5.0
+   * @see #setInputTableName(JobConf, String)
+   */
+  protected static String getInputTableName(JobConf job) {
+    return InputConfigurator.getInputTableName(CLASS, job);
+  }
+  
+  /**
+   * Sets the {@link Authorizations} used to scan. Must be a subset of the user's authorizations. Defaults to the empty set.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param auths
+   *          the user's authorizations
+   * @since 1.5.0
+   */
+  public static void setScanAuthorizations(JobConf job, Authorizations auths) {
+    InputConfigurator.setScanAuthorizations(CLASS, job, auths);
+  }
+  
+  /**
+   * Gets the authorizations to set for the scans from the configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return the Accumulo scan authorizations
+   * @since 1.5.0
+   * @see #setScanAuthorizations(JobConf, Authorizations)
+   */
+  protected static Authorizations getScanAuthorizations(JobConf job) {
+    return InputConfigurator.getScanAuthorizations(CLASS, job);
+  }
+  
+  /**
+   * Sets the input ranges to scan for this job. If not set, the entire table will be scanned.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param ranges
+   *          the ranges that will be mapped over
+   * @since 1.5.0
+   */
+  public static void setRanges(JobConf job, Collection<Range> ranges) {
+    InputConfigurator.setRanges(CLASS, job, ranges);
+  }
+  
+  /**
+   * Gets the ranges to scan over from a job.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return the ranges
+   * @throws IOException
+   *           if the ranges have been encoded improperly
+   * @since 1.5.0
+   * @see #setRanges(JobConf, Collection)
+   */
+  protected static List<Range> getRanges(JobConf job) throws IOException {
+    return InputConfigurator.getRanges(CLASS, job);
+  }
+  
+  /**
+   * Restricts the columns that will be mapped over for this job.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param columnFamilyColumnQualifierPairs
+   *          a pair of {@link Text} objects corresponding to column family and column qualifier. If the column qualifier is null, the entire column family is
+   *          selected. An empty set is the default and is equivalent to scanning all columns.
+   * @since 1.5.0
+   */
+  public static void fetchColumns(JobConf job, Collection<Pair<Text,Text>> columnFamilyColumnQualifierPairs) {
+    InputConfigurator.fetchColumns(CLASS, job, columnFamilyColumnQualifierPairs);
+  }
+  
+  /**
+   * Gets the columns to be mapped over from this job.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return a set of columns
+   * @since 1.5.0
+   * @see #fetchColumns(JobConf, Collection)
+   */
+  protected static Set<Pair<Text,Text>> getFetchedColumns(JobConf job) {
+    return InputConfigurator.getFetchedColumns(CLASS, job);
+  }
+  
+  /**
+   * Encode an iterator on the input for this job.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param cfg
+   *          the configuration of the iterator
+   * @since 1.5.0
+   */
+  public static void addIterator(JobConf job, IteratorSetting cfg) {
+    InputConfigurator.addIterator(CLASS, job, cfg);
+  }
+  
+  /**
+   * Gets a list of the iterator settings (for iterators to apply to a scanner) from this configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return a list of iterators
+   * @since 1.5.0
+   * @see #addIterator(JobConf, IteratorSetting)
+   */
+  protected static List<IteratorSetting> getIterators(JobConf job) {
+    return InputConfigurator.getIterators(CLASS, job);
+  }
+  
+  /**
+   * Controls the automatic adjustment of ranges for this job. This feature merges overlapping ranges, then splits them to align with tablet boundaries.
+   * Disabling this feature will cause exactly one Map task to be created for each specified range.
+   * 
+   * <p>
+   * By default, this feature is <b>enabled</b>.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param enableFeature
+   *          the feature is enabled if true, disabled otherwise
+   * @see #setRanges(JobConf, Collection)
+   * @since 1.5.0
+   */
+  public static void setAutoAdjustRanges(JobConf job, boolean enableFeature) {
+    InputConfigurator.setAutoAdjustRanges(CLASS, job, enableFeature);
+  }
+  
+  /**
+   * Determines whether a configuration has auto-adjust ranges enabled.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return false if the feature is disabled, true otherwise
+   * @since 1.5.0
+   * @see #setAutoAdjustRanges(JobConf, boolean)
+   */
+  protected static boolean getAutoAdjustRanges(JobConf job) {
+    return InputConfigurator.getAutoAdjustRanges(CLASS, job);
+  }
+  
+  /**
+   * Controls the use of the {@link IsolatedScanner} in this job.
+   * 
+   * <p>
+   * By default, this feature is <b>disabled</b>.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param enableFeature
+   *          the feature is enabled if true, disabled otherwise
+   * @since 1.5.0
+   */
+  public static void setScanIsolation(JobConf job, boolean enableFeature) {
+    InputConfigurator.setScanIsolation(CLASS, job, enableFeature);
+  }
+  
+  /**
+   * Determines whether a configuration has isolation enabled.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return true if the feature is enabled, false otherwise
+   * @since 1.5.0
+   * @see #setScanIsolation(JobConf, boolean)
+   */
+  protected static boolean isIsolated(JobConf job) {
+    return InputConfigurator.isIsolated(CLASS, job);
+  }
+  
+  /**
+   * Controls the use of the {@link ClientSideIteratorScanner} in this job. Enabling this feature will cause the iterator stack to be constructed within the Map
+   * task, rather than within the Accumulo TServer. To use this feature, all classes needed for those iterators must be available on the classpath for the task.
+   * 
+   * <p>
+   * By default, this feature is <b>disabled</b>.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param enableFeature
+   *          the feature is enabled if true, disabled otherwise
+   * @since 1.5.0
+   */
+  public static void setLocalIterators(JobConf job, boolean enableFeature) {
+    InputConfigurator.setLocalIterators(CLASS, job, enableFeature);
+  }
+  
+  /**
+   * Determines whether a configuration uses local iterators.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return true if the feature is enabled, false otherwise
+   * @since 1.5.0
+   * @see #setLocalIterators(JobConf, boolean)
+   */
+  protected static boolean usesLocalIterators(JobConf job) {
+    return InputConfigurator.usesLocalIterators(CLASS, job);
+  }
+  
+  /**
+   * <p>
+   * Enable reading offline tables. By default, this feature is disabled and only online tables are scanned. This will make the map reduce job directly read the
+   * table's files. If the table is not offline, then the job will fail. If the table comes online during the map reduce job, it is likely that the job will
+   * fail.
+   * 
+   * <p>
+   * To use this option, the map reduce user will need access to read the Accumulo directory in HDFS.
+   * 
+   * <p>
+   * Reading the offline table will create the scan time iterator stack in the map process. So any iterators that are configured for the table will need to be
+   * on the mapper's classpath. The accumulo-site.xml may need to be on the mapper's classpath if HDFS or the Accumulo directory in HDFS are non-standard.
+   * 
+   * <p>
+   * One way to use this feature is to clone a table, take the clone offline, and use the clone as the input table for a map reduce job. If you plan to map
+   * reduce over the data many times, it may be better to compact the table, clone it, take it offline, and use the clone for all map reduce jobs. The
+   * reason to do this is that compaction will reduce each tablet in the table to one file, and it is faster to read from one file.
+   * 
+   * <p>
+   * There are two possible advantages to reading a table's files directly out of HDFS. First, you may see better read performance. Second, it will support
+   * speculative execution better. When reading an online table, speculative execution can put more load on an already slow tablet server.
+   * 
+   * <p>
+   * By default, this feature is <b>disabled</b>.
+   * 
+   * @param job
+   *          the Hadoop job instance to be configured
+   * @param enableFeature
+   *          the feature is enabled if true, disabled otherwise
+   * @since 1.5.0
+   */
+  public static void setOfflineTableScan(JobConf job, boolean enableFeature) {
+    InputConfigurator.setOfflineTableScan(CLASS, job, enableFeature);
+  }
+  
+  /**
+   * Determines whether a configuration has the offline table scan feature enabled.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return true if the feature is enabled, false otherwise
+   * @since 1.5.0
+   * @see #setOfflineTableScan(JobConf, boolean)
+   */
+  protected static boolean isOfflineScan(JobConf job) {
+    return InputConfigurator.isOfflineScan(CLASS, job);
+  }
+  
+  /**
+   * Initializes an Accumulo {@link TabletLocator} based on the configuration.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @return an Accumulo tablet locator
+   * @throws TableNotFoundException
+   *           if the table name set on the configuration doesn't exist
+   * @since 1.5.0
+   */
+  protected static TabletLocator getTabletLocator(JobConf job) throws TableNotFoundException {
+    return InputConfigurator.getTabletLocator(CLASS, job);
+  }
+  
+  // InputFormat doesn't have the equivalent of OutputFormat's checkOutputSpecs(JobContext job)
+  /**
+   * Check whether a configuration is fully configured to be used with an Accumulo {@link org.apache.hadoop.mapreduce.InputFormat}.
+   * 
+   * @param job
+   *          the Hadoop context for the configured job
+   * @throws IOException
+   *           if the context is improperly configured
+   * @since 1.5.0
+   */
+  protected static void validateOptions(JobConf job) throws IOException {
+    InputConfigurator.validateOptions(CLASS, job);
+  }
+  
+  /**
+   * An abstract base class to be used to create {@link RecordReader} instances that convert from Accumulo {@link Key}/{@link Value} pairs to the user's K/V
+   * types.
+   * 
+   * Subclasses must implement {@link #next(Object, Object)} to update key and value, and also to update the following variables:
+   * <ul>
+   * <li>Key {@link #currentKey} (used for progress reporting)</li>
+   * <li>long {@link #numKeysRead} (used for progress reporting)</li>
+   * </ul>
+   */
+  protected abstract static class RecordReaderBase<K,V> implements RecordReader<K,V> {
+    protected long numKeysRead;
+    protected Iterator<Entry<Key,Value>> scannerIterator;
+    protected RangeInputSplit split;
+    
+    /**
+     * Apply the configured iterators from the configuration to the scanner.
+     * 
+     * @param job
+     *          the Hadoop context for the configured job
+     * @param scanner
+     *          the scanner to configure
+     */
+    protected void setupIterators(JobConf job, Scanner scanner) {
+      List<IteratorSetting> iterators = getIterators(job);
+      for (IteratorSetting iterator : iterators) {
+        scanner.addScanIterator(iterator);
+      }
+    }
+    
+    /**
+     * Initialize a scanner over the given input split using the given job configuration.
+     */
+    public void initialize(InputSplit inSplit, JobConf job) throws IOException {
+      Scanner scanner;
+      split = (RangeInputSplit) inSplit;
+      log.debug("Initializing input split: " + split.getRange());
+      Instance instance = getInstance(job);
+      String user = getUsername(job);
+      byte[] password = getPassword(job);
+      Authorizations authorizations = getScanAuthorizations(job);
+      
+      try {
+        log.debug("Creating connector with user: " + user);
+        Connector conn = instance.getConnector(user, password);
+        log.debug("Creating scanner for table: " + getInputTableName(job));
+        log.debug("Authorizations are: " + authorizations);
+        if (isOfflineScan(job)) {
+          scanner = new OfflineScanner(instance, new AuthInfo(user, ByteBuffer.wrap(password), instance.getInstanceID()), Tables.getTableId(instance,
+              getInputTableName(job)), authorizations);
+        } else {
+          scanner = conn.createScanner(getInputTableName(job), authorizations);
+        }
+        if (isIsolated(job)) {
+          log.info("Creating isolated scanner");
+          scanner = new IsolatedScanner(scanner);
+        }
+        if (usesLocalIterators(job)) {
+          log.info("Using local iterators");
+          scanner = new ClientSideIteratorScanner(scanner);
+        }
+        setupIterators(job, scanner);
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+      
+      // setup a scanner within the bounds of this split
+      for (Pair<Text,Text> c : getFetchedColumns(job)) {
+        if (c.getSecond() != null) {
+          log.debug("Fetching column " + c.getFirst() + ":" + c.getSecond());
+          scanner.fetchColumn(c.getFirst(), c.getSecond());
+        } else {
+          log.debug("Fetching column family " + c.getFirst());
+          scanner.fetchColumnFamily(c.getFirst());
+        }
+      }
+      
+      scanner.setRange(split.getRange());
+      
+      numKeysRead = 0;
+      
+      // do this last after setting all scanner options
+      scannerIterator = scanner.iterator();
+    }
+    
+    @Override
+    public void close() {}
+    
+    @Override
+    public long getPos() throws IOException {
+      return numKeysRead;
+    }
+    
+    @Override
+    public float getProgress() throws IOException {
+      if (numKeysRead > 0 && currentKey == null)
+        return 1.0f;
+      return split.getProgress(currentKey);
+    }
+    
+    protected Key currentKey = null;
+    
+  }
+  
+  Map<String,Map<KeyExtent,List<Range>>> binOfflineTable(JobConf job, String tableName, List<Range> ranges) throws TableNotFoundException, AccumuloException,
+      AccumuloSecurityException {
+    
+    Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<String,Map<KeyExtent,List<Range>>>();
+    
+    Instance instance = getInstance(job);
+    Connector conn = instance.getConnector(getUsername(job), getPassword(job));
+    String tableId = Tables.getTableId(instance, tableName);
+    
+    if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
+      Tables.clearCache(instance);
+      if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
+        throw new AccumuloException("Table is online " + tableName + "(" + tableId + ") cannot scan table in offline mode ");
+      }
+    }
+    
+    for (Range range : ranges) {
+      Text startRow;
+      
+      if (range.getStartKey() != null)
+        startRow = range.getStartKey().getRow();
+      else
+        startRow = new Text();
+      
+      Range metadataRange = new Range(new KeyExtent(new Text(tableId), startRow, null).getMetadataEntry(), true, null, false);
+      Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+      Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
+      scanner.fetchColumnFamily(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY);
+      scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
+      scanner.fetchColumnFamily(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY);
+      scanner.setRange(metadataRange);
+      
+      RowIterator rowIter = new RowIterator(scanner);
+      
+      // TODO check that extents match prev extent
+      
+      KeyExtent lastExtent = null;
+      
+      while (rowIter.hasNext()) {
+        Iterator<Entry<Key,Value>> row = rowIter.next();
+        String last = "";
+        KeyExtent extent = null;
+        String location = null;
+        
+        while (row.hasNext()) {
+          Entry<Key,Value> entry = row.next();
+          Key key = entry.getKey();
+          
+          if (key.getColumnFamily().equals(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY)) {
+            last = entry.getValue().toString();
+          }
+          
+          if (key.getColumnFamily().equals(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY)
+              || key.getColumnFamily().equals(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY)) {
+            location = entry.getValue().toString();
+          }
+          
+          if (Constants.METADATA_PREV_ROW_COLUMN.hasColumns(key)) {
+            extent = new KeyExtent(key.getRow(), entry.getValue());
+          }
+          
+        }
+        
+        if (location != null)
+          return null;
+        
+        if (!extent.getTableId().toString().equals(tableId)) {
+          throw new AccumuloException("Saw unexpected table Id " + tableId + " " + extent);
+        }
+        
+        if (lastExtent != null && !extent.isPreviousExtent(lastExtent)) {
+          throw new AccumuloException(" " + lastExtent + " is not previous extent " + extent);
+        }
+        
+        Map<KeyExtent,List<Range>> tabletRanges = binnedRanges.get(last);
+        if (tabletRanges == null) {
+          tabletRanges = new HashMap<KeyExtent,List<Range>>();
+          binnedRanges.put(last, tabletRanges);
+        }
+        
+        List<Range> rangeList = tabletRanges.get(extent);
+        if (rangeList == null) {
+          rangeList = new ArrayList<Range>();
+          tabletRanges.put(extent, rangeList);
+        }
+        
+        rangeList.add(range);
+        
+        if (extent.getEndRow() == null || range.afterEndKey(new Key(extent.getEndRow()).followingKey(PartialKey.ROW))) {
+          break;
+        }
+        
+        lastExtent = extent;
+      }
+      
+    }
+    
+    return binnedRanges;
+  }
+  
+  /**
+   * Read the metadata table to get tablets and match up ranges to them.
+   */
+  @Override
+  public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
+    log.setLevel(getLogLevel(job));
+    validateOptions(job);
+    
+    String tableName = getInputTableName(job);
+    boolean autoAdjust = getAutoAdjustRanges(job);
+    List<Range> ranges = autoAdjust ? Range.mergeOverlapping(getRanges(job)) : getRanges(job);
+    
+    if (ranges.isEmpty()) {
+      ranges = new ArrayList<Range>(1);
+      ranges.add(new Range());
+    }
+    
+    // get the metadata information for these ranges
+    Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<String,Map<KeyExtent,List<Range>>>();
+    TabletLocator tl;
+    try {
+      if (isOfflineScan(job)) {
+        binnedRanges = binOfflineTable(job, tableName, ranges);
+        while (binnedRanges == null) {
+          // Some tablets were still online, try again
+          UtilWaitThread.sleep(100 + (int) (Math.random() * 100)); // sleep randomly between 100 and 200 ms
+          binnedRanges = binOfflineTable(job, tableName, ranges);
+        }
+      } else {
+        Instance instance = getInstance(job);
+        String tableId = null;
+        tl = getTabletLocator(job);
+        // it's possible that the cache could contain complete but old information about a table's tablets... so clear it
+        tl.invalidateCache();
+        while (!tl.binRanges(ranges, binnedRanges).isEmpty()) {
+          if (!(instance instanceof MockInstance)) {
+            if (tableId == null)
+              tableId = Tables.getTableId(instance, tableName);
+            if (!Tables.exists(instance, tableId))
+              throw new TableDeletedException(tableId);
+            if (Tables.getTableState(instance, tableId) == TableState.OFFLINE)
+              throw new TableOfflineException(instance, tableId);
+          }
+          binnedRanges.clear();
+          log.warn("Unable to locate bins for specified ranges. Retrying.");
+          UtilWaitThread.sleep(100 + (int) (Math.random() * 100)); // sleep randomly between 100 and 200 ms
+          tl.invalidateCache();
+        }
+      }
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+    
+    ArrayList<InputSplit> splits = new ArrayList<InputSplit>(ranges.size());
+    HashMap<Range,ArrayList<String>> splitsToAdd = null;
+    
+    if (!autoAdjust)
+      splitsToAdd = new HashMap<Range,ArrayList<String>>();
+    
+    HashMap<String,String> hostNameCache = new HashMap<String,String>();
+    
+    for (Entry<String,Map<KeyExtent,List<Range>>> tserverBin : binnedRanges.entrySet()) {
+      String ip = tserverBin.getKey().split(":", 2)[0];
+      String location = hostNameCache.get(ip);
+      if (location == null) {
+        InetAddress inetAddress = InetAddress.getByName(ip);
+        location = inetAddress.getHostName();
+        hostNameCache.put(ip, location);
+      }
+      
+      for (Entry<KeyExtent,List<Range>> extentRanges : tserverBin.getValue().entrySet()) {
+        Range ke = extentRanges.getKey().toDataRange();
+        for (Range r : extentRanges.getValue()) {
+          if (autoAdjust) {
+            // divide ranges into smaller ranges, based on the tablets
+            splits.add(new RangeInputSplit(tableName, ke.clip(r), new String[] {location}));
+          } else {
+            // don't divide ranges
+            ArrayList<String> locations = splitsToAdd.get(r);
+            if (locations == null)
+              locations = new ArrayList<String>(1);
+            locations.add(location);
+            splitsToAdd.put(r, locations);
+          }
+        }
+      }
+    }
+    
+    if (!autoAdjust)
+      for (Entry<Range,ArrayList<String>> entry : splitsToAdd.entrySet())
+        splits.add(new RangeInputSplit(tableName, entry.getKey(), entry.getValue().toArray(new String[0])));
+    return splits.toArray(new InputSplit[splits.size()]);
+  }
+  
+  /**
+   * The class RangeInputSplit encapsulates an Accumulo range for use in Map Reduce jobs.
+   */
+  public static class RangeInputSplit extends org.apache.accumulo.core.client.mapreduce.InputFormatBase.RangeInputSplit implements InputSplit {
+    
+    public RangeInputSplit() {
+      super();
+    }
+    
+    public RangeInputSplit(RangeInputSplit split) throws IOException {
+      super(split);
+    }
+    
+    protected RangeInputSplit(String table, Range range, String[] locations) {
+      super(table, range, locations);
+    }
+    
+  }
+  
+}
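
The offline-scan javadoc above describes a compact/clone/offline workflow but does not spell it out end to end. Below is a minimal sketch of that workflow against the public TableOperations client API, ending with the setOfflineTableScan() setter added in this class. The table names, the Connector, and the helper method are hypothetical, and the rest of the input configuration (instance, credentials, input table, authorizations) is deliberately omitted because those setters are not shown in this part of the diff.

    import java.util.Collections;
    import java.util.Map;
    import java.util.Set;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.mapred.InputFormatBase;
    import org.apache.hadoop.mapred.JobConf;

    public class OfflineScanSetup {
      // Sketch only: 'conn' is an already-built Connector and 'job' is the JobConf
      // being configured for the map reduce job (both assumed to exist).
      static void prepareOfflineInput(Connector conn, JobConf job) throws Exception {
        String table = "mytable";          // hypothetical source table
        String clone = "mytable_offline";  // hypothetical clone used as the MR input

        // Compact first so each tablet is reduced to a single file (faster to read).
        conn.tableOperations().compact(table, null, null, true, true);

        // Clone the compacted table and take the clone offline; the clone is what
        // the map reduce job will read directly out of HDFS.
        Map<String,String> propsToSet = Collections.emptyMap();
        Set<String> propsToExclude = Collections.emptySet();
        conn.tableOperations().clone(table, clone, true, propsToSet, propsToExclude);
        conn.tableOperations().offline(clone);

        // Tell the input format to read the offline clone's files directly.
        InputFormatBase.setOfflineTableScan(job, true);
      }
    }
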
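RecordReaderBase above implements getPos(), getProgress(), and the scanner setup, and leaves next(Object, Object), createKey(), and createValue() to subclasses, which are also expected to keep currentKey and numKeysRead current. As a rough sketch of that contract, here is a hypothetical pass-through input format; it is declared in the same package as InputFormatBase purely to keep the example short, and the class name is made up.

    package org.apache.accumulo.core.client.mapred;

    import java.io.IOException;
    import java.util.Map.Entry;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.mapred.InputSplit;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.RecordReader;
    import org.apache.hadoop.mapred.Reporter;

    public class KeyValuePassThroughInputFormat extends InputFormatBase<Key,Value> {

      @Override
      public RecordReader<Key,Value> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
        RecordReaderBase<Key,Value> reader = new RecordReaderBase<Key,Value>() {
          @Override
          public boolean next(Key key, Value value) throws IOException {
            if (scannerIterator.hasNext()) {
              ++numKeysRead;                        // drives getPos()
              Entry<Key,Value> entry = scannerIterator.next();
              key.set(currentKey = entry.getKey()); // currentKey drives getProgress()
              value.set(entry.getValue().get());
              return true;
            }
            return false;
          }

          @Override
          public Key createKey() {
            return new Key();
          }

          @Override
          public Value createValue() {
            return new Value();
          }
        };
        reader.initialize(split, job); // sets up the scanner for this split
        return reader;
      }
    }
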

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java?rev=1437818&r1=1437817&r2=1437818&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java Thu Jan 24 01:09:46 2013
@@ -20,7 +20,7 @@ import java.io.IOException;
 import java.util.Arrays;
 
 import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.mapreduce.util.FileOutputConfigurator;
+import org.apache.accumulo.core.client.mapreduce.lib.util.FileOutputConfigurator;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.ArrayByteSequence;

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java?rev=1437818&r1=1437817&r2=1437818&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java Thu Jan 24 01:09:46 2013
@@ -34,7 +34,7 @@ import org.apache.accumulo.core.client.M
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.mapreduce.util.OutputConfigurator;
+import org.apache.accumulo.core.client.mapreduce.lib.util.OutputConfigurator;
 import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.KeyExtent;

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java?rev=1437818&r1=1437817&r2=1437818&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java Thu Jan 24 01:09:46 2013
@@ -50,6 +50,7 @@ public class AccumuloRowInputFormat exte
   @Override
   public RecordReader<Text,PeekingIterator<Entry<Key,Value>>> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException,
       InterruptedException {
+    log.setLevel(getLogLevel(context));
     return new RecordReaderBase<Text,PeekingIterator<Entry<Key,Value>>>() {
       RowIterator rowIterator;
       

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java?rev=1437818&r1=1437817&r2=1437818&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java Thu Jan 24 01:09:46 2013
@@ -52,7 +52,7 @@ import org.apache.accumulo.core.client.Z
 import org.apache.accumulo.core.client.impl.OfflineScanner;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.TabletLocator;
-import org.apache.accumulo.core.client.mapreduce.util.InputConfigurator;
+import org.apache.accumulo.core.client.mapreduce.lib.util.InputConfigurator;
 import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
@@ -84,7 +84,7 @@ import org.apache.log4j.Logger;
  * Subclasses must implement a {@link #createRecordReader(InputSplit, TaskAttemptContext)} to provide a {@link RecordReader} for K,V.
  * <p>
  * A static base class, RecordReaderBase, is provided to retrieve Accumulo {@link Key}/{@link Value} pairs, but one must implement its
- * RecordReaderBase.nextKeyValue() to transform them to the desired generic types K,V.
+ * {@link RecordReaderBase#nextKeyValue()} to transform them to the desired generic types K,V.
  * <p>
  * See {@link AccumuloInputFormat} for an example implementation.
  */
@@ -840,6 +840,11 @@ public abstract class InputFormatBase<K,
       this.setLocations(split.getLocations());
     }
     
+    protected RangeInputSplit(String table, Range range, String[] locations) {
+      this.range = range;
+      this.locations = locations;
+    }
+    
     public Range getRange() {
       return range;
     }
@@ -887,11 +892,6 @@ public abstract class InputFormatBase<K,
       return 0f;
     }
     
-    RangeInputSplit(String table, Range range, String[] locations) {
-      this.range = range;
-      this.locations = locations;
-    }
-    
     /**
      * This implementation of length is only an estimate, it does not provide exact values. Do not have your code rely on this return value.
      */

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java?rev=1437818&r1=1437759&r2=1437818&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java Thu Jan 24 01:09:46 2013
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.accumulo.core.client.mapreduce.util;
+package org.apache.accumulo.core.client.mapreduce.lib.util;
 
 import java.nio.charset.Charset;
 

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java?rev=1437818&r1=1437759&r2=1437818&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java Thu Jan 24 01:09:46 2013
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.accumulo.core.client.mapreduce.util;
+package org.apache.accumulo.core.client.mapreduce.lib.util;
 
 import java.util.Arrays;
 import java.util.Map.Entry;

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java?rev=1437818&r1=1437759&r2=1437818&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java Thu Jan 24 01:09:46 2013
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.accumulo.core.client.mapreduce.util;
+package org.apache.accumulo.core.client.mapreduce.lib.util;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/OutputConfigurator.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/OutputConfigurator.java?rev=1437818&r1=1437759&r2=1437818&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/OutputConfigurator.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/OutputConfigurator.java Thu Jan 24 01:09:46 2013
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.accumulo.core.client.mapreduce.util;
+package org.apache.accumulo.core.client.mapreduce.lib.util;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/package-info.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/package-info.java?rev=1437818&r1=1437759&r2=1437818&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/package-info.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/package-info.java Thu Jan 24 01:09:46 2013
@@ -30,5 +30,5 @@
  * 
  * @since 1.5.0
  */
-package org.apache.accumulo.core.client.mapreduce.util;
+package org.apache.accumulo.core.client.mapreduce.lib.util;
 

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/PeekingIterator.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/PeekingIterator.java?rev=1437818&r1=1437817&r2=1437818&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/PeekingIterator.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/PeekingIterator.java Thu Jan 24 01:09:46 2013
@@ -19,6 +19,8 @@ package org.apache.accumulo.core.util;
 import java.util.Iterator;
 
 public class PeekingIterator<E> implements Iterator<E> {
+  
+  boolean isInitialized;
   Iterator<E> source;
   E top;
   
@@ -28,13 +30,39 @@ public class PeekingIterator<E> implemen
       top = source.next();
     else
       top = null;
+    isInitialized = true;
+  }
+  
+  /**
+   * Creates an uninitialized instance. This should be used in conjunction with {@link #initialize(Iterator)}.
+   */
+  public PeekingIterator() {
+    isInitialized = false;
+  }
+  
+  /**
+   * Initializes this iterator, to be used with {@link #PeekingIterator()}.
+   */
+  public PeekingIterator<E> initialize(Iterator<E> source) {
+    this.source = source;
+    if (source.hasNext())
+      top = source.next();
+    else
+      top = null;
+    isInitialized = true;
+    return this;
   }
   
   public E peek() {
+    if (!isInitialized)
+      throw new IllegalStateException("Iterator has not yet been initialized");
     return top;
   }
   
+  @Override
   public E next() {
+    if (!isInitialized)
+      throw new IllegalStateException("Iterator has not yet been initialized");
     E lastPeeked = top;
     if (source.hasNext())
       top = source.next();
@@ -43,12 +71,15 @@ public class PeekingIterator<E> implemen
     return lastPeeked;
   }
   
+  @Override
   public void remove() {
     throw new UnsupportedOperationException();
   }
   
   @Override
   public boolean hasNext() {
+    if (!isInitialized)
+      throw new IllegalStateException("Iterator has not yet been initialized");
     return top != null;
   }
 }
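
The PeekingIterator changes above add a no-argument constructor paired with initialize(Iterator) for callers that cannot supply the underlying iterator at construction time; the existing constructor still initializes eagerly. A minimal usage sketch with made-up data:

    import java.util.Arrays;
    import java.util.Iterator;

    import org.apache.accumulo.core.util.PeekingIterator;

    public class PeekingIteratorDemo {
      public static void main(String[] args) {
        Iterator<String> source = Arrays.asList("a", "b", "c").iterator();

        // Deferred initialization: construct now, supply the source later.
        PeekingIterator<String> peeking = new PeekingIterator<String>();
        // Calling peek(), next(), or hasNext() before initialize() now throws IllegalStateException.
        peeking.initialize(source);

        while (peeking.hasNext()) {
          String upcoming = peeking.peek(); // look at the next element without consuming it
          String consumed = peeking.next(); // consume it; equal to 'upcoming'
          System.out.println(upcoming + " -> " + consumed);
        }
      }
    }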


