accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [37/61] [abbrv] [partial] accumulo git commit: ACCUMULO-722 put trunk in my sandbox
Date Thu, 03 Mar 2016 22:00:02 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/ConfigurationType.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/ConfigurationType.java b/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/ConfigurationType.java
new file mode 100644
index 0000000..2492bc2
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/ConfigurationType.java
@@ -0,0 +1,48 @@
+/**
+ * Autogenerated by Thrift Compiler (0.8.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.accumulo.core.client.impl.thrift;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum ConfigurationType implements org.apache.thrift.TEnum {
+  CURRENT(0),
+  SITE(1),
+  DEFAULT(2);
+
+  private final int value;
+
+  private ConfigurationType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static ConfigurationType findByValue(int value) { 
+    switch (value) {
+      case 0:
+        return CURRENT;
+      case 1:
+        return SITE;
+      case 2:
+        return DEFAULT;
+      default:
+        return null;
+    }
+  }
+}
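
The getValue/findByValue pair above is the standard Thrift enum wire contract: a constant is serialized as its i32 ordinal from the IDL, and findByValue is the inverse mapping, returning null for unrecognized values rather than throwing, so a peer built from a newer IDL can still talk to this code. The same pattern repeats in the TableOperation and TableOperationExceptionType enums below. A minimal round-trip sketch (the demo class is hypothetical, not part of this commit):

    import org.apache.accumulo.core.client.impl.thrift.ConfigurationType;

    public class ConfigurationTypeDemo {
      public static void main(String[] args) {
        // enum constant -> wire i32
        int wire = ConfigurationType.SITE.getValue();            // 1

        // wire i32 -> enum constant (null, not an exception, if unknown)
        ConfigurationType back = ConfigurationType.findByValue(wire);
        System.out.println(back);                                // SITE
        System.out.println(ConfigurationType.findByValue(99));   // null
      }
    }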

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/TableOperation.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/TableOperation.java b/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/TableOperation.java
new file mode 100644
index 0000000..1f1b50c
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/TableOperation.java
@@ -0,0 +1,87 @@
+/**
+ * Autogenerated by Thrift Compiler (0.8.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.accumulo.core.client.impl.thrift;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum TableOperation implements org.apache.thrift.TEnum {
+  CREATE(0),
+  DELETE(1),
+  RENAME(2),
+  SET_PROPERTY(3),
+  REMOVE_PROPERTY(4),
+  OFFLINE(5),
+  ONLINE(6),
+  FLUSH(7),
+  PERMISSION(8),
+  CLONE(9),
+  MERGE(10),
+  DELETE_RANGE(11),
+  BULK_IMPORT(12),
+  COMPACT(13),
+  IMPORT(14),
+  EXPORT(15);
+
+  private final int value;
+
+  private TableOperation(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static TableOperation findByValue(int value) { 
+    switch (value) {
+      case 0:
+        return CREATE;
+      case 1:
+        return DELETE;
+      case 2:
+        return RENAME;
+      case 3:
+        return SET_PROPERTY;
+      case 4:
+        return REMOVE_PROPERTY;
+      case 5:
+        return OFFLINE;
+      case 6:
+        return ONLINE;
+      case 7:
+        return FLUSH;
+      case 8:
+        return PERMISSION;
+      case 9:
+        return CLONE;
+      case 10:
+        return MERGE;
+      case 11:
+        return DELETE_RANGE;
+      case 12:
+        return BULK_IMPORT;
+      case 13:
+        return COMPACT;
+      case 14:
+        return IMPORT;
+      case 15:
+        return EXPORT;
+      default:
+        return null;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/TableOperationExceptionType.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/TableOperationExceptionType.java b/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/TableOperationExceptionType.java
new file mode 100644
index 0000000..1bd7693
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/TableOperationExceptionType.java
@@ -0,0 +1,60 @@
+/**
+ * Autogenerated by Thrift Compiler (0.8.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.accumulo.core.client.impl.thrift;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum TableOperationExceptionType implements org.apache.thrift.TEnum {
+  EXISTS(0),
+  NOTFOUND(1),
+  OFFLINE(2),
+  BULK_BAD_INPUT_DIRECTORY(3),
+  BULK_BAD_ERROR_DIRECTORY(4),
+  BAD_RANGE(5),
+  OTHER(6);
+
+  private final int value;
+
+  private TableOperationExceptionType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static TableOperationExceptionType findByValue(int value) { 
+    switch (value) {
+      case 0:
+        return EXISTS;
+      case 1:
+        return NOTFOUND;
+      case 2:
+        return OFFLINE;
+      case 3:
+        return BULK_BAD_INPUT_DIRECTORY;
+      case 4:
+        return BULK_BAD_ERROR_DIRECTORY;
+      case 5:
+        return BAD_RANGE;
+      case 6:
+        return OTHER;
+      default:
+        return null;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/ThriftTableOperationException.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/ThriftTableOperationException.java b/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/ThriftTableOperationException.java
new file mode 100644
index 0000000..3ac89e7
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/client/impl/thrift/ThriftTableOperationException.java
@@ -0,0 +1,814 @@
+/**
+ * Autogenerated by Thrift Compiler (0.8.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.accumulo.core.client.impl.thrift;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings("all") public class ThriftTableOperationException extends Exception implements org.apache.thrift.TBase<ThriftTableOperationException, ThriftTableOperationException._Fields>, java.io.Serializable, Cloneable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ThriftTableOperationException");
+
+  private static final org.apache.thrift.protocol.TField TABLE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("tableId", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField OP_FIELD_DESC = new org.apache.thrift.protocol.TField("op", org.apache.thrift.protocol.TType.I32, (short)3);
+  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ThriftTableOperationExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ThriftTableOperationExceptionTupleSchemeFactory());
+  }
+
+  public String tableId; // required
+  public String tableName; // required
+  /**
+   * 
+   * @see TableOperation
+   */
+  public TableOperation op; // required
+  /**
+   * 
+   * @see TableOperationExceptionType
+   */
+  public TableOperationExceptionType type; // required
+  public String description; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TABLE_ID((short)1, "tableId"),
+    TABLE_NAME((short)2, "tableName"),
+    /**
+     * 
+     * @see TableOperation
+     */
+    OP((short)3, "op"),
+    /**
+     * 
+     * @see TableOperationExceptionType
+     */
+    TYPE((short)4, "type"),
+    DESCRIPTION((short)5, "description");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TABLE_ID
+          return TABLE_ID;
+        case 2: // TABLE_NAME
+          return TABLE_NAME;
+        case 3: // OP
+          return OP;
+        case 4: // TYPE
+          return TYPE;
+        case 5: // DESCRIPTION
+          return DESCRIPTION;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TABLE_ID, new org.apache.thrift.meta_data.FieldMetaData("tableId", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.OP, new org.apache.thrift.meta_data.FieldMetaData("op", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TableOperation.class)));
+    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TableOperationExceptionType.class)));
+    tmpMap.put(_Fields.DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("description", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ThriftTableOperationException.class, metaDataMap);
+  }
+
+  public ThriftTableOperationException() {
+  }
+
+  public ThriftTableOperationException(
+    String tableId,
+    String tableName,
+    TableOperation op,
+    TableOperationExceptionType type,
+    String description)
+  {
+    this();
+    this.tableId = tableId;
+    this.tableName = tableName;
+    this.op = op;
+    this.type = type;
+    this.description = description;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ThriftTableOperationException(ThriftTableOperationException other) {
+    if (other.isSetTableId()) {
+      this.tableId = other.tableId;
+    }
+    if (other.isSetTableName()) {
+      this.tableName = other.tableName;
+    }
+    if (other.isSetOp()) {
+      this.op = other.op;
+    }
+    if (other.isSetType()) {
+      this.type = other.type;
+    }
+    if (other.isSetDescription()) {
+      this.description = other.description;
+    }
+  }
+
+  public ThriftTableOperationException deepCopy() {
+    return new ThriftTableOperationException(this);
+  }
+
+  @Override
+  public void clear() {
+    this.tableId = null;
+    this.tableName = null;
+    this.op = null;
+    this.type = null;
+    this.description = null;
+  }
+
+  public String getTableId() {
+    return this.tableId;
+  }
+
+  public ThriftTableOperationException setTableId(String tableId) {
+    this.tableId = tableId;
+    return this;
+  }
+
+  public void unsetTableId() {
+    this.tableId = null;
+  }
+
+  /** Returns true if field tableId is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableId() {
+    return this.tableId != null;
+  }
+
+  public void setTableIdIsSet(boolean value) {
+    if (!value) {
+      this.tableId = null;
+    }
+  }
+
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  public ThriftTableOperationException setTableName(String tableName) {
+    this.tableName = tableName;
+    return this;
+  }
+
+  public void unsetTableName() {
+    this.tableName = null;
+  }
+
+  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableName() {
+    return this.tableName != null;
+  }
+
+  public void setTableNameIsSet(boolean value) {
+    if (!value) {
+      this.tableName = null;
+    }
+  }
+
+  /**
+   * 
+   * @see TableOperation
+   */
+  public TableOperation getOp() {
+    return this.op;
+  }
+
+  /**
+   * 
+   * @see TableOperation
+   */
+  public ThriftTableOperationException setOp(TableOperation op) {
+    this.op = op;
+    return this;
+  }
+
+  public void unsetOp() {
+    this.op = null;
+  }
+
+  /** Returns true if field op is set (has been assigned a value) and false otherwise */
+  public boolean isSetOp() {
+    return this.op != null;
+  }
+
+  public void setOpIsSet(boolean value) {
+    if (!value) {
+      this.op = null;
+    }
+  }
+
+  /**
+   * 
+   * @see TableOperationExceptionType
+   */
+  public TableOperationExceptionType getType() {
+    return this.type;
+  }
+
+  /**
+   * 
+   * @see TableOperationExceptionType
+   */
+  public ThriftTableOperationException setType(TableOperationExceptionType type) {
+    this.type = type;
+    return this;
+  }
+
+  public void unsetType() {
+    this.type = null;
+  }
+
+  /** Returns true if field type is set (has been assigned a value) and false otherwise */
+  public boolean isSetType() {
+    return this.type != null;
+  }
+
+  public void setTypeIsSet(boolean value) {
+    if (!value) {
+      this.type = null;
+    }
+  }
+
+  public String getDescription() {
+    return this.description;
+  }
+
+  public ThriftTableOperationException setDescription(String description) {
+    this.description = description;
+    return this;
+  }
+
+  public void unsetDescription() {
+    this.description = null;
+  }
+
+  /** Returns true if field description is set (has been assigned a value) and false otherwise */
+  public boolean isSetDescription() {
+    return this.description != null;
+  }
+
+  public void setDescriptionIsSet(boolean value) {
+    if (!value) {
+      this.description = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TABLE_ID:
+      if (value == null) {
+        unsetTableId();
+      } else {
+        setTableId((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTableName();
+      } else {
+        setTableName((String)value);
+      }
+      break;
+
+    case OP:
+      if (value == null) {
+        unsetOp();
+      } else {
+        setOp((TableOperation)value);
+      }
+      break;
+
+    case TYPE:
+      if (value == null) {
+        unsetType();
+      } else {
+        setType((TableOperationExceptionType)value);
+      }
+      break;
+
+    case DESCRIPTION:
+      if (value == null) {
+        unsetDescription();
+      } else {
+        setDescription((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TABLE_ID:
+      return getTableId();
+
+    case TABLE_NAME:
+      return getTableName();
+
+    case OP:
+      return getOp();
+
+    case TYPE:
+      return getType();
+
+    case DESCRIPTION:
+      return getDescription();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TABLE_ID:
+      return isSetTableId();
+    case TABLE_NAME:
+      return isSetTableName();
+    case OP:
+      return isSetOp();
+    case TYPE:
+      return isSetType();
+    case DESCRIPTION:
+      return isSetDescription();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ThriftTableOperationException)
+      return this.equals((ThriftTableOperationException)that);
+    return false;
+  }
+
+  public boolean equals(ThriftTableOperationException that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_tableId = true && this.isSetTableId();
+    boolean that_present_tableId = true && that.isSetTableId();
+    if (this_present_tableId || that_present_tableId) {
+      if (!(this_present_tableId && that_present_tableId))
+        return false;
+      if (!this.tableId.equals(that.tableId))
+        return false;
+    }
+
+    boolean this_present_tableName = true && this.isSetTableName();
+    boolean that_present_tableName = true && that.isSetTableName();
+    if (this_present_tableName || that_present_tableName) {
+      if (!(this_present_tableName && that_present_tableName))
+        return false;
+      if (!this.tableName.equals(that.tableName))
+        return false;
+    }
+
+    boolean this_present_op = true && this.isSetOp();
+    boolean that_present_op = true && that.isSetOp();
+    if (this_present_op || that_present_op) {
+      if (!(this_present_op && that_present_op))
+        return false;
+      if (!this.op.equals(that.op))
+        return false;
+    }
+
+    boolean this_present_type = true && this.isSetType();
+    boolean that_present_type = true && that.isSetType();
+    if (this_present_type || that_present_type) {
+      if (!(this_present_type && that_present_type))
+        return false;
+      if (!this.type.equals(that.type))
+        return false;
+    }
+
+    boolean this_present_description = true && this.isSetDescription();
+    boolean that_present_description = true && that.isSetDescription();
+    if (this_present_description || that_present_description) {
+      if (!(this_present_description && that_present_description))
+        return false;
+      if (!this.description.equals(that.description))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
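+    // Thrift 0.8 generates a constant hash here; hash-based collections holding
+    // these objects degrade to linear equals() scans within a single bucket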
+    return 0;
+  }
+
+  public int compareTo(ThriftTableOperationException other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+    ThriftTableOperationException typedOther = (ThriftTableOperationException)other;
+
+    lastComparison = Boolean.valueOf(isSetTableId()).compareTo(typedOther.isSetTableId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableId, typedOther.tableId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(typedOther.isSetTableName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, typedOther.tableName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOp()).compareTo(typedOther.isSetOp());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOp()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.op, typedOther.op);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetType()).compareTo(typedOther.isSetType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, typedOther.type);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDescription()).compareTo(typedOther.isSetDescription());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDescription()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.description, typedOther.description);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ThriftTableOperationException(");
+    boolean first = true;
+
+    sb.append("tableId:");
+    if (this.tableId == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tableId);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tableName:");
+    if (this.tableName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tableName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("op:");
+    if (this.op == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.op);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("type:");
+    if (this.type == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.type);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("description:");
+    if (this.description == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.description);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ThriftTableOperationExceptionStandardSchemeFactory implements SchemeFactory {
+    public ThriftTableOperationExceptionStandardScheme getScheme() {
+      return new ThriftTableOperationExceptionStandardScheme();
+    }
+  }
+
+  private static class ThriftTableOperationExceptionStandardScheme extends StandardScheme<ThriftTableOperationException> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ThriftTableOperationException struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TABLE_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableId = iprot.readString();
+              struct.setTableIdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableName = iprot.readString();
+              struct.setTableNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // OP
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.op = TableOperation.findByValue(iprot.readI32());
+              struct.setOpIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.type = TableOperationExceptionType.findByValue(iprot.readI32());
+              struct.setTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // DESCRIPTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.description = iprot.readString();
+              struct.setDescriptionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+
+      // check for required fields of primitive type, which can't be checked in the validate method
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ThriftTableOperationException struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.tableId != null) {
+        oprot.writeFieldBegin(TABLE_ID_FIELD_DESC);
+        oprot.writeString(struct.tableId);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tableName != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.tableName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.op != null) {
+        oprot.writeFieldBegin(OP_FIELD_DESC);
+        oprot.writeI32(struct.op.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.type != null) {
+        oprot.writeFieldBegin(TYPE_FIELD_DESC);
+        oprot.writeI32(struct.type.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.description != null) {
+        oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC);
+        oprot.writeString(struct.description);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ThriftTableOperationExceptionTupleSchemeFactory implements SchemeFactory {
+    public ThriftTableOperationExceptionTupleScheme getScheme() {
+      return new ThriftTableOperationExceptionTupleScheme();
+    }
+  }
+
+  private static class ThriftTableOperationExceptionTupleScheme extends TupleScheme<ThriftTableOperationException> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ThriftTableOperationException struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetTableId()) {
+        optionals.set(0);
+      }
+      if (struct.isSetTableName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetOp()) {
+        optionals.set(2);
+      }
+      if (struct.isSetType()) {
+        optionals.set(3);
+      }
+      if (struct.isSetDescription()) {
+        optionals.set(4);
+      }
+      oprot.writeBitSet(optionals, 5);
+      if (struct.isSetTableId()) {
+        oprot.writeString(struct.tableId);
+      }
+      if (struct.isSetTableName()) {
+        oprot.writeString(struct.tableName);
+      }
+      if (struct.isSetOp()) {
+        oprot.writeI32(struct.op.getValue());
+      }
+      if (struct.isSetType()) {
+        oprot.writeI32(struct.type.getValue());
+      }
+      if (struct.isSetDescription()) {
+        oprot.writeString(struct.description);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ThriftTableOperationException struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(5);
+      if (incoming.get(0)) {
+        struct.tableId = iprot.readString();
+        struct.setTableIdIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.tableName = iprot.readString();
+        struct.setTableNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.op = TableOperation.findByValue(iprot.readI32());
+        struct.setOpIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.type = TableOperationExceptionType.findByValue(iprot.readI32());
+        struct.setTypeIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.description = iprot.readString();
+        struct.setDescriptionIsSet(true);
+      }
+    }
+  }
+
+}
+
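
Because read() and write() dispatch through the schemes map above, the same object moves through any TProtocol; the private writeObject/readObject methods do exactly this with TCompactProtocol for Java serialization. A standalone round-trip sketch, assuming it compiles in the same package as the generated classes (the demo class and field values are hypothetical):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;

    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TIOStreamTransport;

    public class RoundTripDemo {
      public static void main(String[] args) throws Exception {
        ThriftTableOperationException ex = new ThriftTableOperationException(
            "3", "trades", TableOperation.COMPACT,
            TableOperationExceptionType.NOTFOUND, "table deleted during compaction");

        // write() selects the StandardScheme for a non-tuple protocol
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        ex.write(new TCompactProtocol(new TIOStreamTransport(bos)));

        // read() repopulates a blank instance from the same bytes
        ThriftTableOperationException copy = new ThriftTableOperationException();
        copy.read(new TCompactProtocol(new TIOStreamTransport(
            new ByteArrayInputStream(bos.toByteArray()))));

        System.out.println(copy.equals(ex)); // true: all five fields survive
      }
    }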

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java b/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java
new file mode 100644
index 0000000..ee20a0d
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce;
+
+import java.io.IOException;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVWriter;
+import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.util.ArgumentChecker;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+
+/**
+ * This class allows MapReduce jobs to write output in the Accumulo data file format.
+ * 
+ * The user must specify an output path that does not yet exist, via a static method call on this class:
+ * 
+ * AccumuloFileOutputFormat.setOutputPath(job, outputDirectory)
+ * 
+ * Other FileOutputFormat configuration methods are ignored. Compression uses the DefaultCodec and is always on.
+ */
+public class AccumuloFileOutputFormat extends FileOutputFormat<Key,Value> {
+  private static final String PREFIX = AccumuloOutputFormat.class.getSimpleName();
+  public static final String FILE_TYPE = PREFIX + ".file_type";
+  public static final String BLOCK_SIZE = PREFIX + ".block_size";
+  
+  private static final String INSTANCE_HAS_BEEN_SET = PREFIX + ".instanceConfigured";
+  private static final String INSTANCE_NAME = PREFIX + ".instanceName";
+  private static final String ZOOKEEPERS = PREFIX + ".zooKeepers";
+  
+  @Override
+  public RecordWriter<Key,Value> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
+    // get the path of the temporary output file
+    final Configuration conf = job.getConfiguration();
+    
+    String extension = conf.get(FILE_TYPE);
+    if (extension == null || extension.isEmpty())
+      extension = RFile.EXTENSION;
+    
+    handleBlockSize(job.getConfiguration());
+    final Path file = this.getDefaultWorkFile(job, "." + extension);
+    
+    return new RecordWriter<Key,Value>() {
+      FileSKVWriter out = null;
+      
+      @Override
+      public void write(Key key, Value value) throws IOException {
+        if (out == null) {
+          out = FileOperations.getInstance().openWriter(file.toString(), file.getFileSystem(conf), conf, AccumuloConfiguration.getDefaultConfiguration());
+          out.startDefaultLocalityGroup();
+        }
+        out.append(key, value);
+      }
+      
+      @Override
+      public void close(TaskAttemptContext context) throws IOException, InterruptedException {
+        if (out != null)
+          out.close();
+      }
+    };
+  }
+  
+  protected static void handleBlockSize(Configuration conf) {
+    int blockSize;
+    if (conf.getBoolean(INSTANCE_HAS_BEEN_SET, false)) {
+      blockSize = (int) new ZooKeeperInstance(conf.get(INSTANCE_NAME), conf.get(ZOOKEEPERS)).getConfiguration().getMemoryInBytes(
+          Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE);
+    } else {
+      blockSize = getBlockSize(conf);
+    }
+    conf.setInt("io.seqfile.compress.blocksize", blockSize);
+    
+  }
+  
+  public static void setFileType(Configuration conf, String type) {
+    conf.set(FILE_TYPE, type);
+  }
+  
+  public static void setBlockSize(Configuration conf, int blockSize) {
+    conf.setInt(BLOCK_SIZE, blockSize);
+  }
+  
+  private static int getBlockSize(Configuration conf) {
+    return conf.getInt(BLOCK_SIZE, (int) AccumuloConfiguration.getDefaultConfiguration().getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE));
+  }
+  
+  /**
+   * @param conf the Hadoop job configuration to modify
+   * @param instanceName the name of the Accumulo instance
+   * @param zooKeepers a comma-separated list of ZooKeeper hosts
+   */
+  public static void setZooKeeperInstance(Configuration conf, String instanceName, String zooKeepers) {
+    if (conf.getBoolean(INSTANCE_HAS_BEEN_SET, false))
+      throw new IllegalStateException("Instance info can only be set once per job");
+    conf.setBoolean(INSTANCE_HAS_BEEN_SET, true);
+    
+    ArgumentChecker.notNull(instanceName, zooKeepers);
+    conf.set(INSTANCE_NAME, instanceName);
+    conf.set(ZOOKEEPERS, zooKeepers);
+  }
+  
+  /**
+   * @param conf the Hadoop job configuration holding the instance name and ZooKeeper hosts
+   * @return the Accumulo instance configured via setZooKeeperInstance
+   */
+  protected static Instance getInstance(Configuration conf) {
+    return new ZooKeeperInstance(conf.get(INSTANCE_NAME), conf.get(ZOOKEEPERS));
+  }
+}
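
A typical driver for the class above only needs an output path plus, optionally, a live instance to source the compressed-block size from; the underlying FileSKVWriter expects keys appended in sorted order, so emitted Key/Value pairs should already be sorted. A configuration sketch, with the job name, output path, instance name, and ZooKeeper hosts as placeholder values:

    import org.apache.accumulo.core.client.mapreduce.AccumuloFileOutputFormat;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;

    public class RFileExportDriver {
      public static void main(String[] args) throws Exception {
        Job job = new Job(new Configuration(), "rfile-export");
        job.setOutputFormatClass(AccumuloFileOutputFormat.class);
        job.setOutputKeyClass(Key.class);
        job.setOutputValueClass(Value.class);

        // required: the directory must not already exist (inherited from FileOutputFormat)
        AccumuloFileOutputFormat.setOutputPath(job, new Path("/tmp/rfiles"));

        // optional: read the table's compressed-block size from a running instance
        AccumuloFileOutputFormat.setZooKeeperInstance(job.getConfiguration(),
            "myInstance", "zkhost1:2181,zkhost2:2181");

        // mapper/input format setup omitted for brevity
      }
    }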

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java b/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
new file mode 100644
index 0000000..14befa9
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce;
+
+import java.io.IOException;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.format.DefaultFormatter;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+
+/**
+ * This class allows MapReduce jobs to use Accumulo as the source of data. This input format provides keys and values of type Key and Value to the Map() and
+ * Reduce() functions.
+ * 
+ * The user must specify the following via static methods:
+ * 
+ * <ul>
+ * <li>AccumuloInputFormat.setInputTableInfo(job, username, password, table, auths)
+ * <li>AccumuloInputFormat.setZooKeeperInstance(job, instanceName, hosts)
+ * </ul>
+ * 
+ * Other static methods are optional.
+ */
+
+public class AccumuloInputFormat extends InputFormatBase<Key,Value> {
+  @Override
+  public RecordReader<Key,Value> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
+    log.setLevel(getLogLevel(context.getConfiguration()));
+    return new RecordReaderBase<Key,Value>() {
+      @Override
+      public boolean nextKeyValue() throws IOException, InterruptedException {
+        if (scannerIterator.hasNext()) {
+          ++numKeysRead;
+          Entry<Key,Value> entry = scannerIterator.next();
+          currentK = currentKey = entry.getKey();
+          currentV = currentValue = entry.getValue();
+          if (log.isTraceEnabled())
+            log.trace("Processing key/value pair: " + DefaultFormatter.formatEntry(entry, true));
+          return true;
+        }
+        return false;
+      }
+    };
+  }
+}
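
The record reader above hands each scanned entry to the map phase as a (Key, Value) pair, so a mapper is written directly against those types. A sketch of a matching mapper; job setup goes through the static methods named in the class comment (setInputTableInfo, setZooKeeperInstance), whose exact signatures live in InputFormatBase and are not shown in this hunk:

    import java.io.IOException;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    public class CellEchoMapper extends Mapper<Key,Value,Text,Text> {
      @Override
      protected void map(Key key, Value value, Context context)
          throws IOException, InterruptedException {
        // one call per Key/Value pair streamed from the table scan
        context.write(new Text(key.getRow()), new Text(value.get()));
      }
    }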

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java b/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
new file mode 100644
index 0000000..d463390
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
@@ -0,0 +1,422 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.MultiTableBatchWriter;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.core.data.ColumnUpdate;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.util.ArgumentChecker;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.filecache.DistributedCache;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+/**
+ * This class allows MapReduce jobs to use Accumulo as the sink of data. This output format accepts keys and values of type Text (for a table name) and Mutation
+ * from the Map() and Reduce() functions.
+ * 
+ * The user must specify the following via static methods:
+ * 
+ * <ul>
+ * <li>AccumuloOutputFormat.setOutputInfo(job, username, password, createTables, defaultTableName)
+ * <li>AccumuloOutputFormat.setZooKeeperInstance(job, instanceName, hosts)
+ * </ul>
+ * 
+ * Other static methods are optional.
+ */
+public class AccumuloOutputFormat extends OutputFormat<Text,Mutation> {
+  private static final Logger log = Logger.getLogger(AccumuloOutputFormat.class);
+  
+  private static final String PREFIX = AccumuloOutputFormat.class.getSimpleName();
+  private static final String OUTPUT_INFO_HAS_BEEN_SET = PREFIX + ".configured";
+  private static final String INSTANCE_HAS_BEEN_SET = PREFIX + ".instanceConfigured";
+  private static final String USERNAME = PREFIX + ".username";
+  private static final String PASSWORD_PATH = PREFIX + ".password";
+  private static final String DEFAULT_TABLE_NAME = PREFIX + ".defaulttable";
+  
+  private static final String INSTANCE_NAME = PREFIX + ".instanceName";
+  private static final String ZOOKEEPERS = PREFIX + ".zooKeepers";
+  private static final String MOCK = PREFIX + ".useMockInstance"; // prefixed like the other keys to avoid collisions
+  
+  private static final String CREATETABLES = PREFIX + ".createtables";
+  private static final String LOGLEVEL = PREFIX + ".loglevel";
+  private static final String SIMULATE = PREFIX + ".simulate";
+  
+  // BatchWriter options
+  private static final String MAX_MUTATION_BUFFER_SIZE = PREFIX + ".maxmemory";
+  private static final String MAX_LATENCY = PREFIX + ".maxlatency";
+  private static final String NUM_WRITE_THREADS = PREFIX + ".writethreads";
+  
+  private static final long DEFAULT_MAX_MUTATION_BUFFER_SIZE = 50 * 1024 * 1024; // 50MB
+  private static final int DEFAULT_MAX_LATENCY = 60 * 1000; // 1 minute
+  private static final int DEFAULT_NUM_WRITE_THREADS = 2;
+  
+  /**
+   * Configure the output format.
+   * 
+   * @param conf
+   *          the Map/Reduce job object
+   * @param user
+   *          the username, which must have the Table.CREATE permission to create tables
+   * @param passwd
+   *          the password for the given user
+   * @param createTables
+   *          if true, the output format will create new tables as necessary; table names can contain only alphanumerics and underscores
+   * @param defaultTable
+   *          the table to use when the tablename is null in the write call
+   */
+  public static void setOutputInfo(Configuration conf, String user, byte[] passwd, boolean createTables, String defaultTable) {
+    if (conf.getBoolean(OUTPUT_INFO_HAS_BEEN_SET, false))
+      throw new IllegalStateException("Output info can only be set once per job");
+    conf.setBoolean(OUTPUT_INFO_HAS_BEEN_SET, true);
+    
+    ArgumentChecker.notNull(user, passwd);
+    conf.set(USERNAME, user);
+    conf.setBoolean(CREATETABLES, createTables);
+    if (defaultTable != null)
+      conf.set(DEFAULT_TABLE_NAME, defaultTable);
+    
+    try {
+      FileSystem fs = FileSystem.get(conf);
+      Path file = new Path(fs.getWorkingDirectory(), conf.get("mapred.job.name") + System.currentTimeMillis() + ".pw");
+      conf.set(PASSWORD_PATH, file.toString());
+      FSDataOutputStream fos = fs.create(file, false);
+      fs.setPermission(file, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
+      fs.deleteOnExit(file);
+      
+      byte[] encodedPw = Base64.encodeBase64(passwd);
+      fos.writeInt(encodedPw.length);
+      fos.write(encodedPw);
+      fos.close();
+      
+      DistributedCache.addCacheFile(file.toUri(), conf);
+    } catch (IOException ioe) {
+      throw new RuntimeException(ioe);
+    }
+
+  }
+  
+  public static void setZooKeeperInstance(Configuration conf, String instanceName, String zooKeepers) {
+    if (conf.getBoolean(INSTANCE_HAS_BEEN_SET, false))
+      throw new IllegalStateException("Instance info can only be set once per job");
+    conf.setBoolean(INSTANCE_HAS_BEEN_SET, true);
+    
+    ArgumentChecker.notNull(instanceName, zooKeepers);
+    conf.set(INSTANCE_NAME, instanceName);
+    conf.set(ZOOKEEPERS, zooKeepers);
+  }
+  
+  public static void setMockInstance(Configuration conf, String instanceName) {
+    conf.setBoolean(INSTANCE_HAS_BEEN_SET, true);
+    conf.setBoolean(MOCK, true);
+    conf.set(INSTANCE_NAME, instanceName);
+  }
+  
+  public static void setMaxMutationBufferSize(Configuration conf, long numberOfBytes) {
+    conf.setLong(MAX_MUTATION_BUFFER_SIZE, numberOfBytes);
+  }
+  
+  public static void setMaxLatency(Configuration conf, int numberOfMilliseconds) {
+    conf.setInt(MAX_LATENCY, numberOfMilliseconds);
+  }
+  
+  public static void setMaxWriteThreads(Configuration conf, int numberOfThreads) {
+    conf.setInt(NUM_WRITE_THREADS, numberOfThreads);
+  }
+  
+  public static void setLogLevel(Configuration conf, Level level) {
+    ArgumentChecker.notNull(level);
+    conf.setInt(LOGLEVEL, level.toInt());
+  }
+  
+  public static void setSimulationMode(Configuration conf) {
+    conf.setBoolean(SIMULATE, true);
+  }
+  
+  protected static String getUsername(Configuration conf) {
+    return conf.get(USERNAME);
+  }
+  
+  /**
+   * @throws IOException if the password file cannot be read from the filesystem
+   */
+  protected static byte[] getPassword(Configuration conf) throws IOException {
+    FileSystem fs = FileSystem.get(conf);
+    Path file = new Path(conf.get(PASSWORD_PATH));
+    
+    FSDataInputStream fdis = fs.open(file);
+    int length = fdis.readInt();
+    byte[] encodedPassword = new byte[length];
+    fdis.readFully(encodedPassword); // read() may return fewer bytes; readFully guarantees the whole password
+    fdis.close();
+    
+    return Base64.decodeBase64(encodedPassword);
+  }
+  
+  protected static boolean canCreateTables(Configuration conf) {
+    return conf.getBoolean(CREATETABLES, false);
+  }
+  
+  protected static String getDefaultTableName(Configuration conf) {
+    return conf.get(DEFAULT_TABLE_NAME);
+  }
+  
+  protected static Instance getInstance(Configuration conf) {
+    if (conf.getBoolean(MOCK, false))
+      return new MockInstance(conf.get(INSTANCE_NAME));
+    return new ZooKeeperInstance(conf.get(INSTANCE_NAME), conf.get(ZOOKEEPERS));
+  }
+  
+  protected static long getMaxMutationBufferSize(Configuration conf) {
+    return conf.getLong(MAX_MUTATION_BUFFER_SIZE, DEFAULT_MAX_MUTATION_BUFFER_SIZE);
+  }
+  
+  protected static int getMaxLatency(Configuration conf) {
+    return conf.getInt(MAX_LATENCY, DEFAULT_MAX_LATENCY);
+  }
+  
+  protected static int getMaxWriteThreads(Configuration conf) {
+    return conf.getInt(NUM_WRITE_THREADS, DEFAULT_NUM_WRITE_THREADS);
+  }
+  
+  protected static Level getLogLevel(Configuration conf) {
+    if (conf.get(LOGLEVEL) != null)
+      return Level.toLevel(conf.getInt(LOGLEVEL, Level.INFO.toInt()));
+    return null;
+  }
+  
+  protected static boolean getSimulationMode(Configuration conf) {
+    return conf.getBoolean(SIMULATE, false);
+  }
+  
+  protected static class AccumuloRecordWriter extends RecordWriter<Text,Mutation> {
+    private MultiTableBatchWriter mtbw = null;
+    private HashMap<Text,BatchWriter> bws = null;
+    private Text defaultTableName = null;
+    
+    private boolean simulate = false;
+    private boolean createTables = false;
+    
+    private long mutCount = 0;
+    private long valCount = 0;
+    
+    private Connector conn;
+    
+    protected AccumuloRecordWriter(Configuration conf) throws AccumuloException, AccumuloSecurityException, IOException {
+      Level l = getLogLevel(conf);
+      if (l != null)
+        log.setLevel(getLogLevel(conf));
+      this.simulate = getSimulationMode(conf);
+      this.createTables = canCreateTables(conf);
+      
+      if (simulate)
+        log.info("Simulating output only. No writes to tables will occur");
+      
+      this.bws = new HashMap<Text,BatchWriter>();
+      
+      String tname = getDefaultTableName(conf);
+      this.defaultTableName = (tname == null) ? null : new Text(tname);
+      
+      if (!simulate) {
+        this.conn = getInstance(conf).getConnector(getUsername(conf), getPassword(conf));
+        mtbw = conn.createMultiTableBatchWriter(getMaxMutationBufferSize(conf), getMaxLatency(conf), getMaxWriteThreads(conf));
+      }
+    }
+    
+    /**
+     * Push a mutation into a table. If table is null, the defaultTable will be used. If createTables is enabled, the table will be created if it does not exist.
+     * The table name must only contain alphanumerics and underscore.
+     */
+    @Override
+    public void write(Text table, Mutation mutation) throws IOException {
+      if (table == null || table.toString().isEmpty())
+        table = this.defaultTableName;
+      
+      if (!simulate && table == null)
+        throw new IOException("No table or default table specified. Try simulation mode next time");
+      
+      ++mutCount;
+      valCount += mutation.size();
+      printMutation(table, mutation);
+      
+      if (simulate)
+        return;
+      
+      if (!bws.containsKey(table))
+        try {
+          addTable(table);
+        } catch (Exception e) {
+          e.printStackTrace();
+          throw new IOException(e);
+        }
+      
+      try {
+        bws.get(table).addMutation(mutation);
+      } catch (MutationsRejectedException e) {
+        throw new IOException(e);
+      }
+    }
+    
+    public void addTable(Text tableName) throws AccumuloException, AccumuloSecurityException {
+      if (simulate) {
+        log.info("Simulating adding table: " + tableName);
+        return;
+      }
+      
+      log.debug("Adding table: " + tableName);
+      BatchWriter bw = null;
+      String table = tableName.toString();
+      
+      if (createTables && !conn.tableOperations().exists(table)) {
+        try {
+          conn.tableOperations().create(table);
+        } catch (AccumuloSecurityException e) {
+          log.error("Accumulo security violation creating " + table, e);
+          throw e;
+        } catch (TableExistsException e) {
+          // Shouldn't happen
+        }
+      }
+      
+      try {
+        bw = mtbw.getBatchWriter(table);
+      } catch (TableNotFoundException e) {
+        log.error("Accumulo table " + table + " doesn't exist and cannot be created.", e);
+        throw new AccumuloException(e);
+      } catch (AccumuloException e) {
+        throw e;
+      } catch (AccumuloSecurityException e) {
+        throw e;
+      }
+      
+      if (bw != null)
+        bws.put(tableName, bw);
+    }
+    
+    private int printMutation(Text table, Mutation m) {
+      if (log.isTraceEnabled()) {
+        log.trace(String.format("Table %s row key: %s", table, hexDump(m.getRow())));
+        for (ColumnUpdate cu : m.getUpdates()) {
+          log.trace(String.format("Table %s column: %s:%s", table, hexDump(cu.getColumnFamily()), hexDump(cu.getColumnQualifier())));
+          log.trace(String.format("Table %s security: %s", table, new ColumnVisibility(cu.getColumnVisibility()).toString()));
+          log.trace(String.format("Table %s value: %s", table, hexDump(cu.getValue())));
+        }
+      }
+      return m.getUpdates().size();
+    }
+    
+    private String hexDump(byte[] ba) {
+      StringBuilder sb = new StringBuilder();
+      for (byte b : ba) {
+        if ((b > 0x20) && (b < 0x7e))
+          sb.append((char) b);
+        else
+          sb.append(String.format("x%02x", b));
+      }
+      return sb.toString();
+    }
+    
+    @Override
+    public void close(TaskAttemptContext attempt) throws IOException, InterruptedException {
+      log.debug("mutations written: " + mutCount + ", values written: " + valCount);
+      if (simulate)
+        return;
+      
+      try {
+        mtbw.close();
+      } catch (MutationsRejectedException e) {
+        if (e.getAuthorizationFailures().size() > 0) {
+          HashSet<String> tables = new HashSet<String>();
+          for (KeyExtent ke : e.getAuthorizationFailures()) {
+            tables.add(ke.getTableId().toString());
+          }
+          
+          log.error("Not authorized to write to tables : " + tables);
+        }
+        
+        if (e.getConstraintViolationSummaries().size() > 0) {
+          log.error("Constraint violations : " + e.getConstraintViolationSummaries().size());
+        }
+      }
+    }
+  }
+  
+  @Override
+  public void checkOutputSpecs(JobContext job) throws IOException {
+    checkOutputSpecs(job.getConfiguration());
+  }
+  
+  public void checkOutputSpecs(Configuration conf) throws IOException {
+    if (!conf.getBoolean(OUTPUT_INFO_HAS_BEEN_SET, false))
+      throw new IOException("Output info has not been set.");
+    if (!conf.getBoolean(INSTANCE_HAS_BEEN_SET, false))
+      throw new IOException("Instance info has not been set.");
+    try {
+      Connector c = getInstance(conf).getConnector(getUsername(conf), getPassword(conf));
+      if (!c.securityOperations().authenticateUser(getUsername(conf), getPassword(conf)))
+        throw new IOException("Unable to authenticate user");
+    } catch (AccumuloException e) {
+      throw new IOException(e);
+    } catch (AccumuloSecurityException e) {
+      throw new IOException(e);
+    }
+  }
+  
+  @Override
+  public OutputCommitter getOutputCommitter(TaskAttemptContext context) {
+    return new NullOutputFormat<Text,Mutation>().getOutputCommitter(context);
+  }
+  
+  @Override
+  public RecordWriter<Text,Mutation> getRecordWriter(TaskAttemptContext attempt) throws IOException {
+    try {
+      return new AccumuloRecordWriter(attempt.getConfiguration());
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+  }
+}
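
On the write side, the record writer above keys each Mutation by destination table, and a null or empty table name falls back to the configured default table. A reducer sketch that targets the default table; the column family "f" and qualifier "q" are arbitrary placeholders:

    import java.io.IOException;

    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    public class MutationReducer extends Reducer<Text,Text,Text,Mutation> {
      @Override
      protected void reduce(Text row, Iterable<Text> values, Context context)
          throws IOException, InterruptedException {
        Mutation m = new Mutation(row);
        for (Text v : values) {
          // placeholder family "f" and qualifier "q"
          m.put(new Text("f"), new Text("q"), new Value(v.toString().getBytes()));
        }
        // null table name routes to the default table set via setOutputInfo
        context.write(null, m);
      }
    }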

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bdbfccb/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java
----------------------------------------------------------------------
diff --git a/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java b/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java
new file mode 100644
index 0000000..b55b4b5
--- /dev/null
+++ b/1.5/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce;
+
+import java.io.IOException;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.RowIterator;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.PeekingIterator;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+
+public class AccumuloRowInputFormat extends InputFormatBase<Text,PeekingIterator<Entry<Key,Value>>> {
+  @Override
+  public RecordReader<Text,PeekingIterator<Entry<Key,Value>>> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException,
+      InterruptedException {
+    return new RecordReaderBase<Text,PeekingIterator<Entry<Key,Value>>>() {
+      RowIterator rowIterator;
+      
+      @Override
+      public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IOException {
+        super.initialize(inSplit, attempt);
+        rowIterator = new RowIterator(scannerIterator);
+        currentK = new Text();
+        currentV = null;
+      }
+      
+      @Override
+      public boolean nextKeyValue() throws IOException, InterruptedException {
+        if (!rowIterator.hasNext())
+          return false;
+        currentV = new PeekingIterator<Entry<Key,Value>>(rowIterator.next());
+        numKeysRead = rowIterator.getKVCount();
+        currentKey = currentV.peek().getKey();
+        currentK = new Text(currentKey.getRow());
+        return true;
+      }
+    };
+  }
+}
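
Here the reader groups a whole row per map call: the key is the row ID and the value is a PeekingIterator over that row's cells in sorted order, courtesy of RowIterator. A mapper sketch that counts cells per row (the class name is hypothetical):

    import java.io.IOException;
    import java.util.Map.Entry;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.util.PeekingIterator;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    public class RowSizeMapper extends Mapper<Text,PeekingIterator<Entry<Key,Value>>,Text,IntWritable> {
      @Override
      protected void map(Text row, PeekingIterator<Entry<Key,Value>> columns, Context context)
          throws IOException, InterruptedException {
        int cells = 0;
        while (columns.hasNext()) { // every column of this row, already sorted
          columns.next();
          cells++;
        }
        context.write(row, new IntWritable(cells));
      }
    }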

