accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From els...@apache.org
Subject [42/50] [abbrv] git commit: ACCUMULO-2573 ACCUMULO-2583 Initial stub test to see WAL get transmitted to the peer.
Date Fri, 09 May 2014 15:29:20 GMT
ACCUMULO-2573 ACCUMULO-2583 Initial stub test to see WAL get transmitted to the peer.

Lots of random cleanup included that I stumbled on. Fix some issues with
the DWQ key name. A minor change to how the thrift IDL looks.


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/3579b674
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/3579b674
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/3579b674

Branch: refs/heads/ACCUMULO-378
Commit: 3579b674a32b61564a29d6dd66ea4057d568c329
Parents: c7b6062
Author: Josh Elser <elserj@apache.org>
Authored: Wed May 7 23:28:32 2014 -0400
Committer: Josh Elser <elserj@apache.org>
Committed: Wed May 7 23:28:32 2014 -0400

----------------------------------------------------------------------
 .../org/apache/accumulo/core/conf/Property.java |  10 +-
 .../replication/thrift/ReplicationServicer.java | 168 ++++++--------
 core/src/main/thrift/replication.thrift         |   4 +-
 .../replication/AccumuloReplicaSystem.java      | 137 ------------
 .../ReplicationWorkAssignerHelper.java          |  22 +-
 .../accumulo/server/util/SystemPropUtil.java    |   9 +-
 .../ReplicationWorkAssignerHelperTest.java      |  56 +++++
 .../java/org/apache/accumulo/master/Master.java |  11 +
 .../master/replication/ReplicationDriver.java   |   4 +-
 .../replication/ReplicationWorkAssigner.java    |  16 +-
 .../master/replication/StatusMaker.java         |  16 +-
 .../ReplicationWorkAssignerTest.java            |  33 +--
 .../apache/accumulo/tserver/TabletServer.java   |  28 ++-
 .../replication/AccumuloReplicaSystem.java      | 224 +++++++++++++++++++
 .../replication/ReplicationProcessor.java       |  43 ++--
 .../replication/ReplicationServicerHandler.java |  44 ++++
 .../tserver/replication/ReplicationWorker.java  |   3 +-
 .../replication/ReplicationProcessorTest.java   |  68 ++++++
 .../tserver/src/test/resources/log4j.properties |  36 +++
 .../test/replication/ReplicationIT.java         | 123 ++++++++++
 20 files changed, 764 insertions(+), 291 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/core/src/main/java/org/apache/accumulo/core/conf/Property.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 6d3b00c..d611bd5 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -451,10 +451,6 @@ public enum Property {
   @Experimental
   REPLICATION_PEERS("replication.peer.", null, PropertyType.PREFIX, "Properties in this category control what systems data can be replicated to"),
   @Experimental
-  REPLICATION_BATCH_SIZE("replication.batch.size", "1000", PropertyType.COUNT, "Maximum number of updates (WAL) or key-value pairs (RFile) to send in one replication task"),
-  @Experimental
-  REPLICATION_SEND_THREAD_POOL_SIZE("replication.send.threads", "1", PropertyType.COUNT, "Size of threadpool used to start replication to slaves"),
-  @Experimental
   REPLICATION_MAX_WORK_QUEUE("replication.max.work.queue", "20000000", PropertyType.COUNT, "Upper bound of the number of files queued for replication"),
   @Experimental
   REPLICATION_WORK_ASSIGNMENT_SLEEP("replication.work.assignment.sleep", "30s", PropertyType.TIMEDURATION, "Amount of time to sleep between replication work assignment"),
@@ -464,6 +460,10 @@ public enum Property {
   REPLICATION_RECEIPT_SERVICE_PORT("replication.receipt.service.port", "10001", PropertyType.PORT, "Listen port used by thrift service in tserver listening for replication"),
   @Experimental
   REPLICATION_WORK_ATTEMPTS("replication.work.attempts", "10", PropertyType.COUNT, "Number of attempts to try to replicate some data before giving up and letting it naturally be retried later"),
+  @Experimental
+  REPLICATION_MIN_THREADS("replication.receiver.min.threads", "1", PropertyType.COUNT, "Minimum number of threads for replciation"),
+  @Experimental
+  REPLICATION_THREADCHECK("replication.receiver.threadcheck.time", "5s", PropertyType.TIMEDURATION, "The time between adjustments of the replication thread pool."),
 
   ;
 
@@ -713,7 +713,7 @@ public enum Property {
     return key.startsWith(Property.TABLE_PREFIX.getKey()) || key.startsWith(Property.TSERV_PREFIX.getKey()) || key.startsWith(Property.LOGGER_PREFIX.getKey())
         || key.startsWith(Property.MASTER_PREFIX.getKey()) || key.startsWith(Property.GC_PREFIX.getKey())
         || key.startsWith(Property.MONITOR_PREFIX.getKey() + "banner.") || key.startsWith(VFS_CONTEXT_CLASSPATH_PROPERTY.getKey())
-        || key.startsWith(Property.TABLE_COMPACTION_STRATEGY_PREFIX.getKey());
+        || key.startsWith(Property.TABLE_COMPACTION_STRATEGY_PREFIX.getKey()) || key.startsWith(REPLICATION_PREFIX.getKey());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/core/src/main/java/org/apache/accumulo/core/replication/thrift/ReplicationServicer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/thrift/ReplicationServicer.java b/core/src/main/java/org/apache/accumulo/core/replication/thrift/ReplicationServicer.java
index 1a40dc4..e5e26ca 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/thrift/ReplicationServicer.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/thrift/ReplicationServicer.java
@@ -50,9 +50,9 @@ import org.slf4j.LoggerFactory;
 
   public interface Iface {
 
-    public ByteBuffer replicateLog(int remoteTableId, WalEdits data) throws RemoteReplicationException, org.apache.thrift.TException;
+    public long replicateLog(int remoteTableId, WalEdits data) throws RemoteReplicationException, org.apache.thrift.TException;
 
-    public ByteBuffer replicateKeyValues(int remoteTableId, KeyValues data) throws RemoteReplicationException, org.apache.thrift.TException;
+    public long replicateKeyValues(int remoteTableId, KeyValues data) throws RemoteReplicationException, org.apache.thrift.TException;
 
   }
 
@@ -84,7 +84,7 @@ import org.slf4j.LoggerFactory;
       super(iprot, oprot);
     }
 
-    public ByteBuffer replicateLog(int remoteTableId, WalEdits data) throws RemoteReplicationException, org.apache.thrift.TException
+    public long replicateLog(int remoteTableId, WalEdits data) throws RemoteReplicationException, org.apache.thrift.TException
     {
       send_replicateLog(remoteTableId, data);
       return recv_replicateLog();
@@ -98,7 +98,7 @@ import org.slf4j.LoggerFactory;
       sendBase("replicateLog", args);
     }
 
-    public ByteBuffer recv_replicateLog() throws RemoteReplicationException, org.apache.thrift.TException
+    public long recv_replicateLog() throws RemoteReplicationException, org.apache.thrift.TException
     {
       replicateLog_result result = new replicateLog_result();
       receiveBase(result, "replicateLog");
@@ -111,7 +111,7 @@ import org.slf4j.LoggerFactory;
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "replicateLog failed: unknown result");
     }
 
-    public ByteBuffer replicateKeyValues(int remoteTableId, KeyValues data) throws RemoteReplicationException, org.apache.thrift.TException
+    public long replicateKeyValues(int remoteTableId, KeyValues data) throws RemoteReplicationException, org.apache.thrift.TException
     {
       send_replicateKeyValues(remoteTableId, data);
       return recv_replicateKeyValues();
@@ -125,7 +125,7 @@ import org.slf4j.LoggerFactory;
       sendBase("replicateKeyValues", args);
     }
 
-    public ByteBuffer recv_replicateKeyValues() throws RemoteReplicationException, org.apache.thrift.TException
+    public long recv_replicateKeyValues() throws RemoteReplicationException, org.apache.thrift.TException
     {
       replicateKeyValues_result result = new replicateKeyValues_result();
       receiveBase(result, "replicateKeyValues");
@@ -181,7 +181,7 @@ import org.slf4j.LoggerFactory;
         prot.writeMessageEnd();
       }
 
-      public ByteBuffer getResult() throws RemoteReplicationException, org.apache.thrift.TException {
+      public long getResult() throws RemoteReplicationException, org.apache.thrift.TException {
         if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
@@ -216,7 +216,7 @@ import org.slf4j.LoggerFactory;
         prot.writeMessageEnd();
       }
 
-      public ByteBuffer getResult() throws RemoteReplicationException, org.apache.thrift.TException {
+      public long getResult() throws RemoteReplicationException, org.apache.thrift.TException {
         if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
@@ -261,6 +261,7 @@ import org.slf4j.LoggerFactory;
         replicateLog_result result = new replicateLog_result();
         try {
           result.success = iface.replicateLog(args.remoteTableId, args.data);
+          result.setSuccessIsSet(true);
         } catch (RemoteReplicationException e) {
           result.e = e;
         }
@@ -285,6 +286,7 @@ import org.slf4j.LoggerFactory;
         replicateKeyValues_result result = new replicateKeyValues_result();
         try {
           result.success = iface.replicateKeyValues(args.remoteTableId, args.data);
+          result.setSuccessIsSet(true);
         } catch (RemoteReplicationException e) {
           result.e = e;
         }
@@ -754,7 +756,7 @@ import org.slf4j.LoggerFactory;
   public static class replicateLog_result implements org.apache.thrift.TBase<replicateLog_result, replicateLog_result._Fields>, java.io.Serializable, Cloneable   {
     private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("replicateLog_result");
 
-    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I64, (short)0);
     private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
@@ -763,7 +765,7 @@ import org.slf4j.LoggerFactory;
       schemes.put(TupleScheme.class, new replicateLog_resultTupleSchemeFactory());
     }
 
-    public ByteBuffer success; // required
+    public long success; // required
     public RemoteReplicationException e; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -828,11 +830,13 @@ import org.slf4j.LoggerFactory;
     }
 
     // isset id assignments
+    private static final int __SUCCESS_ISSET_ID = 0;
+    private byte __isset_bitfield = 0;
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
       tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
@@ -843,11 +847,12 @@ import org.slf4j.LoggerFactory;
     }
 
     public replicateLog_result(
-      ByteBuffer success,
+      long success,
       RemoteReplicationException e)
     {
       this();
       this.success = success;
+      setSuccessIsSet(true);
       this.e = e;
     }
 
@@ -855,10 +860,8 @@ import org.slf4j.LoggerFactory;
      * Performs a deep copy on <i>other</i>.
      */
     public replicateLog_result(replicateLog_result other) {
-      if (other.isSetSuccess()) {
-        this.success = org.apache.thrift.TBaseHelper.copyBinary(other.success);
-;
-      }
+      __isset_bitfield = other.__isset_bitfield;
+      this.success = other.success;
       if (other.isSetE()) {
         this.e = new RemoteReplicationException(other.e);
       }
@@ -870,42 +873,32 @@ import org.slf4j.LoggerFactory;
 
     @Override
     public void clear() {
-      this.success = null;
+      setSuccessIsSet(false);
+      this.success = 0;
       this.e = null;
     }
 
-    public byte[] getSuccess() {
-      setSuccess(org.apache.thrift.TBaseHelper.rightSize(success));
-      return success == null ? null : success.array();
-    }
-
-    public ByteBuffer bufferForSuccess() {
-      return success;
-    }
-
-    public replicateLog_result setSuccess(byte[] success) {
-      setSuccess(success == null ? (ByteBuffer)null : ByteBuffer.wrap(success));
-      return this;
+    public long getSuccess() {
+      return this.success;
     }
 
-    public replicateLog_result setSuccess(ByteBuffer success) {
+    public replicateLog_result setSuccess(long success) {
       this.success = success;
+      setSuccessIsSet(true);
       return this;
     }
 
     public void unsetSuccess() {
-      this.success = null;
+      __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
     }
 
     /** Returns true if field success is set (has been assigned a value) and false otherwise */
     public boolean isSetSuccess() {
-      return this.success != null;
+      return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID);
     }
 
     public void setSuccessIsSet(boolean value) {
-      if (!value) {
-        this.success = null;
-      }
+      __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
     }
 
     public RemoteReplicationException getE() {
@@ -938,7 +931,7 @@ import org.slf4j.LoggerFactory;
         if (value == null) {
           unsetSuccess();
         } else {
-          setSuccess((ByteBuffer)value);
+          setSuccess((Long)value);
         }
         break;
 
@@ -956,7 +949,7 @@ import org.slf4j.LoggerFactory;
     public Object getFieldValue(_Fields field) {
       switch (field) {
       case SUCCESS:
-        return getSuccess();
+        return Long.valueOf(getSuccess());
 
       case E:
         return getE();
@@ -993,12 +986,12 @@ import org.slf4j.LoggerFactory;
       if (that == null)
         return false;
 
-      boolean this_present_success = true && this.isSetSuccess();
-      boolean that_present_success = true && that.isSetSuccess();
+      boolean this_present_success = true;
+      boolean that_present_success = true;
       if (this_present_success || that_present_success) {
         if (!(this_present_success && that_present_success))
           return false;
-        if (!this.success.equals(that.success))
+        if (this.success != that.success)
           return false;
       }
 
@@ -1068,11 +1061,7 @@ import org.slf4j.LoggerFactory;
       boolean first = true;
 
       sb.append("success:");
-      if (this.success == null) {
-        sb.append("null");
-      } else {
-        org.apache.thrift.TBaseHelper.toString(this.success, sb);
-      }
+      sb.append(this.success);
       first = false;
       if (!first) sb.append(", ");
       sb.append("e:");
@@ -1101,6 +1090,8 @@ import org.slf4j.LoggerFactory;
 
     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
       try {
+        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+        __isset_bitfield = 0;
         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
       } catch (org.apache.thrift.TException te) {
         throw new java.io.IOException(te);
@@ -1126,8 +1117,8 @@ import org.slf4j.LoggerFactory;
           }
           switch (schemeField.id) {
             case 0: // SUCCESS
-              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-                struct.success = iprot.readBinary();
+              if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+                struct.success = iprot.readI64();
                 struct.setSuccessIsSet(true);
               } else { 
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
@@ -1157,9 +1148,9 @@ import org.slf4j.LoggerFactory;
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
-        if (struct.success != null) {
+        if (struct.isSetSuccess()) {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
-          oprot.writeBinary(struct.success);
+          oprot.writeI64(struct.success);
           oprot.writeFieldEnd();
         }
         if (struct.e != null) {
@@ -1193,7 +1184,7 @@ import org.slf4j.LoggerFactory;
         }
         oprot.writeBitSet(optionals, 2);
         if (struct.isSetSuccess()) {
-          oprot.writeBinary(struct.success);
+          oprot.writeI64(struct.success);
         }
         if (struct.isSetE()) {
           struct.e.write(oprot);
@@ -1205,7 +1196,7 @@ import org.slf4j.LoggerFactory;
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
-          struct.success = iprot.readBinary();
+          struct.success = iprot.readI64();
           struct.setSuccessIsSet(true);
         }
         if (incoming.get(1)) {
@@ -1678,7 +1669,7 @@ import org.slf4j.LoggerFactory;
   public static class replicateKeyValues_result implements org.apache.thrift.TBase<replicateKeyValues_result, replicateKeyValues_result._Fields>, java.io.Serializable, Cloneable   {
     private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("replicateKeyValues_result");
 
-    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I64, (short)0);
     private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
@@ -1687,7 +1678,7 @@ import org.slf4j.LoggerFactory;
       schemes.put(TupleScheme.class, new replicateKeyValues_resultTupleSchemeFactory());
     }
 
-    public ByteBuffer success; // required
+    public long success; // required
     public RemoteReplicationException e; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -1752,11 +1743,13 @@ import org.slf4j.LoggerFactory;
     }
 
     // isset id assignments
+    private static final int __SUCCESS_ISSET_ID = 0;
+    private byte __isset_bitfield = 0;
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
       tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
@@ -1767,11 +1760,12 @@ import org.slf4j.LoggerFactory;
     }
 
     public replicateKeyValues_result(
-      ByteBuffer success,
+      long success,
       RemoteReplicationException e)
     {
       this();
       this.success = success;
+      setSuccessIsSet(true);
       this.e = e;
     }
 
@@ -1779,10 +1773,8 @@ import org.slf4j.LoggerFactory;
      * Performs a deep copy on <i>other</i>.
      */
     public replicateKeyValues_result(replicateKeyValues_result other) {
-      if (other.isSetSuccess()) {
-        this.success = org.apache.thrift.TBaseHelper.copyBinary(other.success);
-;
-      }
+      __isset_bitfield = other.__isset_bitfield;
+      this.success = other.success;
       if (other.isSetE()) {
         this.e = new RemoteReplicationException(other.e);
       }
@@ -1794,42 +1786,32 @@ import org.slf4j.LoggerFactory;
 
     @Override
     public void clear() {
-      this.success = null;
+      setSuccessIsSet(false);
+      this.success = 0;
       this.e = null;
     }
 
-    public byte[] getSuccess() {
-      setSuccess(org.apache.thrift.TBaseHelper.rightSize(success));
-      return success == null ? null : success.array();
-    }
-
-    public ByteBuffer bufferForSuccess() {
-      return success;
-    }
-
-    public replicateKeyValues_result setSuccess(byte[] success) {
-      setSuccess(success == null ? (ByteBuffer)null : ByteBuffer.wrap(success));
-      return this;
+    public long getSuccess() {
+      return this.success;
     }
 
-    public replicateKeyValues_result setSuccess(ByteBuffer success) {
+    public replicateKeyValues_result setSuccess(long success) {
       this.success = success;
+      setSuccessIsSet(true);
       return this;
     }
 
     public void unsetSuccess() {
-      this.success = null;
+      __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
     }
 
     /** Returns true if field success is set (has been assigned a value) and false otherwise */
     public boolean isSetSuccess() {
-      return this.success != null;
+      return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID);
     }
 
     public void setSuccessIsSet(boolean value) {
-      if (!value) {
-        this.success = null;
-      }
+      __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
     }
 
     public RemoteReplicationException getE() {
@@ -1862,7 +1844,7 @@ import org.slf4j.LoggerFactory;
         if (value == null) {
           unsetSuccess();
         } else {
-          setSuccess((ByteBuffer)value);
+          setSuccess((Long)value);
         }
         break;
 
@@ -1880,7 +1862,7 @@ import org.slf4j.LoggerFactory;
     public Object getFieldValue(_Fields field) {
       switch (field) {
       case SUCCESS:
-        return getSuccess();
+        return Long.valueOf(getSuccess());
 
       case E:
         return getE();
@@ -1917,12 +1899,12 @@ import org.slf4j.LoggerFactory;
       if (that == null)
         return false;
 
-      boolean this_present_success = true && this.isSetSuccess();
-      boolean that_present_success = true && that.isSetSuccess();
+      boolean this_present_success = true;
+      boolean that_present_success = true;
       if (this_present_success || that_present_success) {
         if (!(this_present_success && that_present_success))
           return false;
-        if (!this.success.equals(that.success))
+        if (this.success != that.success)
           return false;
       }
 
@@ -1992,11 +1974,7 @@ import org.slf4j.LoggerFactory;
       boolean first = true;
 
       sb.append("success:");
-      if (this.success == null) {
-        sb.append("null");
-      } else {
-        org.apache.thrift.TBaseHelper.toString(this.success, sb);
-      }
+      sb.append(this.success);
       first = false;
       if (!first) sb.append(", ");
       sb.append("e:");
@@ -2025,6 +2003,8 @@ import org.slf4j.LoggerFactory;
 
     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
       try {
+        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+        __isset_bitfield = 0;
         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
       } catch (org.apache.thrift.TException te) {
         throw new java.io.IOException(te);
@@ -2050,8 +2030,8 @@ import org.slf4j.LoggerFactory;
           }
           switch (schemeField.id) {
             case 0: // SUCCESS
-              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-                struct.success = iprot.readBinary();
+              if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+                struct.success = iprot.readI64();
                 struct.setSuccessIsSet(true);
               } else { 
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
@@ -2081,9 +2061,9 @@ import org.slf4j.LoggerFactory;
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
-        if (struct.success != null) {
+        if (struct.isSetSuccess()) {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
-          oprot.writeBinary(struct.success);
+          oprot.writeI64(struct.success);
           oprot.writeFieldEnd();
         }
         if (struct.e != null) {
@@ -2117,7 +2097,7 @@ import org.slf4j.LoggerFactory;
         }
         oprot.writeBitSet(optionals, 2);
         if (struct.isSetSuccess()) {
-          oprot.writeBinary(struct.success);
+          oprot.writeI64(struct.success);
         }
         if (struct.isSetE()) {
           struct.e.write(oprot);
@@ -2129,7 +2109,7 @@ import org.slf4j.LoggerFactory;
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
-          struct.success = iprot.readBinary();
+          struct.success = iprot.readI64();
           struct.setSuccessIsSet(true);
         }
         if (incoming.get(1)) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/core/src/main/thrift/replication.thrift
----------------------------------------------------------------------
diff --git a/core/src/main/thrift/replication.thrift b/core/src/main/thrift/replication.thrift
index 2994092..6245973 100644
--- a/core/src/main/thrift/replication.thrift
+++ b/core/src/main/thrift/replication.thrift
@@ -43,6 +43,6 @@ service ReplicationCoordinator {
 }
 
 service ReplicationServicer {
-    binary replicateLog(1:i32 remoteTableId, 2:WalEdits data) throws (1:RemoteReplicationException e),
-    binary replicateKeyValues(1:i32 remoteTableId, 2:KeyValues data) throws (1:RemoteReplicationException e)
+    i64 replicateLog(1:i32 remoteTableId, 2:WalEdits data) throws (1:RemoteReplicationException e),
+    i64 replicateKeyValues(1:i32 remoteTableId, 2:KeyValues data) throws (1:RemoteReplicationException e)
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/base/src/main/java/org/apache/accumulo/server/replication/AccumuloReplicaSystem.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/AccumuloReplicaSystem.java b/server/base/src/main/java/org/apache/accumulo/server/replication/AccumuloReplicaSystem.java
deleted file mode 100644
index 138eb71..0000000
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/AccumuloReplicaSystem.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.replication;
-
-import java.nio.ByteBuffer;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.impl.ClientExecReturn;
-import org.apache.accumulo.core.client.impl.ReplicationClient;
-import org.apache.accumulo.core.client.impl.ServerConfigurationUtil;
-import org.apache.accumulo.core.client.replication.ReplicaSystem;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.replication.ReplicationTarget;
-import org.apache.accumulo.core.replication.proto.Replication.Status;
-import org.apache.accumulo.core.replication.thrift.ReplicationCoordinator;
-import org.apache.accumulo.core.replication.thrift.ReplicationServicer;
-import org.apache.accumulo.core.replication.thrift.ReplicationServicer.Client;
-import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.hadoop.fs.Path;
-import org.apache.thrift.transport.TTransportException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-
-/**
- * 
- */
-public class AccumuloReplicaSystem implements ReplicaSystem {
-  private static final Logger log = LoggerFactory.getLogger(AccumuloReplicaSystem.class);
-
-  private String instanceName, zookeepers;
-
-  @Override
-  public void configure(String configuration) {
-    Preconditions.checkNotNull(configuration);
-
-    int index = configuration.indexOf(',');
-    if (-1 == index) {
-      throw new IllegalArgumentException("Expected comma in configuration string");
-    }
-
-    instanceName = configuration.substring(0, index);
-    zookeepers = configuration.substring(index + 1);
-  }
-
-  @Override
-  public Status replicate(Path p, Status status, ReplicationTarget target) {
-    Instance localInstance = HdfsZooInstance.getInstance();
-    AccumuloConfiguration localConf = ServerConfigurationUtil.getConfiguration(localInstance);
-    
-    Instance peerInstance = getPeerInstance(target);
-    // Remote identifier is an integer (table id) in this case.
-    final int remoteTableId = Integer.parseInt(target.getRemoteIdentifier());
-
-    // Attempt the replication of this status a number of times before giving up and
-    // trying to replicate it again later some other time.
-    for (int i = 0; i < localConf.getCount(Property.REPLICATION_WORK_ATTEMPTS); i++) {
-      String peerTserver;
-      try {
-        // Ask the master on the remote what TServer we should talk with to replicate the data
-        peerTserver = ReplicationClient.executeCoordinatorWithReturn(peerInstance, new ClientExecReturn<String,ReplicationCoordinator.Client>() {
-  
-          @Override
-          public String execute(ReplicationCoordinator.Client client) throws Exception {
-            return client.getServicerAddress(remoteTableId);
-          }
-          
-        });
-      } catch (AccumuloException | AccumuloSecurityException e) {
-        // No progress is made
-        log.error("Could not connect to master at {}, cannot proceed with replication. Will retry", target, e);
-        continue;
-      }
-  
-      if (null == peerTserver) {
-        // Something went wrong, and we didn't get a valid tserver from the remote for some reason
-        log.warn("Did not receive tserver from master at {}, cannot proceed with replication. Will retry.", target);
-        continue;
-      }
-  
-      // We have a tserver on the remote -- send the data its way.
-      ByteBuffer result;
-      //TODO should chunk up the given file into some configurable sizes instead of just sending the entire file all at once
-      //     configuration should probably just be size based.
-      try {
-        result = ReplicationClient.executeServicerWithReturn(peerInstance, peerTserver, new ClientExecReturn<ByteBuffer,ReplicationServicer.Client>() {
-          @Override
-          public ByteBuffer execute(Client client) throws Exception {
-            //TODO This needs to actually send the appropriate data, and choose replicateLog or replicateKeyValues
-            return client.replicateLog(remoteTableId, null);
-          }
-        });
-
-        // We need to be able to parse the returned Status,
-        // if we can't, we don't know what the server actually parsed.
-        try {
-          return Status.parseFrom(ByteBufferUtil.toBytes(result));
-        } catch (InvalidProtocolBufferException e) {
-          log.error("Could not parse return Status from {}", peerTserver, e);
-          throw new RuntimeException("Could not parse returned Status from " + peerTserver, e);
-        }
-      } catch (TTransportException | AccumuloException | AccumuloSecurityException e) {
-        log.warn("Could not connect to remote server {}, will retry", peerTserver, e);
-        UtilWaitThread.sleep(250);
-      }
-    }
-
-    // We made no status, punt on it for now, and let it re-queue itself for work
-    return status;
-  }
-
-  public Instance getPeerInstance(ReplicationTarget target) {
-    return new ZooKeeperInstance(instanceName, zookeepers);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationWorkAssignerHelper.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationWorkAssignerHelper.java b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationWorkAssignerHelper.java
index dd70134..6b408b7 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationWorkAssignerHelper.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationWorkAssignerHelper.java
@@ -38,18 +38,7 @@ public class ReplicationWorkAssignerHelper {
    * @return Key for identifying work in queue
    */
   public static String getQueueKey(String filename, ReplicationTarget replTarget) {
-    return getQueueKey(filename, replTarget.toText().toString());
-  }
-
-  /**
-   * Serialize a filename and a {@link ReplicationTarget} into the expected
-   * key format for use with the {@link DistributedWorkQueue}
-   * @param filename Filename for data to be replicated
-   * @param replTargetStr Serialized {@link ReplicationTarget}
-   * @return Key for identifying work in queue
-   */
-  public static String getQueueKey(String filename, String replTargetStr) {
-    return filename + KEY_SEPARATOR + replTargetStr;
+    return filename + KEY_SEPARATOR + replTarget.getPeerName() + KEY_SEPARATOR + replTarget.getRemoteIdentifier();
   }
 
   /**
@@ -64,6 +53,13 @@ public class ReplicationWorkAssignerHelper {
       throw new IllegalArgumentException("Could not find expected separator in queue key '" + queueKey + "'");
     }
 
-    return Maps.immutableEntry(queueKey.substring(0, index), ReplicationTarget.from(queueKey.substring(index + 1)));
+    String filename = queueKey.substring(0, index);
+
+    int secondIndex = queueKey.indexOf(KEY_SEPARATOR, index + 1);
+    if (-1 == secondIndex) {
+      throw new IllegalArgumentException("Could not find expected separator in queue key '" + queueKey + "'");
+    }
+
+    return Maps.immutableEntry(filename, new ReplicationTarget(queueKey.substring(index + 1, secondIndex), queueKey.substring(secondIndex + 1)));
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
index 1d93f90..90047b1 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
@@ -26,15 +26,22 @@ import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class SystemPropUtil {
+  private static final Logger log = LoggerFactory.getLogger(SystemPropUtil.class);
+
   public static boolean setSystemProperty(String property, String value) throws KeeperException, InterruptedException {
     Property p = Property.getPropertyByKey(property);
-    if ((p != null && !p.getType().isValidFormat(value)) || !Property.isValidZooPropertyKey(property))
+    if ((p != null && !p.getType().isValidFormat(value)) || !Property.isValidZooPropertyKey(property)) {
+      log.warn("Ignoring property {}", property);
       return false;
+    }
     
    // create the zk node for this property and set its data to the specified value
     String zPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZCONFIG + "/" + property;
+    log.warn("Setting {}={}", zPath, value);
     ZooReaderWriter.getInstance().putPersistentData(zPath, value.getBytes(StandardCharsets.UTF_8), NodeExistsPolicy.OVERWRITE);
     
     return true;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/base/src/test/java/ReplicationWorkAssignerHelper/ReplicationWorkAssignerHelperTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/ReplicationWorkAssignerHelper/ReplicationWorkAssignerHelperTest.java b/server/base/src/test/java/ReplicationWorkAssignerHelper/ReplicationWorkAssignerHelperTest.java
new file mode 100644
index 0000000..75fc494
--- /dev/null
+++ b/server/base/src/test/java/ReplicationWorkAssignerHelper/ReplicationWorkAssignerHelperTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ReplicationWorkAssignerHelper;
+
+import java.util.Map.Entry;
+import java.util.UUID;
+
+import org.apache.accumulo.core.replication.ReplicationTarget;
+import org.apache.accumulo.server.replication.ReplicationWorkAssignerHelper;
+import org.apache.hadoop.fs.Path;
+import org.apache.zookeeper.common.PathUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * 
+ */
+public class ReplicationWorkAssignerHelperTest {
+
+  @Test
+  public void createsValidZKNodeName() {
+    Path p = new Path ("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
+    ReplicationTarget target = new ReplicationTarget("cluster1", "table1");
+
+    String key = ReplicationWorkAssignerHelper.getQueueKey(p.toString(), target);
+    
+    PathUtils.validatePath(key);
+  }
+
+  @Test
+  public void queueKeySerialization() {
+    Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
+    ReplicationTarget target = new ReplicationTarget("cluster1", "1");
+
+    String key = ReplicationWorkAssignerHelper.getQueueKey(p.toString(), target);
+
+    Entry<String,ReplicationTarget> result = ReplicationWorkAssignerHelper.fromQueueKey(key);
+    Assert.assertEquals(p.toString(), result.getKey());
+    Assert.assertEquals(target, result.getValue());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/master/src/main/java/org/apache/accumulo/master/Master.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/Master.java b/server/master/src/main/java/org/apache/accumulo/master/Master.java
index db7507e..0eac9ab 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/Master.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/Master.java
@@ -77,6 +77,7 @@ import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.master.recovery.RecoveryManager;
 import org.apache.accumulo.master.replication.ReplicationDriver;
+import org.apache.accumulo.master.replication.ReplicationWorkAssigner;
 import org.apache.accumulo.master.state.TableCounts;
 import org.apache.accumulo.server.Accumulo;
 import org.apache.accumulo.server.ServerConstants;
@@ -166,6 +167,7 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
   final EventCoordinator nextEvent = new EventCoordinator();
   final private Object mergeLock = new Object();
   private ReplicationDriver replicationWorkDriver;
+  private ReplicationWorkAssigner replicationWorkAssigner;
   RecoveryManager recoveryManager = null;
 
   ZooLock masterLock = null;
@@ -942,6 +944,14 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
     replicationWorkDriver = new ReplicationDriver(this);
     replicationWorkDriver.start();
 
+    // Start the daemon to assign work to tservers to replicate to our peers
+    try {
+      replicationWorkAssigner = new ReplicationWorkAssigner(this, getConnector());
+    } catch (AccumuloException | AccumuloSecurityException e) {
+      throw new RuntimeException(e);
+    }
+    replicationWorkAssigner.start();
+
     // Once we are sure the upgrade is complete, we can safely allow fate use.
     waitForMetadataUpgrade.await();
 
@@ -988,6 +998,7 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
 
     final long deadline = System.currentTimeMillis() + MAX_CLEANUP_WAIT_TIME;
     statusThread.join(remaining(deadline));
+    replicationWorkAssigner.join(remaining(deadline));
     replicationWorkDriver.join(remaining(deadline));
 
     // quit, even if the tablet servers somehow jam up and the watchers

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java b/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
index 53a70a7..1bc002e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
@@ -24,6 +24,7 @@ import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.fate.util.UtilWaitThread;
 import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.replication.ReplicationTable;
 import org.apache.log4j.Logger;
 
 /**
@@ -37,6 +38,7 @@ public class ReplicationDriver extends Daemon {
 
   private WorkMaker workMaker;
   private StatusMaker statusMaker;
+  private Connector conn;
 
   public ReplicationDriver(Master master) {
     super("Replication Driver");
@@ -49,7 +51,6 @@ public class ReplicationDriver extends Daemon {
   public void run() {
     while (master.stillMaster()) {
       if (null == workMaker) {
-        Connector conn;
         try {
           conn = master.getConnector();
         } catch (AccumuloException | AccumuloSecurityException e) {
@@ -64,6 +65,7 @@ public class ReplicationDriver extends Daemon {
       }
 
       // Make status markers from replication records in metadata
+      // This will end up creating the replication table too
       statusMaker.run();
 
       // Tell the work maker to make work

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationWorkAssigner.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationWorkAssigner.java b/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationWorkAssigner.java
index 699381f..dddbf58 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationWorkAssigner.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationWorkAssigner.java
@@ -32,8 +32,11 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection;
+import org.apache.accumulo.core.replication.ReplicationTarget;
 import org.apache.accumulo.core.replication.StatusUtil;
 import org.apache.accumulo.core.replication.proto.Replication.Status;
+import org.apache.accumulo.core.util.Daemon;
+import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.replication.ReplicationTable;
@@ -48,6 +51,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.TextFormat;
 
 /**
  * Read work records from the replication table, create work entries for other nodes to complete.
@@ -55,7 +59,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * Uses the DistributedWorkQueue to make the work available for any tserver. This approach does not consider the locality of the tabletserver performing the
  * work in relation to the data being replicated (local HDFS blocks).
  */
-public class ReplicationWorkAssigner implements Runnable {
+public class ReplicationWorkAssigner extends Daemon {
   private static final Logger log = LoggerFactory.getLogger(ReplicationWorkAssigner.class);
 
   private Master master;
@@ -68,6 +72,7 @@ public class ReplicationWorkAssigner implements Runnable {
   private ZooCache zooCache;
 
   public ReplicationWorkAssigner(Master master, Connector conn) {
+    super("Replication Work Assigner");
     this.master = master;
     this.conn = conn;
   }
@@ -185,7 +190,7 @@ public class ReplicationWorkAssigner implements Runnable {
     try {
       bs = ReplicationTable.getBatchScanner(conn, 4);
     } catch (TableNotFoundException e) {
-      log.warn("Could not find replication table", e);
+      UtilWaitThread.sleep(1000);
       return;
     }
 
@@ -216,12 +221,16 @@ public class ReplicationWorkAssigner implements Runnable {
           Path p = new Path(file);
           String filename = p.getName();
           WorkSection.getTarget(entry.getKey(), buffer);
-          String key = ReplicationWorkAssignerHelper.getQueueKey(filename, buffer.toString());
+          String key = ReplicationWorkAssignerHelper.getQueueKey(filename, ReplicationTarget.from(buffer));
 
           // And, we haven't already queued this file up for work already
           if (!queuedWork.contains(key)) {
             queueWork(key, file);
+          } else {
+            log.debug("Not re-queueing work for {}", key);
           }
+        } else {
+          log.debug("Not queueing work for {} because [{}] doesn't need replication", file, TextFormat.shortDebugString(status));
         }
       }
     } finally {
@@ -241,6 +250,7 @@ public class ReplicationWorkAssigner implements Runnable {
    */
   protected void queueWork(String key, String path) {
     try {
+      log.debug("Queued work for {} and {}", key, path);
       workQueue.addWork(key, path);
       queuedWork.add(key);
     } catch (KeeperException | InterruptedException e) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java b/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java
index 927d539..140e92d 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java
@@ -40,6 +40,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.TextFormat;
 
 /**
  * Reads replication records from the metadata table and creates status records in the replication table
@@ -71,6 +72,7 @@ public class StatusMaker {
   public void run() {
     Span span = Trace.start("replicationStatusMaker");
     try {
+      // Read from a source table (typically accumulo.metadata)
       final Scanner s;
       try {
         s = conn.createScanner(sourceTableName, new Authorizations());
@@ -78,12 +80,13 @@ public class StatusMaker {
         throw new RuntimeException(e);
       }
 
-      // Only pull records about data that has been ingested and is ready for replication
+      // Only pull replication records
       s.fetchColumnFamily(ReplicationSection.COLF);
       s.setRange(ReplicationSection.getRange());
 
       Text row = new Text(), tableId = new Text();
       for (Entry<Key,Value> entry : s) {
+        // Get a writer to the replication table
         if (null == writer) {
           // Ensures table exists and is properly configured
           ReplicationTable.create(conn);
@@ -103,14 +106,15 @@ public class StatusMaker {
         rowStr = rowStr.substring(ReplicationSection.getRowPrefix().length());
 
         try {
-          log.debug("Creating replication status record for {} on table {} with {}.", rowStr, tableId, Status.parseFrom(entry.getValue().get()).toString().replace("\n", ", "));
+          log.debug("Creating replication status record for {} on table {} with {}.", rowStr, tableId, TextFormat.shortDebugString(Status.parseFrom(entry.getValue().get())));
         } catch (InvalidProtocolBufferException e) {
           // TODO Auto-generated catch block
           e.printStackTrace();
-          }
+        }
 
         Span workSpan = Trace.start("createStatusMutations");
         try {
+          // Create entries in the replication table from the metadata table
           addStatusRecord(rowStr, tableId, entry.getValue());
         } finally {
           workSpan.stop();
@@ -125,6 +129,12 @@ public class StatusMaker {
     this.writer = bw;
   }
 
+  /**
+   * Create a status record in the replication table
+   * @param file
+   * @param tableId
+   * @param v
+   */
   protected void addStatusRecord(String file, Text tableId, Value v) {
     // TODO come up with something that tries to avoid creating a new BatchWriter all the time
     try {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/master/src/test/java/org/apache/accumulo/master/replication/ReplicationWorkAssignerTest.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/replication/ReplicationWorkAssignerTest.java b/server/master/src/test/java/org/apache/accumulo/master/replication/ReplicationWorkAssignerTest.java
index 16e6757..ad2bf75 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/replication/ReplicationWorkAssignerTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/replication/ReplicationWorkAssignerTest.java
@@ -43,6 +43,7 @@ import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.replication.ReplicationTable;
+import org.apache.accumulo.server.replication.ReplicationWorkAssignerHelper;
 import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
 import org.apache.accumulo.server.zookeeper.ZooCache;
 import org.apache.hadoop.fs.Path;
@@ -74,7 +75,7 @@ public class ReplicationWorkAssignerTest {
 
   @Test
   public void workQueuedUsingFileName() throws Exception {
-    ReplicationTarget target = new ReplicationTarget("cluster1", "table1"); 
+    ReplicationTarget target = new ReplicationTarget("cluster1", "table1");
     Text serializedTarget = target.toText();
 
     DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
@@ -83,7 +84,7 @@ public class ReplicationWorkAssignerTest {
     assigner.setWorkQueue(workQueue);
 
     Path p = new Path("/accumulo/wal/tserver+port/" + UUID.randomUUID());
-    
+
     workQueue.addWork(p.getName() + "|" + serializedTarget.toString(), p.toString());
     expectLastCall().once();
 
@@ -116,8 +117,10 @@ public class ReplicationWorkAssignerTest {
 
   @Test
   public void createWorkForFilesNeedingIt() throws Exception {
-    ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1"), target2 = new ReplicationTarget("cluster1", "table2"); 
+    ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1"), target2 = new ReplicationTarget("cluster1", "table2");
     Text serializedTarget1 = target1.toText(), serializedTarget2 = target2.toText();
+    String keyTarget1 = target1.getPeerName() + ReplicationWorkAssignerHelper.KEY_SEPARATOR + target1.getRemoteIdentifier(), keyTarget2 = target2
+        .getPeerName() + ReplicationWorkAssignerHelper.KEY_SEPARATOR + target2.getRemoteIdentifier();
 
     MockInstance inst = new MockInstance(test.getMethodName());
     Credentials creds = new Credentials("root", new PasswordToken(""));
@@ -155,25 +158,25 @@ public class ReplicationWorkAssignerTest {
 
     // Make sure we expect the invocations in the correct order (accumulo is sorted)
     if (file1.compareTo(file2) <= 0) {
-      String key = filename1 + "|" + serializedTarget1;
+      String key = filename1 + "|" + keyTarget1;
       expect(queuedWork.contains(key)).andReturn(false);
       workQueue.addWork(key, file1);
       expectLastCall().once();
       expect(queuedWork.add(key)).andReturn(true).once();
-      
-      key = filename2 + "|" + serializedTarget2;
+
+      key = filename2 + "|" + keyTarget2;
       expect(queuedWork.contains(key)).andReturn(false);
       workQueue.addWork(key, file2);
       expectLastCall().once();
       expect(queuedWork.add(key)).andReturn(true).once();
     } else {
-      String key = filename2 + "|" + serializedTarget2;
+      String key = filename2 + "|" + keyTarget2;
       expect(queuedWork.contains(key)).andReturn(false);
       workQueue.addWork(key, file2);
       expectLastCall().once();
       expect(queuedWork.add(key)).andReturn(true).once();
 
-      key = filename1 + "|" + serializedTarget1;
+      key = filename1 + "|" + keyTarget1;
       expect(queuedWork.contains(key)).andReturn(false);
       workQueue.addWork(key, file1);
       expectLastCall().once();
@@ -189,7 +192,7 @@ public class ReplicationWorkAssignerTest {
 
   @Test
   public void doNotCreateWorkForFilesNotNeedingIt() throws Exception {
-    ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1"), target2 = new ReplicationTarget("cluster1", "table2"); 
+    ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1"), target2 = new ReplicationTarget("cluster1", "table2");
     Text serializedTarget1 = target1.toText(), serializedTarget2 = target2.toText();
 
     MockInstance inst = new MockInstance(test.getMethodName());
@@ -229,7 +232,7 @@ public class ReplicationWorkAssignerTest {
     replay(queuedWork, workQueue);
 
     assigner.createWork();
-    
+
     verify(queuedWork, workQueue);
   }
 
@@ -256,13 +259,13 @@ public class ReplicationWorkAssignerTest {
   }
 
   @Test
-  public void workNotReAdded() throws Exception  {
+  public void workNotReAdded() throws Exception {
     Set<String> queuedWork = new HashSet<>();
 
     assigner.setQueuedWork(queuedWork);
 
-    ReplicationTarget target = new ReplicationTarget("cluster1", "table1"); 
-    Text serializedTarget = target.toText();
+    ReplicationTarget target = new ReplicationTarget("cluster1", "table1");
+    String serializedTarget = target.getPeerName() + ReplicationWorkAssignerHelper.KEY_SEPARATOR + target.getRemoteIdentifier();
 
     queuedWork.add("wal1|" + serializedTarget.toString());
 
@@ -281,7 +284,7 @@ public class ReplicationWorkAssignerTest {
     BatchWriter bw = ReplicationTable.getBatchWriter(conn);
     String file1 = "/accumulo/wal/tserver+port/wal1";
     Mutation m = new Mutation(file1);
-    WorkSection.add(m, serializedTarget, StatusUtil.openWithUnknownLengthValue());
+    WorkSection.add(m, target.toText(), StatusUtil.openWithUnknownLengthValue());
     bw.addMutation(m);
 
     bw.close();
@@ -291,7 +294,7 @@ public class ReplicationWorkAssignerTest {
     assigner.setMaxQueueSize(Integer.MAX_VALUE);
 
     replay(workQueue);
-    
+
     assigner.createWork();
 
     verify(workQueue);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 5e07a70..803419c 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@ -116,6 +116,7 @@ import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.replication.thrift.ReplicationServicer;
 import org.apache.accumulo.core.security.AuthorizationContainer;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SecurityUtil;
@@ -222,6 +223,7 @@ import org.apache.accumulo.tserver.metrics.TabletServerMBean;
 import org.apache.accumulo.tserver.metrics.TabletServerMinCMetrics;
 import org.apache.accumulo.tserver.metrics.TabletServerScanMetrics;
 import org.apache.accumulo.tserver.metrics.TabletServerUpdateMetrics;
+import org.apache.accumulo.tserver.replication.ReplicationServicerHandler;
 import org.apache.accumulo.tserver.replication.ReplicationWorker;
 import org.apache.commons.collections.map.LRUMap;
 import org.apache.hadoop.fs.FSError;
@@ -3017,6 +3019,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
   // used for stopping the server and MasterListener thread
   private volatile boolean serverStopRequested = false;
 
+  private HostAndPort replicationAddress;
   private HostAndPort clientAddress;
 
   private TabletServerResourceManager resourceManager;
@@ -3028,6 +3031,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
   private ZooLock tabletServerLock;
 
   private TServer server;
+  private TServer replServer;
 
   private DistributedWorkQueue bulkFailedCopyQ;
 
@@ -3117,6 +3121,18 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     return address;
   }
 
+  private HostAndPort startReplicationService() throws UnknownHostException {
+    ReplicationServicer.Iface repl = TraceWrap.service(new ReplicationServicerHandler());
+    ReplicationServicer.Processor<ReplicationServicer.Iface> processor = new ReplicationServicer.Processor<ReplicationServicer.Iface>(repl);
+    AccumuloConfiguration conf = getSystemConfiguration();
+    Property maxMessageSizeProperty = (conf.get(Property.TSERV_MAX_MESSAGE_SIZE) != null ? Property.TSERV_MAX_MESSAGE_SIZE : Property.GENERAL_MAX_MESSAGE_SIZE);
+    ServerAddress sp = TServerUtils.startServer(conf, clientAddress.getHostText(), Property.REPLICATION_RECEIPT_SERVICE_PORT, processor,
+        "ReplicationServicerHandler", "Replication Servicer", null, Property.REPLICATION_MIN_THREADS, Property.REPLICATION_THREADCHECK, maxMessageSizeProperty);
+    this.replServer = sp.server;
+    log.info("Started replication service on " + sp.address);
+    return sp.address;
+  }
+
   ZooLock getLock() {
     return tabletServerLock;
   }
@@ -3180,7 +3196,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
   // main loop listens for client requests
   public void run() {
     SecurityUtil.serverLogin(ServerConfiguration.getSiteConfiguration());
-
+    
     try {
       clientAddress = startTabletClientService();
     } catch (UnknownHostException e1) {
@@ -3204,6 +3220,14 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
       throw new RuntimeException(ex);
     }
 
+    // Start the thrift service listening for incoming replication requests
+    try {
+      replicationAddress = startReplicationService();
+    } catch (UnknownHostException e) {
+      throw new RuntimeException("Failed to start replication service", e);
+    }
+
+    // Start the pool to handle outgoing replications
     ThreadPoolExecutor replicationThreadPool = new SimpleThreadPool(getSystemConfiguration().getCount(Property.REPLICATION_WORKER_THREADS), "replication task");
     replWorker.setExecutor(replicationThreadPool);
     replWorker.run();
@@ -3296,6 +3320,8 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
         }
       }
     }
+    log.debug("Stopping Replication Server");
+    TServerUtils.stopTServer(this.replServer);
     log.debug("Stopping Thrift Servers");
     TServerUtils.stopTServer(server);
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
new file mode 100644
index 0000000..40676f7
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver.replication;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.impl.ClientExecReturn;
+import org.apache.accumulo.core.client.impl.ReplicationClient;
+import org.apache.accumulo.core.client.impl.ServerConfigurationUtil;
+import org.apache.accumulo.core.client.replication.ReplicaSystem;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.replication.ReplicationTarget;
+import org.apache.accumulo.core.replication.proto.Replication.Status;
+import org.apache.accumulo.core.replication.thrift.KeyValues;
+import org.apache.accumulo.core.replication.thrift.ReplicationCoordinator;
+import org.apache.accumulo.core.replication.thrift.ReplicationServicer;
+import org.apache.accumulo.core.replication.thrift.ReplicationServicer.Client;
+import org.apache.accumulo.core.replication.thrift.WalEdits;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
+import org.apache.accumulo.tserver.log.DfsLogger;
+import org.apache.accumulo.tserver.log.DfsLogger.DFSLoggerInputStreams;
+import org.apache.accumulo.tserver.logger.LogFileKey;
+import org.apache.accumulo.tserver.logger.LogFileValue;
+import org.apache.hadoop.fs.Path;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * 
+ */
+public class AccumuloReplicaSystem implements ReplicaSystem {
+  private static final Logger log = LoggerFactory.getLogger(AccumuloReplicaSystem.class);
+  private static final String RFILE_SUFFIX = "." + RFile.EXTENSION;
+  
+  private String instanceName, zookeepers;
+  private AccumuloConfiguration conf;
+  private VolumeManager fs;
+
+  @Override
+  public void configure(String configuration) {
+    Preconditions.checkNotNull(configuration);
+
+    // instance_name,zookeepers
+    int index = configuration.indexOf(',');
+    if (-1 == index) {
+      throw new IllegalArgumentException("Expected comma in configuration string");
+    }
+
+    instanceName = configuration.substring(0, index);
+    zookeepers = configuration.substring(index + 1);
+
+    conf = ServerConfiguration.getSiteConfiguration();
+
+    try {
+      fs = VolumeManagerImpl.get(conf);
+    } catch (IOException e) {
+      log.error("Could not connect to filesystem", e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public Status replicate(final Path p, final Status status, ReplicationTarget target) {
+    Instance localInstance = HdfsZooInstance.getInstance();
+    AccumuloConfiguration localConf = ServerConfigurationUtil.getConfiguration(localInstance);
+    
+    Instance peerInstance = getPeerInstance(target);
+    // Remote identifier is an integer (table id) in this case.
+    final int remoteTableId = Integer.parseInt(target.getRemoteIdentifier());
+
+    // Attempt the replication of this status a number of times before giving up and
+    // trying to replicate it again later some other time.
+    for (int i = 0; i < localConf.getCount(Property.REPLICATION_WORK_ATTEMPTS); i++) {
+      String peerTserver;
+      try {
+        // Ask the master on the remote what TServer we should talk with to replicate the data
+        peerTserver = ReplicationClient.executeCoordinatorWithReturn(peerInstance, new ClientExecReturn<String,ReplicationCoordinator.Client>() {
+  
+          @Override
+          public String execute(ReplicationCoordinator.Client client) throws Exception {
+            return client.getServicerAddress(remoteTableId);
+          }
+          
+        });
+      } catch (AccumuloException | AccumuloSecurityException e) {
+        // No progress is made
+        log.error("Could not connect to master at {}, cannot proceed with replication. Will retry", target, e);
+        continue;
+      }
+  
+      if (null == peerTserver) {
+        // Something went wrong, and we didn't get a valid tserver from the remote for some reason
+        log.warn("Did not receive tserver from master at {}, cannot proceed with replication. Will retry.", target);
+        continue;
+      }
+  
+      // We have a tserver on the remote -- send the data its way.
+      Long entriesReplicated;
+      //TODO should chunk up the given file into some configurable sizes instead of just sending the entire file all at once
+      //     configuration should probably just be size based.
+      final long sizeLimit = Long.MAX_VALUE;
+      try {
+        entriesReplicated = ReplicationClient.executeServicerWithReturn(peerInstance, peerTserver, new ClientExecReturn<Long,ReplicationServicer.Client>() {
+          @Override
+          public Long execute(Client client) throws Exception {
+            // RFiles have an extension, call everything else a WAL
+            if (p.getName().endsWith(RFILE_SUFFIX)) {
+              return client.replicateKeyValues(remoteTableId, getKeyValues(p, status, sizeLimit));
+            } else {
+              return client.replicateLog(remoteTableId, getWalEdits(p, status, sizeLimit));
+            }
+          }
+        });
+
+        log.debug("Replicated {} entries from {} to {} which is a part of {}", entriesReplicated, p, peerTserver, peerInstance.getInstanceName());
+
+        // Update the begin to account for what we replicated
+        Status updatedStatus = Status.newBuilder(status).setBegin(status.getBegin() + entriesReplicated).build();
+
+        return updatedStatus;
+      } catch (TTransportException | AccumuloException | AccumuloSecurityException e) {
+        log.warn("Could not connect to remote server {}, will retry", peerTserver, e);
+        UtilWaitThread.sleep(250);
+      }
+    }
+
+    // We made no status, punt on it for now, and let it re-queue itself for work
+    return status;
+  }
+
+  protected Instance getPeerInstance(ReplicationTarget target) {
+    return new ZooKeeperInstance(instanceName, zookeepers);
+  }
+
+  protected KeyValues getKeyValues(Path p, Status status, long sizeLimit) {
+    // TODO Implement me
+    throw new UnsupportedOperationException();
+  }
+
+  protected WalEdits getWalEdits(Path p, Status status, long sizeLimit) throws IOException {
+    DFSLoggerInputStreams streams = DfsLogger.readHeaderAndReturnStream(fs, p, conf);
+    DataInputStream wal = streams.getDecryptingInputStream();
+    LogFileKey key = new LogFileKey();
+    LogFileValue value = new LogFileValue();
+
+    // Read through the stuff we don't need to replicate
+    for (long i = 0; i < status.getBegin(); i++) {
+      try {
+        key.readFields(wal);
+        value.readFields(wal);
+      } catch (EOFException e) {
+        log.warn("Unexpectedly reached the end of file. Nothing more to replicate.");
+        return null;
+      }
+    }
+
+    WalEdits edits = new WalEdits();
+    edits.edits = new ArrayList<ByteBuffer>();
+    long size = 0l;
+    while (size < sizeLimit) {
+      try {
+        key.readFields(wal);
+        value.readFields(wal);
+      } catch (EOFException e) {
+        log.trace("Caught EOFException, no more data to replicate");
+        break;
+      }
+
+      switch (key.event) {
+        case MUTATION:
+        case MANY_MUTATIONS:
+          ByteArrayOutputStream baos = new ByteArrayOutputStream();
+          DataOutputStream out = new DataOutputStream(baos);
+          key.write(out);
+          value.write(out);
+          out.flush();
+          byte[] data = baos.toByteArray();
+          size += data.length;
+          edits.addToEdits(ByteBuffer.wrap(data));
+          break;
+        default:
+          log.trace("Ignoring WAL entry which doesn't contain mutations");
+          break;
+      }
+    }
+
+    log.debug("Returning {} bytes of WAL entries for replication for {}", size, p);
+
+    return edits;
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java
index c0aa31d..84782df 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java
@@ -38,10 +38,10 @@ import org.apache.accumulo.core.replication.ReplicationTarget;
 import org.apache.accumulo.core.replication.StatusUtil;
 import org.apache.accumulo.core.replication.proto.Replication.Status;
 import org.apache.accumulo.core.security.Credentials;
+import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.replication.ReplicationTable;
 import org.apache.accumulo.server.replication.ReplicationWorkAssignerHelper;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.zookeeper.DistributedWorkQueue.Processor;
 import org.apache.hadoop.fs.Path;
 import org.slf4j.Logger;
@@ -56,20 +56,21 @@ import com.google.protobuf.InvalidProtocolBufferException;
 public class ReplicationProcessor implements Processor {
   private static final Logger log = LoggerFactory.getLogger(ReplicationProcessor.class);
 
-  private Instance inst;
-  private AccumuloConfiguration conf;
-  private VolumeManager fs;
-  private Credentials creds;
+  private final Instance inst;
+  private final AccumuloConfiguration conf;
+  private final VolumeManager fs;
+  private final Credentials creds;
 
-  public ReplicationProcessor(Instance inst, AccumuloConfiguration conf, VolumeManager fs) {
+  public ReplicationProcessor(Instance inst, AccumuloConfiguration conf, VolumeManager fs, Credentials creds) {
+    this.inst = inst;
     this.conf = conf;
     this.fs = fs;
-    creds = SystemCredentials.get();
+    this.creds = creds;
   }
 
   @Override
   public ReplicationProcessor newProcessor() {
-    return new ReplicationProcessor(inst, conf, fs);
+    return new ReplicationProcessor(inst, new ServerConfiguration(inst).getConfiguration(), fs, creds);
   }
 
   @Override
@@ -77,14 +78,11 @@ public class ReplicationProcessor implements Processor {
     ReplicationTarget target = ReplicationWorkAssignerHelper.fromQueueKey(workID).getValue();
     String file = new String(data);
 
+    log.debug("Received replication work for {} to {}", file, target);
+
     // Find the configured replication peer so we know how to replicate to it
-    Map<String,String> configuredPeers = conf.getAllPropertiesWithPrefix(Property.REPLICATION_PEERS);
-    String peerType = configuredPeers.get(target.getPeerName());
-    if (null == peerType) {
-      String msg = "Cannot process replication for unknown peer: " +  target.getPeerName();
-      log.warn(msg);
-      throw new IllegalArgumentException(msg);
-    }
+    // Classname,Configuration
+    String peerType = getPeerType(target.getPeerName());
 
     // Get the peer that we're replicating to
     ReplicaSystem replica = ReplicaSystemFactory.get(peerType);
@@ -126,10 +124,25 @@ public class ReplicationProcessor implements Processor {
       recordNewStatus(filePath, replicatedStatus, target);
     }
 
+    log.debug("Did not replicate any new data for {} to {}", filePath, target);
+
     // otherwise, we didn't actually replicate because there was error sending the data
     // we can just not record any updates, and it will be picked up again by the work assigner
   }
 
+  public String getPeerType(String peerName) {
+    // Find the configured replication peer so we know how to replicate to it
+    Map<String,String> configuredPeers = conf.getAllPropertiesWithPrefix(Property.REPLICATION_PEERS);
+    String peerType = configuredPeers.get(Property.REPLICATION_PEERS.getKey() + peerName);
+    if (null == peerType) {
+      String msg = "Cannot process replication for unknown peer: " +  peerName;
+      log.warn(msg);
+      throw new IllegalArgumentException(msg);
+    }
+
+    return peerType;
+  }
+
   public Status getStatus(String file, ReplicationTarget target) throws TableNotFoundException, AccumuloException, AccumuloSecurityException, InvalidProtocolBufferException {
     Scanner s = ReplicationTable.getScanner(inst.getConnector(creds.getPrincipal(), creds.getToken()));
     s.setRange(Range.exact(file));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
new file mode 100644
index 0000000..5036cab
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver.replication;
+
+import org.apache.accumulo.core.replication.thrift.KeyValues;
+import org.apache.accumulo.core.replication.thrift.RemoteReplicationException;
+import org.apache.accumulo.core.replication.thrift.ReplicationServicer.Iface;
+import org.apache.accumulo.core.replication.thrift.WalEdits;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * 
+ */
+public class ReplicationServicerHandler implements Iface {
+  private static final Logger log = LoggerFactory.getLogger(ReplicationServicerHandler.class);
+
+  @Override
+  public long replicateLog(int remoteTableId, WalEdits data) throws RemoteReplicationException, TException {
+    log.error("Got replication request to tableID {} with {} edits", remoteTableId, data.getEditsSize());
+    return data.getEditsSize();
+  }
+
+  @Override
+  public long replicateKeyValues(int remoteTableId, KeyValues data) throws RemoteReplicationException, TException {
+    throw new UnsupportedOperationException();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java
index e00f343..63bc261 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java
@@ -23,6 +23,7 @@ import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
 import org.apache.zookeeper.KeeperException;
 
@@ -49,7 +50,7 @@ public class ReplicationWorker implements Runnable {
   @Override
   public void run() {
     try {
-      new DistributedWorkQueue(ZooUtil.getRoot(inst) + Constants.ZREPLICATION, conf).startProcessing(new ReplicationProcessor(inst, conf, fs), executor);
+      new DistributedWorkQueue(ZooUtil.getRoot(inst) + Constants.ZREPLICATION, conf).startProcessing(new ReplicationProcessor(inst, conf, fs, SystemCredentials.get()), executor);
     } catch (KeeperException | InterruptedException e) {
       throw new RuntimeException(e);
     }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/ReplicationProcessorTest.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/ReplicationProcessorTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/ReplicationProcessorTest.java
new file mode 100644
index 0000000..ae05800
--- /dev/null
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/ReplicationProcessorTest.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver.replication;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.ConfigurationCopy;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.security.Credentials;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.easymock.EasyMock;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * 
+ */
+public class ReplicationProcessorTest {
+
+  @Test
+  public void peerTypeExtractionFromConfiguration() {
+    Instance inst = EasyMock.createMock(Instance.class);
+    VolumeManager fs = EasyMock.createMock(VolumeManager.class);
+    Credentials creds = new Credentials("foo", new PasswordToken("bar"));
+    
+    Map<String,String> data = new HashMap<>();
+
+    String peerName = "peer";
+    String configuration = "java.lang.String,foo";
+    data.put(Property.REPLICATION_PEERS + peerName, configuration);
+    ConfigurationCopy conf = new ConfigurationCopy(data);
+
+    ReplicationProcessor proc = new ReplicationProcessor(inst, conf, fs, creds);
+
+    Assert.assertEquals(configuration, proc.getPeerType(peerName));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void noPeerConfigurationThrowsAnException() {
+    Instance inst = EasyMock.createMock(Instance.class);
+    VolumeManager fs = EasyMock.createMock(VolumeManager.class);
+    Credentials creds = new Credentials("foo", new PasswordToken("bar"));
+    
+    Map<String,String> data = new HashMap<>();
+    ConfigurationCopy conf = new ConfigurationCopy(data);
+
+    ReplicationProcessor proc = new ReplicationProcessor(inst, conf, fs, creds);
+
+    proc.getPeerType("foo");
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3579b674/server/tserver/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/server/tserver/src/test/resources/log4j.properties b/server/tserver/src/test/resources/log4j.properties
new file mode 100644
index 0000000..71c06e9
--- /dev/null
+++ b/server/tserver/src/test/resources/log4j.properties
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+log4j.rootLogger=DEBUG, CA
+log4j.appender.CA=org.apache.log4j.ConsoleAppender
+log4j.appender.CA.layout=org.apache.log4j.PatternLayout
+log4j.appender.CA.layout.ConversionPattern=%d{ISO8601} [%c{2}] %-5p: %m%n
+
+log4j.logger.org.apache.accumulo.core=DEBUG
+log4j.logger.org.apache.accumulo.core.client.impl.ServerClient=ERROR
+log4j.logger.org.apache.accumulo.core.util.shell.Shell.audit=off
+log4j.logger.org.apache.accumulo.core.util.shell.Shell=FATAL
+log4j.logger.org.apache.commons.vfs2.impl.DefaultFileSystemManager=WARN
+log4j.logger.org.apache.hadoop.io.compress.CodecPool=WARN
+log4j.logger.org.apache.hadoop.mapred=ERROR
+log4j.logger.org.apache.hadoop.tools.DistCp=WARN
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.hadoop.util.ProcessTree=WARN
+log4j.logger.org.apache.zookeeper.ClientCnxn=FATAL
+log4j.logger.org.apache.zookeeper.ZooKeeper=WARN
+log4j.logger.org.apache.accumulo.core.file.rfile.bcfile=INFO
+log4j.logger.org.apache.accumulo.server.util.ReplicationTableUtil=TRACE
+log4j.logger.org.apache.accumulo.core.client.impl.TabletServerBatchReaderIterator=INFO
+log4j.logger.org.apache.accumulo.core.client.impl.ThriftScanner=INFO


Mime
View raw message