hbase-commits mailing list archives

From ecl...@apache.org
Subject [06/19] hbase git commit: HBASE-12476 HydraBase consensus protocol
Date Tue, 25 Nov 2014 20:28:59 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java
new file mode 100644
index 0000000..7c828db
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java
@@ -0,0 +1,345 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.hbase.util.InjectionEvent;
+import org.apache.hadoop.hbase.util.InjectionHandler;
+import org.apache.hadoop.io.compress.CodecPool;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionInputStream;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.io.compress.GzipCodec;
+import org.apache.hadoop.io.compress.DefaultCodec;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * Compression-related utilities.
+ * Copied from the HADOOP-3315 TFile implementation.
+ */
+public final class Compression {
+  static final Log LOG = LogFactory.getLog(Compression.class);
+
+  /**
+   * Prevents instantiation of this class.
+   */
+  private Compression() {
+    super();
+  }
+
+  static class FinishOnFlushCompressionStream extends FilterOutputStream {
+    public FinishOnFlushCompressionStream(CompressionOutputStream cout) {
+      super(cout);
+    }
+
+    @Override
+    public void write(byte b[], int off, int len) throws IOException {
+      out.write(b, off, len);
+    }
+
+    @Override
+    public void flush() throws IOException {
+      CompressionOutputStream cout = (CompressionOutputStream) out;
+      cout.finish();
+      cout.flush();
+      cout.resetState();
+    }
+  }
+
+  /**
+   * Compression algorithms. Don't change the order of codecs here, and add new codecs to the end
+   * of the list, because we are using ordinal numbers of elements of this enum in some of our
+   * persistent data formats.
+   */
+  public static enum Algorithm {
+    LZO("lzo") {
+      // Use base type to avoid compile-time dependencies.
+      private volatile transient CompressionCodec lzoCodec;
+      @Override
+      protected void deleteCodec() {
+        lzoCodec = null;
+      }
+      @Override
+      public CompressionCodec getCodec(Configuration conf) {
+        if (lzoCodec == null) {
+          try {
+            Class<?> externalCodec = ClassLoader.getSystemClassLoader().loadClass(
+                "com.hadoop.compression.lzo.LzoCodec");
+            lzoCodec = (CompressionCodec) ReflectionUtils.newInstance(externalCodec, conf);
+          } catch (ClassNotFoundException e) {
+            throw new RuntimeException(e);
+          }
+        }
+        return lzoCodec;
+      }
+    },
+    GZ("gz") {
+      private volatile transient GzipCodec codec;
+      private transient Object lock = new Object();
+
+      @Override
+      public DefaultCodec getCodec(Configuration conf) {
+        if (codec == null) {
+          synchronized (lock) {
+            if (codec == null) {
+              GzipCodec tmpCodec = new ReusableStreamGzipCodec();
+              tmpCodec.setConf(new Configuration(conf));
+              codec = tmpCodec;
+            }
+          }
+        }
+        return codec;
+      }
+      @Override
+      protected void deleteCodec() {
+        codec = null;
+      }
+    },
+    NONE("none") {
+      @Override
+      public DefaultCodec getCodec(Configuration conf) {
+        return null;
+      }
+      @Override
+      protected void deleteCodec() {
+        return;
+      }
+
+      @Override
+      public synchronized InputStream createDecompressionStream(
+          InputStream downStream, Decompressor decompressor,
+          int downStreamBufferSize) throws IOException {
+        if (downStreamBufferSize > 0) {
+          return new BufferedInputStream(downStream, downStreamBufferSize);
+        }
+        // else {
+          // Make sure we bypass FSInputChecker buffer.
+        // return new BufferedInputStream(downStream, 1024);
+        // }
+        return downStream;
+      }
+
+      @Override
+      public synchronized OutputStream createCompressionStream(
+          OutputStream downStream, Compressor compressor,
+          int downStreamBufferSize) throws IOException {
+        if (downStreamBufferSize > 0) {
+          return new BufferedOutputStream(downStream, downStreamBufferSize);
+        }
+
+        return downStream;
+      }
+    },
+    SNAPPY("snappy") {
+      private volatile transient CompressionCodec snappyCodec;
+      private transient Object lock = new Object();
+
+      @Override
+      protected void deleteCodec() {
+        snappyCodec = null;
+      }
+      @SuppressWarnings("unchecked")
+      @Override
+      public CompressionCodec getCodec(Configuration conf) {
+        if (snappyCodec == null) {
+          synchronized (lock) {
+            if (snappyCodec == null) {
+              CompressionCodec tmpCodec;
+              try {
+                Class<? extends CompressionCodec> snappyCodecClass =
+                    (Class<? extends CompressionCodec>)
+                    Class.forName(CompressionCodec.class.getPackage().getName() + ".SnappyCodec");
+                tmpCodec = snappyCodecClass.newInstance();
+              } catch (InstantiationException e) {
+                LOG.error(e);
+                throw new RuntimeException(e);
+              } catch (IllegalAccessException e) {
+                LOG.error(e);
+                throw new RuntimeException(e);
+              } catch (ClassNotFoundException e) {
+                LOG.error(e);
+                throw new RuntimeException(e);
+              }
+              ((Configurable) tmpCodec).setConf(new Configuration(conf));
+              snappyCodec = tmpCodec;
+            }
+          }
+        }
+        return (CompressionCodec) snappyCodec;
+      }
+    };
+
+    private final Configuration conf;
+    private final String compressName;
+    // data input buffer size to absorb small reads from application.
+    private static final int DATA_IBUF_SIZE = 1 * 1024;
+    // data output buffer size to absorb small writes from application.
+    private static final int DATA_OBUF_SIZE = 4 * 1024;
+
+    Algorithm(String name) {
+      this.conf = new Configuration();
+      this.conf.setBoolean("hadoop.native.lib", true);
+      this.compressName = name;
+    }
+
+    public abstract CompressionCodec getCodec(Configuration conf);
+
+    public InputStream createDecompressionStream(
+        InputStream downStream, Decompressor decompressor,
+        int downStreamBufferSize) throws IOException {
+      CompressionCodec codec = getCodec(conf);
+      // Set the internal buffer size to read from down stream.
+      if (downStreamBufferSize > 0) {
+        Configurable c = (Configurable) codec;
+        c.getConf().setInt("io.file.buffer.size", downStreamBufferSize);
+      }
+      CompressionInputStream cis =
+          codec.createInputStream(downStream, decompressor);
+      BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
+      return bis2;
+
+    }
+
+    public OutputStream createCompressionStream(
+        OutputStream downStream, Compressor compressor, int downStreamBufferSize)
+        throws IOException {
+      OutputStream bos1 = null;
+      if (downStreamBufferSize > 0) {
+        bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
+      }
+      else {
+        bos1 = downStream;
+      }
+      CompressionOutputStream cos =
+          createPlainCompressionStream(bos1, compressor);
+      BufferedOutputStream bos2 =
+          new BufferedOutputStream(new FinishOnFlushCompressionStream(cos),
+              DATA_OBUF_SIZE);
+      return bos2;
+    }
+
+    /**
+     * Creates a compression stream without any additional wrapping into
+     * buffering streams.
+     */
+    public CompressionOutputStream createPlainCompressionStream(
+        OutputStream downStream, Compressor compressor) throws IOException {
+      CompressionCodec codec = getCodec(conf);
+      ((Configurable)codec).getConf().setInt("io.file.buffer.size", 32 * 1024);
+      return codec.createOutputStream(downStream, compressor);
+    }
+
+    public Compressor getCompressor() {
+      CompressionCodec codec = getCodec(conf);
+      if (codec != null) {
+        Compressor compressor = CodecPool.getCompressor(codec);
+        if (compressor != null) {
+          if (compressor.finished()) {
+            // Somebody returns the compressor to CodecPool but is still using
+            // it.
+            LOG
+                .warn("Compressor obtained from CodecPool is already finished()");
+            // throw new AssertionError(
+            // "Compressor obtained from CodecPool is already finished()");
+          }
+          compressor.reset();
+        }
+        return compressor;
+      }
+      return null;
+    }
+
+    public void returnCompressor(Compressor compressor) {
+      if (compressor != null) {
+        CodecPool.returnCompressor(compressor);
+      }
+    }
+
+    public Decompressor getDecompressor() {
+      CompressionCodec codec = getCodec(conf);
+      if (codec != null) {
+        Decompressor decompressor = CodecPool.getDecompressor(codec);
+        if (decompressor != null) {
+          if (decompressor.finished()) {
+            // Somebody returns the decompressor to CodecPool but is still using
+            // it.
+            LOG
+                .warn("Decompressor obtained from CodecPool is already finished()");
+            // throw new AssertionError(
+            // "Decompressor obtained from CodecPool is already finished()");
+          }
+          decompressor.reset();
+        }
+        return decompressor;
+      }
+
+      return null;
+    }
+
+    public void returnDecompressor(Decompressor decompressor) {
+      if (decompressor != null) {
+        CodecPool.returnDecompressor(decompressor);
+      }
+    }
+
+    public String getName() {
+      return compressName;
+    }
+
+    protected abstract void deleteCodec();
+  }
+
+  public static Algorithm getCompressionAlgorithmByName(String compressName) {
+    Algorithm[] algos = Algorithm.class.getEnumConstants();
+
+    for (Algorithm a : algos) {
+      if (a.getName().equals(compressName)) {
+        return a;
+      }
+    }
+
+    throw new IllegalArgumentException(
+        "Unsupported compression algorithm name: " + compressName);
+  }
+
+  static String[] getSupportedAlgorithms() {
+    Algorithm[] algos = Algorithm.class.getEnumConstants();
+
+    String[] ret = new String[algos.length];
+    int i = 0;
+    for (Algorithm a : algos) {
+      ret[i++] = a.getName();
+    }
+
+    return ret;
+  }
+}
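
For reference, a minimal usage sketch of the Algorithm enum above (the class name CompressionUsageSketch and the sample data are illustrative, not part of this patch): resolve a codec by its persistent name, borrow a pooled Compressor, wrap the destination stream, and return the Compressor to the pool when done.

    import java.io.ByteArrayOutputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.hbase.io.hfile.Compression;
    import org.apache.hadoop.io.compress.Compressor;

    public class CompressionUsageSketch {
      public static void main(String[] args) throws Exception {
        // Resolve the algorithm by its persistent name: "lzo", "gz", "none" or "snappy".
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName("gz");
        Compressor compressor = algo.getCompressor();   // pooled; null for NONE
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        try {
          OutputStream out = algo.createCompressionStream(sink, compressor, 0);
          out.write("hello hfile".getBytes("UTF-8"));
          out.flush();   // FinishOnFlushCompressionStream finishes the compressed block here
          out.close();
        } finally {
          algo.returnCompressor(compressor);            // always hand it back to the CodecPool
        }
        System.out.println("compressed to " + sink.size() + " bytes");
      }
    }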

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/ReusableStreamGzipCodec.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/ReusableStreamGzipCodec.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/ReusableStreamGzipCodec.java
new file mode 100644
index 0000000..2b1d48b
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/ReusableStreamGzipCodec.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.zip.GZIPOutputStream;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.CompressorStream;
+import org.apache.hadoop.io.compress.GzipCodec;
+import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+
+/**
+ * Fixes an inefficiency in Hadoop's Gzip codec by allowing compression
+ * streams to be reused.
+ */
+public class ReusableStreamGzipCodec extends GzipCodec {
+
+  private static final Log LOG = LogFactory.getLog(Compression.class);
+
+  /**
+   * A bridge that wraps around a DeflaterOutputStream to make it a
+   * CompressionOutputStream.
+   */
+  protected static class ReusableGzipOutputStream extends CompressorStream {
+
+    private static final int GZIP_HEADER_LENGTH = 10;
+
+    /**
+     * Fixed ten-byte gzip header. See {@link GZIPOutputStream}'s source for
+     * details.
+     */
+    private static final byte[] GZIP_HEADER;
+
+    static {
+      // Capture the fixed ten-byte header hard-coded in GZIPOutputStream.
+      ByteArrayOutputStream baos = new ByteArrayOutputStream();
+      byte[] header = null;
+      GZIPOutputStream gzipStream = null;
+      try {
+        gzipStream  = new GZIPOutputStream(baos);
+        gzipStream.finish();
+        header = Arrays.copyOfRange(baos.toByteArray(), 0, GZIP_HEADER_LENGTH);
+      } catch (IOException e) {
+        throw new RuntimeException("Could not create gzip stream", e);
+      } finally {
+        if (gzipStream != null) {
+          try {
+            gzipStream.close();
+          } catch (IOException e) {
+            LOG.error(e);
+          }
+        }
+      }
+      GZIP_HEADER = header;
+    }
+
+    private static class ResetableGZIPOutputStream extends GZIPOutputStream {
+      public ResetableGZIPOutputStream(OutputStream out) throws IOException {
+        super(out);
+      }
+
+      public void resetState() throws IOException {
+        def.reset();
+        crc.reset();
+        out.write(GZIP_HEADER);
+      }
+    }
+
+    public ReusableGzipOutputStream(OutputStream out) throws IOException {
+      super(new ResetableGZIPOutputStream(out));
+    }
+
+    @Override
+    public void close() throws IOException {
+      out.close();
+    }
+
+    @Override
+    public void flush() throws IOException {
+      out.flush();
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+      out.write(b);
+    }
+
+    @Override
+    public void write(byte[] data, int offset, int length) throws IOException {
+      out.write(data, offset, length);
+    }
+
+    @Override
+    public void finish() throws IOException {
+      ((GZIPOutputStream) out).finish();
+    }
+
+    @Override
+    public void resetState() throws IOException {
+      ((ResetableGZIPOutputStream) out).resetState();
+    }
+  }
+
+  @Override
+  public CompressionOutputStream createOutputStream(OutputStream out)
+      throws IOException {
+    if (ZlibFactory.isNativeZlibLoaded(getConf())) {
+      return super.createOutputStream(out);
+    }
+    return new ReusableGzipOutputStream(out);
+  }
+
+}
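
A short sketch of the stream reuse the codec above enables, assuming the pure-Java path (native zlib not loaded); the class name GzipReuseSketch is illustrative. After finish(), resetState() re-arms the deflater and re-emits the gzip header so the same stream can write another gzip member.

    import java.io.ByteArrayOutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.io.hfile.ReusableStreamGzipCodec;
    import org.apache.hadoop.io.compress.CompressionOutputStream;

    public class GzipReuseSketch {
      public static void main(String[] args) throws Exception {
        ReusableStreamGzipCodec codec = new ReusableStreamGzipCodec();
        codec.setConf(new Configuration());
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        CompressionOutputStream out = codec.createOutputStream(sink);
        out.write("block one".getBytes("UTF-8"));
        out.finish();      // terminates the first gzip member
        out.resetState();  // ReusableGzipOutputStream: reset deflater/CRC, rewrite the header
        out.write("block two".getBytes("UTF-8"));
        out.finish();
        out.close();
        System.out.println("wrote two gzip members in " + sink.size() + " bytes");
      }
    }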

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java
new file mode 100644
index 0000000..cfb3297
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java
@@ -0,0 +1,54 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile.bucket;
+
+import java.io.IOException;
+
+
+/**
+ * Thrown by {@link org.apache.hadoop.hbase.util.BucketAllocator#allocateBlock(int)} when cache is full for
+ * the requested size
+ */
+public class CacheFullException extends IOException {
+  private static final long serialVersionUID = 3265127301824638920L;
+  private int requestedSize, bucketIndex;
+
+  public CacheFullException(int requestedSize, int bucketIndex) {
+    super();
+    this.requestedSize = requestedSize;
+    this.bucketIndex = bucketIndex;
+  }
+
+  public int bucketIndex() {
+    return bucketIndex;
+  }
+
+  public int requestedSize() {
+    return requestedSize;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder(1024);
+    sb.append("Allocator requested size ").append(requestedSize);
+    sb.append(" for bucket ").append(bucketIndex);
+    return sb.toString();
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/ipc/ByteBufferOutputStream.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/ipc/ByteBufferOutputStream.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/ipc/ByteBufferOutputStream.java
new file mode 100644
index 0000000..92856ce
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/ipc/ByteBufferOutputStream.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.ipc;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+/**
+ * Not thread safe!
+ */
+public class ByteBufferOutputStream extends OutputStream {
+
+  protected ByteBuffer buf;
+
+  public ByteBufferOutputStream(int capacity) {
+    this(capacity, false);
+  }
+
+  public ByteBufferOutputStream(int capacity, boolean useDirectByteBuffer) {
+    if (useDirectByteBuffer) {
+      buf = ByteBuffer.allocateDirect(capacity);
+    } else {
+      buf = ByteBuffer.allocate(capacity);
+    }
+  }
+
+  public int size() {
+    return buf.position();
+  }
+
+  /**
+   * This flips the underlying ByteBuffer, so be sure to use it _last_!
+   * @return the flipped ByteBuffer, positioned for reading
+   */
+  public ByteBuffer getByteBuffer() {
+    buf.flip();
+    return buf;
+  }
+
+  private void checkSizeAndGrow(int extra) {
+    if ( (buf.position() + extra) > buf.limit()) {
+      int newSize = (int)Math.min((((long)buf.capacity()) * 2),
+                                  (long)(Integer.MAX_VALUE));
+      newSize = Math.max(newSize, buf.position() + extra);
+      ByteBuffer newBuf = ByteBuffer.allocate(newSize);
+      buf.flip();
+      newBuf.put(buf);
+      buf = newBuf;
+    }
+  }
+
+  // OutputStream
+  @Override
+  public void write(int b) throws IOException {
+    checkSizeAndGrow(Bytes.SIZEOF_BYTE);
+
+    buf.put((byte)b);
+  }
+
+  @Override
+  public void write(byte[] b) throws IOException {
+    checkSizeAndGrow(b.length);
+
+    buf.put(b);
+  }
+
+  @Override
+  public void write(byte[] b, int off, int len) throws IOException {
+    checkSizeAndGrow(len);
+
+    buf.put(b, off, len);
+  }
+
+  @Override
+  public void flush() throws IOException {
+    // noop
+  }
+
+  @Override
+  public void close() throws IOException {
+    // noop again. heh
+  }
+}
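
A usage sketch of the stream above (class name and sample data are illustrative); note the class comment's warnings: it is not thread safe, and getByteBuffer() flips the buffer, so it must be the last call.

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.ipc.ByteBufferOutputStream;

    public class ByteBufferOutputStreamSketch {
      public static void main(String[] args) throws Exception {
        ByteBufferOutputStream bbos = new ByteBufferOutputStream(8);  // grows by doubling when needed
        bbos.write("hello consensus".getBytes("UTF-8"));              // 15 bytes, forces one grow
        System.out.println("written: " + bbos.size());
        ByteBuffer bb = bbos.getByteBuffer();   // flips the buffer: call this last
        System.out.println("readable: " + bb.remaining());
      }
    }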

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/ipc/thrift/exceptions/ThriftHBaseException.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/ipc/thrift/exceptions/ThriftHBaseException.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/ipc/thrift/exceptions/ThriftHBaseException.java
new file mode 100644
index 0000000..ce37023
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/ipc/thrift/exceptions/ThriftHBaseException.java
@@ -0,0 +1,151 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.thrift.exceptions;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.Arrays;
+
+import com.facebook.swift.codec.ThriftConstructor;
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.codec.ThriftStruct;
+import com.google.common.base.Objects;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@ThriftStruct
+public final class ThriftHBaseException extends Exception {
+
+  private static final long serialVersionUID = -1294763299320737511L;
+
+  public static final Log LOG = LogFactory.getLog(ThriftHBaseException.class);
+
+  private String message;
+  private String exceptionClass;
+  private byte[] serializedServerJavaEx;
+
+  /**
+   * Swift constructor used for serialization
+   *
+   * @param message - the message of the exception
+   * @param exceptionClass - the class of which instance the exception is
+   * @param serializedServerJavaEx - serialized java exception
+   */
+  @ThriftConstructor
+  public ThriftHBaseException(@ThriftField(1) String message,
+      @ThriftField(2) String exceptionClass,
+      @ThriftField(3) byte[] serializedServerJavaEx) {
+    this.message = message;
+    this.exceptionClass = exceptionClass;
+    this.serializedServerJavaEx = serializedServerJavaEx;
+  }
+
+  public ThriftHBaseException(){}
+
+  public ThriftHBaseException(Exception serverJavaException) {
+    try {
+      ByteArrayOutputStream out = new ByteArrayOutputStream();
+      ObjectOutputStream oout = new ObjectOutputStream(out);
+      oout.writeObject(serverJavaException);
+      this.message = serverJavaException.getMessage();
+      this.exceptionClass = serverJavaException.getClass().getCanonicalName();
+      serializedServerJavaEx = out.toByteArray();
+    } catch (IOException e) {
+      // Should never happen in reality
+      LOG.error("Exception happened during serialization of java server exception");
+    }
+  }
+
+  public Exception getServerJavaException() {
+    Exception ex = null;
+    try {
+      ByteArrayInputStream in = new ByteArrayInputStream(serializedServerJavaEx);
+      ObjectInputStream oin = new ObjectInputStream(in);
+      ex = (Exception) oin.readObject();
+    } catch (Exception e) {
+      // Should never happen in reality
+      LOG.error("Exception happened during deserialization of java server exception");
+    }
+    return ex;
+  }
+
+  @ThriftField(1)
+  public String getMessage() {
+    return message;
+  }
+
+  @ThriftField(2)
+  public String getExceptionClass() {
+    return exceptionClass;
+  }
+
+  @ThriftField(3)
+  public byte[] getSerializedServerJavaEx() {
+    return serializedServerJavaEx;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hashCode(message, exceptionClass, serializedServerJavaEx);
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+
+    if (other == this) {
+      return true;
+    }
+
+    if (other.getClass() != this.getClass()) {
+      return false;
+    }
+
+    ThriftHBaseException otherException = (ThriftHBaseException) other;
+    if (!this.getMessage().equals(otherException.getMessage())) {
+      return false;
+    }
+
+    if (!this.getExceptionClass().equals(otherException.getExceptionClass())) {
+      return false;
+    }
+
+    if (!Bytes.equals(this.getSerializedServerJavaEx(),
+                      otherException.getSerializedServerJavaEx())) {
+      return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public String toString() {
+    return getClass() + " [message=" + message + ", exceptionClass="
+        + exceptionClass + ", serializedServerJavaEx Hash="
+        + Arrays.toString(serializedServerJavaEx).hashCode() + "]";
+  }
+}
+
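
A round-trip sketch for the exception wrapper above (class name and sample exception are illustrative): the server-side Java exception is Java-serialized into the struct and reconstructed on the receiving side.

    import org.apache.hadoop.hbase.ipc.thrift.exceptions.ThriftHBaseException;

    public class ThriftExceptionRoundTrip {
      public static void main(String[] args) {
        Exception serverSide = new java.io.IOException("region unavailable");
        // Wrap for transport over the Swift/Thrift channel.
        ThriftHBaseException wire = new ThriftHBaseException(serverSide);
        // Recover the original exception type and message on the other end.
        Exception unwrapped = wire.getServerJavaException();
        System.out.println(wire.getExceptionClass());  // java.io.IOException
        System.out.println(unwrapped.getMessage());    // region unavailable
      }
    }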

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/metrics/MetricsBase.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/metrics/MetricsBase.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/metrics/MetricsBase.java
new file mode 100644
index 0000000..c85944c
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/metrics/MetricsBase.java
@@ -0,0 +1,160 @@
+package org.apache.hadoop.hbase.metrics;
+
+import com.google.common.base.Joiner;
+import org.weakref.jmx.JmxException;
+import org.weakref.jmx.MBeanExporter;
+
+import javax.annotation.concurrent.ThreadSafe;
+import java.util.Collections;
+import java.util.Map;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * This class provides a skeleton implementation of a class that can be
+ * exported as a {@link javax.management.DynamicMBean}, using the
+ * {@link org.weakref.jmx} package.
+ */
+@ThreadSafe
+public abstract class MetricsBase {
+  private final String mbeanName;
+  private MBeanExporter exporter;
+
+  /**
+   * Construct an object which can be exported as an MBean with the given name
+   * and exporter. The given name will be used to construct an
+   * {@link javax.management.ObjectName}, which has specific requirements.
+   * The caller is responsible for passing a valid name.
+   * @param mbeanName name to be used to export the MBean
+   * @param exporter exporter to be used to export this object
+   */
+  public MetricsBase(final String mbeanName, final MBeanExporter exporter) {
+    this.mbeanName = checkNotNull(mbeanName, "Name can not be null");
+    this.exporter = exporter;
+  }
+
+
+  /**
+   * Construct an object which will be exported as an MBean using a
+   * {@link javax.management.ObjectName} that follows the following pattern:
+   * <domain>:type=<type>,name=<name>,proc=<procId>,<extendedAttributes> where
+   * the extended attributes are a map of key value strings.
+   *
+   * @param domain the domain this MBean should belong to
+   * @param type the type of the MBean
+   * @param name the name of the MBean
+   * @param procId an identifier making this MBean unique, for example the PID of
+   *               the running JVM
+   * @param extendedAttributes a key value map of strings containing additional
+   *                           attributes to be added
+   * @param exporter the exporter to be used to export this MBean
+   */
+  public MetricsBase(final String domain, final String type, final String name,
+          final String procId, final Map<String, String> extendedAttributes,
+          final MBeanExporter exporter) {
+    this(getMBeanName(domain, type, name, procId, extendedAttributes),
+            exporter);
+  }
+
+  /**
+   * Get the {@link javax.management.ObjectName} as a string of the MBean
+   * backed by this object.
+   */
+  public String getMBeanName() {
+    return mbeanName;
+  }
+
+  /**
+   * Get the {@link MBeanExporter} used to export this object.
+   */
+  public synchronized MBeanExporter getMBeanExporter() {
+    return exporter;
+  }
+
+  /**
+   * Set the given {@link MBeanExporter} as the exporter to be used to
+   * export/un-export this object.
+   * @param exporter exporter to be used to export this object
+   */
+  public synchronized void setMBeanExporter(final MBeanExporter exporter) {
+    this.exporter = exporter;
+  }
+
+  /**
+   * Check if this object is exported as MBean by the set {@link MBeanExporter}.
+   * @return true if this object is exported as MBean
+   */
+  public boolean isExported() {
+    MBeanExporter exporter = getMBeanExporter();
+    Map<String, Object> exportedObjects = Collections.emptyMap();
+    if (exporter != null) {
+      exportedObjects = exporter.getExportedObjects();
+    }
+    return exportedObjects.containsKey(mbeanName);
+  }
+
+  /**
+   * Export this object as MBean.
+   * @throws JmxException if the object could not be exported
+   */
+  public void export() throws JmxException {
+    MBeanExporter exporter = getMBeanExporter();
+    if (exporter != null) {
+      exporter.export(mbeanName, this);
+    }
+  }
+
+  /**
+   * Convenience method which will set the given {@link MBeanExporter} and
+   * export this object as MBean.
+   * @param exporter MBeanExporter to use when exporting the object
+   * @throws JmxException if the object could not be exported
+   */
+  public synchronized void export(final MBeanExporter exporter)
+          throws JmxException {
+    setMBeanExporter(checkNotNull(exporter, "MBeanExporter can not be null"));
+    export();
+  }
+
+  /**
+   * Un-export the MBean backed by this object.
+   * @throws JmxException if the MBean could not be un-exported
+   */
+  public void unexport() throws JmxException {
+    MBeanExporter exporter = getMBeanExporter();
+    if (exporter != null) {
+      exporter.unexport(mbeanName);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return mbeanName;
+  }
+
+  /**
+   * Get an MBean name that follows the following pattern:
+   * <domain>:type=<type>,name=<name>,proc=<procId>,<extendedAttributes> where
+   * the extended attributes are a map of key value strings.
+
+   * @param domain the domain this MBean should belong to
+   * @param type the type of the MBean
+   * @param name the name of the MBean
+   * @param procId an identifier making this MBean unique, such as the PID of
+   *               the running JVM
+   * @param extendedAttributes a key value map of strings containing additional
+   *                           attributes to be added
+   * @return the MBean name as string
+   */
+  public static String getMBeanName(final String domain, final String type,
+          final String name, final String procId,
+          final Map<String, String> extendedAttributes) {
+    if (!extendedAttributes.isEmpty()) {
+      return String.format("%s:type=%s,name=%s,proc=%s,%s", domain, type, name,
+              procId, Joiner.on(",").withKeyValueSeparator("=").join(
+                      extendedAttributes));
+    }
+    return String.format("%s:type=%s,name=%s,proc=%s", domain, type, name,
+            procId);
+  }
+}
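
A sketch of how a concrete metrics class might build on MetricsBase; QueueMetrics and its attribute are hypothetical and not part of this patch, and @Managed is the org.weakref.jmx annotation that exposes the getter on the exported MBean.

    import java.lang.management.ManagementFactory;
    import java.util.Collections;
    import org.apache.hadoop.hbase.metrics.MetricsBase;
    import org.weakref.jmx.MBeanExporter;
    import org.weakref.jmx.Managed;

    public class QueueMetrics extends MetricsBase {
      private volatile long depth;

      public QueueMetrics(MBeanExporter exporter) {
        // ObjectName becomes "org.apache.hadoop.hbase:type=Queue,name=requests,proc=1234"
        super("org.apache.hadoop.hbase", "Queue", "requests", "1234",
            Collections.<String, String>emptyMap(), exporter);
      }

      @Managed
      public long getDepth() { return depth; }

      public void setDepth(long depth) { this.depth = depth; }

      public static void main(String[] args) {
        MBeanExporter exporter = new MBeanExporter(ManagementFactory.getPlatformMBeanServer());
        QueueMetrics metrics = new QueueMetrics(exporter);
        metrics.export();                       // registers the MBean under the name above
        System.out.println(metrics.isExported());
        metrics.unexport();
      }
    }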

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/metrics/TimeStat.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/metrics/TimeStat.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/metrics/TimeStat.java
new file mode 100644
index 0000000..6f26099
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/metrics/TimeStat.java
@@ -0,0 +1,88 @@
+package org.apache.hadoop.hbase.metrics;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
+import com.google.common.base.Ticker;
+import io.airlift.stats.ExponentialDecay;
+import io.airlift.stats.TimeDistribution;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+public class TimeStat extends TimeDistribution {
+  private final Ticker ticker;
+
+  public TimeStat(TimeUnit unit) {
+    this(ExponentialDecay.oneMinute(), unit);
+  }
+
+  public TimeStat(double alpha, TimeUnit unit) {
+    this(alpha, unit, Ticker.systemTicker());
+  }
+
+  public TimeStat(double alpha, TimeUnit unit, Ticker ticker) {
+    super(alpha, unit);
+    this.ticker = ticker;
+  }
+
+  public synchronized void add(long value, TimeUnit unit) {
+    add(unit.toNanos(value));
+  }
+
+  public BlockTimer time() {
+    return new BlockTimer();
+  }
+
+  public class BlockTimer implements AutoCloseable {
+    private final long start = ticker.read();
+
+    @Override
+    public void close() {
+      add(ticker.read() - start);
+    }
+  }
+
+  public static void main(String [] args) {
+    final TimeStat stat = new TimeStat(TimeUnit.MICROSECONDS);
+    for (int i = 0; i < 100; ++i) {
+      benchmark(stat);
+    }
+  }
+
+  private static void benchmark(final TimeStat stat) {
+    long n = 1000000;
+    long[] randomValues = new long[1000];
+    Stopwatch stopwatch = new Stopwatch();
+    //Stopwatch.createUnstarted();
+
+    long elapsedTotal = 0;
+    long cycleMax = 0;
+    for (int i = 0; i < n / randomValues.length; ++i) {
+      generateValues(randomValues, 3000000L, 4000000000L);
+      stopwatch.start();
+      for (int j = 0; j < randomValues.length; ++j) {
+        stat.add(randomValues[j]);
+      }
+
+      long elapsed = stopwatch.elapsedTime(TimeUnit.NANOSECONDS);
+      elapsedTotal += elapsed;
+      if (elapsed > cycleMax) {
+        cycleMax = elapsed;
+      }
+
+      stopwatch.reset();
+    }
+    System.out.printf("Elapsed: %dns, max cycle: %dns\n", elapsedTotal,
+            cycleMax);
+  }
+
+  private static void generateValues(final long[]a, long start, long end) {
+    Preconditions.checkArgument(start < end, "Start should be less than end");
+    long delta = end - start;
+    Random random = new Random();
+
+    for (int i = 0; i < a.length; ++i) {
+      a[i] = (random.nextLong() % delta) + start;
+    }
+  }
+}
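
A small sketch of the two ways to feed the TimeStat above (names are illustrative): the AutoCloseable BlockTimer measures a code block via the ticker, or a duration can be added explicitly.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.metrics.TimeStat;

    public class TimeStatSketch {
      public static void main(String[] args) throws Exception {
        TimeStat appendLatency = new TimeStat(TimeUnit.MICROSECONDS);
        // Elapsed ticker time is recorded when the timer is closed.
        try (TimeStat.BlockTimer timer = appendLatency.time()) {
          Thread.sleep(5);  // stand-in for the timed work
        }
        // Or record an explicit duration.
        appendLatency.add(250, TimeUnit.MICROSECONDS);
      }
    }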

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/DataStoreState.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/DataStoreState.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/DataStoreState.java
new file mode 100644
index 0000000..f7100e7
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/DataStoreState.java
@@ -0,0 +1,101 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import com.facebook.swift.codec.ThriftConstructor;
+import com.facebook.swift.codec.ThriftEnum;
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.codec.ThriftStruct;
+
+@ThriftStruct
+public final class DataStoreState {
+
+  @ThriftEnum
+  public static enum RegionMode {
+    UNASSIGNED,
+    ACTIVE,  // does memstore updates, and flushing/compaction.
+    WITNESS, // does memstore updates. No flushing/compaction.
+    // Prunes the memstore whenever the active replica flushes.
+    SHADOW_WITNESS; // does not update memstore. No flushing/compaction.
+
+    boolean isWitness() {
+      return this.equals(WITNESS);
+    }
+
+    boolean isActive() {
+      return this.equals(ACTIVE);
+    }
+
+    boolean isShadowWitness() {
+      return this.equals(SHADOW_WITNESS);
+    }
+
+    boolean isUnassigned() {
+      return this.equals(UNASSIGNED);
+    }
+  };
+
+  private final String dataStoreId;
+  private volatile long committedUpto;
+  private volatile long canCommitUpto;
+  private volatile RegionMode mode;
+
+  public DataStoreState(final String dataStoreId) {
+    this.dataStoreId = dataStoreId;
+    committedUpto = -1;
+    canCommitUpto = -1;
+    mode = RegionMode.UNASSIGNED;
+  }
+
+  @ThriftConstructor
+  public DataStoreState(@ThriftField(1) String dataStoreId,
+                        @ThriftField(2) long committedUpto,
+                        @ThriftField(3) long canCommitUpto,
+                        @ThriftField(4) RegionMode mode) {
+    this.dataStoreId = dataStoreId;
+    this.committedUpto = committedUpto;
+    this.canCommitUpto = canCommitUpto;
+    this.mode = mode;
+  }
+
+  @ThriftField(1)
+  public String getDataStoreId() {
+    return dataStoreId;
+  }
+
+  @ThriftField(2)
+  public long getCommittedUpto() {
+    return committedUpto;
+  }
+
+  @ThriftField(3)
+  public long getCanCommitUpto() {
+    return canCommitUpto;
+  }
+
+  @ThriftField(4)
+  public RegionMode getMode() {
+    return mode;
+  }
+
+  public void setCommittedUpto(long committedUpto) {
+    this.committedUpto = committedUpto;
+    this.canCommitUpto = committedUpto;
+  }
+
+  public void setMode(RegionMode mode) {
+    this.mode = mode;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+
+    builder.append("{");
+    builder.append("id=" + dataStoreId + ", ");
+    builder.append("mode=" + mode + ", ");
+    builder.append("canCommitUpto=" + canCommitUpto + ", ");
+    builder.append("committedUptoIndex=" + committedUpto);
+    builder.append("}");
+
+    return builder.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/RaftEventListener.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/RaftEventListener.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/RaftEventListener.java
new file mode 100644
index 0000000..04f4a77
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/RaftEventListener.java
@@ -0,0 +1,17 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.consensus.protocol.Payload;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public interface RaftEventListener {
+  ByteBuffer becameLeader() throws IOException;
+  void becameNonLeader();
+  void commit(final long index, final Payload payload);
+  boolean canStepDown();
+  long getMinUnpersistedIndex();
+  DataStoreState getState();
+  void updatePeerAvailabilityStatus(String peerAddress, boolean isAvailable);
+  void closeDataStore();
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOverloadedException.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOverloadedException.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOverloadedException.java
new file mode 100644
index 0000000..f5cec76
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOverloadedException.java
@@ -0,0 +1,47 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.RegionException;
+
+public class RegionOverloadedException extends RegionException {
+  private static final long serialVersionUID = -8436877560512061623L;
+
+  /** default constructor */
+  public RegionOverloadedException() {
+    super();
+  }
+
+  /** @param s message
+   *  @param waitMillis -- request the client to back off for waitMillis milliseconds
+   */
+  public RegionOverloadedException(String s, long waitMillis) {
+    super(s, waitMillis);
+  }
+
+  /**
+   * Create a RegionOverloadedException from another one, attaching a set of related exceptions
+   * from a batch operation. The new exception reuses the original exception's stack trace.
+   *  
+   * @param roe the original exception
+   * @param exceptions other exceptions that happened in the same batch operation
+   * @param waitMillis remaining time for the client to wait in milliseconds
+   * @return the new exception with complete information
+   */
+  public static RegionOverloadedException create(RegionOverloadedException roe,
+      List<Throwable> exceptions, int waitMillis) {
+    StringBuilder sb = new StringBuilder(roe.getMessage());
+    for (Throwable t : exceptions) {
+      if (t != roe) {
+        sb.append(t.toString());
+        sb.append("\n");
+      }
+    }
+    RegionOverloadedException e = new RegionOverloadedException(sb.toString(), waitMillis);
+    if (roe != null) {  // Safety check
+      e.setStackTrace(roe.getStackTrace());
+    }
+    return e;
+  }
+
+}
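
A sketch of the factory above (values are illustrative): the other failures of a batch are folded into one exception while the original stack trace and the client backoff hint are preserved.

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.regionserver.RegionOverloadedException;

    public class OverloadSketch {
      public static void main(String[] args) {
        RegionOverloadedException roe =
            new RegionOverloadedException("too many store files; ", 2000L);
        List<Throwable> batchFailures = Arrays.<Throwable>asList(
            roe, new RuntimeException("secondary failure"));
        // Ask the client to back off for the remaining 2000 ms.
        RegionOverloadedException merged =
            RegionOverloadedException.create(roe, batchFailures, 2000);
        System.out.println(merged.getMessage());
      }
    }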

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/PercentileMetric.java/PercentileMetric.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/PercentileMetric.java/PercentileMetric.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/PercentileMetric.java/PercentileMetric.java
new file mode 100644
index 0000000..0ad6031
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/PercentileMetric.java/PercentileMetric.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.metrics;
+import org.apache.hadoop.hbase.util.Histogram;
+import org.apache.hadoop.metrics.util.MetricsLongValue;
+import org.apache.hadoop.metrics.util.MetricsRegistry;
+import org.apache.hadoop.metrics.MetricsRecord;
+
+/**
+ * Uses the org.apache.hadoop.hbase.util.Histogram to maintain time-varying
+ * metrics. The histogram class can provide various approximate details about
+ * a stream of data supplied to the PercentileMetric without actually
+ * storing the data.
+ */
+public class PercentileMetric extends MetricsLongValue {
+  public static final int HISTOGRAM_NUM_BUCKETS_DEFAULT = 20;
+  public static final double HISTOGRAM_MINVALUE_DEFAULT = 0.0;
+  public static final double HISTOGRAM_MAXVALUE_DEFAULT = 1000000000.0;
+  public static final double DEFAULT_PERCENTILE = 99.0;
+  public static final long DEFAULT_SAMPLE_WINDOW = 60;
+  public static final double P99 = 99.0;
+  public static final double P95 = 95.0;
+  public static final double P75 = 75.0;
+  public static final double P50 = 50.0;
+
+  private int numBuckets;
+  private double percentile;
+  private Histogram underlyingHistogram;
+
+  /**
+   * This constructor provides a way to create a PercentileMetric which uses a
+   * Histogram to maintain the statistics of a metric stream.
+   */
+  public PercentileMetric(final String nam, final MetricsRegistry registry,
+    Histogram histogram) {
+    super(nam, registry);
+    underlyingHistogram = histogram;
+  }
+
+  public PercentileMetric(String nam, MetricsRegistry registry,
+      Histogram histogram, double percentile, int numBuckets) {
+    super(nam, registry);
+    this.underlyingHistogram = histogram;
+    this.percentile = percentile;
+    this.numBuckets = numBuckets;
+  }
+
+  /**
+   * The histogram which has the values updated.
+   */
+  public void setHistogram(final Histogram hist) {
+    this.underlyingHistogram = hist;
+  }
+
+  /**
+   * numBuckets : This denotes the number of buckets used to sample the data.
+   * The updateMetric and refresh calls will run in O(numBuckets).
+   */
+  public void setNumBuckets(final int numBuckets) {
+    this.numBuckets = numBuckets;
+  }
+
+  /**
+   * percentile : The percentile of the metric that will be estimated
+   * by this metric. The value should be between 0 and 100,
+   * else it will throw an exception.
+   */
+  public void setPercentile(final double prcntyl) {
+    this.percentile = prcntyl;
+  }
+
+  public double getValue() {
+    return this.get();
+  }
+
+  public void updateMetric() {
+    this.set((long)underlyingHistogram.getPercentileEstimate(percentile));
+  }
+
+  public void refresh() {
+    underlyingHistogram.refresh(this.numBuckets);
+  }
+
+  /**
+   * Add a value in the underlying histogram.
+   * @param value The value to be added.
+   */
+  public void addValueInHistogram(long value) {
+    underlyingHistogram.addValue(value);
+  }
+
+  /**
+   * Push the metric value to the <code>MetricsRecord</code> object
+   * @param mr the MetricsRecord to push the metric value to
+   */
+  public void pushMetric(final MetricsRecord mr) {
+    this.updateMetric();
+    mr.setMetric(getName(), (long)getValue());
+  }
+}
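
A sketch of wiring the metric above to a histogram (metric name and registry are illustrative): observed values stream into the shared Histogram, and updateMetric() pulls the current p99 estimate.

    import org.apache.hadoop.hbase.regionserver.metrics.PercentileMetric;
    import org.apache.hadoop.hbase.util.Histogram;
    import org.apache.hadoop.metrics.util.MetricsRegistry;

    public class PercentileMetricSketch {
      public static void main(String[] args) {
        Histogram latencies = new Histogram(
            PercentileMetric.HISTOGRAM_NUM_BUCKETS_DEFAULT,
            PercentileMetric.HISTOGRAM_MINVALUE_DEFAULT,
            PercentileMetric.HISTOGRAM_MAXVALUE_DEFAULT);
        PercentileMetric p99 = new PercentileMetric("appendLatencyP99",
            new MetricsRegistry(), latencies, PercentileMetric.P99,
            PercentileMetric.HISTOGRAM_NUM_BUCKETS_DEFAULT);
        for (long v = 1; v <= 1000; v++) {
          p99.addValueInHistogram(v);   // feed observed values
        }
        p99.updateMetric();             // pull the current p99 estimate from the histogram
        System.out.println("p99 estimate: " + p99.getValue());
      }
    }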

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractWAL.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractWAL.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractWAL.java
new file mode 100644
index 0000000..2ae84d1
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractWAL.java
@@ -0,0 +1,69 @@
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.regionserver.metrics.PercentileMetric;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Histogram;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+
+public abstract class AbstractWAL {
+  public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY");
+  public static final byte [] METAROW = Bytes.toBytes("METAROW");
+
+  // For measuring size of each transaction
+  protected static Histogram writeSize = new Histogram(
+    PercentileMetric.HISTOGRAM_NUM_BUCKETS_DEFAULT,
+    PercentileMetric.HISTOGRAM_MINVALUE_DEFAULT,
+    PercentileMetric.HISTOGRAM_MAXVALUE_DEFAULT);
+
+  // For measuring the sync time of each HLog.append operation
+  protected static Histogram syncTime = new Histogram(
+    PercentileMetric.HISTOGRAM_NUM_BUCKETS_DEFAULT,
+    PercentileMetric.HISTOGRAM_MINVALUE_DEFAULT,
+    PercentileMetric.HISTOGRAM_MAXVALUE_DEFAULT);
+
+  // For measuring the internal group commit time
+  protected static Histogram gsyncTime = new Histogram(
+    PercentileMetric.HISTOGRAM_NUM_BUCKETS_DEFAULT,
+    PercentileMetric.HISTOGRAM_MINVALUE_DEFAULT,
+    PercentileMetric.HISTOGRAM_MAXVALUE_DEFAULT);
+
+  public abstract long append(HRegionInfo info, byte [] tableName, WALEdit edits,
+                     final long now)
+    throws IOException, ExecutionException, InterruptedException;
+
+  public abstract long startMemStoreFlush(final byte[] regionName);
+  public abstract void completeMemStoreFlush(final byte[] regionName, final byte[] tableName,
+                                    final long logSeqId, final boolean isMetaRegion);
+  public abstract void abortMemStoreFlush(byte[] regionName);
+  public abstract long startMemStoreFlush(final byte[] regionName,
+                                 long firstSeqIdInStoresToFlush,
+                                 long firstSeqIdInStoresNotToFlush);
+
+  public abstract long obtainNextSequenceNumber()
+    throws IOException, ExecutionException, InterruptedException;
+  public abstract long getSequenceNumber();
+  public abstract void initSequenceNumber(long seqid)
+    throws IOException, ExecutionException, InterruptedException;
+
+  public abstract void close() throws IOException;
+  public abstract void closeAndDelete() throws IOException;
+  public abstract String getPath();
+
+  public static Histogram getWriteSizeHistogram() {
+    return writeSize;
+  }
+
+  public static Histogram getSyncTimeHistogram() {
+    return syncTime;
+  }
+
+  public static Histogram getGSyncTimeHistogram() {
+    return gsyncTime;
+  }
+
+  public abstract long getLastCommittedIndex();
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
new file mode 100644
index 0000000..b1222bf
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
@@ -0,0 +1,521 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import com.facebook.swift.codec.ThriftConstructor;
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.codec.ThriftStruct;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.SettableFuture;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+import org.apache.hadoop.hbase.ipc.ByteBufferOutputStream;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.Decompressor;
+
+import java.io.*;
+import java.nio.ByteBuffer;
+import java.util.*;
+
+/**
+ * WALEdit: Used in HBase's transaction log (WAL) to represent
+ * the collection of edits (KeyValue objects) corresponding to a
+ * single transaction. The class implements "Writable" interface
+ * for serializing/deserializing a set of KeyValue items.
+ *
+ * Previously, if a transaction contains 3 edits to c1, c2, c3 for a row R,
+ * the HLog would have three log entries as follows:
+ *
+ *    <logseq1-for-edit1>:<KeyValue-for-edit-c1>
+ *    <logseq2-for-edit2>:<KeyValue-for-edit-c2>
+ *    <logseq3-for-edit3>:<KeyValue-for-edit-c3>
+ *
+ * This presents problems because row level atomicity of transactions
+ * was not guaranteed. If we crash after a few of the above appends make
+ * it, then recovery will restore a partial transaction.
+ *
+ * In the new world, all the edits for a given transaction are written
+ * out as a single record, for example:
+ *
+ *   <logseq#-for-entire-txn>:<WALEdit-for-entire-txn>
+ *
+ * where, the WALEdit is serialized as:
+ *   <-1, # of edits, <KeyValue>, <KeyValue>, ... >
+ * For example:
+ *   <-1, 3, <Keyvalue-for-edit-c1>, <KeyValue-for-edit-c2>, <KeyValue-for-edit-c3>>
+ *
+ * The -1 marker is just a special way of being backward compatible with
+ * an old HLog which would have contained a single <KeyValue>.
+ *
+ * The deserializer for WALEdit detects, in a backward-compatible way, whether
+ * the record is an old-style KeyValue or the new-style WALEdit.
+ *
+ */
+
+@ThriftStruct
+public final class WALEdit implements Writable, HeapSize {
+  public enum PayloadHeaderField {
+    MAGIC(0, Bytes.SIZEOF_BYTE),
+    TYPE(1, Bytes.SIZEOF_BYTE),
+    VERSION(2, Bytes.SIZEOF_BYTE),
+    TIMESTAMP(3, Bytes.SIZEOF_LONG),
+    COMPRESSION_CODEC(11, Bytes.SIZEOF_BYTE),
+    UNCOMPRESSED_LENGTH(12, Bytes.SIZEOF_INT),
+    NUM_WALEDITS(16, Bytes.SIZEOF_INT);
+
+    final int offset;
+    final int length;
+
+    private PayloadHeaderField(final int offset, final int length) {
+      this.offset = offset;
+      this.length = length;
+    }
+  }
+
+  public final static int PAYLOAD_HEADER_SIZE =
+          PayloadHeaderField.NUM_WALEDITS.offset +
+                  PayloadHeaderField.NUM_WALEDITS.length;
+
+  private final int VERSION_2 = -1;
+
+  private final List<KeyValue> kvs;
+
+  private NavigableMap<byte[], Integer> scopes;
+  
+  private long length = 0;
+
+  private SettableFuture<Long> commitFuture;
+
+  @ThriftConstructor
+  public WALEdit(
+      @ThriftField(1) final List<KeyValue> kvs) {
+    this.kvs = kvs;
+    for (KeyValue k : kvs) {
+      length += k.getLength();
+    }
+  }
+
+  public WALEdit() {
+    kvs = new ArrayList<KeyValue>();
+  }
+
+  public SettableFuture<Long> getCommitFuture() {
+    return commitFuture;
+  }
+
+  public void add(KeyValue kv) {
+    this.kvs.add(kv);
+    length += kv.getLength();
+  }
+
+  public boolean isEmpty() {
+    return kvs.isEmpty();
+  }
+  
+  public long getTotalKeyValueLength() {
+    return length;
+  }
+
+  public int size() {
+    return kvs.size();
+  }
+
+  @ThriftField(1)
+  public List<KeyValue> getKeyValues() {
+    return kvs;
+  }
+
+  public NavigableMap<byte[], Integer> getScopes() {
+    return scopes;
+  }
+
+  public void setScopes (NavigableMap<byte[], Integer> scopes) {
+    // We currently process the map outside of WALEdit,
+    // TODO revisit when replication is part of core
+    this.scopes = scopes;
+  }
+
+  public void readFields(DataInput in) throws IOException {
+    kvs.clear();
+    if (scopes != null) {
+      scopes.clear();
+    }
+    int versionOrLength = in.readInt();
+    if (versionOrLength == VERSION_2) {
+      // this is new style HLog entry containing multiple KeyValues.
+      int numEdits = in.readInt();
+      for (int idx = 0; idx < numEdits; idx++) {
+        KeyValue kv = new KeyValue();
+        kv.readFields(in);
+        this.add(kv);
+      }
+      int numFamilies = in.readInt();
+      if (numFamilies > 0) {
+        if (scopes == null) {
+          scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
+        }
+        for (int i = 0; i < numFamilies; i++) {
+          byte[] fam = Bytes.readByteArray(in);
+          int scope = in.readInt();
+          scopes.put(fam, scope);
+        }
+      }
+    } else {
+      // this is an old style HLog entry. The int that we just
+      // read is actually the length of a single KeyValue.
+      KeyValue kv = new KeyValue();
+      kv.readFields(versionOrLength, in);
+      this.add(kv);
+    }
+
+  }
+
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(VERSION_2);
+    out.writeInt(kvs.size());
+    // Write the KeyValues first, followed by the replication scopes (if any).
+    for (KeyValue kv : kvs) {
+      kv.write(out);
+    }
+    if (scopes == null) {
+      out.writeInt(0);
+    } else {
+      out.writeInt(scopes.size());
+      for (byte[] key : scopes.keySet()) {
+        Bytes.writeByteArray(out, key);
+        out.writeInt(scopes.get(key));
+      }
+    }
+  }
+
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+
+    sb.append("[#edits: " + kvs.size() + " = <");
+    for (KeyValue kv : kvs) {
+      sb.append(kv.toString());
+      sb.append("; ");
+    }
+    if (scopes != null) {
+      sb.append(" scopes: " + scopes.toString());
+    }
+    sb.append(">]");
+    return sb.toString();
+  }
+
+  @Override
+  public long heapSize() {
+    long ret = 0;
+    for (KeyValue kv : kvs) {
+      ret += kv.heapSize();
+    }
+    if (scopes != null) {
+      ret += ClassSize.TREEMAP;
+      ret += ClassSize.align(scopes.size() * ClassSize.MAP_ENTRY);
+    }
+    return ret;
+  }
+
+  /**
+   * Serialize the given list of WALEdits to an OutputStream.
+   * @param edits the WALEdits to be serialized
+   * @param os the {@link DataOutputStream} to write to
+   * @throws IOException if the output could not be written to the stream
+   */
+  private static void serializeWALEdits(final List<WALEdit> edits,
+          final DataOutputStream os) throws IOException {
+    for (final WALEdit e : edits) {
+      os.writeInt(e.getKeyValues().size());
+      for (final KeyValue k : e.getKeyValues()) {
+        os.writeInt(k.getLength());
+        os.write(k.getBuffer(), k.getOffset(), k.getLength());
+      }
+    }
+  }
+
+  /**
+   * Serialize the given list of WALEdits edits to a {@link ByteBuffer},
+   * optionally compressing the WALEdit data using the given compression codec.
+   *
+   * @param edits the list of WALEdits
+   * @param timestamp the timestamp to write into the payload header
+   * @param codec the compression algorithm to apply to the WALEdit data
+   * @return a {@link ByteBuffer} containing a serialized representation of the
+   *          WALEdits, or null if the given list is null
+   * @throws java.io.IOException if the WALEdits could not be serialized
+   */
+  public static ByteBuffer serializeToByteBuffer(final List<WALEdit> edits,
+          long timestamp, Compression.Algorithm codec) throws IOException {
+    Preconditions.checkNotNull(codec);
+    if (edits == null) {
+      return null;
+    }
+
+    int totalPayloadSize = getTotalPayloadSize(edits);
+    ByteBufferOutputStream buffer = new ByteBufferOutputStream(
+            totalPayloadSize);
+    try (DataOutputStream os = new DataOutputStream(buffer)) {
+      // Write the magic value
+      os.write(HConstants.CONSENSUS_PAYLOAD_MAGIC_VALUE);
+
+      // Write that the payload is WALEdit
+      os.write(HConstants.BATCHED_WALEDIT_TYPE);
+
+      // Write the version of WALEdit
+      os.write(HConstants.BATCHED_WALEDIT_VERSION);
+
+      // Write the timestamp
+      os.writeLong(timestamp);
+
+      // Write compression algorithm
+      os.write((byte) codec.ordinal());
+
+      // Write uncompressed size of the list of WALEdits
+      os.writeInt(totalPayloadSize - PAYLOAD_HEADER_SIZE);
+
+      // Write the number of WALEdits in the list
+      os.writeInt(edits.size());
+    }
+
+    // Turn on compression if requested when serializing the list of WALEdits.
+    boolean compressed = !codec.equals(Compression.Algorithm.NONE);
+    Compressor compressor = codec.getCompressor();
+    try (DataOutputStream os = new DataOutputStream(compressed ?
+                    codec.createCompressionStream(buffer, compressor,
+                            totalPayloadSize - PAYLOAD_HEADER_SIZE) : buffer)) {
+      serializeWALEdits(edits, os);
+    } finally {
+      codec.returnCompressor(compressor);
+    }
+
+    // Flip and return the byte buffer.
+    return buffer.getByteBuffer();
+  }
+
+  public static int getWALEditsSize(final List<WALEdit> edits) {
+    int size = 0;
+    for (final WALEdit e : edits) {
+      size += Bytes.SIZEOF_INT + e.getKeyValues().size() * Bytes.SIZEOF_INT
+              + e.getTotalKeyValueLength();
+    }
+    return size;
+  }
+
+  public static int getTotalPayloadSize(final List<WALEdit> edits) {
+    return PAYLOAD_HEADER_SIZE + getWALEditsSize(edits);
+  }
+
+  public static boolean isBatchedWALEdit(final ByteBuffer data) {
+    // Read the Magic Value
+    if (data.get(data.position() + PayloadHeaderField.MAGIC.offset) !=
+            HConstants.CONSENSUS_PAYLOAD_MAGIC_VALUE) {
+      return false;
+    }
+
+    // Read the payload type
+    if (data.get(data.position() + PayloadHeaderField.TYPE.offset) !=
+            HConstants.BATCHED_WALEDIT_TYPE) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Get the timestamp of the batched WALEdit. This method assumes the given
+   * ByteBuffer contains a valid batched WALEdit, which can be verified using
+   * {@link #isBatchedWALEdit}.
+   */
+  public static long getTimestamp(final ByteBuffer data) {
+    return data.getLong(data.position() + PayloadHeaderField.TIMESTAMP.offset);
+  }
+
+  /**
+   * Get the compression codec used to compress the serialized WALEdits
+   * contained in the given {@link ByteBuffer}. This method does not change the
+   * position of the buffer and assumes the caller has performed a version
+   * check on the buffer to ensure the header contains a compression codec
+   * field.
+   *
+   * @param data a {@link java.nio.ByteBuffer} containing a serialized list of
+   *             WALEdits
+   * @return the compression codec or the NONE codec if the WALEdit was written
+   *              with a version which does not support compression
+   */
+  public static Compression.Algorithm getCompressionCodec(
+          final ByteBuffer data) {
+    byte codecValue = data.get(data.position() +
+            PayloadHeaderField.COMPRESSION_CODEC.offset);
+    Compression.Algorithm[] codecs = Compression.Algorithm.values();
+    if (codecValue >= 0 && codecValue < codecs.length) {
+      return codecs[codecValue];
+    }
+    return Compression.Algorithm.NONE;
+  }
+
+  /**
+   * Wrap the array backing the given ByteBuffer with a ByteArrayInputStream.
+   * Since this InputStream works on the underlying array, the state of the
+   * given ByteBuffer is guaranteed to remain unchanged.
+   *
+   * @param buffer an array backed {@link ByteBuffer}
+   * @param position the position in the buffer from where to start the stream
+   * @param length length of the input stream
+   * @return an {@link java.io.InputStream} wrapping the underlying array of
+   *          the given {@link ByteBuffer}
+   */
+  private static ByteArrayInputStream getByteArrayInputStream(
+          final ByteBuffer buffer, final int position, final int length) {
+    Preconditions.checkArgument(buffer.hasArray(),
+            "An array backed buffer is required");
+    Preconditions.checkArgument(position >= buffer.position(),
+            "Position can not be behind buffer.position()");
+    Preconditions.checkArgument(
+            position - buffer.position() + length <= buffer.remaining(),
+            "Length can not be past the remainder of the buffer");
+    return new ByteArrayInputStream(buffer.array(),
+            buffer.arrayOffset() + position, length);
+  }
+
+  /**
+   * Read a list of serialized WALEdits from the given
+   * {@link DataInputStream}, instantiating them backed by the given
+   * {@link ByteBuffer}.
+   *
+   * @param numEdits the number of WALEdits expected in the stream
+   * @param is the {@link InputStream} containing serialized WALEdits
+   * @param buffer the {@link ByteBuffer} to be used to back the KVs
+   * @param offset the offset in the buffer from where to copy the KVs
+   * @param copyToBuffer copy from the stream to the buffer if true, assume the
+   *                     stream data is already in the buffer otherwise
+   * @return a list of WALEdits
+   * @throws IOException if an exception occurs while reading from the stream
+   */
+  private static List<WALEdit> deserializeWALEdits(final int numEdits,
+          final DataInputStream is, final ByteBuffer buffer, final int offset,
+          final boolean copyToBuffer) throws IOException {
+    List<WALEdit> edits = new ArrayList<>(numEdits);
+    byte[] array = buffer.array();
+    int cursor = buffer.arrayOffset() + offset;
+
+    for (int editIdx = 0; editIdx < numEdits; ++editIdx) {
+      WALEdit edit = new WALEdit();
+      int numKVs = is.readInt();
+      cursor += Bytes.SIZEOF_INT;
+
+      for (int kvIdx = 0; kvIdx < numKVs; ++kvIdx) {
+        int kvLen = is.readInt();
+        cursor += Bytes.SIZEOF_INT;
+
+        if (copyToBuffer) {
+          // If the buffer does not contain the data yet (which would be the
+          // case if it is compressed), copy from the InputStream to the buffer.
+          is.readFully(array, cursor, kvLen);
+        } else {
+          // Do not copy to the buffer and advance the stream cursor.
+          is.skipBytes(kvLen);
+        }
+
+        // Instantiate the KV backed by the ByteBuffer
+        edit.add(new KeyValue(array, cursor, kvLen));
+        // Move the ByteBuffer write cursor
+        cursor += kvLen;
+      }
+
+      edits.add(edit);
+    }
+
+    return edits;
+  }
+
+  /**
+   * De-serializes a ByteBuffer to a list of WALEdits. If the serialized WALEdits
+   * are not compressed, the resulting list of KVs will be backed by the array
+   * backing the ByteBuffer instead of allocating fresh buffers. As a
+   * consequence, this method assumes the state of the ByteBuffer is never
+   * modified.
+   *
+   * @param data a {@link ByteBuffer} containing serialized WALEdits
+   * @return a list of WALEdits, or null if the buffer does not contain a
+   *          batched WALEdit of a supported version
+   * @throws java.io.IOException if the WALEdits could not be deserialized
+   */
+  public static List<WALEdit> deserializeFromByteBuffer(final ByteBuffer data)
+          throws IOException {
+    if (!isBatchedWALEdit(data)) {
+      return null;
+    }
+
+    int firstBytePosition = data.position();
+    int bufferLength = data.remaining();
+
+    // The check above already read the magic value and type fields, so move on
+    // to the version field.
+    byte version = data.get(firstBytePosition +
+            PayloadHeaderField.VERSION.offset);
+    if (version != HConstants.BATCHED_WALEDIT_VERSION) {
+      return null;
+    }
+
+    // Get the compression codec and uncompressed size of the list of WALEdits.
+    // Use the remainder of the current buffer as a hint.
+    Compression.Algorithm codec = getCompressionCodec(data);
+
+    int uncompressedEditsLen = data.getInt(firstBytePosition +
+            PayloadHeaderField.UNCOMPRESSED_LENGTH.offset);
+    int numEdits = data.getInt(firstBytePosition +
+            PayloadHeaderField.NUM_WALEDITS.offset);
+
+    if (numEdits == 0) {
+      return Collections.emptyList();
+    }
+
+    // Wrap the remainder of the given ByteBuffer with a DataInputStream and
+    // de-serialize the list of WALEdits.
+    //
+    // If the WALEdits are compressed, wrap the InputStream by a decompression
+    // stream and allocate a new buffer to store the uncompressed data.
+    int cursor = firstBytePosition + PAYLOAD_HEADER_SIZE;
+    InputStream is = getByteArrayInputStream(data, cursor,
+            bufferLength - PAYLOAD_HEADER_SIZE);
+    ByteBuffer deserializedData = data;
+
+    final boolean compressed = !codec.equals(Compression.Algorithm.NONE);
+    Decompressor decompressor = codec.getDecompressor();
+    try {
+      if (compressed) {
+        int compressedEditsLen = bufferLength - PAYLOAD_HEADER_SIZE;
+        is = codec.createDecompressionStream(is, decompressor,
+                compressedEditsLen);
+        // Allocate a new ByteBuffer for the uncompressed data.
+        deserializedData = ByteBuffer.allocate(uncompressedEditsLen);
+        cursor = 0;
+      }
+
+      try (DataInputStream dis = new DataInputStream(is)) {
+        return deserializeWALEdits(numEdits, dis, deserializedData, cursor,
+                compressed);
+      }
+    } finally {
+      codec.returnDecompressor(decompressor);
+    }
+  }
+}
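
As an aside for readers of this patch, the batched-WALEdit payload API above (serializeToByteBuffer, isBatchedWALEdit, getTimestamp, getCompressionCodec, deserializeFromByteBuffer) round-trips a list of edits through the header + body layout described by PayloadHeaderField. The following is a minimal sketch of that round trip. It is not part of the patch; the class name WALEditRoundTripSketch is invented for illustration, and the four-argument KeyValue(row, family, qualifier, value) constructor and the Bytes helpers are assumed to be the standard HBase ones.

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.Compression;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WALEditRoundTripSketch {
      public static void main(String[] args) throws Exception {
        // One transaction touching a single cell.
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
            Bytes.toBytes("q"), Bytes.toBytes("value")));

        // Serialize a batch of edits into the consensus payload format
        // (magic, type, version, timestamp, codec, uncompressed length, count, edits).
        ByteBuffer payload = WALEdit.serializeToByteBuffer(
            Arrays.asList(edit), System.currentTimeMillis(),
            Compression.Algorithm.NONE);

        // The header accessors read at absolute offsets and leave the
        // buffer position untouched.
        System.out.println("batched   = " + WALEdit.isBatchedWALEdit(payload));
        System.out.println("timestamp = " + WALEdit.getTimestamp(payload));
        System.out.println("codec     = " + WALEdit.getCompressionCodec(payload));

        // With the NONE codec the deserialized KVs are backed by the
        // payload's own array rather than freshly allocated buffers.
        List<WALEdit> roundTripped = WALEdit.deserializeFromByteBuffer(payload);
        System.out.println("edits = " + roundTripped.size()
            + ", kvs = " + roundTripped.get(0).size());
      }
    }

A compressed variant differs only in the codec argument (e.g. Compression.Algorithm.GZ); in that case deserializeFromByteBuffer decompresses into a newly allocated buffer sized from the UNCOMPRESSED_LENGTH header field.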

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java
new file mode 100644
index 0000000..9633d01
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java
@@ -0,0 +1,400 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.1)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hbase.thrift.generated;
+
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An IllegalArgument exception indicates an illegal or invalid
+ * argument was passed into a procedure.
+ */
+public class IllegalArgument extends TException implements org.apache.thrift.TBase<IllegalArgument, IllegalArgument._Fields>, java.io.Serializable, Cloneable, Comparable<IllegalArgument> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IllegalArgument");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new IllegalArgumentStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new IllegalArgumentTupleSchemeFactory());
+  }
+
+  public String message; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(IllegalArgument.class, metaDataMap);
+  }
+
+  public IllegalArgument() {
+  }
+
+  public IllegalArgument(
+    String message)
+  {
+    this();
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public IllegalArgument(IllegalArgument other) {
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+  }
+
+  public IllegalArgument deepCopy() {
+    return new IllegalArgument(this);
+  }
+
+  @Override
+  public void clear() {
+    this.message = null;
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public IllegalArgument setMessage(String message) {
+    this.message = message;
+    return this;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MESSAGE:
+      return getMessage();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MESSAGE:
+      return isSetMessage();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof IllegalArgument)
+      return this.equals((IllegalArgument)that);
+    return false;
+  }
+
+  public boolean equals(IllegalArgument that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+
+    boolean present_message = true && (isSetMessage());
+    builder.append(present_message);
+    if (present_message)
+      builder.append(message);
+
+    return builder.toHashCode();
+  }
+
+  @Override
+  public int compareTo(IllegalArgument other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("IllegalArgument(");
+    boolean first = true;
+
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class IllegalArgumentStandardSchemeFactory implements SchemeFactory {
+    public IllegalArgumentStandardScheme getScheme() {
+      return new IllegalArgumentStandardScheme();
+    }
+  }
+
+  private static class IllegalArgumentStandardScheme extends StandardScheme<IllegalArgument> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, IllegalArgument struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+
+      // check for required fields of primitive type, which can't be checked in the validate method
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, IllegalArgument struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class IllegalArgumentTupleSchemeFactory implements SchemeFactory {
+    public IllegalArgumentTupleScheme getScheme() {
+      return new IllegalArgumentTupleScheme();
+    }
+  }
+
+  private static class IllegalArgumentTupleScheme extends TupleScheme<IllegalArgument> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, IllegalArgument struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMessage()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMessage()) {
+        oprot.writeString(struct.message);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, IllegalArgument struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.message = iprot.readString();
+        struct.setMessageIsSet(true);
+      }
+    }
+  }
+
+}
+
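
The Thrift-generated struct above is boilerplate, but the accessor pattern it exposes (fluent setters, isSet checks, deepCopy, and the generated equals/hashCode) is the same for every generated type in this patch. Below is a tiny usage sketch, not part of the patch, using only methods visible in the generated code; the class name IllegalArgumentSketch is invented for illustration.

    import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;

    public class IllegalArgumentSketch {
      public static void main(String[] args) {
        // The fluent setter returns the struct itself.
        IllegalArgument err =
            new IllegalArgument().setMessage("row key must not be empty");

        // isSetMessage() reports whether the field has been assigned.
        if (err.isSetMessage()) {
          System.out.println(err.getMessage());  // -> row key must not be empty
        }

        // deepCopy() clones the struct; equals() compares field by field.
        IllegalArgument copy = err.deepCopy();
        System.out.println(err.equals(copy));    // -> true
      }
    }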

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/util/Arena.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/util/Arena.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/util/Arena.java
new file mode 100644
index 0000000..e27dd1f
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/util/Arena.java
@@ -0,0 +1,12 @@
+package org.apache.hadoop.hbase.util;
+
+import org.apache.hadoop.hbase.io.hfile.bucket.CacheFullException;
+
+public interface Arena {
+
+  MemoryBuffer allocateByteBuffer(int size) throws
+    CacheFullException, BucketAllocatorException;
+
+  void freeByteBuffer(final MemoryBuffer buffer);
+
+}

