hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From bus...@apache.org
Subject [1/3] hbase git commit: HBASE-13569 Correct Javadoc (for Java8)
Date Sun, 14 Jun 2015 03:27:10 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4468f3ce9 -> 0a0ff3354


http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index 1775230..4790ee9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -947,7 +947,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
 
     /**
      * @param v
-     * @return True if v < 0 or v > current block buffer limit.
+     * @return True if v &lt; 0 or v &gt; current block buffer limit.
      */
     protected final boolean checkLen(final int v) {
       return v < 0 || v > this.blockBuffer.limit();

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
index deaa2c0..3e0f91f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
@@ -44,11 +44,11 @@ public interface HFileScanner {
    * Consider the key stream of all the keys in the file,
    * <code>k[0] .. k[n]</code>, where there are n keys in the file.
    * @param key Key to find.
-   * @return -1, if key < k[0], no position;
+   * @return -1, if key &lt; k[0], no position;
    * 0, such that k[i] = key and scanner is left in position i; and
-   * 1, such that k[i] < key, and scanner is left in position i.
+   * 1, such that k[i] &lt; key, and scanner is left in position i.
    * The scanner will position itself between k[i] and k[i+1] where
-   * k[i] < key <= k[i+1].
+   * k[i] &lt; key &lt;= k[i+1].
    * If there is no key k[i+1] greater than or equal to the input key, then the
    * scanner will position itself at the end of the file and next() will return
    * false when it is called.
@@ -70,14 +70,14 @@ public interface HFileScanner {
    * <code>k[0] .. k[n]</code>, where there are n keys in the file after
    * current position of HFileScanner.
    * The scanner will position itself between k[i] and k[i+1] where
-   * k[i] < key <= k[i+1].
+   * k[i] &lt; key &lt;= k[i+1].
    * If there is no key k[i+1] greater than or equal to the input key, then the
    * scanner will position itself at the end of the file and next() will return
    * false when it is called.
    * @param key Key to find (should be non-null)
-   * @return -1, if key < k[0], no position;
+   * @return -1, if key &lt; k[0], no position;
    * 0, such that k[i] = key and scanner is left in position i; and
-   * 1, such that k[i] < key, and scanner is left in position i.
+   * 1, such that k[i] &lt; key, and scanner is left in position i.
    * @throws IOException
    */
   @Deprecated
@@ -90,9 +90,9 @@ public interface HFileScanner {
    * Consider the key stream of all the keys in the file,
    * <code>k[0] .. k[n]</code>, where there are n keys in the file.
    * @param key Key to find
-   * @return false if key <= k[0] or true with scanner in position 'i' such
-   * that: k[i] < key.  Furthermore: there may be a k[i+1], such that
-   * k[i] < key <= k[i+1] but there may also NOT be a k[i+1], and next() will
+   * @return false if key &lt;= k[0] or true with scanner in position 'i' such
+   * that: k[i] &lt; key.  Furthermore: there may be a k[i+1], such that
+   * k[i] &lt; key &lt;= k[i+1] but there may also NOT be a k[i+1], and next() will
    * return false (EOF).
    * @throws IOException
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 889b7e7..2bfb821 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -482,7 +482,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
   }
 
   /**
-   * Evict the block, and it will be cached by the victim handler if exists &&
+   * Evict the block, and it will be cached by the victim handler if exists &amp;&amp;
    * block may be read again later
    * @param block
    * @param evictedByEvictionProcess true if the given block is evicted by

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
index 1624082..0b28d72 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.io.HeapSize;
 
 /**
  * A memory-bound queue that will grow until an element brings
- * total size >= maxSize.  From then on, only entries that are sorted larger
+ * total size &gt;= maxSize.  From then on, only entries that are sorted larger
  * than the smallest current entry will be inserted/replaced.
  *
  * <p>Use this when you want to find the largest elements (according to their

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java
index f298698..d4a279c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java
@@ -37,7 +37,7 @@
  * (roughly because GC is less). See Nick Dimiduk's
  * <a href="http://www.n10k.com/blog/blockcache-101/">BlockCache 101</a> for some numbers.
  *
- * <h1>Enabling {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}</h2>
+ * <h1>Enabling {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}</h1>
  * See the HBase Reference Guide <a href="http://hbase.apache.org/book.html#enable.bucketcache">Enable BucketCache</a>.
  *
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
index 0da16a7..bb63e01 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
@@ -27,7 +27,7 @@ public interface RpcCallContext extends Delayable {
   /**
    * Check if the caller who made this IPC call has disconnected.
    * If called from outside the context of IPC, this does nothing.
-   * @return < 0 if the caller is still connected. The time in ms
+   * @return &lt; 0 if the caller is still connected. The time in ms
    *  since the disconnection otherwise
    */
   long disconnectSince();

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
index 86fc5df..48a982b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.client.Scan;
  * </p>
  *
  * <pre>
- * List<Scan> scans = new ArrayList<Scan>();
+ * List&lt;Scan&gt; scans = new ArrayList&lt;Scan&gt;();
  * 
  * Scan scan1 = new Scan();
  * scan1.setStartRow(firstRow1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 8cea2f4..5e94cca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -503,7 +503,8 @@ public class TableMapReduceUtil {
    * and add it to the credentials for the given map reduce job.
    *
    * The quorumAddress is the key to the ZK ensemble, which contains:
-   * hbase.zookeeper.quorum, hbase.zookeeper.client.port and zookeeper.znode.parent
+   * hbase.zookeeper.quorum, hbase.zookeeper.client.port and
+   * zookeeper.znode.parent
    *
    * @param job The job that requires the permission.
    * @param quorumAddress string that contains the 3 required configuratins
@@ -609,7 +610,8 @@ public class TableMapReduceUtil {
    * default; e.g. copying tables between clusters, the source would be
    * designated by <code>hbase-site.xml</code> and this param would have the
    * ensemble address of the remote cluster.  The format to pass is particular.
-   * Pass <code> &lt;hbase.zookeeper.quorum>:&lt;hbase.zookeeper.client.port>:&lt;zookeeper.znode.parent>
+   * Pass <code> &lt;hbase.zookeeper.quorum&gt;:&lt;
+   *             hbase.zookeeper.client.port&gt;:&lt;zookeeper.znode.parent&gt;
    * </code> such as <code>server,server2,server3:2181:/hbase</code>.
    * @param serverClass redefined hbase.regionserver.class
    * @param serverImpl redefined hbase.regionserver.impl
@@ -640,7 +642,8 @@ public class TableMapReduceUtil {
    * default; e.g. copying tables between clusters, the source would be
    * designated by <code>hbase-site.xml</code> and this param would have the
    * ensemble address of the remote cluster.  The format to pass is particular.
-   * Pass <code> &lt;hbase.zookeeper.quorum>:&lt;hbase.zookeeper.client.port>:&lt;zookeeper.znode.parent>
+   * Pass <code> &lt;hbase.zookeeper.quorum&gt;:&lt;
+   *             hbase.zookeeper.client.port&gt;:&lt;zookeeper.znode.parent&gt;
    * </code> such as <code>server,server2,server3:2181:/hbase</code>.
    * @param serverClass redefined hbase.regionserver.class
    * @param serverImpl redefined hbase.regionserver.impl

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
index f859780..5bd8d15 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
@@ -139,7 +139,8 @@ public class TableRecordReaderImpl {
   /**
    * Build the scanner. Not done in constructor to allow for extension.
    *
-   * @throws IOException, InterruptedException
+   * @throws IOException
+   * @throws InterruptedException
    */
   public void initialize(InputSplit inputsplit,
       TaskAttemptContext context) throws IOException,

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index aaf288a..27b70b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2116,7 +2116,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
   /**
    * Report whether this master has started initialization and is about to do meta region assignment
-   * @return true if master is in initialization & about to assign hbase:meta regions
+   * @return true if master is in initialization &amp; about to assign hbase:meta regions
    */
   public boolean isInitializationStartsMetaRegionAssignment() {
     return this.initializationBeforeMetaAssignment;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index b2e8306..27fab4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -763,7 +763,7 @@ public class MasterRpcServices extends RSRpcServices
    * @return Pair indicating the number of regions updated Pair.getFirst is the
    *         regions that are yet to be updated Pair.getSecond is the total number
    *         of regions of the table
-   * @throws IOException
+   * @throws ServiceException
    */
   @Override
   public GetSchemaAlterStatusResponse getSchemaAlterStatus(

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
index fbfb440..454064b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
@@ -54,7 +54,7 @@ import com.google.common.base.Preconditions;
 public class RegionStateStore {
   private static final Log LOG = LogFactory.getLog(RegionStateStore.class);
 
-  /** The delimiter for meta columns for replicaIds > 0 */
+  /** The delimiter for meta columns for replicaIds &gt; 0 */
   protected static final char META_REPLICA_ID_DELIMITER = '_';
 
   private volatile Region metaRegion;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
index 9673acf..fad84f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
@@ -132,7 +132,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
    *     Order the regions to move from most recent to least.
    *
    * <li>Iterate down the least loaded servers, assigning regions so each server
-   *     has exactly </b>MIN</b> regions.  Stop once you reach a server that
+   *     has exactly <b>MIN</b> regions.  Stop once you reach a server that
    *     already has &gt;= <b>MIN</b> regions.
    *
    *     Regions being assigned to underloaded servers are those that were shed
@@ -159,7 +159,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
    *
    * <li>If we still have more regions that need assignment, again iterate the
    *     least loaded servers, this time giving each one (filling them to
-   *     </b>MAX</b>) until we run out.
+   *     <b>MAX</b>) until we run out.
    *
    * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
    *

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index e58f855..4955cfa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -49,8 +49,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
- * <p>This is a best effort load balancer. Given a Cost function F(C) => x It will
- * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the
+ * <p>This is a best effort load balancer. Given a Cost function F(C) =&gt; x It will
+ * randomly try and mutate the cluster to Cprime. If F(Cprime) &lt; F(C) then the
  * new cluster state becomes the plan. It includes costs functions to compute the cost of:</p>
  * <ul>
  * <li>Region Load</li>

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
index 8568cfc..f23a9a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
@@ -30,15 +30,17 @@ import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode;
  * <p>
  * Currently there are two different types of Store/Family-level queries.
  * <ul><li>{@link ExplicitColumnTracker} is used when the query specifies
- * one or more column qualifiers to return in the family.
- * <ul><li>{@link ScanWildcardColumnTracker} is used when no columns are
- * explicitly specified.
+ * one or more column qualifiers to return in the family.</li>
+ * <li>{@link ScanWildcardColumnTracker} is used when no columns are
+ * explicitly specified.</li>
+ * </ul>
  * <p>
  * This class is utilized by {@link ScanQueryMatcher} mainly through two methods:
  * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
- * conditions of the query.
- * <ul><li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher
- * believes that the current column should be skipped (by timestamp, filter etc.)
+ * conditions of the query.</li>
+ * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher
+ * believes that the current column should be skipped (by timestamp, filter etc.)</li>
+ * </ul>
  * <p>
  * These two methods returns a 
  * {@link org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode}

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
index d40b21d..930baf0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
@@ -68,7 +68,7 @@ public interface CompactionRequestor {
   /**
    * @param r Region to compact
    * @param why Why compaction was requested -- used in debug messages
-   * @param pri Priority of this compaction. minHeap. <=0 is critical
+   * @param pri Priority of this compaction. minHeap. &lt;=0 is critical
    * @param requests custom compaction requests. Each compaction must specify the store on which it
    *          is acting. Can be <tt>null</tt> in which case a compaction will be attempted on all
    *          stores for the region.
@@ -84,7 +84,7 @@ public interface CompactionRequestor {
    * @param r Region to compact
    * @param s Store within region to compact
    * @param why Why compaction was requested -- used in debug messages
-   * @param pri Priority of this compaction. minHeap. <=0 is critical
+   * @param pri Priority of this compaction. minHeap. &lt;=0 is critical
    * @param request custom compaction request to run. {@link Store} and {@link Region} for the
    *          request must match the region and store specified here.
    * @return The created {@link CompactionRequest} or <tt>null</tt> if no compaction was started

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index a0766ef..5c08a62 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -221,7 +221,7 @@ public class DefaultMemStore implements MemStore {
   /**
    * Write an update
    * @param cell
-   * @return approximate size of the passed KV & newly added KV which maybe different than the
+   * @return approximate size of the passed KV &amp; newly added KV which maybe different than the
    *         passed-in KV
    */
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
index 70254fe..8f466fc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
@@ -26,9 +26,10 @@ import org.apache.hadoop.hbase.Cell;
  * during the course of a Get or Scan operation.
  * <p>
  * This class is utilized through three methods:
- * <ul><li>{@link #add} when encountering a Delete
- * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
- * <li>{@link #update} when reaching the end of a StoreFile
+ * <ul><li>{@link #add} when encountering a Delete</li>
+ * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted</li>
+ * <li>{@link #update} when reaching the end of a StoreFile</li>
+ * </ul>
  */
 @InterfaceAudience.Private
 public interface DeleteTracker {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
index b779e22..2914b05 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
@@ -40,9 +40,10 @@ import org.apache.hadoop.hbase.util.Bytes;
  * <p>
  * This class is utilized by {@link ScanQueryMatcher} mainly through two methods:
  * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
- * conditions of the query.
- * <ul><li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher
- * believes that the current column should be skipped (by timestamp, filter etc.)
+ * conditions of the query.</li>
+ * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher
+ * believes that the current column should be skipped (by timestamp, filter etc.)</li>
+ * </ul>
  * <p>
  * These two methods returns a 
  * {@link org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode}

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
index 37dc16c..fe9df71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
@@ -206,7 +206,7 @@ public class HeapMemoryManager {
   }
 
   /**
-   * @return heap occupancy percentage, 0 <= n <= 1
+   * @return heap occupancy percentage, 0 &lt;= n &lt;= 1
    */
   public float getHeapOccupancyPercent() {
     return this.heapOccupancyPercent;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
index 476bcdb..1439388 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
@@ -93,8 +93,8 @@ extends ConstantSizeRegionSplitPolicy {
   }
 
   /**
-   * @return Region max size or <code>count of regions squared * flushsize, which ever is
-   * smaller; guard against there being zero regions on this server.
+   * @return Region max size or <code>count of regions squared * flushsize</code>,
+   * which ever is smaller; guard against there being zero regions on this server.
    */
   protected long getSizeToCheck(final int tableRegionsCount) {
     // safety check for 100 to avoid numerical overflow in extreme cases

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
index 18f5198..b68868e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
@@ -36,7 +36,7 @@ import java.util.Set;
  * The LruHashMap is a memory-aware HashMap with a configurable maximum
  * memory footprint.
  * <p>
- * It maintains an ordered list of all entries in the map ordered by
+ * It maintains an ordered list of all entries in the/ map ordered by
  * access time.  When space needs to be freed becase the maximum has been
  * reached, or the application has asked to free memory, entries will be
  * evicted according to an LRU (least-recently-used) algorithm.  That is,
@@ -102,7 +102,7 @@ implements HeapSize, Map<K,V> {
    * @throws IllegalArgumentException if the initial capacity is less than one
    * @throws IllegalArgumentException if the initial capacity is greater than
    * the maximum capacity
-   * @throws IllegalArgumentException if the load factor is <= 0
+   * @throws IllegalArgumentException if the load factor is &lt;= 0
    * @throws IllegalArgumentException if the max memory usage is too small
    * to support the base overhead
    */
@@ -141,7 +141,7 @@ implements HeapSize, Map<K,V> {
    * @throws IllegalArgumentException if the initial capacity is less than one
    * @throws IllegalArgumentException if the initial capacity is greater than
    * the maximum capacity
-   * @throws IllegalArgumentException if the load factor is <= 0
+   * @throws IllegalArgumentException if the load factor is &lt;= 0
    */
   public LruHashMap(int initialCapacity, float loadFactor) {
     this(initialCapacity, loadFactor, DEFAULT_MAX_MEM_USAGE);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
index 87710df..0566dca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.util.StringUtils;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
- * A pool of {@link HeapMemStoreLAB$Chunk} instances.
+ * A pool of {@link HeapMemStoreLAB.Chunk} instances.
  * 
  * MemStoreChunkPool caches a number of retired chunks for reusing, it could
  * decrease allocating bytes when writing, thereby optimizing the garbage

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
index 9f98ba6..b2cb772 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
@@ -22,9 +22,10 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
 
 /**
+ * <p>
  * This class is for maintaining the various regionserver statistics
  * and publishing them through the metrics interfaces.
- * <p/>
+ * </p>
  * This class has a number of metrics variables that are publicly accessible;
  * these variables (objects) have methods to update their values.
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
index 0285a59..2b12dec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
@@ -23,11 +23,11 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 /**
  * Wraps together the mutations which are applied as a batch to the region and their operation
  * status and WALEdits. 
- * @see org.apache.hadoop.hbase.coprocessor.
- *      RegionObserver#preBatchMutate(ObserverContext, MiniBatchOperationInProgress)
- * @see org.apache.hadoop.hbase.coprocessor.
- *      RegionObserver#postBatchMutate(ObserverContext, MiniBatchOperationInProgress)
- * @param <T> Pair<Mutation, Integer> pair of Mutations and associated rowlock ids .
+ * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#preBatchMutate(
+ * ObserverContext, MiniBatchOperationInProgress)
+ * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#postBatchMutate(
+ * ObserverContext, MiniBatchOperationInProgress)
+ * @param T Pair&lt;Mutation, Integer&gt; pair of Mutations and associated rowlock ids .
  */
 @InterfaceAudience.Private
 public class MiniBatchOperationInProgress<T> {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
index c0ab1a0..1eb05f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
 
 /**
- * A "non-reversed & non-lazy" scanner which does not support backward scanning
+ * A "non-reversed &amp; non-lazy" scanner which does not support backward scanning
  * and always does a real seek operation. Most scanners are inherited from this
  * class.
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 7184b02..f593e3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -1345,14 +1345,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
    * The opening is coordinated by ZooKeeper, and this method requires the znode to be created
    *  before being called. As a consequence, this method should be called only from the master.
    * <p>
-   * Different manages states for the region are:<ul>
+   * Different manages states for the region are:
+   * </p><ul>
    *  <li>region not opened: the region opening will start asynchronously.</li>
    *  <li>a close is already in progress: this is considered as an error.</li>
    *  <li>an open is already in progress: this new open request will be ignored. This is important
    *  because the Master can do multiple requests if it crashes.</li>
-   *  <li>the region is already opened:  this new open request will be ignored./li>
+   *  <li>the region is already opened:  this new open request will be ignored.</li>
    *  </ul>
-   * </p>
    * <p>
    * Bulk assign: If there are more than 1 region to open, it will be considered as a bulk assign.
    * For a single region opening, errors are sent through a ServiceException. For bulk assign,
@@ -1808,7 +1808,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   /**
    * Atomically bulk load several HFiles into an open region
    * @return true if successful, false is failed but recoverably (no action)
-   * @throws IOException if failed unrecoverably
+   * @throws ServiceException if failed unrecoverably
    */
   @Override
   public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller,

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index da642ca..d2d661e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -510,7 +510,7 @@ public interface Region extends ConfigurationObserver {
    * Attempts to atomically load a group of hfiles.  This is critical for loading
    * rows with multiple column families atomically.
    *
-   * @param familyPaths List of Pair<byte[] column family, String hfilePath>
+   * @param familyPaths List of Pair&lt;byte[] column family, String hfilePath&gt;
    * @param bulkLoadListener Internal hooks enabling massaging/preparation of a
    * file about to be bulk loaded
    * @param assignSeqId
@@ -648,7 +648,6 @@ public interface Region extends ConfigurationObserver {
    * the region needs compacting
    *
    * @throws IOException general io exceptions
-   * @throws DroppedSnapshotException Thrown when abort is required
    * because a snapshot was not properly persisted.
    */
   FlushResult flush(boolean force) throws IOException;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
index 66e087b..1bc6546 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
@@ -91,7 +91,7 @@ public interface RegionScanner extends InternalScanner {
    * Upon returning from this method, the {@link ScannerContext} will contain information about the
    * progress made towards the limits. This is a special internal method to be called from
    * coprocessor hooks to avoid expensive setup. Caller must set the thread's readpoint, start and
-   * close a region operation, an synchronize on the scanner object. Example: <code><pre>
+   * close a region operation, an synchronize on the scanner object. Example: <code>
    * HRegion region = ...;
    * RegionScanner scanner = ...
    * MultiVersionConsistencyControl.setThreadReadPoint(scanner.getMvccReadPoint());
@@ -105,7 +105,7 @@ public interface RegionScanner extends InternalScanner {
    * } finally {
    *   region.closeRegionOperation();
    * }
-   * </pre></code>
+   * </code>
    * @param result return output array
    * @param scannerContext The {@link ScannerContext} instance encapsulating all limits that should
    *          be tracked during calls to this method. The progress towards these limits can be

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
index a5c17fb..adee911 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
@@ -36,9 +36,10 @@ import org.apache.hadoop.hbase.util.Bytes;
  *
  * <p>
  * This class is utilized through three methods:
- * <ul><li>{@link #add} when encountering a Delete or DeleteColumn
- * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
- * <li>{@link #update} when reaching the end of a StoreFile or row for scans
+ * <ul><li>{@link #add} when encountering a Delete or DeleteColumn</li>
+ * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted</li>
+ * <li>{@link #update} when reaching the end of a StoreFile or row for scans</li>
+ * </ul>
  * <p>
  * This class is NOT thread-safe as queries are never multi-threaded
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index a77fc0e..da2cb10 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -126,7 +126,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   /**
    * Adds a value to the memstore
    * @param cell
-   * @return memstore size delta & newly added KV which maybe different than the passed in KV
+   * @return memstore size delta &amp; newly added KV which maybe different than the passed in KV
    */
   Pair<Long, Cell> add(Cell cell);
 
@@ -136,8 +136,9 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   long timeOfOldestEdit();
 
   /**
-   * Removes a Cell from the memstore. The Cell is removed only if its key & memstoreTS match the
-   * key & memstoreTS value of the cell parameter.
+   * Removes a Cell from the memstore. The Cell is removed only if its key
+   * &amp; memstoreTS match the key &amp; memstoreTS value of the cell
+   * parameter.
    * @param cell
    */
   void rollback(final Cell cell);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 54164a0..c7991c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Scanner scans both the memstore and the Store. Coalesce KeyValue stream
- * into List<KeyValue> for a single row.
+ * into List&lt;KeyValue&gt; for a single row.
  */
 @InterfaceAudience.Private
 public class StoreScanner extends NonReversedNonLazyKeyValueScanner

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
index 55c057b..cb89346 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
@@ -44,7 +44,6 @@ public class ReplayHLogKey extends HLogKey {
   /**
    * Returns the original sequence id
    * @return long the new assigned sequence number
-   * @throws InterruptedException
    */
   @Override
   public long getSequenceId() throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
index 081bd8e..d2119d7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
@@ -57,9 +57,9 @@ import com.google.common.annotations.VisibleForTesting;
  * Previously, if a transaction contains 3 edits to c1, c2, c3 for a row R,
  * the WAL would have three log entries as follows:
  *
- *    <logseq1-for-edit1>:<KeyValue-for-edit-c1>
- *    <logseq2-for-edit2>:<KeyValue-for-edit-c2>
- *    <logseq3-for-edit3>:<KeyValue-for-edit-c3>
+ *    &lt;logseq1-for-edit1&gt;:&lt;KeyValue-for-edit-c1&gt;
+ *    &lt;logseq2-for-edit2&gt;:&lt;KeyValue-for-edit-c2&gt;
+ *    &lt;logseq3-for-edit3&gt;:&lt;KeyValue-for-edit-c3&gt;
  *
  * This presents problems because row level atomicity of transactions
  * was not guaranteed. If we crash after few of the above appends make
@@ -68,15 +68,15 @@ import com.google.common.annotations.VisibleForTesting;
  * In the new world, all the edits for a given transaction are written
  * out as a single record, for example:
  *
- *   <logseq#-for-entire-txn>:<WALEdit-for-entire-txn>
+ *   &lt;logseq#-for-entire-txn&gt;:&lt;WALEdit-for-entire-txn&gt;
  *
  * where, the WALEdit is serialized as:
- *   <-1, # of edits, <KeyValue>, <KeyValue>, ... >
+ *   &lt;-1, # of edits, &lt;KeyValue&gt;, &lt;KeyValue&gt;, ... &gt;
  * For example:
- *   <-1, 3, <Keyvalue-for-edit-c1>, <KeyValue-for-edit-c2>, <KeyValue-for-edit-c3>>
+ *   &lt;-1, 3, &lt;Keyvalue-for-edit-c1&gt;, &lt;KeyValue-for-edit-c2&gt;, &lt;KeyValue-for-edit-c3&gt;&gt;
  *
  * The -1 marker is just a special way of being backward compatible with
- * an old WAL which would have contained a single <KeyValue>.
+ * an old WAL which would have contained a single &lt;KeyValue&gt;.
  *
  * The deserializer for WALEdit backward compatibly detects if the record
  * is an old style KeyValue or the new style WALEdit.

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
index 59a1b43..1314a4d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
@@ -53,9 +53,9 @@ import com.google.protobuf.ServiceException;
 
 /**
  * This class is responsible for replaying the edits coming from a failed region server.
- * <p/>
+ * <p>
  * This class uses the native HBase client in order to replay WAL entries.
- * <p/>
+ * </p>
  */
 @InterfaceAudience.Private
 public class WALEditsReplaySink {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
index de82b7e..27f019a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
@@ -160,7 +160,6 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
    * Get a list of all the addresses of all the region servers
    * for this peer cluster
    * @return list of addresses
-   * @throws KeeperException
    */
   // Synchronize peer cluster connection attempts to avoid races and rate
   // limit connections when multiple replication sources try to connect to

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index 735ad55..62b7963 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -46,9 +46,10 @@ import org.apache.hadoop.ipc.RemoteException;
  * For the slave cluster it selects a random number of peers
  * using a replication ratio. For example, if replication ration = 0.1
  * and slave cluster has 100 region servers, 10 will be selected.
- * <p/>
+ * <p>
  * A stream is considered down when we cannot contact a region server on the
  * peer cluster for more than 55 seconds by default.
+ * </p>
  */
 @InterfaceAudience.Private
 public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoint {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index 3276418..7d47677 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -53,16 +53,17 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 
 /**
+ * <p>
  * This class is responsible for replicating the edits coming
  * from another cluster.
- * <p/>
+ * </p><p>
  * This replication process is currently waiting for the edits to be applied
  * before the method can return. This means that the replication of edits
  * is synchronized (after reading from WALs in ReplicationSource) and that a
  * single region server cannot receive edits from two sources at the same time
- * <p/>
+ * </p><p>
  * This class uses the native HBase client in order to replicate entries.
- * <p/>
+ * </p>
  *
  * TODO make this class more like ReplicationSource wrt log handling
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index f7230ab..3f23837 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -66,10 +66,10 @@ import com.google.common.util.concurrent.Service;
  * For each slave cluster it selects a random number of peers
  * using a replication ratio. For example, if replication ration = 0.1
  * and slave cluster has 100 region servers, 10 will be selected.
- * <p/>
+ * <p>
  * A stream is considered down when we cannot contact a region server on the
  * peer cluster for more than 55 seconds by default.
- * <p/>
+ * </p>
  *
  */
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 4d97257..0c8f6f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -62,9 +62,11 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 /**
  * This class is responsible to manage all the replication
  * sources. There are two classes of sources:
+ * <ul>
  * <li> Normal sources are persistent and one per peer cluster</li>
  * <li> Old sources are recovered from a failed region server and our
  * only goal is to finish replicating the WAL queue it had up in ZK</li>
+ * </ul>
  *
  * When a region server dies, this class uses a watcher to get notified and it
  * tries to grab a lock in order to transfer all the queues in a local

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java
index 742fbff..c756576 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Per-peer per-node throttling controller for replication: enabled if
- * bandwidth > 0, a cycle = 100ms, by throttling we guarantee data pushed
+ * bandwidth &gt; 0, a cycle = 100ms, by throttling we guarantee data pushed
  * to peer within each cycle won't exceed 'bandwidth' bytes
  */
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 19252bb..131ff14 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -93,6 +93,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * user,family,qualifier    column qualifier level permissions for a user
  * group,family,qualifier   column qualifier level permissions for a group
  * </pre>
+ * <p>
  * All values are encoded as byte arrays containing the codes from the
  * org.apache.hadoop.hbase.security.access.TablePermission.Action enum.
  * </p>

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 2890557..e2f4105 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -127,6 +127,7 @@ import com.google.protobuf.Service;
  * <p>
  * {@code AccessController} performs authorization checks for HBase operations
  * based on:
+ * </p>
  * <ul>
  *   <li>the identity of the user performing the operation</li>
  *   <li>the scope over which the operation is performed, in increasing
@@ -134,6 +135,7 @@ import com.google.protobuf.Service;
  *   <li>the type of action being performed (as mapped to
  *   {@link Permission.Action} values)</li>
  * </ul>
+ * <p>
  * If the authorization check fails, an {@link AccessDeniedException}
  * will be thrown for the operation.
  * </p>

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
index 92f9d93..774930d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
@@ -120,7 +120,7 @@ public class VisibilityUtils {
 
   /**
    * Reads back from the zookeeper. The data read here is of the form written by
-   * writeToZooKeeper(Map<byte[], Integer> entries).
+   * writeToZooKeeper(Map&lt;byte[], Integer&gt; entries).
    * 
    * @param data
    * @return Labels and their ordinal details

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index cd04b82..2fc5d83 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
@@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
  *
  * <pre>
  * /hbase/.snapshots
- *          /.tmp                <---- working directory
- *          /[snapshot name]     <----- completed snapshot
+ *          /.tmp                &lt;---- working directory
+ *          /[snapshot name]     &lt;----- completed snapshot
  * </pre>
  *
  * A completed snapshot named 'completed' then looks like (multiple regions, servers, files, etc.
@@ -51,16 +51,16 @@ import org.apache.hadoop.hbase.util.FSUtils;
  *
  * <pre>
  * /hbase/.snapshots/completed
- *                   .snapshotinfo          <--- Description of the snapshot
- *                   .tableinfo             <--- Copy of the tableinfo
+ *                   .snapshotinfo          &lt;--- Description of the snapshot
+ *                   .tableinfo             &lt;--- Copy of the tableinfo
  *                    /.logs
  *                        /[server_name]
  *                            /... [log files]
  *                         ...
- *                   /[region name]           <---- All the region's information
- *                   .regioninfo              <---- Copy of the HRegionInfo
+ *                   /[region name]           &lt;---- All the region's information
+ *                   .regioninfo              &lt;---- Copy of the HRegionInfo
  *                      /[column family name]
- *                          /[hfile name]     <--- name of the hfile in the real region
+ *                          /[hfile name]     &lt;--- name of the hfile in the real region
  *                          ...
  *                      ...
  *                    ...

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java
index aa7f503..a83090a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java
@@ -32,7 +32,7 @@ public interface BloomFilterWriter extends BloomFilterBase {
   /** Allocate memory for the bloom filter data. */
   void allocBloom();
 
-  /** Compact the Bloom filter before writing metadata & data to disk. */
+  /** Compact the Bloom filter before writing metadata &amp; data to disk. */
   void compactBloom();
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index d0b1f42..5540569 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -179,8 +179,9 @@ public abstract class FSUtils {
   }
 
   /**
-   * Compare of path component. Does not consider schema; i.e. if schemas different but <code>path
-   * <code> starts with <code>rootPath<code>, then the function returns true
+   * Compare of path component. Does not consider schema; i.e. if schemas
+   * different but <code>path</code> starts with <code>rootPath</code>,
+   * then the function returns true
    * @param rootPath
    * @param path
    * @return True if <code>path</code> starts with <code>rootPath</code>
@@ -1433,7 +1434,7 @@ public abstract class FSUtils {
    * Given a particular table dir, return all the regiondirs inside it, excluding files such as
    * .tableinfo
    * @param fs A file system for the Path
-   * @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir>
+   * @param tableDir Path to a specific table directory &lt;hbase.rootdir&gt;/&lt;tabledir&gt;
    * @return List of paths to valid region directories in table dir.
    * @throws IOException
    */
@@ -1450,7 +1451,7 @@ public abstract class FSUtils {
 
   /**
    * Filter for all dirs that are legal column family names.  This is generally used for colfam
-   * dirs <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>.
+   * dirs &lt;hbase.rootdir&gt;/&lt;tabledir&gt;/&lt;regiondir&gt;/&lt;colfamdir&gt;.
    */
   public static class FamilyDirFilter implements PathFilter {
     final FileSystem fs;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 5244900..406848f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -621,7 +621,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * region servers and the masters.  It makes each region's state in HDFS, in
    * hbase:meta, and deployments consistent.
    *
-   * @return If > 0 , number of errors detected, if < 0 there was an unrecoverable
+   * @return If &gt; 0 , number of errors detected, if &lt; 0 there was an unrecoverable
    * error.  If 0, we have a clean hbase.
    */
   public int onlineConsistencyRepair() throws IOException, KeeperException,

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
index faced06..7f74d55 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
@@ -60,10 +60,12 @@ import org.apache.hadoop.util.ToolRunner;
  * have such files.
  * <p>
  * To print the help section of the tool:
+ * </p>
  * <ul>
- * <li>./bin/hbase org.apache.hadoop.hbase.util.HFileV1Detector --h or,
- * <li>java -cp `hbase classpath` org.apache.hadoop.hbase.util.HFileV1Detector --h
+ * <li>./bin/hbase org.apache.hadoop.hbase.util.HFileV1Detector --h or,</li>
+ * <li>java -cp `hbase classpath` org.apache.hadoop.hbase.util.HFileV1Detector --h</li>
  * </ul>
+ * <p>
  * It also supports -h, --help, -help options.
  * </p>
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
index 81678aa..3ccff4e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
@@ -111,7 +111,6 @@ public class MultiHConnection {
    * @param results the results array
    * @param callback 
    * @throws IOException
-   * @throws InterruptedException
    */
   @SuppressWarnings("deprecation")
   public <R> void processBatchCallback(List<? extends Row> actions, TableName tableName,

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 3a08750..ea704f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -85,7 +85,7 @@ import com.google.common.collect.Sets;
  * <b>Answer:</b> Automatic splitting is determined by the configuration value
  * <i>HConstants.HREGION_MAX_FILESIZE</i>. It is not recommended that you set this
  * to Long.MAX_VALUE in case you forget about manual splits. A suggested setting
- * is 100GB, which would result in > 1hr major compactions if reached.
+ * is 100GB, which would result in &gt; 1hr major compactions if reached.
  * <p>
  * <b>Question:</b> Why did the original authors decide to manually split? <br>
  * <b>Answer:</b> Specific workload characteristics of our use case allowed us
@@ -227,7 +227,7 @@ public class RegionSplitter {
     /**
      * @param row
      *          byte array representing a row in HBase
-     * @return String to use for debug & file printing
+     * @return String to use for debug &amp; file printing
      */
     String rowToStr(byte[] row);
 
@@ -254,12 +254,12 @@ public class RegionSplitter {
    * <p>
    * <ul>
    * <li>create a table named 'myTable' with 60 pre-split regions containing 2
-   * column families 'test' & 'rs', assuming the keys are hex-encoded ASCII:
+   * column families 'test' &amp; 'rs', assuming the keys are hex-encoded ASCII:
    * <ul>
    * <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs
    * myTable HexStringSplit
    * </ul>
-   * <li>perform a rolling split of 'myTable' (i.e. 60 => 120 regions), # 2
+   * <li>perform a rolling split of 'myTable' (i.e. 60 =&gt; 120 regions), # 2
    * outstanding splits at a time, assuming keys are uniformly distributed
    * bytes:
    * <ul>
@@ -878,10 +878,10 @@ public class RegionSplitter {
    * boundaries. The format of a HexStringSplit region boundary is the ASCII
    * representation of an MD5 checksum, or any other uniformly distributed
    * hexadecimal value. Row are hex-encoded long values in the range
-   * <b>"00000000" => "FFFFFFFF"</b> and are left-padded with zeros to keep the
+   * <b>"00000000" =&gt; "FFFFFFFF"</b> and are left-padded with zeros to keep the
    * same order lexicographically as if they were binary.
    *
-   * Since this split algorithm uses hex strings as keys, it is easy to read &
+   * Since this split algorithm uses hex strings as keys, it is easy to read &amp;
    * write in the shell but takes up more space and may be non-intuitive.
    */
   public static class HexStringSplit implements SplitAlgorithm {
@@ -1032,7 +1032,7 @@ public class RegionSplitter {
   /**
    * A SplitAlgorithm that divides the space of possible keys evenly. Useful
    * when the keys are approximately uniform random bytes (e.g. hashes). Rows
-   * are raw byte values in the range <b>00 => FF</b> and are right-padded with
+   * are raw byte values in the range <b>00 =&gt; FF</b> and are right-padded with
    * zeros to keep the same memcmp() order. This is the natural algorithm to use
    * for a byte[] environment and saves space, but is not necessarily the
    * easiest for readability.

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
index 67f8e84..5c61afb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
@@ -44,7 +44,7 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil {
    * Whether asynchronous WAL replication to the secondary region replicas is enabled or not.
    * If this is enabled, a replication peer named "region_replica_replication" will be created
    * which will tail the logs and replicate the mutatations to region replicas for tables that
-   * have region replication > 1. If this is enabled once, disabling this replication also
+   * have region replication &gt; 1. If this is enabled once, disabling this replication also
    * requires disabling the replication peer using shell or ReplicationAdmin java class.
   * Replication to secondary region replicas works over standard inter-cluster replication.
   * So replication, if disabled explicitly, also has to be enabled by setting "hbase.replication"

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
index 621c200..5ac8c11 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
@@ -295,9 +295,9 @@ public class WALKey implements SequenceId, Comparable<WALKey> {
   }
   
   /**
-   * Wait for sequence number is assigned & return the assigned value
+   * Wait for sequence number is assigned &amp; return the assigned value
    * @return long the new assigned sequence number
-   * @throws InterruptedException
+   * @throws IOException
    */
   @Override
   public long getSequenceId() throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
index 376a945..b3e63ac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
@@ -143,11 +143,11 @@ public class ZKSplitLog {
    */
 
   /**
-   * check if /hbase/recovering-regions/<current region encoded name> exists. Returns true if exists
-   * and set watcher as well.
+   * check if /hbase/recovering-regions/&lt;current region encoded name&gt;
+   * exists. Returns true if exists and set watcher as well.
    * @param zkw
    * @param regionEncodedName region encode name
-   * @return true when /hbase/recovering-regions/<current region encoded name> exists
+   * @return true when /hbase/recovering-regions/&lt;current region encoded name&gt; exists
    * @throws KeeperException
    */
   public static boolean
@@ -200,7 +200,7 @@ public class ZKSplitLog {
    * @param zkw
    * @param serverName
    * @param encodedRegionName
-   * @return the last flushed sequence ids recorded in ZK of the region for <code>serverName<code>
+   * @return the last flushed sequence ids recorded in ZK of the region for <code>serverName</code>
    * @throws IOException
    */
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
index b86d130..c97467a 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
@@ -676,7 +676,6 @@ public class ThriftServerRunner implements Runnable {
      *          name of table
      * @return Table object
      * @throws IOException
-     * @throws IOError
      */
     public Table getTable(final byte[] tableName) throws
         IOException {
@@ -718,7 +717,7 @@ public class ThriftServerRunner implements Runnable {
 
     /**
      * Removes the scanner associated with the specified ID from the internal
-     * id->scanner hash-map.
+     * id-&gt;scanner hash-map.
      *
      * @param id
      * @return a Scanner, or null if ID was invalid.

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a0ff335/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
index bb9e58c..db48a62 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
@@ -564,8 +564,6 @@ public class Hbase {
      * 
      * @throws IllegalArgument if ScannerID is invalid
      * 
-     * @throws NotFound when the scanner reaches the end
-     * 
      * @param id id of a scanner returned by scannerOpen
      */
     public List<TRowResult> scannerGet(int id) throws IOError, IllegalArgument, org.apache.thrift.TException;
@@ -580,8 +578,6 @@ public class Hbase {
      * 
      * @throws IllegalArgument if ScannerID is invalid
      * 
-     * @throws NotFound when the scanner reaches the end
-     * 
      * @param id id of a scanner returned by scannerOpen
      * 
      * @param nbRows number of results to return


Mime
View raw message