hbase-commits mailing list archives

From: syuanji...@apache.org
Subject: [29/50] [abbrv] hbase git commit: HBASE-17532 Replaced explicit type with diamond operator
Date: Fri, 10 Mar 2017 22:10:09 GMT
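
The patch below is mechanical: wherever a constructor for a generic type repeated the type arguments already written on the left-hand side, the explicit arguments are replaced with the Java 7 diamond operator (<>) and the compiler infers them. A minimal sketch of the before/after pattern (the names here are illustrative, not taken from the patch; a JDK comparator stands in for Bytes.BYTES_COMPARATOR):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class DiamondExample {
  public static void main(String[] args) {
    // Before: type arguments spelled out on both sides of the assignment.
    Map<String, List<String>> verbose = new HashMap<String, List<String>>();

    // After: the diamond operator infers <String, List<String>> from the
    // declared type of the variable.
    Map<String, List<String>> concise = new HashMap<>();

    // Inference also works when the constructor takes arguments, mirroring
    // the new TreeMap<>(Bytes.BYTES_COMPARATOR) calls in the patch.
    Map<String, Integer> sorted = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);

    verbose.put("cf", new ArrayList<String>());
    concise.put("cf", new ArrayList<>());
    sorted.put("row", 1);
    System.out.println(verbose + " " + concise + " " + sorted);
  }
}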
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index 947b54a..a581ed5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -76,8 +76,7 @@ public class Get extends Query
   private int storeOffset = 0;
   private boolean checkExistenceOnly = false;
   private boolean closestRowBefore = false;
-  private Map<byte [], NavigableSet<byte []>> familyMap =
-    new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
+  private Map<byte [], NavigableSet<byte []>> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 
   /**
    * Create a Get operation for the specified row.
@@ -184,7 +183,7 @@ public class Get extends Query
   public Get addColumn(byte [] family, byte [] qualifier) {
     NavigableSet<byte []> set = familyMap.get(family);
     if(set == null) {
-      set = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
+      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
     }
     if (qualifier == null) {
       qualifier = HConstants.EMPTY_BYTE_ARRAY;
@@ -399,8 +398,8 @@ public class Get extends Query
    */
   @Override
   public Map<String, Object> getFingerprint() {
-    Map<String, Object> map = new HashMap<String, Object>();
-    List<String> families = new ArrayList<String>(this.familyMap.entrySet().size());
+    Map<String, Object> map = new HashMap<>();
+    List<String> families = new ArrayList<>(this.familyMap.entrySet().size());
     map.put("families", families);
     for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
       this.familyMap.entrySet()) {
@@ -422,13 +421,13 @@ public class Get extends Query
     Map<String, Object> map = getFingerprint();
     // replace the fingerprint's simple list of families with a
     // map from column families to lists of qualifiers and kv details
-    Map<String, List<String>> columns = new HashMap<String, List<String>>();
+    Map<String, List<String>> columns = new HashMap<>();
     map.put("families", columns);
     // add scalar information first
     map.put("row", Bytes.toStringBinary(this.row));
     map.put("maxVersions", this.maxVersions);
     map.put("cacheBlocks", this.cacheBlocks);
-    List<Long> timeRange = new ArrayList<Long>(2);
+    List<Long> timeRange = new ArrayList<>(2);
     timeRange.add(this.tr.getMin());
     timeRange.add(this.tr.getMax());
     map.put("timeRange", timeRange);
@@ -436,7 +435,7 @@ public class Get extends Query
     // iterate through affected families and add details
     for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
       this.familyMap.entrySet()) {
-      List<String> familyList = new ArrayList<String>();
+      List<String> familyList = new ArrayList<>();
       columns.put(Bytes.toStringBinary(entry.getKey()), familyList);
       if(entry.getValue() == null) {
         colCount++;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index c68d3bb..5265616 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -618,7 +618,7 @@ public class HBaseAdmin implements Admin {
    */
   @Override
   public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
-    List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
+    List<HTableDescriptor> failed = new LinkedList<>();
     for (HTableDescriptor table : listTables(pattern)) {
       try {
         deleteTable(table.getTableName());
@@ -743,7 +743,7 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
-    List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
+    List<HTableDescriptor> failed = new LinkedList<>();
     for (HTableDescriptor table : listTables(pattern)) {
       if (isTableDisabled(table.getTableName())) {
         try {
@@ -807,7 +807,7 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
-    List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
+    List<HTableDescriptor> failed = new LinkedList<>();
     for (HTableDescriptor table : listTables(pattern)) {
       if (isTableEnabled(table.getTableName())) {
         try {
@@ -1098,8 +1098,7 @@ public class HBaseAdmin implements Admin {
       LOG.info("Table is disabled: " + tableName.getNameAsString());
       return;
     }
-    execProcedure("flush-table-proc", tableName.getNameAsString(),
-      new HashMap<String, String>());
+    execProcedure("flush-table-proc", tableName.getNameAsString(), new HashMap<>());
   }
 
   @Override
@@ -1796,8 +1795,7 @@ public class HBaseAdmin implements Admin {
     Pair<HRegionInfo, ServerName> pair =
       MetaTableAccessor.getRegion(connection, regionName);
     if (pair == null) {
-      final AtomicReference<Pair<HRegionInfo, ServerName>> result =
-        new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
+      final AtomicReference<Pair<HRegionInfo, ServerName>> result = new AtomicReference<>(null);
       final String encodedName = Bytes.toString(regionName);
       MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
         @Override
@@ -1820,7 +1818,7 @@ public class HBaseAdmin implements Admin {
             }
           }
           if (!matched) return true;
-          result.set(new Pair<HRegionInfo, ServerName>(info, sn));
+          result.set(new Pair<>(info, sn));
           return false; // found the region, stop
         }
       };
@@ -1954,7 +1952,7 @@ public class HBaseAdmin implements Admin {
     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
     HBaseRpcController controller = rpcControllerFactory.newController();
     List<RegionLoad> regionLoads = ProtobufUtil.getRegionLoad(controller, admin, tableName);
-    Map<byte[], RegionLoad> resultMap = new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
+    Map<byte[], RegionLoad> resultMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     for (RegionLoad regionLoad : regionLoads) {
       resultMap.put(regionLoad.getName(), regionLoad);
     }
@@ -2279,7 +2277,7 @@ public class HBaseAdmin implements Admin {
    */
   private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
       throws IOException {
-    List<TableName> tableNames = new ArrayList<TableName>(1);
+    List<TableName> tableNames = new ArrayList<>(1);
     tableNames.add(tableName);
 
     HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
@@ -2295,7 +2293,7 @@ public class HBaseAdmin implements Admin {
   @Override
   public HTableDescriptor[] getTableDescriptors(List<String> names)
   throws IOException {
-    List<TableName> tableNames = new ArrayList<TableName>(names.size());
+    List<TableName> tableNames = new ArrayList<>(names.size());
     for(String name : names) {
       tableNames.add(TableName.valueOf(name));
     }
@@ -2829,7 +2827,7 @@ public class HBaseAdmin implements Admin {
             .getCompletedSnapshots(getRpcController(),
                 GetCompletedSnapshotsRequest.newBuilder().build())
             .getSnapshotsList();
-        List<SnapshotDescription> result = new ArrayList<SnapshotDescription>(snapshotsList.size());
+        List<SnapshotDescription> result = new ArrayList<>(snapshotsList.size());
         for (HBaseProtos.SnapshotDescription snapshot : snapshotsList) {
           result.add(ProtobufUtil.createSnapshotDesc(snapshot));
         }
@@ -2845,7 +2843,7 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
-    List<SnapshotDescription> matched = new LinkedList<SnapshotDescription>();
+    List<SnapshotDescription> matched = new LinkedList<>();
     List<SnapshotDescription> snapshots = listSnapshots();
     for (SnapshotDescription snapshot : snapshots) {
       if (pattern.matcher(snapshot.getName()).matches()) {
@@ -2866,7 +2864,7 @@ public class HBaseAdmin implements Admin {
       Pattern snapshotNamePattern) throws IOException {
     TableName[] tableNames = listTableNames(tableNamePattern);
 
-    List<SnapshotDescription> tableSnapshots = new LinkedList<SnapshotDescription>();
+    List<SnapshotDescription> tableSnapshots = new LinkedList<>();
     List<SnapshotDescription> snapshots = listSnapshots(snapshotNamePattern);
 
     List<TableName> listOfTableNames = Arrays.asList(tableNames);
@@ -3985,7 +3983,7 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public void drainRegionServers(List<ServerName> servers) throws IOException {
-    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>(servers.size());
+    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
     for (ServerName server : servers) {
       // Parse to ServerName to do simple validation.
       ServerName.parseServerName(server.toString());
@@ -4010,7 +4008,7 @@ public class HBaseAdmin implements Admin {
       @Override
       public List<ServerName> rpcCall() throws ServiceException {
         ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-        List<ServerName> servers = new ArrayList<ServerName>();
+        List<ServerName> servers = new ArrayList<>();
         for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
             .getServerNameList()) {
           servers.add(ProtobufUtil.toServerName(server));
@@ -4022,7 +4020,7 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>(servers.size());
+    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
     for (ServerName server : servers) {
       pbServers.add(ProtobufUtil.toServerName(server));
     }
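
One detail in the HBaseAdmin hunks above: in the flush call the diamond appears in argument position (execProcedure(..., new HashMap<>())) rather than in an assignment, so the compiler infers the type arguments from the method's declared parameter type. A hedged sketch of that inference follows; the signature below is invented for illustration, not the real Admin API:

import java.util.HashMap;
import java.util.Map;

public class ArgumentInference {
  // Stand-in for a procedure call that takes Map<String, String> properties.
  static void execProcedure(String signature, String instance, Map<String, String> props) {
    System.out.println(signature + "/" + instance + " props=" + props);
  }

  public static void main(String[] args) {
    // new HashMap<>() is inferred as HashMap<String, String> here, because
    // the target type comes from the parameter declaration.
    execProcedure("flush-table-proc", "someTable", new HashMap<>());
  }
}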

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java
index 4d2311d..f2c5746 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java
@@ -142,7 +142,7 @@ public class HRegionLocator implements RegionLocator {
 
   @VisibleForTesting
   List<RegionLocations> listRegionLocations() throws IOException {
-    final List<RegionLocations> regions = new ArrayList<RegionLocations>();
+    final List<RegionLocations> regions = new ArrayList<>();
     MetaTableAccessor.Visitor visitor = new MetaTableAccessor.TableVisitorBase(tableName) {
       @Override
       public boolean visitInternal(Result result) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 0c383fc..3bdbed5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -143,7 +143,7 @@ public class HTable implements Table {
     // we only create as many Runnables as there are region servers. It means
     // it also scales when new region servers are added.
     ThreadPoolExecutor pool = new ThreadPoolExecutor(corePoolSize, maxThreads, keepAliveTime,
-      TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("htable"));
+      TimeUnit.SECONDS, new SynchronousQueue<>(), Threads.newDaemonThreadFactory("htable"));
     pool.allowCoreThreadTimeOut(true);
     return pool;
   }
@@ -309,8 +309,8 @@ public class HTable implements Table {
         "Invalid range: " + Bytes.toStringBinary(startKey) +
         " > " + Bytes.toStringBinary(endKey));
     }
-    List<byte[]> keysInRange = new ArrayList<byte[]>();
-    List<HRegionLocation> regionsInRange = new ArrayList<HRegionLocation>();
+    List<byte[]> keysInRange = new ArrayList<>();
+    List<HRegionLocation> regionsInRange = new ArrayList<>();
     byte[] currentKey = startKey;
     do {
       HRegionLocation regionLocation = getRegionLocator().getRegionLocation(currentKey, reload);
@@ -320,8 +320,7 @@ public class HTable implements Table {
     } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW)
         && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0
             || (includeEndKey && Bytes.compareTo(currentKey, endKey) == 0)));
-    return new Pair<List<byte[]>, List<HRegionLocation>>(keysInRange,
-        regionsInRange);
+    return new Pair<>(keysInRange, regionsInRange);
   }
 
   /**
@@ -915,7 +914,7 @@ public class HTable implements Table {
     if (gets.isEmpty()) return new boolean[]{};
     if (gets.size() == 1) return new boolean[]{exists(gets.get(0))};
 
-    ArrayList<Get> exists = new ArrayList<Get>(gets.size());
+    ArrayList<Get> exists = new ArrayList<>(gets.size());
     for (Get g: gets){
       Get ge = new Get(g);
       ge.setCheckExistenceOnly(true);
@@ -1099,8 +1098,7 @@ public class HTable implements Table {
       final Batch.Callback<R> callback) throws ServiceException, Throwable {
     // get regions covered by the row range
     List<byte[]> keys = getStartKeysInRange(startKey, endKey);
-    Map<byte[],Future<R>> futures =
-        new TreeMap<byte[],Future<R>>(Bytes.BYTES_COMPARATOR);
+    Map<byte[],Future<R>> futures = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     for (final byte[] r : keys) {
       final RegionCoprocessorRpcChannel channel =
           new RegionCoprocessorRpcChannel(connection, tableName, r);
@@ -1245,10 +1243,8 @@ public class HTable implements Table {
       return;
     }
 
-    List<RegionCoprocessorServiceExec> execs =
-        new ArrayList<RegionCoprocessorServiceExec>(keys.size());
-    final Map<byte[], RegionCoprocessorServiceExec> execsByRow =
-        new TreeMap<byte[], RegionCoprocessorServiceExec>(Bytes.BYTES_COMPARATOR);
+    List<RegionCoprocessorServiceExec> execs = new ArrayList<>(keys.size());
+    final Map<byte[], RegionCoprocessorServiceExec> execsByRow = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     for (int i = 0; i < keys.size(); i++) {
       final byte[] rowKey = keys.get(i);
       final byte[] region = regions.get(i).getRegionInfo().getRegionName();
@@ -1260,9 +1256,9 @@ public class HTable implements Table {
 
     // tracking for any possible deserialization errors on success callback
     // TODO: it would be better to be able to reuse AsyncProcess.BatchErrors here
-    final List<Throwable> callbackErrorExceptions = new ArrayList<Throwable>();
-    final List<Row> callbackErrorActions = new ArrayList<Row>();
-    final List<String> callbackErrorServers = new ArrayList<String>();
+    final List<Throwable> callbackErrorExceptions = new ArrayList<>();
+    final List<Row> callbackErrorActions = new ArrayList<>();
+    final List<String> callbackErrorServers = new ArrayList<>();
     Object[] results = new Object[execs.size()];
 
     AsyncProcess asyncProcess =
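
Aside from the diamond cleanup, the HTable pool hunk above documents a sizing pattern worth spelling out: a ThreadPoolExecutor fed by a SynchronousQueue holds no queued tasks, so each submission either hands work to an idle thread or spawns a new one up to the maximum, and allowCoreThreadTimeOut(true) lets an idle pool shrink back to zero threads. A self-contained sketch of the same construction (the pool parameters are illustrative, and the daemon thread factory from the patch is omitted):

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class OnDemandPool {
  public static void main(String[] args) throws InterruptedException {
    // SynchronousQueue buffers nothing: execute() either finds an idle thread
    // or forces the pool to create one, up to the maximum of 8.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, 8, 60L, TimeUnit.SECONDS, new SynchronousQueue<>());
    // Allow even core threads to die after the keep-alive, so the pool
    // consumes no threads while idle.
    pool.allowCoreThreadTimeOut(true);

    for (int i = 0; i < 4; i++) {
      final int id = i;
      pool.execute(() -> System.out.println("task " + id + " on "
          + Thread.currentThread().getName()));
    }
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}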

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
index 27393ba..f3a58ad 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
@@ -169,7 +169,7 @@ public class HTableMultiplexer {
 
         // Create the failed puts list if necessary
         if (failedPuts == null) {
-          failedPuts = new ArrayList<Put>();
+          failedPuts = new ArrayList<>();
         }
         // Add the put to the failed puts list
         failedPuts.add(put);
@@ -288,10 +288,10 @@ public class HTableMultiplexer {
       this.totalFailedPutCounter = 0;
       this.maxLatency = 0;
       this.overallAverageLatency = 0;
-      this.serverToBufferedCounterMap = new HashMap<String, Long>();
-      this.serverToFailedCounterMap = new HashMap<String, Long>();
-      this.serverToAverageLatencyMap = new HashMap<String, Long>();
-      this.serverToMaxLatencyMap = new HashMap<String, Long>();
+      this.serverToBufferedCounterMap = new HashMap<>();
+      this.serverToFailedCounterMap = new HashMap<>();
+      this.serverToAverageLatencyMap = new HashMap<>();
+      this.serverToMaxLatencyMap = new HashMap<>();
       this.initialize(serverToFlushWorkerMap);
     }
 
@@ -412,7 +412,7 @@ public class HTableMultiplexer {
     }
 
     public synchronized SimpleEntry<Long, Integer> getComponents() {
-      return new SimpleEntry<Long, Integer>(sum, count);
+      return new SimpleEntry<>(sum, count);
     }
 
     public synchronized void reset() {
@@ -614,7 +614,7 @@ public class HTableMultiplexer {
               failedCount--;
             } else {
               if (failed == null) {
-                failed = new ArrayList<PutStatus>();
+                failed = new ArrayList<>();
               }
               failed.add(processingList.get(i));
             }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
index 9538361..eb1cbc5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
@@ -204,10 +204,9 @@ public class Increment extends Mutation implements Comparable<Row> {
    */
   public Map<byte[], NavigableMap<byte [], Long>> getFamilyMapOfLongs() {
     NavigableMap<byte[], List<Cell>> map = super.getFamilyCellMap();
-    Map<byte [], NavigableMap<byte[], Long>> results =
-      new TreeMap<byte[], NavigableMap<byte [], Long>>(Bytes.BYTES_COMPARATOR);
+    Map<byte [], NavigableMap<byte[], Long>> results = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     for (Map.Entry<byte [], List<Cell>> entry: map.entrySet()) {
-      NavigableMap<byte [], Long> longs = new TreeMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
+      NavigableMap<byte [], Long> longs = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       for (Cell cell: entry.getValue()) {
         longs.put(CellUtil.cloneQualifier(cell),
             Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
index 64b1661..ea64900 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
@@ -193,7 +193,7 @@ public class MetricsConnection implements StatisticTrackable {
 
   @VisibleForTesting
   protected ConcurrentHashMap<ServerName, ConcurrentMap<byte[], RegionStats>> serverStats
-          = new ConcurrentHashMap<ServerName, ConcurrentMap<byte[], RegionStats>>();
+          = new ConcurrentHashMap<>();
 
   public void updateServerStats(ServerName serverName, byte[] regionName,
                                 Object r) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
index dc4ec62..a4aa71d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
@@ -82,7 +82,7 @@ public final class MultiAction {
   public void add(byte[] regionName, List<Action> actionList){
     List<Action> rsActions = actions.get(regionName);
     if (rsActions == null) {
-      rsActions = new ArrayList<Action>(actionList.size());
+      rsActions = new ArrayList<>(actionList.size());
       actions.put(regionName, rsActions);
     }
     rsActions.addAll(actionList);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java
index 937e1b5..7d6744f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java
@@ -41,7 +41,7 @@ public class MultiResponse extends AbstractResponse {
    * It's a part of the protobuf definition.
    */
   private Map<byte[], Throwable> exceptions =
-      new TreeMap<byte[], Throwable>(Bytes.BYTES_COMPARATOR);
+      new TreeMap<>(Bytes.BYTES_COMPARATOR);
 
   public MultiResponse() {
     super();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index c4adf34..38a1950 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -107,7 +107,7 @@ class MultiServerCallable extends CancellableRegionServerCallable<MultiResponse>
           HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME, regionName));
       if (this.cellBlock) {
         // Pre-size. Presume at least a KV per Action.  There are likely more.
-        if (cells == null) cells = new ArrayList<CellScannable>(countOfActions);
+        if (cells == null) cells = new ArrayList<>(countOfActions);
         // Send data in cellblocks. The call to buildNoDataMultiRequest will skip RowMutations.
         // They have already been handled above. Guess at count of cells
         regionActionBuilder = RequestConverter.buildNoDataRegionAction(regionName, actions, cells,

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 53631d9..fb55fdd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -92,8 +92,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
   protected Durability durability = Durability.USE_DEFAULT;
 
   // A Map sorted by column family.
-  protected NavigableMap<byte [], List<Cell>> familyMap =
-    new TreeMap<byte [], List<Cell>>(Bytes.BYTES_COMPARATOR);
+  protected NavigableMap<byte [], List<Cell>> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 
   @Override
   public CellScanner cellScanner() {
@@ -110,7 +109,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
   List<Cell> getCellList(byte[] family) {
     List<Cell> list = this.familyMap.get(family);
     if (list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<>();
     }
     return list;
   }
@@ -158,8 +157,8 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
    */
   @Override
   public Map<String, Object> getFingerprint() {
-    Map<String, Object> map = new HashMap<String, Object>();
-    List<String> families = new ArrayList<String>(this.familyMap.entrySet().size());
+    Map<String, Object> map = new HashMap<>();
+    List<String> families = new ArrayList<>(this.familyMap.entrySet().size());
     // ideally, we would also include table information, but that information
     // is not stored in each Operation instance.
     map.put("families", families);
@@ -182,15 +181,14 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
     Map<String, Object> map = getFingerprint();
     // replace the fingerprint's simple list of families with a
     // map from column families to lists of qualifiers and kv details
-    Map<String, List<Map<String, Object>>> columns =
-      new HashMap<String, List<Map<String, Object>>>();
+    Map<String, List<Map<String, Object>>> columns = new HashMap<>();
     map.put("families", columns);
     map.put("row", Bytes.toStringBinary(this.row));
     int colCount = 0;
     // iterate through all column families affected
     for (Map.Entry<byte [], List<Cell>> entry : this.familyMap.entrySet()) {
       // map from this family to details for each cell affected within the family
-      List<Map<String, Object>> qualifierDetails = new ArrayList<Map<String, Object>>();
+      List<Map<String, Object>> qualifierDetails = new ArrayList<>();
       columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails);
       colCount += entry.getValue().size();
       if (maxCols <= 0) {
@@ -220,14 +218,14 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
   }
 
   private static Map<String, Object> cellToStringMap(Cell c) {
-    Map<String, Object> stringMap = new HashMap<String, Object>();
+    Map<String, Object> stringMap = new HashMap<>();
     stringMap.put("qualifier", Bytes.toStringBinary(c.getQualifierArray(), c.getQualifierOffset(),
                 c.getQualifierLength()));
     stringMap.put("timestamp", c.getTimestamp());
     stringMap.put("vlen", c.getValueLength());
     List<Tag> tags = CellUtil.getTags(c);
     if (tags != null) {
-      List<String> tagsString = new ArrayList<String>(tags.size());
+      List<String> tagsString = new ArrayList<>(tags.size());
       for (Tag t : tags) {
         tagsString.add((t.getType()) + ":" + Bytes.toStringBinary(TagUtil.cloneValue(t)));
       }
@@ -317,7 +315,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
    * @return the set of clusterIds that have consumed the mutation
    */
   public List<UUID> getClusterIds() {
-    List<UUID> clusterIds = new ArrayList<UUID>();
+    List<UUID> clusterIds = new ArrayList<>();
     byte[] bytes = getAttribute(CONSUMED_CLUSTER_IDS);
     if(bytes != null) {
       ByteArrayDataInput in = ByteStreams.newDataInput(bytes);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
index 9fdd577..cc863b9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
@@ -44,7 +44,7 @@ public abstract class OperationWithAttributes extends Operation implements Attri
     }
 
     if (attributes == null) {
-      attributes = new HashMap<String, byte[]>();
+      attributes = new HashMap<>();
     }
 
     if (value == null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
index 448e5b1..a29a662 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
@@ -75,8 +75,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
 
   // Keeps track of failures when we cannot talk to a server. Helps in
   // fast failing clients if the server is down for a long time.
-  protected final ConcurrentMap<ServerName, FailureInfo> repeatedFailuresMap =
-      new ConcurrentHashMap<ServerName, FailureInfo>();
+  protected final ConcurrentMap<ServerName, FailureInfo> repeatedFailuresMap = new ConcurrentHashMap<>();
 
   // We populate repeatedFailuresMap every time there is a failure. So, to
   // keep it from growing unbounded, we garbage collect the failure information
@@ -90,8 +89,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
   // fast fail mode for any reason.
   private long fastFailClearingTimeMilliSec;
 
-  private final ThreadLocal<MutableBoolean> threadRetryingInFastFailMode =
-      new ThreadLocal<MutableBoolean>();
+  private final ThreadLocal<MutableBoolean> threadRetryingInFastFailMode = new ThreadLocal<>();
 
   public PreemptiveFastFailInterceptor(Configuration conf) {
     this.fastFailThresholdMilliSec = conf.getLong(

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
index a6ebd03..701dceb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
@@ -161,9 +161,9 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
    */
   public Put(Put putToCopy) {
     this(putToCopy.getRow(), putToCopy.ts);
-    this.familyMap = new TreeMap<byte [], List<Cell>>(Bytes.BYTES_COMPARATOR);
+    this.familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     for(Map.Entry<byte [], List<Cell>> entry: putToCopy.getFamilyCellMap().entrySet()) {
-      this.familyMap.put(entry.getKey(), new ArrayList<Cell>(entry.getValue()));
+      this.familyMap.put(entry.getKey(), new ArrayList<>(entry.getValue()));
     }
     this.durability = putToCopy.durability;
     for (Map.Entry<String, byte[]> entry : putToCopy.getAttributesMap().entrySet()) {
@@ -464,7 +464,7 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
    * returns an empty list if one doesn't exist for the given family.
    */
   public List<Cell> get(byte[] family, byte[] qualifier) {
-    List<Cell> filteredList = new ArrayList<Cell>();
+    List<Cell> filteredList = new ArrayList<>();
     for (Cell cell: getCellList(family)) {
       if (CellUtil.matchingQualifier(cell, qualifier)) {
         filteredList.add(cell);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index b4c24fe..4752d70 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -94,7 +94,7 @@ public class Result implements CellScannable, CellScanner {
   private transient NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>
       familyMap = null;
 
-  private static ThreadLocal<byte[]> localBuffer = new ThreadLocal<byte[]>();
+  private static ThreadLocal<byte[]> localBuffer = new ThreadLocal<>();
   private static final int PAD_WIDTH = 128;
   public static final Result EMPTY_RESULT = new Result(true);
 
@@ -247,7 +247,7 @@ public class Result implements CellScannable, CellScanner {
    * did not exist in the result set
    */
   public List<Cell> getColumnCells(byte [] family, byte [] qualifier) {
-    List<Cell> result = new ArrayList<Cell>();
+    List<Cell> result = new ArrayList<>();
 
     Cell [] kvs = rawCells();
 
@@ -662,12 +662,10 @@ public class Result implements CellScannable, CellScanner {
     if(isEmpty()) {
       return null;
     }
-    NavigableMap<byte[], NavigableMap<byte[], byte[]>> returnMap =
-      new TreeMap<byte[], NavigableMap<byte[], byte[]>>(Bytes.BYTES_COMPARATOR);
+    NavigableMap<byte[], NavigableMap<byte[], byte[]>> returnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     for(Map.Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>
       familyEntry : familyMap.entrySet()) {
-      NavigableMap<byte[], byte[]> qualifierMap =
-        new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
+      NavigableMap<byte[], byte[]> qualifierMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       for(Map.Entry<byte[], NavigableMap<Long, byte[]>> qualifierEntry :
         familyEntry.getValue().entrySet()) {
         byte [] value =
@@ -693,8 +691,7 @@ public class Result implements CellScannable, CellScanner {
     if(isEmpty()) {
       return null;
     }
-    NavigableMap<byte[], byte[]> returnMap =
-      new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
+    NavigableMap<byte[], byte[]> returnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     NavigableMap<byte[], NavigableMap<Long, byte[]>> qualifierMap =
       familyMap.get(family);
     if(qualifierMap == null) {
@@ -797,7 +794,7 @@ public class Result implements CellScannable, CellScanner {
    */
   public static Result createCompleteResult(List<Result> partialResults)
       throws IOException {
-    List<Cell> cells = new ArrayList<Cell>();
+    List<Cell> cells = new ArrayList<>();
     boolean stale = false;
     byte[] prevRow = null;
     byte[] currentRow = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
index 2848c9d..50c3d2c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
@@ -167,7 +167,7 @@ public class ResultBoundedCompletionService<V> {
 
 
   public void submit(RetryingCallable<V> task, int callTimeout, int id) {
-    QueueingFuture<V> newFuture = new QueueingFuture<V>(task, callTimeout, id);
+    QueueingFuture<V> newFuture = new QueueingFuture<>(task, callTimeout, id);
     executor.execute(Trace.wrap(newFuture));
     tasks[id] = newFuture;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
index f24e614..8b09222 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
@@ -110,7 +110,7 @@ extends RetriesExhaustedException {
     String s = getDesc(classifyExs(exceptions));
     StringBuilder addrs = new StringBuilder(s);
     addrs.append("servers with issues: ");
-    Set<String> uniqAddr = new HashSet<String>();
+    Set<String> uniqAddr = new HashSet<>();
     uniqAddr.addAll(hostnamePort);
 
     for(String addr : uniqAddr) {
@@ -143,7 +143,7 @@ extends RetriesExhaustedException {
 
 
   public static Map<String, Integer> classifyExs(List<Throwable> ths) {
-    Map<String, Integer> cls = new HashMap<String, Integer>();
+    Map<String, Integer> cls = new HashMap<>();
     for (Throwable t : ths) {
       if (t == null) continue;
       String name = "";

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
index 6e5235b..1d46ab4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
@@ -139,7 +139,7 @@ public class ReversedScannerCallable extends ScannerCallable {
           + Bytes.toStringBinary(startKey) + " > "
           + Bytes.toStringBinary(endKey));
     }
-    List<HRegionLocation> regionList = new ArrayList<HRegionLocation>();
+    List<HRegionLocation> regionList = new ArrayList<>();
     byte[] currentKey = startKey;
     do {
       RegionLocations rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(reload, id,

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java
index cc8c23a..41a514a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java
@@ -84,7 +84,7 @@ public class RpcRetryingCallerFactory {
   public <T> RpcRetryingCaller<T> newCaller(int rpcTimeout) {
     // We store the values in the factory instance. This way, constructing new objects
     //  is cheap as it does not require parsing a complex structure.
-    RpcRetryingCaller<T> caller = new RpcRetryingCallerImpl<T>(pause, pauseForCQTBE, retries,
+    RpcRetryingCaller<T> caller = new RpcRetryingCallerImpl<>(pause, pauseForCQTBE, retries,
         interceptor, startLogErrorsCnt, rpcTimeout);
     return caller;
   }
@@ -95,7 +95,7 @@ public class RpcRetryingCallerFactory {
   public <T> RpcRetryingCaller<T> newCaller() {
     // We store the values in the factory instance. This way, constructing new objects
     //  is cheap as it does not require parsing a complex structure.
-    RpcRetryingCaller<T> caller = new RpcRetryingCallerImpl<T>(pause, pauseForCQTBE, retries,
+    RpcRetryingCaller<T> caller = new RpcRetryingCallerImpl<>(pause, pauseForCQTBE, retries,
         interceptor, startLogErrorsCnt, rpcTimeout);
     return caller;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java
index 6450adf..3f65e6e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java
@@ -94,8 +94,7 @@ public class RpcRetryingCallerImpl<T> implements RpcRetryingCaller<T> {
   @Override
   public T callWithRetries(RetryingCallable<T> callable, int callTimeout)
   throws IOException, RuntimeException {
-    List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions =
-      new ArrayList<RetriesExhaustedException.ThrowableWithExtraContext>();
+    List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions = new ArrayList<>();
     tracker.start();
     context.clear();
     for (int tries = 0;; tries++) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index 316fad1..0050269 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -173,7 +173,7 @@ public class RpcRetryingCallerWithReadReplicas {
     RegionLocations rl = getRegionLocations(true, (isTargetReplicaSpecified ? get.getReplicaId()
         : RegionReplicaUtil.DEFAULT_REPLICA_ID), cConnection, tableName, get.getRow());
    final ResultBoundedCompletionService<Result> cs =
-        new ResultBoundedCompletionService<Result>(this.rpcRetryingCallerFactory, pool, rl.size());
+        new ResultBoundedCompletionService<>(this.rpcRetryingCallerFactory, pool, rl.size());
     int startIndex = 0;
     int endIndex = rl.size();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 0f9a9af..a7d81af 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -143,8 +143,7 @@ public class Scan extends Query {
   private long maxResultSize = -1;
   private boolean cacheBlocks = true;
   private boolean reversed = false;
-  private Map<byte[], NavigableSet<byte[]>> familyMap =
-      new TreeMap<byte[], NavigableSet<byte[]>>(Bytes.BYTES_COMPARATOR);
+  private Map<byte[], NavigableSet<byte[]>> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
   private Boolean asyncPrefetch = null;
 
   /**
@@ -339,7 +338,7 @@ public class Scan extends Query {
   public Scan addColumn(byte [] family, byte [] qualifier) {
     NavigableSet<byte []> set = familyMap.get(family);
     if(set == null) {
-      set = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
+      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
     }
     if (qualifier == null) {
       qualifier = HConstants.EMPTY_BYTE_ARRAY;
@@ -889,8 +888,8 @@ public class Scan extends Query {
    */
   @Override
   public Map<String, Object> getFingerprint() {
-    Map<String, Object> map = new HashMap<String, Object>();
-    List<String> families = new ArrayList<String>();
+    Map<String, Object> map = new HashMap<>();
+    List<String> families = new ArrayList<>();
     if(this.familyMap.isEmpty()) {
       map.put("families", "ALL");
       return map;
@@ -916,8 +915,7 @@ public class Scan extends Query {
     // start with the fingerprint map and build on top of it
     Map<String, Object> map = getFingerprint();
     // map from families to column list replaces fingerprint's list of families
-    Map<String, List<String>> familyColumns =
-      new HashMap<String, List<String>>();
+    Map<String, List<String>> familyColumns = new HashMap<>();
     map.put("families", familyColumns);
     // add scalar information first
     map.put("startRow", Bytes.toStringBinary(this.startRow));
@@ -928,7 +926,7 @@ public class Scan extends Query {
     map.put("maxResultSize", this.maxResultSize);
     map.put("cacheBlocks", this.cacheBlocks);
     map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
-    List<Long> timeRange = new ArrayList<Long>(2);
+    List<Long> timeRange = new ArrayList<>(2);
     timeRange.add(this.tr.getMin());
     timeRange.add(this.tr.getMax());
     map.put("timeRange", timeRange);
@@ -936,7 +934,7 @@ public class Scan extends Query {
     // iterate through affected families and list out up to maxCols columns
     for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
       this.familyMap.entrySet()) {
-      List<String> columns = new ArrayList<String>();
+      List<String> columns = new ArrayList<>();
       familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
       if(entry.getValue() == null) {
         colCount++;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
index 101e8da..6b6acf0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
@@ -69,7 +69,7 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
   private final TableName tableName;
   private Configuration conf;
   private int scannerTimeout;
-  private Set<ScannerCallable> outstandingCallables = new HashSet<ScannerCallable>();
+  private Set<ScannerCallable> outstandingCallables = new HashSet<>();
   private boolean someRPCcancelled = false; //required for testing purposes only
 
   public ScannerCallableWithReplicas(TableName tableName, ClusterConnection cConnection,
@@ -149,7 +149,7 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
     // allocate a bounded-completion pool sized at some multiple of the number of replicas.
     // We want to accommodate some RPCs for redundant replica scans that are still in progress
     ResultBoundedCompletionService<Pair<Result[], ScannerCallable>> cs =
-        new ResultBoundedCompletionService<Pair<Result[], ScannerCallable>>(
+        new ResultBoundedCompletionService<>(
             RpcRetryingCallerFactory.instantiate(ScannerCallableWithReplicas.this.conf), pool,
             rl.size() * 5);
 
@@ -359,7 +359,7 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
         return null;
       }
       Result[] res = this.caller.callWithoutRetries(this.callable, callTimeout);
-      return new Pair<Result[], ScannerCallable>(res, this.callable);
+      return new Pair<>(res, this.callable);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java
index f66e7fc..f78ca41 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java
@@ -35,8 +35,7 @@ import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 @InterfaceAudience.Private
 public class ServerStatisticTracker implements StatisticTrackable {
 
-  private final ConcurrentHashMap<ServerName, ServerStatistics> stats =
-      new ConcurrentHashMap<ServerName, ServerStatistics>();
+  private final ConcurrentHashMap<ServerName, ServerStatistics> stats = new ConcurrentHashMap<>();
 
   @Override
   public void updateRegionStats(ServerName server, byte[] region, RegionLoadStats currentStats) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
index e33e2bc..a953e8c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
@@ -30,8 +30,7 @@ import java.util.TreeMap;
 @InterfaceAudience.Private
 public class ServerStatistics {
 
-  private Map<byte[], RegionStatistics>
-      stats = new TreeMap<byte[], RegionStatistics>(Bytes.BYTES_COMPARATOR);
+  private Map<byte[], RegionStatistics> stats = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 
   /**
    * Good enough attempt. Last writer wins. It doesn't really matter which one gets to update,

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
index 4b3e0ce..7171a94 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
@@ -35,7 +35,7 @@ public class ServerSideScanMetrics {
   /**
    * Hash to hold the String -&gt; Atomic Long mappings for each metric
    */
-  private final Map<String, AtomicLong> counters = new HashMap<String, AtomicLong>();
+  private final Map<String, AtomicLong> counters = new HashMap<>();
 
   /**
    * Create a new counter with the specified name

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 4e74d87..c7f040e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -273,7 +273,7 @@ public class ReplicationAdmin implements Closeable {
   @Deprecated
   public Map<String, ReplicationPeerConfig> listPeerConfigs() throws IOException {
     List<ReplicationPeerDescription> peers = this.admin.listReplicationPeers();
-    Map<String, ReplicationPeerConfig> result = new TreeMap<String, ReplicationPeerConfig>();
+    Map<String, ReplicationPeerConfig> result = new TreeMap<>();
     for (ReplicationPeerDescription peer : peers) {
       result.put(peer.getPeerId(), peer.getPeerConfig());
     }
@@ -343,7 +343,7 @@ public class ReplicationAdmin implements Closeable {
         if (cfs == null || appendCfs == null || appendCfs.isEmpty()) {
           preTableCfs.put(table, null);
         } else {
-          Set<String> cfSet = new HashSet<String>(cfs);
+          Set<String> cfSet = new HashSet<>(cfs);
           cfSet.addAll(appendCfs);
           preTableCfs.put(table, Lists.newArrayList(cfSet));
         }
@@ -400,7 +400,7 @@ public class ReplicationAdmin implements Closeable {
         if (cfs == null && (removeCfs == null || removeCfs.isEmpty())) {
           preTableCfs.remove(table);
         } else if (cfs != null && (removeCfs != null && !removeCfs.isEmpty())) {
-          Set<String> cfSet = new HashSet<String>(cfs);
+          Set<String> cfSet = new HashSet<>(cfs);
           cfSet.removeAll(removeCfs);
           if (cfSet.isEmpty()) {
             preTableCfs.remove(table);
@@ -484,7 +484,7 @@ public class ReplicationAdmin implements Closeable {
         tableCFs.getColumnFamilyMap()
             .forEach(
               (cf, scope) -> {
-                HashMap<String, String> replicationEntry = new HashMap<String, String>();
+                HashMap<String, String> replicationEntry = new HashMap<>();
                 replicationEntry.put(TNAME, table);
                 replicationEntry.put(CFNAME, cf);
                 replicationEntry.put(REPLICATIONTYPE,
@@ -531,7 +531,7 @@ public class ReplicationAdmin implements Closeable {
     if (peers == null || peers.size() <= 0) {
       return null;
     }
-    List<ReplicationPeer> listOfPeers = new ArrayList<ReplicationPeer>(peers.size());
+    List<ReplicationPeer> listOfPeers = new ArrayList<>(peers.size());
     for (Entry<String, ReplicationPeerConfig> peerEntry : peers.entrySet()) {
       String peerId = peerEntry.getKey();
       try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java
index 2965219..2d5539c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java
@@ -215,7 +215,7 @@ public final class ReplicationSerDeHelper {
     if (tableCFs == null || tableCFs.length == 0) {
       return null;
     }
-    Map<TableName, List<String>> tableCFsMap = new HashMap<TableName, List<String>>();
+    Map<TableName, List<String>> tableCFsMap = new HashMap<>();
     for (int i = 0, n = tableCFs.length; i < n; i++) {
       ReplicationProtos.TableCF tableCF = tableCFs[i];
       List<String> families = new ArrayList<>();
@@ -283,7 +283,7 @@ public final class ReplicationSerDeHelper {
     }
     List<ByteString> namespacesList = peer.getNamespacesList();
     if (namespacesList != null && namespacesList.size() != 0) {
-      Set<String> namespaces = new HashSet<String>();
+      Set<String> namespaces = new HashSet<>();
       for (ByteString namespace : namespacesList) {
         namespaces.add(namespace.toStringUtf8());
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
index e74797d..bbc31ec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
@@ -177,7 +177,7 @@ public abstract class CompareFilter extends FilterBase {
                                             " can only be used with EQUAL and NOT_EQUAL");
       }
     }
-    ArrayList<Object> arguments = new ArrayList<Object>(2);
+    ArrayList<Object> arguments = new ArrayList<>(2);
     arguments.add(compareOp);
     arguments.add(comparator);
     return arguments;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java
index 287a090..d82eaec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java
@@ -54,7 +54,7 @@ public class DependentColumnFilter extends CompareFilter {
   protected byte[] columnQualifier;
   protected boolean dropDependentColumn;
 
-  protected Set<Long> stampSet = new HashSet<Long>();
+  protected Set<Long> stampSet = new HashSet<>();
   
   /**
    * Build a dependent column filter with value checking

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
index c10d18c..04eba0c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
@@ -453,7 +453,7 @@ final public class FilterList extends FilterBase {
       throw new DeserializationException(e);
     }
 
-    List<Filter> rowFilters = new ArrayList<Filter>(proto.getFiltersCount());
+    List<Filter> rowFilters = new ArrayList<>(proto.getFiltersCount());
     try {
       List<FilterProtos.Filter> filtersList = proto.getFiltersList();
       int listSize = filtersList.size();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
index 82d6c57..6b202ad 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
@@ -108,7 +108,7 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter {
       throw new DeserializationException(e);
     }
 
-    TreeSet<byte []> qualifiers = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
+    TreeSet<byte []> qualifiers = new TreeSet<>(Bytes.BYTES_COMPARATOR);
     for (ByteString qualifier : proto.getQualifiersList()) {
       qualifiers.add(qualifier.toByteArray());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index 5fc12b9..65c2a61 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -83,7 +83,7 @@ public class FuzzyRowFilter extends FilterBase {
       p = fuzzyKeysData.get(i);
       if (p.getFirst().length != p.getSecond().length) {
         Pair<String, String> readable =
-            new Pair<String, String>(Bytes.toStringBinary(p.getFirst()), Bytes.toStringBinary(p
+            new Pair<>(Bytes.toStringBinary(p.getFirst()), Bytes.toStringBinary(p
                 .getSecond()));
         throw new IllegalArgumentException("Fuzzy pair lengths do not match: " + readable);
       }
@@ -191,8 +191,7 @@ public class FuzzyRowFilter extends FilterBase {
     private boolean initialized = false;
 
     RowTracker() {
-      nextRows =
-          new PriorityQueue<Pair<byte[], Pair<byte[], byte[]>>>(fuzzyKeysData.size(),
+      nextRows = new PriorityQueue<>(fuzzyKeysData.size(),
               new Comparator<Pair<byte[], Pair<byte[], byte[]>>>() {
                 @Override
                 public int compare(Pair<byte[], Pair<byte[], byte[]>> o1,
@@ -239,7 +238,7 @@ public class FuzzyRowFilter extends FilterBase {
           getNextForFuzzyRule(isReversed(), currentCell.getRowArray(), currentCell.getRowOffset(),
             currentCell.getRowLength(), fuzzyData.getFirst(), fuzzyData.getSecond());
       if (nextRowKeyCandidate != null) {
-        nextRows.add(new Pair<byte[], Pair<byte[], byte[]>>(nextRowKeyCandidate, fuzzyData));
+        nextRows.add(new Pair<>(nextRowKeyCandidate, fuzzyData));
       }
     }
 
@@ -278,12 +277,12 @@ public class FuzzyRowFilter extends FilterBase {
       throw new DeserializationException(e);
     }
     int count = proto.getFuzzyKeysDataCount();
-    ArrayList<Pair<byte[], byte[]>> fuzzyKeysData = new ArrayList<Pair<byte[], byte[]>>(count);
+    ArrayList<Pair<byte[], byte[]>> fuzzyKeysData = new ArrayList<>(count);
     for (int i = 0; i < count; ++i) {
       BytesBytesPair current = proto.getFuzzyKeysData(i);
       byte[] keyBytes = current.getFirst().toByteArray();
       byte[] keyMeta = current.getSecond().toByteArray();
-      fuzzyKeysData.add(new Pair<byte[], byte[]>(keyBytes, keyMeta));
+      fuzzyKeysData.add(new Pair<>(keyBytes, keyMeta));
     }
     return new FuzzyRowFilter(fuzzyKeysData);
   }

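One detail worth noticing in the RowTracker hunk above: the PriorityQueue gains the diamond, but the anonymous Comparator keeps its full type arguments. That is a language restriction rather than an oversight: diamond on an anonymous inner class is only accepted from Java 9 (JEP 213) onward, and HBase trunk at this point targets Java 8. A sketch of the distinction (names and element types illustrative):

    import java.util.Comparator;
    import java.util.PriorityQueue;

    public class AnonymousDiamond {
      public static void main(String[] args) {
        // Diamond on the concrete PriorityQueue: legal since Java 7.
        PriorityQueue<String> queue = new PriorityQueue<>(16,
            // The explicit <String> is required on Java 7/8; writing
            // new Comparator<>() here only compiles from Java 9 (JEP 213).
            new Comparator<String>() {
              @Override
              public int compare(String a, String b) {
                return a.compareTo(b);
              }
            });
        queue.add("b");
        queue.add("a");
        System.out.println(queue.poll()); // prints "a"
      }
    }
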
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
index 2cc754a..77fbaf4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
@@ -174,7 +174,7 @@ public class MultiRowRangeFilter extends FilterBase {
     }
     int length = proto.getRowRangeListCount();
     List<FilterProtos.RowRange> rangeProtos = proto.getRowRangeListList();
-    List<RowRange> rangeList = new ArrayList<RowRange>(length);
+    List<RowRange> rangeList = new ArrayList<>(length);
     for (FilterProtos.RowRange rangeProto : rangeProtos) {
       RowRange range = new RowRange(rangeProto.hasStartRow() ? rangeProto.getStartRow()
           .toByteArray() : null, rangeProto.getStartRowInclusive(), rangeProto.hasStopRow() ?
@@ -252,8 +252,8 @@ public class MultiRowRangeFilter extends FilterBase {
     if (ranges.isEmpty()) {
       throw new IllegalArgumentException("No ranges found.");
     }
-    List<RowRange> invalidRanges = new ArrayList<RowRange>();
-    List<RowRange> newRanges = new ArrayList<RowRange>(ranges.size());
+    List<RowRange> invalidRanges = new ArrayList<>();
+    List<RowRange> newRanges = new ArrayList<>(ranges.size());
     Collections.sort(ranges);
     if(ranges.get(0).isValid()) {
       if (ranges.size() == 1) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
index bc26812..12d9ac7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
@@ -164,7 +164,7 @@ public class MultipleColumnPrefixFilter extends FilterBase {
   }
 
   public TreeSet<byte []> createTreeSet() {
-    return new TreeSet<byte []>(new Comparator<Object>() {
+    return new TreeSet<>(new Comparator<Object>() {
         @Override
           public int compare (Object o1, Object o2) {
           if (o1 == null || o2 == null)

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java
index f59ddb5..0823785 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java
@@ -56,7 +56,7 @@ public class ParseFilter {
 
   static {
     // Registers all the filter supported by the Filter Language
-    filterHashMap = new HashMap<String, String>();
+    filterHashMap = new HashMap<>();
     filterHashMap.put("KeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." +
                       "KeyOnlyFilter");
     filterHashMap.put("FirstKeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." +
@@ -95,7 +95,7 @@ public class ParseFilter {
                       "DependentColumnFilter");
 
     // Creates the operatorPrecedenceHashMap
-    operatorPrecedenceHashMap = new HashMap<ByteBuffer, Integer>();
+    operatorPrecedenceHashMap = new HashMap<>();
     operatorPrecedenceHashMap.put(ParseConstants.SKIP_BUFFER, 1);
     operatorPrecedenceHashMap.put(ParseConstants.WHILE_BUFFER, 1);
     operatorPrecedenceHashMap.put(ParseConstants.AND_BUFFER, 2);
@@ -122,9 +122,9 @@ public class ParseFilter {
   public Filter parseFilterString (byte [] filterStringAsByteArray)
     throws CharacterCodingException {
     // stack for the operators and parenthesis
-    Stack <ByteBuffer> operatorStack = new Stack<ByteBuffer>();
+    Stack <ByteBuffer> operatorStack = new Stack<>();
     // stack for the filter objects
-    Stack <Filter> filterStack = new Stack<Filter>();
+    Stack <Filter> filterStack = new Stack<>();
 
     Filter filter = null;
     for (int i=0; i<filterStringAsByteArray.length; i++) {
@@ -309,7 +309,7 @@ public class ParseFilter {
 
     int argumentStartIndex = 0;
     int argumentEndIndex = 0;
-    ArrayList<byte []> filterArguments = new ArrayList<byte []>();
+    ArrayList<byte []> filterArguments = new ArrayList<>();
 
     for (int i = argumentListStartIndex + 1; i<filterStringAsByteArray.length; i++) {
 
@@ -393,7 +393,7 @@ public class ParseFilter {
     if (argumentOnTopOfStack.equals(ParseConstants.OR_BUFFER)) {
       // The top of the stack is an OR
       try {
-        ArrayList<Filter> listOfFilters = new ArrayList<Filter>();
+        ArrayList<Filter> listOfFilters = new ArrayList<>();
         while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.OR_BUFFER)) {
           Filter filter = filterStack.pop();
           listOfFilters.add(0, filter);
@@ -410,7 +410,7 @@ public class ParseFilter {
     } else if (argumentOnTopOfStack.equals(ParseConstants.AND_BUFFER)) {
       // The top of the stack is an AND
       try {
-        ArrayList<Filter> listOfFilters = new ArrayList<Filter>();
+        ArrayList<Filter> listOfFilters = new ArrayList<>();
         while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.AND_BUFFER)) {
           Filter filter = filterStack.pop();
           listOfFilters.add(0, filter);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
index 921b7b4..8c58f91 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
@@ -77,7 +77,7 @@ public class TimestampsFilter extends FilterBase {
       Preconditions.checkArgument(timestamp >= 0, "must be positive %s", timestamp);
     }
     this.canHint = canHint;
-    this.timestamps = new TreeSet<Long>(timestamps);
+    this.timestamps = new TreeSet<>(timestamps);
     init();
   }
 
@@ -85,7 +85,7 @@ public class TimestampsFilter extends FilterBase {
    * @return the list of timestamps
    */
   public List<Long> getTimestamps() {
-    List<Long> list = new ArrayList<Long>(timestamps.size());
+    List<Long> list = new ArrayList<>(timestamps.size());
     list.addAll(timestamps);
     return list;
   }
@@ -157,7 +157,7 @@ public class TimestampsFilter extends FilterBase {
   }
 
   public static Filter createFilterFromArguments(ArrayList<byte []> filterArguments) {
-    ArrayList<Long> timestamps = new ArrayList<Long>(filterArguments.size());
+    ArrayList<Long> timestamps = new ArrayList<>(filterArguments.size());
     for (int i = 0; i<filterArguments.size(); i++) {
       long timestamp = ParseFilter.convertByteArrayToLong(filterArguments.get(i));
       timestamps.add(timestamp);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
index a302d48..e69b42d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
@@ -62,7 +62,7 @@ class NettyRpcDuplexHandler extends ChannelDuplexHandler {
 
   private final CompressionCodec compressor;
 
-  private final Map<Integer, Call> id2Call = new HashMap<Integer, Call>();
+  private final Map<Integer, Call> id2Call = new HashMap<>();
 
   public NettyRpcDuplexHandler(NettyRpcConnection conn, CellBlockBuilder cellBlockBuilder,
       Codec codec, CompressionCodec compressor) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index d935a08..4f68447 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -118,8 +118,7 @@ public final class ProtobufUtil {
   /**
    * Primitive type to class mapping.
    */
-  private final static Map<String, Class<?>>
-    PRIMITIVES = new HashMap<String, Class<?>>();
+  private final static Map<String, Class<?>> PRIMITIVES = new HashMap<>();
 
   /**
    * Many results are simple: no cell, exists true or false. To save on object creations,
@@ -1384,7 +1383,7 @@ public final class ProtobufUtil {
       return proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT;
     }
 
-    List<Cell> cells = new ArrayList<Cell>(values.size());
+    List<Cell> cells = new ArrayList<>(values.size());
     for (CellProtos.Cell c : values) {
       cells.add(toCell(c));
     }
@@ -1418,7 +1417,7 @@ public final class ProtobufUtil {
     List<Cell> cells = null;
     if (proto.hasAssociatedCellCount()) {
       int count = proto.getAssociatedCellCount();
-      cells = new ArrayList<Cell>(count + values.size());
+      cells = new ArrayList<>(count + values.size());
       for (int i = 0; i < count; i++) {
         if (!scanner.advance()) throw new IOException("Failed get " + i + " of " + count);
         cells.add(scanner.current());
@@ -1426,7 +1425,7 @@ public final class ProtobufUtil {
     }
 
     if (!values.isEmpty()){
-      if (cells == null) cells = new ArrayList<Cell>(values.size());
+      if (cells == null) cells = new ArrayList<>(values.size());
       for (CellProtos.Cell c: values) {
         cells.add(toCell(c));
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java
index c3db6ee..309dd9c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.util.Strings;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class QuotaFilter {
-  private Set<QuotaType> types = new HashSet<QuotaType>();
+  private Set<QuotaType> types = new HashSet<>();
   private boolean hasFilters = false;
   private String namespaceRegex;
   private String tableRegex;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
index 37e4a92..fecd2d1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.util.StringUtils;
 public class QuotaRetriever implements Closeable, Iterable<QuotaSettings> {
   private static final Log LOG = LogFactory.getLog(QuotaRetriever.class);
 
-  private final Queue<QuotaSettings> cache = new LinkedList<QuotaSettings>();
+  private final Queue<QuotaSettings> cache = new LinkedList<>();
   private ResultScanner scanner;
   /**
    * Connection to use.

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index a7c49b3..1a8b934 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -84,7 +84,7 @@ public class QuotaSettingsFactory {
 
   private static List<QuotaSettings> fromQuotas(final String userName, final TableName tableName,
       final String namespace, final Quotas quotas) {
-    List<QuotaSettings> settings = new ArrayList<QuotaSettings>();
+    List<QuotaSettings> settings = new ArrayList<>();
     if (quotas.hasThrottle()) {
       settings.addAll(fromThrottle(userName, tableName, namespace, quotas.getThrottle()));
     }
@@ -96,7 +96,7 @@ public class QuotaSettingsFactory {
 
   private static List<QuotaSettings> fromThrottle(final String userName, final TableName tableName,
       final String namespace, final QuotaProtos.Throttle throttle) {
-    List<QuotaSettings> settings = new ArrayList<QuotaSettings>();
+    List<QuotaSettings> settings = new ArrayList<>();
     if (throttle.hasReqNum()) {
       settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
           ThrottleType.REQUEST_NUMBER, throttle.getReqNum()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 790f021..f7cc2dd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -46,8 +46,8 @@ public class ReplicationPeerConfig {
   private long bandwidth = 0;
 
   public ReplicationPeerConfig() {
-    this.peerData = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
-    this.configuration = new HashMap<String, String>(0);
+    this.peerData = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    this.configuration = new HashMap<>(0);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
index 8b13f75..3973be9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
@@ -49,7 +49,7 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase
   private ReplicationPeerConfig peerConfig;
   private final String id;
   private volatile PeerState peerState;
-  private volatile Map<TableName, List<String>> tableCFs = new HashMap<TableName, List<String>>();
+  private volatile Map<TableName, List<String>> tableCFs = new HashMap<>();
   private final Configuration conf;
   private PeerStateTracker peerStateTracker;
   private PeerConfigTracker peerConfigTracker;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index cf5be83..02fe2f1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -87,7 +87,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
       final ReplicationQueuesClient queuesClient, Abortable abortable) {
     super(zk, conf, abortable);
     this.abortable = abortable;
-    this.peerClusters = new ConcurrentHashMap<String, ReplicationPeerZKImpl>();
+    this.peerClusters = new ConcurrentHashMap<>();
     this.queuesClient = queuesClient;
   }
 
@@ -128,7 +128,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
 
       ZKUtil.createWithParents(this.zookeeper, this.peersZNode);
 
-      List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>(2);
+      List<ZKUtilOp> listOfOps = new ArrayList<>(2);
       ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(getPeerNode(id),
         ReplicationSerDeHelper.toByteArray(peerConfig));
       // b/w PeerWatcher and ReplicationZookeeper#add method to create the
@@ -246,7 +246,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
 
   @Override
   public Map<String, ReplicationPeerConfig> getAllPeerConfigs() {
-    Map<String, ReplicationPeerConfig> peers = new TreeMap<String, ReplicationPeerConfig>();
+    Map<String, ReplicationPeerConfig> peers = new TreeMap<>();
     List<String> ids = null;
     try {
       ids = ZKUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode);
@@ -331,10 +331,10 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
       CompoundConfiguration compound = new CompoundConfiguration();
       compound.add(otherConf);
       compound.addStringMap(peerConfig.getConfiguration());
-      return new Pair<ReplicationPeerConfig, Configuration>(peerConfig, compound);
+      return new Pair<>(peerConfig, compound);
     }
 
-    return new Pair<ReplicationPeerConfig, Configuration>(peerConfig, otherConf);
+    return new Pair<>(peerConfig, otherConf);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
index 64eedfb..1403f6d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
@@ -42,7 +42,7 @@ public class ReplicationQueueInfo {
   private final String peerClusterZnode;
   private boolean queueRecovered;
   // List of all the dead region servers that had this queue (if recovered)
-  private List<String> deadRegionServers = new ArrayList<String>();
+  private List<String> deadRegionServers = new ArrayList<>();
 
   /**
    * The passed znode will be either the id of the peer cluster or

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index 484084e..4733706 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -248,7 +248,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
       this.abortable.abort("Failed to get a list of queues for region server: "
           + this.myQueuesZnode, e);
     }
-    return listOfQueues == null ? new ArrayList<String>() : listOfQueues;
+    return listOfQueues == null ? new ArrayList<>() : listOfQueues;
   }
 
   /**
@@ -329,7 +329,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
     }
 
     int size = pairs.size();
-    List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>(size);
+    List<ZKUtilOp> listOfOps = new ArrayList<>(size);
 
     for (int i = 0; i < size; i++) {
       listOfOps.add(ZKUtilOp.createAndFailSilent(
@@ -356,7 +356,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
     }
 
     int size = files.size();
-    List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>(size);
+    List<ZKUtilOp> listOfOps = new ArrayList<>(size);
 
     for (int i = 0; i < size; i++) {
       listOfOps.add(ZKUtilOp.deleteNodeFailSilent(ZKUtil.joinZNode(peerZnode, files.get(i))));

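The first hunk in this file is subtler than it looks: `return listOfQueues == null ? new ArrayList<>() : listOfQueues;` relies on Java 8, where conditional expressions became poly expressions and the diamond can pick its type argument up from the method's declared return type. Under Java 7 the nested diamond would infer ArrayList<Object> and the line would not compile. A minimal sketch of the rule (method name illustrative):

    import java.util.ArrayList;
    import java.util.List;

    public class TernaryDiamond {
      // Java 8+: the conditional is a poly expression, so new ArrayList<>()
      // takes <String> from the declared return type of the method.
      static List<String> orEmpty(List<String> maybeNull) {
        return maybeNull == null ? new ArrayList<>() : maybeNull;
      }

      public static void main(String[] args) {
        System.out.println(orEmpty(null).size()); // prints 0
      }
    }
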
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java
index 61bb041..4606e22 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java
@@ -142,7 +142,7 @@ abstract class ReplicationTableBase {
    */
   private Executor setUpExecutor() {
     ThreadPoolExecutor tempExecutor = new ThreadPoolExecutor(NUM_INITIALIZE_WORKERS,
-        NUM_INITIALIZE_WORKERS, 100, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
+        NUM_INITIALIZE_WORKERS, 100, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());
     ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
     tfb.setNameFormat("ReplicationTableExecutor-%d");
     tfb.setDaemon(true);
@@ -223,7 +223,7 @@ abstract class ReplicationTableBase {
    */
   protected List<String> getListOfReplicators() {
     // scan all of the queues and return a list of all unique OWNER values
-    Set<String> peerServers = new HashSet<String>();
+    Set<String> peerServers = new HashSet<>();
     ResultScanner allQueuesInCluster = null;
     try (Table replicationTable = getOrBlockOnReplicationTable()){
       Scan scan = new Scan();
@@ -240,11 +240,11 @@ abstract class ReplicationTableBase {
         allQueuesInCluster.close();
       }
     }
-    return new ArrayList<String>(peerServers);
+    return new ArrayList<>(peerServers);
   }
 
   protected List<String> getAllQueues(String serverName) {
-    List<String> allQueues = new ArrayList<String>();
+    List<String> allQueues = new ArrayList<>();
     ResultScanner queueScanner = null;
     try {
       queueScanner = getQueuesBelongingToServer(serverName);

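The setUpExecutor hunk above is a similar case: `new LinkedBlockingQueue<>()` appears as a constructor argument, not on the right-hand side of an assignment. Java 8's generalized target-type inference (JEP 101) is what lets the diamond infer <Runnable> from the ThreadPoolExecutor parameter's declared type; Java 7 would have inferred LinkedBlockingQueue<Object> and rejected the call. A small sketch (pool sizes arbitrary):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ArgumentDiamond {
      public static void main(String[] args) throws InterruptedException {
        // Java 8+: the diamond infers <Runnable> because the constructor
        // declares its work-queue parameter as BlockingQueue<Runnable>.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            2, 2, 100, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());
        pool.execute(() -> System.out.println("ran"));
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.SECONDS);
      }
    }
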
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
index 9d182dc..9865d83 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
@@ -45,10 +45,9 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements
   // All about stopping
   private final Stoppable stopper;
   // listeners to be notified
-  private final List<ReplicationListener> listeners =
-      new CopyOnWriteArrayList<ReplicationListener>();
+  private final List<ReplicationListener> listeners = new CopyOnWriteArrayList<>();
   // List of all the other region servers in this cluster
-  private final ArrayList<String> otherRegionServers = new ArrayList<String>();
+  private final ArrayList<String> otherRegionServers = new ArrayList<>();
   private final ReplicationPeers replicationPeers;
 
   public ReplicationTrackerZKImpl(ZooKeeperWatcher zookeeper,
@@ -80,7 +79,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements
 
     List<String> list = null;
     synchronized (otherRegionServers) {
-      list = new ArrayList<String>(otherRegionServers);
+      list = new ArrayList<>(otherRegionServers);
     }
     return list;
   }

